From b8600035768da179adc709814f4b455b844982cc Mon Sep 17 00:00:00 2001 From: Pedram_Mohajer <48964282+pedram-mohajer@users.noreply.github.com> Date: Mon, 27 Nov 2023 12:43:51 -0500 Subject: [PATCH 001/260] Add doctest to is_safe function (#11183) --- backtracking/n_queens.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/backtracking/n_queens.py b/backtracking/n_queens.py index 0f237d95e7c8..2cd8c703fc72 100644 --- a/backtracking/n_queens.py +++ b/backtracking/n_queens.py @@ -24,6 +24,10 @@ def is_safe(board: list[list[int]], row: int, column: int) -> bool: Returns: Boolean Value + >>> is_safe([[0, 0, 0], [0, 0, 0], [0, 0, 0]], 1, 1) + True + >>> is_safe([[1, 0, 0], [0, 0, 0], [0, 0, 0]], 1, 1) + False """ n = len(board) # Size of the board From 0ac97f359f2c4b1a4b96db6a083fac95ca0cfe97 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 27 Nov 2023 19:13:24 +0100 Subject: [PATCH 002/260] [pre-commit.ci] pre-commit autoupdate (#11184) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/pre-commit/mirrors-mypy: v1.7.0 → v1.7.1](https://github.com/pre-commit/mirrors-mypy/compare/v1.7.0...v1.7.1) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- DIRECTORY.md | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9a0f78fdde5a..28f83a638d7b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -51,7 +51,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.7.0 + rev: v1.7.1 hooks: - id: mypy args: diff --git a/DIRECTORY.md b/DIRECTORY.md index 438950325380..ea0ba22bcc13 100644 --- 
a/DIRECTORY.md +++ b/DIRECTORY.md @@ -428,12 +428,16 @@ * [Haversine Distance](geodesy/haversine_distance.py) * [Lamberts Ellipsoidal Distance](geodesy/lamberts_ellipsoidal_distance.py) +## Geometry + * [Geometry](geometry/geometry.py) + ## Graphics * [Bezier Curve](graphics/bezier_curve.py) * [Vector3 For 2D Rendering](graphics/vector3_for_2d_rendering.py) ## Graphs * [A Star](graphs/a_star.py) + * [Ant Colony Optimization Algorithms](graphs/ant_colony_optimization_algorithms.py) * [Articulation Points](graphs/articulation_points.py) * [Basic Graphs](graphs/basic_graphs.py) * [Bellman Ford](graphs/bellman_ford.py) @@ -718,6 +722,7 @@ * [Sock Merchant](maths/sock_merchant.py) * [Softmax](maths/softmax.py) * [Solovay Strassen Primality Test](maths/solovay_strassen_primality_test.py) + * [Spearman Rank Correlation Coefficient](maths/spearman_rank_correlation_coefficient.py) * Special Numbers * [Armstrong Numbers](maths/special_numbers/armstrong_numbers.py) * [Automorphic Number](maths/special_numbers/automorphic_number.py) From 82e539dc8226abe803aa562402cfe9f19ded9e22 Mon Sep 17 00:00:00 2001 From: Pedram_Mohajer <48964282+pedram-mohajer@users.noreply.github.com> Date: Fri, 1 Dec 2023 11:53:47 -0500 Subject: [PATCH 003/260] Create smallestRange.py (#11179) * Create smallestRange.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update smallestRange.py * Update smallestRange.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update smallestRange.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update and rename smallestRange.py to smallestrange.py * Update smallestrange.py * Update smallestrange.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update smallestrange.py * Rename smallestrange.py to smallest_range.py * Update smallest_range.py 
* Update smallest_range.py * Update smallest_range.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- greedy_methods/smallest_range.py | 71 ++++++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) create mode 100644 greedy_methods/smallest_range.py diff --git a/greedy_methods/smallest_range.py b/greedy_methods/smallest_range.py new file mode 100644 index 000000000000..e2b7f8d7e96a --- /dev/null +++ b/greedy_methods/smallest_range.py @@ -0,0 +1,71 @@ +""" +smallest_range function takes a list of sorted integer lists and finds the smallest +range that includes at least one number from each list, using a min heap for efficiency. +""" + +from heapq import heappop, heappush +from sys import maxsize + + +def smallest_range(nums: list[list[int]]) -> list[int]: + """ + Find the smallest range from each list in nums. + + Uses min heap for efficiency. The range includes at least one number from each list. + + Args: + nums: List of k sorted integer lists. + + Returns: + list: Smallest range as a two-element list. + + Examples: + >>> smallest_range([[4, 10, 15, 24, 26], [0, 9, 12, 20], [5, 18, 22, 30]]) + [20, 24] + >>> smallest_range([[1, 2, 3], [1, 2, 3], [1, 2, 3]]) + [1, 1] + >>> smallest_range(((1, 2, 3), (1, 2, 3), (1, 2, 3))) + [1, 1] + >>> smallest_range(((-3, -2, -1), (0, 0, 0), (1, 2, 3))) + [-1, 1] + >>> smallest_range([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + [3, 7] + >>> smallest_range([[0, 0, 0], [0, 0, 0], [0, 0, 0]]) + [0, 0] + >>> smallest_range([[], [], []]) + Traceback (most recent call last): + ... 
+ IndexError: list index out of range + """ + + min_heap: list[tuple[int, int, int]] = [] + current_max = -maxsize - 1 + + for i, items in enumerate(nums): + heappush(min_heap, (items[0], i, 0)) + current_max = max(current_max, items[0]) + + # Initialize smallest_range with large integer values + smallest_range = [-maxsize - 1, maxsize] + + while min_heap: + current_min, list_index, element_index = heappop(min_heap) + + if current_max - current_min < smallest_range[1] - smallest_range[0]: + smallest_range = [current_min, current_max] + + if element_index == len(nums[list_index]) - 1: + break + + next_element = nums[list_index][element_index + 1] + heappush(min_heap, (next_element, list_index, element_index + 1)) + current_max = max(current_max, next_element) + + return smallest_range + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + print(f"{smallest_range([[1, 2, 3], [1, 2, 3], [1, 2, 3]])}") # Output: [1, 1] From a73f37b2ecf29aeee1b0417ac53016f5ad0fbeee Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 4 Dec 2023 20:00:34 +0100 Subject: [PATCH 004/260] [pre-commit.ci] pre-commit autoupdate (#11195) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/tox-dev/pyproject-fmt: 1.5.1 → 1.5.3](https://github.com/tox-dev/pyproject-fmt/compare/1.5.1...1.5.3) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- DIRECTORY.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 28f83a638d7b..5ec7a5765817 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -33,7 +33,7 @@ repos: - tomli - repo: 
https://github.com/tox-dev/pyproject-fmt - rev: "1.5.1" + rev: "1.5.3" hooks: - id: pyproject-fmt diff --git a/DIRECTORY.md b/DIRECTORY.md index ea0ba22bcc13..2ee72df37f3f 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -507,6 +507,7 @@ * [Minimum Coin Change](greedy_methods/minimum_coin_change.py) * [Minimum Waiting Time](greedy_methods/minimum_waiting_time.py) * [Optimal Merge Pattern](greedy_methods/optimal_merge_pattern.py) + * [Smallest Range](greedy_methods/smallest_range.py) ## Hashes * [Adler32](hashes/adler32.py) From c14a580c9e7340ee1d826a52af0f95c077b564b4 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 11 Dec 2023 19:27:15 +0100 Subject: [PATCH 005/260] [pre-commit.ci] pre-commit autoupdate (#11210) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.1.6 → v0.1.7](https://github.com/astral-sh/ruff-pre-commit/compare/v0.1.6...v0.1.7) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 5ec7a5765817..9688f1cbb5fc 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.6 + rev: v0.1.7 hooks: - id: ruff From 2d0ed135a08dbe7da8c696d70ee7fb1a01f2cc91 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 18 Dec 2023 21:17:46 +0100 Subject: [PATCH 006/260] [pre-commit.ci] pre-commit autoupdate (#11215) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.1.7 → v0.1.8](https://github.com/astral-sh/ruff-pre-commit/compare/v0.1.7...v0.1.8) - 
[github.com/psf/black: 23.11.0 → 23.12.0](https://github.com/psf/black/compare/23.11.0...23.12.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9688f1cbb5fc..c8a11e38aeab 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,12 +16,12 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.7 + rev: v0.1.8 hooks: - id: ruff - repo: https://github.com/psf/black - rev: 23.11.0 + rev: 23.12.0 hooks: - id: black From b46fc1de04350f91971187d831d8e3292ea0bace Mon Sep 17 00:00:00 2001 From: Indrajeet Mishra Date: Wed, 20 Dec 2023 04:35:27 +0530 Subject: [PATCH 007/260] Corrected the Python Doctest command in equilibrium_index_in_array.py script (#11212) Co-authored-by: Indrajeet Mishra --- data_structures/arrays/equilibrium_index_in_array.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data_structures/arrays/equilibrium_index_in_array.py b/data_structures/arrays/equilibrium_index_in_array.py index 8802db6206bb..0717a45d9f4b 100644 --- a/data_structures/arrays/equilibrium_index_in_array.py +++ b/data_structures/arrays/equilibrium_index_in_array.py @@ -3,7 +3,7 @@ Reference: https://www.geeksforgeeks.org/equilibrium-index-of-an-array/ Python doctest can be run with the following command: -python -m doctest -v equilibrium_index.py +python -m doctest -v equilibrium_index_in_array.py Given a sequence arr[] of size n, this function returns an equilibrium index (if any) or -1 if no equilibrium index exists. 
From 7b9f82cc447c2d2ce91373c097bf610d5b0f906a Mon Sep 17 00:00:00 2001 From: Tushar Pamnani <121151091+tusharpamnani@users.noreply.github.com> Date: Wed, 20 Dec 2023 07:29:51 +0530 Subject: [PATCH 008/260] optimize quicksort implementation (#11196) * optimize quicksort implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review * Update quick_sort.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- sorts/quick_sort.py | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/sorts/quick_sort.py b/sorts/quick_sort.py index b79d3eac3e48..6b95fc144426 100644 --- a/sorts/quick_sort.py +++ b/sorts/quick_sort.py @@ -13,10 +13,10 @@ def quick_sort(collection: list) -> list: - """A pure Python implementation of quick sort algorithm + """A pure Python implementation of quicksort algorithm. 
:param collection: a mutable collection of comparable items - :return: the same collection ordered by ascending + :return: the same collection ordered in ascending order Examples: >>> quick_sort([0, 5, 3, 2, 2]) @@ -26,23 +26,26 @@ def quick_sort(collection: list) -> list: >>> quick_sort([-2, 5, 0, -45]) [-45, -2, 0, 5] """ + # Base case: if the collection has 0 or 1 elements, it is already sorted if len(collection) < 2: return collection - pivot_index = randrange(len(collection)) # Use random element as pivot - pivot = collection[pivot_index] - greater: list[int] = [] # All elements greater than pivot - lesser: list[int] = [] # All elements less than or equal to pivot - for element in collection[:pivot_index]: - (greater if element > pivot else lesser).append(element) + # Randomly select a pivot index and remove the pivot element from the collection + pivot_index = randrange(len(collection)) + pivot = collection.pop(pivot_index) - for element in collection[pivot_index + 1 :]: - (greater if element > pivot else lesser).append(element) + # Partition the remaining elements into two groups: lesser or equal, and greater + lesser = [item for item in collection if item <= pivot] + greater = [item for item in collection if item > pivot] + # Recursively sort the lesser and greater groups, and combine with the pivot return [*quick_sort(lesser), pivot, *quick_sort(greater)] if __name__ == "__main__": + # Get user input and convert it into a list of integers user_input = input("Enter numbers separated by a comma:\n").strip() unsorted = [int(item) for item in user_input.split(",")] + + # Print the result of sorting the user-provided list print(quick_sort(unsorted)) From 94c8e1ab73032d27bc8c60b733bb93393b9f1b02 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 25 Dec 2023 19:24:39 +0100 Subject: [PATCH 009/260] [pre-commit.ci] pre-commit autoupdate (#11223) MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.1.8 → v0.1.9](https://github.com/astral-sh/ruff-pre-commit/compare/v0.1.8...v0.1.9) - [github.com/psf/black: 23.12.0 → 23.12.1](https://github.com/psf/black/compare/23.12.0...23.12.1) - [github.com/pre-commit/mirrors-mypy: v1.7.1 → v1.8.0](https://github.com/pre-commit/mirrors-mypy/compare/v1.7.1...v1.8.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c8a11e38aeab..61ec3a54a69c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,12 +16,12 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.8 + rev: v0.1.9 hooks: - id: ruff - repo: https://github.com/psf/black - rev: 23.12.0 + rev: 23.12.1 hooks: - id: black @@ -51,7 +51,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.7.1 + rev: v1.8.0 hooks: - id: mypy args: From 51c5c87b9ab4eb04c3825cd20cfdba0f31a098f5 Mon Sep 17 00:00:00 2001 From: Param Thakkar <128291516+ParamThakkar123@users.noreply.github.com> Date: Wed, 27 Dec 2023 14:05:29 +0530 Subject: [PATCH 010/260] File moved to neural_network/activation_functions (#11216) * added GELU activation functions file * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_error_linear_unit.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_error_linear_unit.py * Delete neural_network/activation_functions/gaussian_error_linear_unit.py * Rename maths/gaussian_error_linear_unit.py to neural_network/activation_functions/gaussian_error_linear_unit.py --------- Co-authored-by: pre-commit-ci[bot] 
<66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../activation_functions}/gaussian_error_linear_unit.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename {maths => neural_network/activation_functions}/gaussian_error_linear_unit.py (100%) diff --git a/maths/gaussian_error_linear_unit.py b/neural_network/activation_functions/gaussian_error_linear_unit.py similarity index 100% rename from maths/gaussian_error_linear_unit.py rename to neural_network/activation_functions/gaussian_error_linear_unit.py From 9caf4784aada17dc75348f77cc8c356df503c0f3 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 8 Jan 2024 19:16:25 +0100 Subject: [PATCH 011/260] [pre-commit.ci] pre-commit autoupdate (#11231) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.1.9 → v0.1.11](https://github.com/astral-sh/ruff-pre-commit/compare/v0.1.9...v0.1.11) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- DIRECTORY.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 61ec3a54a69c..0e06ba7a5250 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.9 + rev: v0.1.11 hooks: - id: ruff diff --git a/DIRECTORY.md b/DIRECTORY.md index 2ee72df37f3f..b5392fd09114 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -631,7 +631,6 @@ * [Floor](maths/floor.py) * [Gamma](maths/gamma.py) * [Gaussian](maths/gaussian.py) - * [Gaussian Error Linear Unit](maths/gaussian_error_linear_unit.py) * [Gcd Of N 
Numbers](maths/gcd_of_n_numbers.py) * [Germain Primes](maths/germain_primes.py) * [Greatest Common Divisor](maths/greatest_common_divisor.py) @@ -791,6 +790,7 @@ * Activation Functions * [Binary Step](neural_network/activation_functions/binary_step.py) * [Exponential Linear Unit](neural_network/activation_functions/exponential_linear_unit.py) + * [Gaussian Error Linear Unit](neural_network/activation_functions/gaussian_error_linear_unit.py) * [Leaky Rectified Linear Unit](neural_network/activation_functions/leaky_rectified_linear_unit.py) * [Mish](neural_network/activation_functions/mish.py) * [Rectified Linear Unit](neural_network/activation_functions/rectified_linear_unit.py) From 227944eb2933b22a102eb88703b4a0b648f39af5 Mon Sep 17 00:00:00 2001 From: Piotr Idzik <65706193+vil02@users.noreply.github.com> Date: Fri, 12 Jan 2024 17:12:15 +0100 Subject: [PATCH 012/260] fix: consider months and days in `years_old` (#11234) * fix: do not consider months in `calculate_age` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update get_top_billionaires.py * Update get_top_billionaires.py * Update get_top_billionaires.py * TODAY = datetime.utcnow() * Update get_top_billionaires.py * Update build.yml --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .github/workflows/build.yml | 2 +- web_programming/get_top_billionaires.py | 72 ++++++++++++------------- 2 files changed, 37 insertions(+), 37 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 60c1d6d119d0..1631feb2ba06 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: 3.12 allow-prereleases: true diff --git a/web_programming/get_top_billionaires.py 
b/web_programming/get_top_billionaires.py index 6f986acb9181..703b635eef82 100644 --- a/web_programming/get_top_billionaires.py +++ b/web_programming/get_top_billionaires.py @@ -3,7 +3,7 @@ This works for some of us but fails for others. """ -from datetime import UTC, datetime, timedelta +from datetime import UTC, date, datetime import requests from rich import box @@ -11,8 +11,7 @@ from rich import table as rich_table LIMIT = 10 -TODAY = datetime.now() - +TODAY = datetime.now(tz=UTC) API_URL = ( "https://www.forbes.com/forbesapi/person/rtb/0/position/true.json" "?fields=personName,gender,source,countryOfCitizenship,birthDate,finalWorth" @@ -20,40 +19,40 @@ ) -def calculate_age(unix_date: float) -> str: - """Calculates age from given unix time format. +def years_old(birth_timestamp: int, today: date | None = None) -> int: + """ + Calculate the age in years based on the given birth date. Only the year, month, + and day are used in the calculation. The time of day is ignored. + + Args: + birth_timestamp: The date of birth. + today: (useful for writing tests) or if None then datetime.date.today(). Returns: - Age as string - - >>> from datetime import datetime, UTC - >>> years_since_create = datetime.now(tz=UTC).year - 2022 - >>> int(calculate_age(-657244800000)) - years_since_create - 73 - >>> int(calculate_age(46915200000)) - years_since_create - 51 + int: The age in years. + + Examples: + >>> today = date(2024, 1, 12) + >>> years_old(birth_timestamp=datetime(1959, 11, 20).timestamp(), today=today) + 64 + >>> years_old(birth_timestamp=datetime(1970, 2, 13).timestamp(), today=today) + 53 + >>> all( + ... years_old(datetime(today.year - i, 1, 12).timestamp(), today=today) == i + ... for i in range(1, 111) + ... 
) + True """ - # Convert date from milliseconds to seconds - unix_date /= 1000 - - if unix_date < 0: - # Handle timestamp before epoch - epoch = datetime.fromtimestamp(0, tz=UTC) - seconds_since_epoch = (datetime.now(tz=UTC) - epoch).seconds - birthdate = ( - epoch - timedelta(seconds=abs(unix_date) - seconds_since_epoch) - ).date() - else: - birthdate = datetime.fromtimestamp(unix_date, tz=UTC).date() - return str( - TODAY.year - - birthdate.year - - ((TODAY.month, TODAY.day) < (birthdate.month, birthdate.day)) + today = today or TODAY.date() + birth_date = datetime.fromtimestamp(birth_timestamp, tz=UTC).date() + return (today.year - birth_date.year) - ( + (today.month, today.day) < (birth_date.month, birth_date.day) ) -def get_forbes_real_time_billionaires() -> list[dict[str, str]]: - """Get top 10 realtime billionaires using forbes API. +def get_forbes_real_time_billionaires() -> list[dict[str, int | str]]: + """ + Get the top 10 real-time billionaires using Forbes API. Returns: List of top 10 realtime billionaires data. @@ -66,21 +65,22 @@ def get_forbes_real_time_billionaires() -> list[dict[str, str]]: "Country": person["countryOfCitizenship"], "Gender": person["gender"], "Worth ($)": f"{person['finalWorth'] / 1000:.1f} Billion", - "Age": calculate_age(person["birthDate"]), + "Age": years_old(person["birthDate"]), } for person in response_json["personList"]["personsLists"] ] -def display_billionaires(forbes_billionaires: list[dict[str, str]]) -> None: - """Display Forbes real time billionaires in a rich table. +def display_billionaires(forbes_billionaires: list[dict[str, int | str]]) -> None: + """ + Display Forbes real-time billionaires in a rich table. 
Args: - forbes_billionaires (list): Forbes top 10 real time billionaires + forbes_billionaires (list): Forbes top 10 real-time billionaires """ table = rich_table.Table( - title=f"Forbes Top {LIMIT} Real Time Billionaires at {TODAY:%Y-%m-%d %H:%M}", + title=f"Forbes Top {LIMIT} Real-Time Billionaires at {TODAY:%Y-%m-%d %H:%M}", style="green", highlight=True, box=box.SQUARE, From a56f24e83d971c8f49d194b859b9b7acbf7df084 Mon Sep 17 00:00:00 2001 From: Piotr Idzik <65706193+vil02@users.noreply.github.com> Date: Fri, 12 Jan 2024 17:46:26 +0100 Subject: [PATCH 013/260] fix: use `GITHUB_ACTOR` in `git config` (#11233) --- .github/workflows/directory_writer.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/directory_writer.yml b/.github/workflows/directory_writer.yml index 702c15f1e29b..e92c93604904 100644 --- a/.github/workflows/directory_writer.yml +++ b/.github/workflows/directory_writer.yml @@ -15,8 +15,8 @@ jobs: - name: Write DIRECTORY.md run: | scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md - git config --global user.name github-actions - git config --global user.email '${GITHUB_ACTOR}@users.noreply.github.com' + git config --global user.name "$GITHUB_ACTOR" + git config --global user.email "$GITHUB_ACTOR@users.noreply.github.com" git remote set-url origin https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/$GITHUB_REPOSITORY - name: Update DIRECTORY.md run: | From ffb93adf46971be35699e7642d79e90284b3c7f1 Mon Sep 17 00:00:00 2001 From: Piotr Idzik <65706193+vil02@users.noreply.github.com> Date: Fri, 12 Jan 2024 18:25:59 +0100 Subject: [PATCH 014/260] chore: update `actions/setup-python` to `v5` (#11236) --- .github/workflows/directory_writer.yml | 2 +- .github/workflows/project_euler.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/directory_writer.yml b/.github/workflows/directory_writer.yml index e92c93604904..55d89f455a25 100644 --- 
a/.github/workflows/directory_writer.yml +++ b/.github/workflows/directory_writer.yml @@ -9,7 +9,7 @@ jobs: - uses: actions/checkout@v4 with: fetch-depth: 0 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: 3.x - name: Write DIRECTORY.md diff --git a/.github/workflows/project_euler.yml b/.github/workflows/project_euler.yml index 7bbccf76e192..59e1208a650d 100644 --- a/.github/workflows/project_euler.yml +++ b/.github/workflows/project_euler.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: 3.x - name: Install pytest and pytest-cov @@ -27,7 +27,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: 3.x - name: Install pytest and requests From 13559aee437dab6ed88ecb1a6737cb39094c9e24 Mon Sep 17 00:00:00 2001 From: Piotr Idzik <65706193+vil02@users.noreply.github.com> Date: Sat, 13 Jan 2024 12:24:58 +0100 Subject: [PATCH 015/260] style: use proper indentation in `ruff.yml` (#11237) * style: use proper indentation in `ruff.yml` * chore: run `prettier` on `yml` files * Update .pre-commit-config.yaml * Update .pre-commit-config.yaml * Update .pre-commit-config.yaml * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update .pre-commit-config.yaml * chore: run prettier on workflow files --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/workflows/build.yml | 10 +++++----- .github/workflows/ruff.yml | 6 +++--- .pre-commit-config.yaml | 8 +++++++- 3 files changed, 15 insertions(+), 9 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 1631feb2ba06..906edfdae1ed 100644 --- a/.github/workflows/build.yml +++ 
b/.github/workflows/build.yml @@ -25,10 +25,10 @@ jobs: - name: Run tests # TODO: #8818 Re-enable quantum tests run: pytest - --ignore=quantum/q_fourier_transform.py - --ignore=project_euler/ - --ignore=scripts/validate_solutions.py - --cov-report=term-missing:skip-covered - --cov=. . + --ignore=quantum/q_fourier_transform.py + --ignore=project_euler/ + --ignore=scripts/validate_solutions.py + --cov-report=term-missing:skip-covered + --cov=. . - if: ${{ success() }} run: scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml index 496f1460e074..9ebabed3600a 100644 --- a/.github/workflows/ruff.yml +++ b/.github/workflows/ruff.yml @@ -11,6 +11,6 @@ jobs: ruff: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 - - run: pip install --user ruff - - run: ruff --output-format=github . + - uses: actions/checkout@v4 + - run: pip install --user ruff + - run: ruff --output-format=github . diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0e06ba7a5250..31e141049441 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -13,7 +13,7 @@ repos: - repo: https://github.com/MarcoGorelli/auto-walrus rev: v0.2.2 hooks: - - id: auto-walrus + - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit rev: v0.1.11 @@ -59,3 +59,9 @@ repos: - --install-types # See mirrors-mypy README.md - --non-interactive additional_dependencies: [types-requests] + + - repo: https://github.com/pre-commit/mirrors-prettier + rev: "v3.1.0" + hooks: + - id: prettier + types_or: [toml, yaml] From dd47651bfca06b31941827ed3f41517bf5718508 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 15 Jan 2024 19:19:36 +0100 Subject: [PATCH 016/260] [pre-commit.ci] pre-commit autoupdate (#11246) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: 
v0.1.11 → v0.1.13](https://github.com/astral-sh/ruff-pre-commit/compare/v0.1.11...v0.1.13) - [github.com/tox-dev/pyproject-fmt: 1.5.3 → 1.6.0](https://github.com/tox-dev/pyproject-fmt/compare/1.5.3...1.6.0) - [github.com/pre-commit/mirrors-prettier: v3.1.0 → v4.0.0-alpha.8](https://github.com/pre-commit/mirrors-prettier/compare/v3.1.0...v4.0.0-alpha.8) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 31e141049441..97603510b426 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.11 + rev: v0.1.13 hooks: - id: ruff @@ -33,7 +33,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "1.5.3" + rev: "1.6.0" hooks: - id: pyproject-fmt @@ -61,7 +61,7 @@ repos: additional_dependencies: [types-requests] - repo: https://github.com/pre-commit/mirrors-prettier - rev: "v3.1.0" + rev: "v4.0.0-alpha.8" hooks: - id: prettier types_or: [toml, yaml] From 4b6f688344b8347f555f10ca04b80ee36b5a1e82 Mon Sep 17 00:00:00 2001 From: AtomicVar Date: Tue, 16 Jan 2024 16:39:54 +0800 Subject: [PATCH 017/260] Use compiled black as the pre-commit formatter (#11247) * Use compiled black as the pre-commit formatter * ruff-format * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Keep GitHub Actions up to date with Dependabot --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/.github/dependabot.yml | 8 ++++++++ .pre-commit-config.yaml | 6 +----- audio_filters/butterworth_filter.py | 16 ++++++++++++---- conversions/convert_number_to_words.py | 2 +- digital_image_processing/filters/gabor_filter.py | 6 +++--- 
...rian_path_and_circuit_for_undirected_graph.py | 2 +- physics/n_body_simulation.py | 4 +--- project_euler/problem_056/sol1.py | 4 +--- 8 files changed, 28 insertions(+), 20 deletions(-) create mode 100644 .github/.github/dependabot.yml diff --git a/.github/.github/dependabot.yml b/.github/.github/dependabot.yml new file mode 100644 index 000000000000..15e494ec867e --- /dev/null +++ b/.github/.github/dependabot.yml @@ -0,0 +1,8 @@ +# Keep GitHub Actions up to date with Dependabot... +# https://docs.github.com/en/code-security/dependabot/working-with-dependabot/keeping-your-actions-up-to-date-with-dependabot +version: 2 +updates: + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "daily" diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 97603510b426..38cc7c8fc3ff 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -19,11 +19,7 @@ repos: rev: v0.1.13 hooks: - id: ruff - - - repo: https://github.com/psf/black - rev: 23.12.1 - hooks: - - id: black + - id: ruff-format - repo: https://github.com/codespell-project/codespell rev: v2.2.6 diff --git a/audio_filters/butterworth_filter.py b/audio_filters/butterworth_filter.py index cffedb7a68fd..6449bc3f3dce 100644 --- a/audio_filters/butterworth_filter.py +++ b/audio_filters/butterworth_filter.py @@ -11,7 +11,9 @@ def make_lowpass( - frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2) # noqa: B008 + frequency: int, + samplerate: int, + q_factor: float = 1 / sqrt(2), # noqa: B008 ) -> IIRFilter: """ Creates a low-pass filter @@ -39,7 +41,9 @@ def make_lowpass( def make_highpass( - frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2) # noqa: B008 + frequency: int, + samplerate: int, + q_factor: float = 1 / sqrt(2), # noqa: B008 ) -> IIRFilter: """ Creates a high-pass filter @@ -67,7 +71,9 @@ def make_highpass( def make_bandpass( - frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2) # noqa: B008 + frequency: int, + samplerate: 
int, + q_factor: float = 1 / sqrt(2), # noqa: B008 ) -> IIRFilter: """ Creates a band-pass filter @@ -96,7 +102,9 @@ def make_bandpass( def make_allpass( - frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2) # noqa: B008 + frequency: int, + samplerate: int, + q_factor: float = 1 / sqrt(2), # noqa: B008 ) -> IIRFilter: """ Creates an all-pass filter diff --git a/conversions/convert_number_to_words.py b/conversions/convert_number_to_words.py index 0c428928b31d..dbab44c72e1f 100644 --- a/conversions/convert_number_to_words.py +++ b/conversions/convert_number_to_words.py @@ -41,7 +41,7 @@ def max_value(cls, system: str) -> int: >>> NumberingSystem.max_value("indian") == 10**19 - 1 True """ - match (system_enum := cls[system.upper()]): + match system_enum := cls[system.upper()]: case cls.SHORT: max_exp = system_enum.value[0][0] + 3 case cls.LONG: diff --git a/digital_image_processing/filters/gabor_filter.py b/digital_image_processing/filters/gabor_filter.py index 8f9212a35a79..aaec567f4c99 100644 --- a/digital_image_processing/filters/gabor_filter.py +++ b/digital_image_processing/filters/gabor_filter.py @@ -48,9 +48,9 @@ def gabor_filter_kernel( _y = -sin_theta * px + cos_theta * py # fill kernel - gabor[y, x] = np.exp( - -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) - ) * np.cos(2 * np.pi * _x / lambd + psi) + gabor[y, x] = np.exp(-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos( + 2 * np.pi * _x / lambd + psi + ) return gabor diff --git a/graphs/eulerian_path_and_circuit_for_undirected_graph.py b/graphs/eulerian_path_and_circuit_for_undirected_graph.py index 6b4ea8e21e8b..5b146eaa845b 100644 --- a/graphs/eulerian_path_and_circuit_for_undirected_graph.py +++ b/graphs/eulerian_path_and_circuit_for_undirected_graph.py @@ -56,7 +56,7 @@ def main(): g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]} g5 = { 1: [], - 2: [] + 2: [], # all degree is zero } max_node = 10 diff --git a/physics/n_body_simulation.py b/physics/n_body_simulation.py index 
46330844df61..ec008784ba62 100644 --- a/physics/n_body_simulation.py +++ b/physics/n_body_simulation.py @@ -165,9 +165,7 @@ def update_system(self, delta_time: float) -> None: # Calculation of the distance using Pythagoras's theorem # Extra factor due to the softening technique - distance = (dif_x**2 + dif_y**2 + self.softening_factor) ** ( - 1 / 2 - ) + distance = (dif_x**2 + dif_y**2 + self.softening_factor) ** (1 / 2) # Newton's law of universal gravitation. force_x += ( diff --git a/project_euler/problem_056/sol1.py b/project_euler/problem_056/sol1.py index c772bec58692..828dbd3a8ddf 100644 --- a/project_euler/problem_056/sol1.py +++ b/project_euler/problem_056/sol1.py @@ -30,9 +30,7 @@ def solution(a: int = 100, b: int = 100) -> int: # RETURN the MAXIMUM from the list of SUMs of the list of INT converted from STR of # BASE raised to the POWER return max( - sum(int(x) for x in str(base**power)) - for base in range(a) - for power in range(b) + sum(int(x) for x in str(base**power)) for base in range(a) for power in range(b) ) From 0101dd42dc83f567bddebc386b17f2b4f6bbaa36 Mon Sep 17 00:00:00 2001 From: Ataf Fazledin Ahamed Date: Tue, 16 Jan 2024 14:43:33 +0600 Subject: [PATCH 018/260] Fixed Inappropriate Logical Expression (#11203) Signed-off-by: fazledyn-or --- data_structures/binary_tree/red_black_tree.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data_structures/binary_tree/red_black_tree.py b/data_structures/binary_tree/red_black_tree.py index 4ebe0e927ca0..fc299301da8a 100644 --- a/data_structures/binary_tree/red_black_tree.py +++ b/data_structures/binary_tree/red_black_tree.py @@ -451,7 +451,7 @@ def is_left(self) -> bool: """Returns true iff this node is the left child of its parent.""" if self.parent is None: return False - return self.parent.left is self.parent.left is self + return self.parent.left is self def is_right(self) -> bool: """Returns true iff this node is the right child of its parent.""" From 
05a5cdacc3cfd9814ad6f5cb2d4dec86109b640a Mon Sep 17 00:00:00 2001 From: Suyash Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Thu, 18 Jan 2024 18:09:27 +0530 Subject: [PATCH 019/260] Added doctest to skew_heap.py (#11147) * Added doctest to skew_heap.py * Update skew_heap.py * Update data_structures/heap/skew_heap.py Co-authored-by: Saptadeep Banerjee <69459134+imSanko@users.noreply.github.com> * Update skew_heap.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update skew_heap.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: Saptadeep Banerjee <69459134+imSanko@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- data_structures/heap/skew_heap.py | 45 +++++++++++++++++++++++++++++-- 1 file changed, 43 insertions(+), 2 deletions(-) diff --git a/data_structures/heap/skew_heap.py b/data_structures/heap/skew_heap.py index c4c13b08276a..0839db711cb1 100644 --- a/data_structures/heap/skew_heap.py +++ b/data_structures/heap/skew_heap.py @@ -21,14 +21,55 @@ def __init__(self, value: T) -> None: @property def value(self) -> T: - """Return the value of the node.""" + """ + Return the value of the node. + + >>> SkewNode(0).value + 0 + >>> SkewNode(3.14159).value + 3.14159 + >>> SkewNode("hello").value + 'hello' + >>> SkewNode(None).value + + >>> SkewNode(True).value + True + >>> SkewNode([]).value + [] + >>> SkewNode({}).value + {} + >>> SkewNode(set()).value + set() + >>> SkewNode(0.0).value + 0.0 + >>> SkewNode(-1e-10).value + -1e-10 + >>> SkewNode(10).value + 10 + >>> SkewNode(-10.5).value + -10.5 + >>> SkewNode().value + Traceback (most recent call last): + ... 
+ TypeError: SkewNode.__init__() missing 1 required positional argument: 'value' + """ return self._value @staticmethod def merge( root1: SkewNode[T] | None, root2: SkewNode[T] | None ) -> SkewNode[T] | None: - """Merge 2 nodes together.""" + """ + Merge 2 nodes together. + >>> SkewNode.merge(SkewNode(10),SkewNode(-10.5)).value + -10.5 + >>> SkewNode.merge(SkewNode(10),SkewNode(10.5)).value + 10 + >>> SkewNode.merge(SkewNode(10),SkewNode(10)).value + 10 + >>> SkewNode.merge(SkewNode(-100),SkewNode(-10.5)).value + -100 + """ if not root1: return root2 From 3952ba703a5b84a37891a001037c5c366d20941a Mon Sep 17 00:00:00 2001 From: AtomicVar Date: Thu, 18 Jan 2024 20:41:29 +0800 Subject: [PATCH 020/260] Add categorical focal cross-entropy loss algorithm (#11248) --- machine_learning/loss_functions.py | 102 +++++++++++++++++++++++++++++ 1 file changed, 102 insertions(+) diff --git a/machine_learning/loss_functions.py b/machine_learning/loss_functions.py index 36a760326f3d..f05fa0cbe686 100644 --- a/machine_learning/loss_functions.py +++ b/machine_learning/loss_functions.py @@ -148,6 +148,108 @@ def categorical_cross_entropy( return -np.sum(y_true * np.log(y_pred)) +def categorical_focal_cross_entropy( + y_true: np.ndarray, + y_pred: np.ndarray, + alpha: np.ndarray = None, + gamma: float = 2.0, + epsilon: float = 1e-15, +) -> float: + """ + Calculate the mean categorical focal cross-entropy (CFCE) loss between true + labels and predicted probabilities for multi-class classification. + + CFCE loss is a generalization of binary focal cross-entropy for multi-class + classification. It addresses class imbalance by focusing on hard examples. + + CFCE = -Σ alpha * (1 - y_pred)**gamma * y_true * log(y_pred) + + Reference: [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf) + + Parameters: + - y_true: True labels in one-hot encoded form. + - y_pred: Predicted probabilities for each class. + - alpha: Array of weighting factors for each class. 
+ - gamma: Focusing parameter for modulating the loss (default: 2.0). + - epsilon: Small constant to avoid numerical instability. + + Returns: + - The mean categorical focal cross-entropy loss. + + >>> true_labels = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1], [0.0, 0.1, 0.9]]) + >>> alpha = np.array([0.6, 0.2, 0.7]) + >>> categorical_focal_cross_entropy(true_labels, pred_probs, alpha) + 0.0025966118981496423 + + >>> true_labels = np.array([[0, 1, 0], [0, 0, 1]]) + >>> pred_probs = np.array([[0.05, 0.95, 0], [0.1, 0.8, 0.1]]) + >>> alpha = np.array([0.25, 0.25, 0.25]) + >>> categorical_focal_cross_entropy(true_labels, pred_probs, alpha) + 0.23315276982014324 + + >>> true_labels = np.array([[1, 0], [0, 1]]) + >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]]) + >>> categorical_cross_entropy(true_labels, pred_probs) + Traceback (most recent call last): + ... + ValueError: Input arrays must have the same shape. + + >>> true_labels = np.array([[2, 0, 1], [1, 0, 0]]) + >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]]) + >>> categorical_focal_cross_entropy(true_labels, pred_probs) + Traceback (most recent call last): + ... + ValueError: y_true must be one-hot encoded. + + >>> true_labels = np.array([[1, 0, 1], [1, 0, 0]]) + >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]]) + >>> categorical_focal_cross_entropy(true_labels, pred_probs) + Traceback (most recent call last): + ... + ValueError: y_true must be one-hot encoded. + + >>> true_labels = np.array([[1, 0, 0], [0, 1, 0]]) + >>> pred_probs = np.array([[0.9, 0.1, 0.1], [0.2, 0.7, 0.1]]) + >>> categorical_focal_cross_entropy(true_labels, pred_probs) + Traceback (most recent call last): + ... + ValueError: Predicted probabilities must sum to approximately 1. 
+ + >>> true_labels = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1], [0.0, 0.1, 0.9]]) + >>> alpha = np.array([0.6, 0.2]) + >>> categorical_focal_cross_entropy(true_labels, pred_probs, alpha) + Traceback (most recent call last): + ... + ValueError: Length of alpha must match the number of classes. + """ + if y_true.shape != y_pred.shape: + raise ValueError("Shape of y_true and y_pred must be the same.") + + if alpha is None: + alpha = np.ones(y_true.shape[1]) + + if np.any((y_true != 0) & (y_true != 1)) or np.any(y_true.sum(axis=1) != 1): + raise ValueError("y_true must be one-hot encoded.") + + if len(alpha) != y_true.shape[1]: + raise ValueError("Length of alpha must match the number of classes.") + + if not np.all(np.isclose(np.sum(y_pred, axis=1), 1, rtol=epsilon, atol=epsilon)): + raise ValueError("Predicted probabilities must sum to approximately 1.") + + # Clip predicted probabilities to avoid log(0) + y_pred = np.clip(y_pred, epsilon, 1 - epsilon) + + # Calculate loss for each class and sum across classes + cfce_loss = -np.sum( + alpha * np.power(1 - y_pred, gamma) * y_true * np.log(y_pred), axis=1 + ) + + return np.mean(cfce_loss) + + def hinge_loss(y_true: np.ndarray, y_pred: np.ndarray) -> float: """ Calculate the mean hinge loss for between true labels and predicted probabilities From b01571dc4f5754d3da44b8a0b6dabb44986c666e Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 22 Jan 2024 19:20:43 +0100 Subject: [PATCH 021/260] [pre-commit.ci] pre-commit autoupdate (#11255) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.1.13 → v0.1.14](https://github.com/astral-sh/ruff-pre-commit/compare/v0.1.13...v0.1.14) - [github.com/tox-dev/pyproject-fmt: 1.6.0 → 1.7.0](https://github.com/tox-dev/pyproject-fmt/compare/1.6.0...1.7.0) Co-authored-by: 
pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 38cc7c8fc3ff..7fae092d043c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.13 + rev: v0.1.14 hooks: - id: ruff - id: ruff-format @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "1.6.0" + rev: "1.7.0" hooks: - id: pyproject-fmt From b092d7755cf94b4758440b68bc97ac30154f4c55 Mon Sep 17 00:00:00 2001 From: Geoffrey Logovi <52314615+geoffreylgv@users.noreply.github.com> Date: Wed, 24 Jan 2024 06:15:39 +0000 Subject: [PATCH 022/260] fixes #11256 : computer vision link update in .computer_vision/README.md (#11257) --- computer_vision/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/computer_vision/README.md b/computer_vision/README.md index 8d2f4a130d05..1657128fd25e 100644 --- a/computer_vision/README.md +++ b/computer_vision/README.md @@ -8,4 +8,4 @@ Image processing and computer vision are a little different from each other. Ima While computer vision comes from modelling image processing using the techniques of machine learning, computer vision applies machine learning to recognize patterns for interpretation of images (much like the process of visual reasoning of human vision). 
* -* +* From c0e700c91c63c1b3ea50575b10a6c1665dfd6404 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 29 Jan 2024 21:00:37 +0100 Subject: [PATCH 023/260] [pre-commit.ci] pre-commit autoupdate (#11261) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/abravalheri/validate-pyproject: v0.15 → v0.16](https://github.com/abravalheri/validate-pyproject/compare/v0.15...v0.16) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7fae092d043c..0d13745a5a47 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -42,7 +42,7 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.15 + rev: v0.16 hooks: - id: validate-pyproject From c1d29ba459648bf8111e19e32988cb36ee8a94b0 Mon Sep 17 00:00:00 2001 From: AtomicVar Date: Tue, 30 Jan 2024 16:18:56 +0800 Subject: [PATCH 024/260] Add smooth l1 loss algorithm (#11239) --- machine_learning/loss_functions.py | 56 ++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/machine_learning/loss_functions.py b/machine_learning/loss_functions.py index f05fa0cbe686..16e5a3278b73 100644 --- a/machine_learning/loss_functions.py +++ b/machine_learning/loss_functions.py @@ -573,6 +573,62 @@ def perplexity_loss( return np.mean(perp_losses) +def smooth_l1_loss(y_true: np.ndarray, y_pred: np.ndarray, beta: float = 1.0) -> float: + """ + Calculate the Smooth L1 Loss between y_true and y_pred. + + The Smooth L1 Loss is less sensitive to outliers than the L2 Loss and is often used + in regression problems, such as object detection. 
+ + Smooth L1 Loss = + 0.5 * (x - y)^2 / beta, if |x - y| < beta + |x - y| - 0.5 * beta, otherwise + + Reference: + https://pytorch.org/docs/stable/generated/torch.nn.SmoothL1Loss.html + + Args: + y_true: Array of true values. + y_pred: Array of predicted values. + beta: Specifies the threshold at which to change between L1 and L2 loss. + + Returns: + The calculated Smooth L1 Loss between y_true and y_pred. + + Raises: + ValueError: If the length of the two arrays is not the same. + + >>> y_true = np.array([3, 5, 2, 7]) + >>> y_pred = np.array([2.9, 4.8, 2.1, 7.2]) + >>> smooth_l1_loss(y_true, y_pred, 1.0) + 0.012500000000000022 + + >>> y_true = np.array([2, 4, 6]) + >>> y_pred = np.array([1, 5, 7]) + >>> smooth_l1_loss(y_true, y_pred, 1.0) + 0.5 + + >>> y_true = np.array([1, 3, 5, 7]) + >>> y_pred = np.array([1, 3, 5, 7]) + >>> smooth_l1_loss(y_true, y_pred, 1.0) + 0.0 + + >>> y_true = np.array([1, 3, 5]) + >>> y_pred = np.array([1, 3, 5, 7]) + >>> smooth_l1_loss(y_true, y_pred, 1.0) + Traceback (most recent call last): + ... + ValueError: The length of the two arrays should be the same. 
+ """ + + if len(y_true) != len(y_pred): + raise ValueError("The length of the two arrays should be the same.") + + diff = np.abs(y_true - y_pred) + loss = np.where(diff < beta, 0.5 * diff**2 / beta, diff - 0.5 * beta) + return np.mean(loss) + + if __name__ == "__main__": import doctest From 8995f45cb505e9cb1aafe3b35c6a00d9aff5f871 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Thu, 1 Feb 2024 07:10:35 +0100 Subject: [PATCH 025/260] Rename .github/.github/dependabot.yml to .github/dependabot.yml (#11264) * Rename .github/.github/dependabot.yml to .github/dependabot.yml * runs-on: macos-14 # ubuntu-latest * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update build.yml --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/{.github => }/dependabot.yml | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename .github/{.github => }/dependabot.yml (100%) diff --git a/.github/.github/dependabot.yml b/.github/dependabot.yml similarity index 100% rename from .github/.github/dependabot.yml rename to .github/dependabot.yml From 6a169740e8c71c6c8236b09eb7b523895fedcfbd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 1 Feb 2024 08:11:41 +0100 Subject: [PATCH 026/260] Bump actions/cache from 3 to 4 (#11265) Bumps [actions/cache](https://github.com/actions/cache) from 3 to 4. - [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/cache dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 906edfdae1ed..a113b4608678 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -14,7 +14,7 @@ jobs: with: python-version: 3.12 allow-prereleases: true - - uses: actions/cache@v3 + - uses: actions/cache@v4 with: path: ~/.cache/pip key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }} From 4128f19170135cf7ccadb2afa8b2ab0a464c5765 Mon Sep 17 00:00:00 2001 From: Anthony Klarman <148516349+tonguegrease@users.noreply.github.com> Date: Fri, 2 Feb 2024 03:22:58 -0500 Subject: [PATCH 027/260] Fixed lines that needed to be uncommented after Hacktoberfest (#11267) * uncommented lines * uncommented lines * Update CODEOWNERS --------- Co-authored-by: Christian Clauss --- .github/CODEOWNERS | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index a0531cdeec69..d2ac43c7df31 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -21,15 +21,15 @@ # /cellular_automata/ -# /ciphers/ @cclauss # TODO: Uncomment this line after Hacktoberfest +# /ciphers/ # /compression/ # /computer_vision/ -# /conversions/ @cclauss # TODO: Uncomment this line after Hacktoberfest +# /conversions/ -# /data_structures/ @cclauss # TODO: Uncomment this line after Hacktoberfest +# /data_structures/ # /digital_image_processing/ @@ -67,7 +67,7 @@ # /neural_network/ -# /other/ @cclauss # TODO: Uncomment this line after Hacktoberfest +# /other/ # /project_euler/ @@ -81,7 +81,7 @@ # /sorts/ -# /strings/ @cclauss # TODO: Uncomment this line after Hacktoberfest +# /strings/ # /traversals/ From ed8d9209daff975eb3be6e0bf8cfa13e330347ca Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> 
Date: Mon, 5 Feb 2024 20:48:10 +0100 Subject: [PATCH 028/260] [pre-commit.ci] pre-commit autoupdate (#11275) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.1.14 → v0.2.0](https://github.com/astral-sh/ruff-pre-commit/compare/v0.1.14...v0.2.0) * Upgrade pyproject.toml * Revert sudoku_solver.py RUF017 Avoid quadratic list summation --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 2 +- ciphers/mixed_keyword_cypher.py | 2 +- data_structures/arrays/sudoku_solver.py | 2 +- data_structures/linked_list/is_palindrome.py | 4 +--- .../filters/gaussian_filter.py | 4 +--- electronics/resistor_equivalence.py | 8 ++------ hashes/hamming_code.py | 18 ++++++------------ machine_learning/k_means_clust.py | 2 +- .../sequential_minimum_optimization.py | 2 +- neural_network/convolution_neural_network.py | 6 +++--- pyproject.toml | 14 +++++++------- 11 files changed, 25 insertions(+), 39 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0d13745a5a47..c29c6982643e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.14 + rev: v0.2.0 hooks: - id: ruff - id: ruff-format diff --git a/ciphers/mixed_keyword_cypher.py b/ciphers/mixed_keyword_cypher.py index b984808fced6..1b186108a73e 100644 --- a/ciphers/mixed_keyword_cypher.py +++ b/ciphers/mixed_keyword_cypher.py @@ -67,7 +67,7 @@ def mixed_keyword( if verbose: print(mapping) # create the encrypted text by mapping the plaintext to the modified alphabet - return "".join(mapping[char] if char in mapping else char for char in plaintext) + return "".join(mapping.get(char, char) for char in plaintext) if __name__ == "__main__": diff --git 
a/data_structures/arrays/sudoku_solver.py b/data_structures/arrays/sudoku_solver.py index 8d38bd7295ea..20ac32e3b071 100644 --- a/data_structures/arrays/sudoku_solver.py +++ b/data_structures/arrays/sudoku_solver.py @@ -22,7 +22,7 @@ def cross(items_a, items_b): + [cross(rs, cs) for rs in ("ABC", "DEF", "GHI") for cs in ("123", "456", "789")] ) units = {s: [u for u in unitlist if s in u] for s in squares} -peers = {s: set(sum(units[s], [])) - {s} for s in squares} +peers = {s: set(sum(units[s], [])) - {s} for s in squares} # noqa: RUF017 def test(): diff --git a/data_structures/linked_list/is_palindrome.py b/data_structures/linked_list/is_palindrome.py index f949d9a2f201..da788e3e5045 100644 --- a/data_structures/linked_list/is_palindrome.py +++ b/data_structures/linked_list/is_palindrome.py @@ -171,11 +171,9 @@ def is_palindrome_dict(head: ListNode | None) -> bool: if len(v) % 2 != 0: middle += 1 else: - step = 0 - for i in range(len(v)): + for step, i in enumerate(range(len(v))): if v[i] + v[len(v) - 1 - step] != checksum: return False - step += 1 if middle > 1: return False return True diff --git a/digital_image_processing/filters/gaussian_filter.py b/digital_image_processing/filters/gaussian_filter.py index 87fa67fb65ea..634d836e5edc 100644 --- a/digital_image_processing/filters/gaussian_filter.py +++ b/digital_image_processing/filters/gaussian_filter.py @@ -22,11 +22,9 @@ def gaussian_filter(image, k_size, sigma): # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows image_array = zeros((dst_height * dst_width, k_size * k_size)) - row = 0 - for i, j in product(range(dst_height), range(dst_width)): + for row, (i, j) in enumerate(product(range(dst_height), range(dst_width))): window = ravel(image[i : i + k_size, j : j + k_size]) image_array[row, :] = window - row += 1 # turn the kernel into shape(k*k, 1) gaussian_kernel = gen_gaussian_kernel(k_size, sigma) diff --git a/electronics/resistor_equivalence.py b/electronics/resistor_equivalence.py 
index 55e7f2d6b5d2..c4ea7d4b757e 100644 --- a/electronics/resistor_equivalence.py +++ b/electronics/resistor_equivalence.py @@ -20,13 +20,11 @@ def resistor_parallel(resistors: list[float]) -> float: """ first_sum = 0.00 - index = 0 - for resistor in resistors: + for index, resistor in enumerate(resistors): if resistor <= 0: msg = f"Resistor at index {index} has a negative or zero value!" raise ValueError(msg) first_sum += 1 / float(resistor) - index += 1 return 1 / first_sum @@ -44,13 +42,11 @@ def resistor_series(resistors: list[float]) -> float: ValueError: Resistor at index 2 has a negative value! """ sum_r = 0.00 - index = 0 - for resistor in resistors: + for index, resistor in enumerate(resistors): sum_r += resistor if resistor < 0: msg = f"Resistor at index {index} has a negative value!" raise ValueError(msg) - index += 1 return sum_r diff --git a/hashes/hamming_code.py b/hashes/hamming_code.py index 4a6efcf23f63..b34fdd4c7a74 100644 --- a/hashes/hamming_code.py +++ b/hashes/hamming_code.py @@ -123,8 +123,7 @@ def emitter_converter(size_par, data): # Bit counter one for a given parity cont_bo = 0 # counter to control the loop reading - cont_loop = 0 - for x in data_ord: + for cont_loop, x in enumerate(data_ord): if x is not None: try: aux = (bin_pos[cont_loop])[-1 * (bp)] @@ -132,7 +131,6 @@ def emitter_converter(size_par, data): aux = "0" if aux == "1" and x == "1": cont_bo += 1 - cont_loop += 1 parity.append(cont_bo % 2) qtd_bp += 1 @@ -164,10 +162,10 @@ def receptor_converter(size_par, data): parity_received = [] data_output = [] - for x in range(1, len(data) + 1): + for i, item in enumerate(data, 1): # Performs a template of bit positions - who should be given, # and who should be parity - if qtd_bp < size_par and (np.log(x) / np.log(2)).is_integer(): + if qtd_bp < size_par and (np.log(i) / np.log(2)).is_integer(): data_out_gab.append("P") qtd_bp = qtd_bp + 1 else: @@ -175,10 +173,9 @@ def receptor_converter(size_par, data): # Sorts the data to the new 
output size if data_out_gab[-1] == "D": - data_output.append(data[cont_data]) + data_output.append(item) else: - parity_received.append(data[cont_data]) - cont_data += 1 + parity_received.append(item) # -----------calculates the parity with the data data_out = [] @@ -215,9 +212,7 @@ def receptor_converter(size_par, data): for bp in range(1, size_par + 1): # Bit counter one for a certain parity cont_bo = 0 - # Counter to control loop reading - cont_loop = 0 - for x in data_ord: + for cont_loop, x in enumerate(data_ord): if x is not None: try: aux = (bin_pos[cont_loop])[-1 * (bp)] @@ -225,7 +220,6 @@ def receptor_converter(size_par, data): aux = "0" if aux == "1" and x == "1": cont_bo += 1 - cont_loop += 1 parity.append(str(cont_bo % 2)) qtd_bp += 1 diff --git a/machine_learning/k_means_clust.py b/machine_learning/k_means_clust.py index ebad66ac8e8f..4a219edc3bb1 100644 --- a/machine_learning/k_means_clust.py +++ b/machine_learning/k_means_clust.py @@ -237,7 +237,7 @@ def report_generator( [ ("sum", "sum"), ("mean_with_zeros", lambda x: np.mean(np.nan_to_num(x))), - ("mean_without_zeros", lambda x: x.replace(0, np.NaN).mean()), + ("mean_without_zeros", lambda x: x.replace(0, np.nan).mean()), ( "mean_25-75", lambda x: np.mean( diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index 9e2304859f8d..9ee8c52fb2e9 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -589,7 +589,7 @@ def plot_partition_boundary( ax.contour( xrange, yrange, - np.mat(grid).T, + np.asmatrix(grid).T, levels=(-1, 0, 1), linestyles=("--", "-", "--"), linewidths=(1, 1, 1), diff --git a/neural_network/convolution_neural_network.py b/neural_network/convolution_neural_network.py index f2e88fe7bd88..e9726a0cb4a7 100644 --- a/neural_network/convolution_neural_network.py +++ b/neural_network/convolution_neural_network.py @@ -41,11 +41,11 @@ def __init__( 
self.rate_weight = rate_w self.rate_thre = rate_t self.w_conv1 = [ - np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5) + np.asmatrix(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5) for i in range(self.conv1[1]) ] - self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5) - self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5) + self.wkj = np.asmatrix(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5) + self.vji = np.asmatrix(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5) self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1 self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1 self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1 diff --git a/pyproject.toml b/pyproject.toml index c7163dc78371..2e7da519da8b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,5 @@ [tool.ruff] -ignore = [ # `ruff rule S101` for a description of that rule +lint.ignore = [ # `ruff rule S101` for a description of that rule "ARG001", # Unused function argument `amount` -- FIX ME? "B904", # Within an `except` clause, raise exceptions with `raise ... 
from err` -- FIX ME "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME @@ -31,7 +31,7 @@ ignore = [ # `ruff rule S101` for a description of that rule "SLF001", # Private member accessed: `_Iterator` -- FIX ME "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX ] -select = [ # https://beta.ruff.rs/docs/rules +lint.select = [ # https://beta.ruff.rs/docs/rules "A", # flake8-builtins "ARG", # flake8-unused-arguments "ASYNC", # flake8-async @@ -84,13 +84,13 @@ select = [ # https://beta.ruff.rs/docs/rules # "TCH", # flake8-type-checking # "TRY", # tryceratops ] -show-source = true -target-version = "py311" +output-format = "full" +target-version = "py312" -[tool.ruff.mccabe] # DO NOT INCREASE THIS VALUE +[tool.ruff.lint.mccabe] # DO NOT INCREASE THIS VALUE max-complexity = 17 # default: 10 -[tool.ruff.per-file-ignores] +[tool.ruff.lint.per-file-ignores] "arithmetic_analysis/newton_raphson.py" = ["PGH001"] "audio_filters/show_response.py" = ["ARG002"] "data_structures/binary_tree/binary_search_tree_recursive.py" = ["BLE001"] @@ -110,7 +110,7 @@ max-complexity = 17 # default: 10 "project_euler/problem_099/sol1.py" = ["SIM115"] "sorts/external_sort.py" = ["SIM115"] -[tool.ruff.pylint] # DO NOT INCREASE THESE VALUES +[tool.ruff.lint.pylint] # DO NOT INCREASE THESE VALUES allow-magic-value-types = ["float", "int", "str"] max-args = 10 # default: 5 max-branches = 20 # default: 12 From 5d6846b2bd1fa16edfc89025e00f69a802774faa Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 12 Feb 2024 21:53:20 +0100 Subject: [PATCH 029/260] [pre-commit.ci] pre-commit autoupdate (#11292) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.2.0 → v0.2.1](https://github.com/astral-sh/ruff-pre-commit/compare/v0.2.0...v0.2.1) Co-authored-by: pre-commit-ci[bot] 
<66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c29c6982643e..79d7d58d0863 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.2.0 + rev: v0.2.1 hooks: - id: ruff - id: ruff-format From c6ca1942e14a6e88c7ea1b96ef3a6d17ca843f52 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 19 Feb 2024 23:00:06 +0100 Subject: [PATCH 030/260] [pre-commit.ci] pre-commit autoupdate (#11296) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.2.1 → v0.2.2](https://github.com/astral-sh/ruff-pre-commit/compare/v0.2.1...v0.2.2) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 79d7d58d0863..be8364a7fc0b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.2.1 + rev: v0.2.2 hooks: - id: ruff - id: ruff-format From fd27953d44416a5f1541ed6e6923844b6070d086 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 12 Mar 2024 11:35:49 +0300 Subject: [PATCH 031/260] Reenable files when TensorFlow supports the current Python (#11318) * Remove python_version < '3.12' for tensorflow * Reenable dynamic_programming/k_means_clustering_tensorflow.py * updating DIRECTORY.md * Try to fix ruff * Try to fix ruff * Try to fix ruff * Try to fix ruff * Try to fix ruff * Reenable machine_learning/lstm/lstm_prediction.py * updating DIRECTORY.md * Try to fix ruff * Reenable 
computer_vision/cnn_classification.py * updating DIRECTORY.md * Reenable neural_network/input_data.py * updating DIRECTORY.md * Try to fix ruff * Try to fix ruff * Try to fix mypy * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Try to fix ruff * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: MaximSmolskiy Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 5 +++++ ciphers/rsa_cipher.py | 17 +++++++---------- ...on.py.DISABLED.txt => cnn_classification.py} | 0 ...LED.txt => k_means_clustering_tensorflow.py} | 0 ...ction.py.DISABLED.txt => lstm_prediction.py} | 8 ++++---- ...put_data.py.DEPRECATED.txt => input_data.py} | 9 +++++++-- other/lfu_cache.py | 5 +++-- requirements.txt | 2 +- 8 files changed, 27 insertions(+), 19 deletions(-) rename computer_vision/{cnn_classification.py.DISABLED.txt => cnn_classification.py} (100%) rename dynamic_programming/{k_means_clustering_tensorflow.py.DISABLED.txt => k_means_clustering_tensorflow.py} (100%) rename machine_learning/lstm/{lstm_prediction.py.DISABLED.txt => lstm_prediction.py} (90%) rename neural_network/{input_data.py.DEPRECATED.txt => input_data.py} (98%) diff --git a/DIRECTORY.md b/DIRECTORY.md index b5392fd09114..2f828aa512a9 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -134,6 +134,7 @@ * [Run Length Encoding](compression/run_length_encoding.py) ## Computer Vision + * [Cnn Classification](computer_vision/cnn_classification.py) * [Flip Augmentation](computer_vision/flip_augmentation.py) * [Haralick Descriptors](computer_vision/haralick_descriptors.py) * [Harris Corner](computer_vision/harris_corner.py) @@ -344,6 +345,7 @@ * [Floyd Warshall](dynamic_programming/floyd_warshall.py) * [Integer Partition](dynamic_programming/integer_partition.py) * [Iterating Through Submasks](dynamic_programming/iterating_through_submasks.py) + 
* [K Means Clustering Tensorflow](dynamic_programming/k_means_clustering_tensorflow.py) * [Knapsack](dynamic_programming/knapsack.py) * [Largest Divisible Subset](dynamic_programming/largest_divisible_subset.py) * [Longest Common Subsequence](dynamic_programming/longest_common_subsequence.py) @@ -571,6 +573,8 @@ * [Local Weighted Learning](machine_learning/local_weighted_learning/local_weighted_learning.py) * [Logistic Regression](machine_learning/logistic_regression.py) * [Loss Functions](machine_learning/loss_functions.py) + * Lstm + * [Lstm Prediction](machine_learning/lstm/lstm_prediction.py) * [Mfcc](machine_learning/mfcc.py) * [Multilayer Perceptron Classifier](machine_learning/multilayer_perceptron_classifier.py) * [Polynomial Regression](machine_learning/polynomial_regression.py) @@ -801,6 +805,7 @@ * [Swish](neural_network/activation_functions/swish.py) * [Back Propagation Neural Network](neural_network/back_propagation_neural_network.py) * [Convolution Neural Network](neural_network/convolution_neural_network.py) + * [Input Data](neural_network/input_data.py) * [Simple Neural Network](neural_network/simple_neural_network.py) ## Other diff --git a/ciphers/rsa_cipher.py b/ciphers/rsa_cipher.py index 9c41cdc5d472..3bc2ebe5fc74 100644 --- a/ciphers/rsa_cipher.py +++ b/ciphers/rsa_cipher.py @@ -76,11 +76,9 @@ def encrypt_and_write_to_file( key_size, n, e = read_key_file(key_filename) if key_size < block_size * 8: sys.exit( - "ERROR: Block size is {} bits and key size is {} bits. The RSA cipher " - "requires the block size to be equal to or greater than the key size. " - "Either decrease the block size or use different keys.".format( - block_size * 8, key_size - ) + f"ERROR: Block size is {block_size * 8} bits and key size is {key_size} " + "bits. The RSA cipher requires the block size to be equal to or greater " + "than the key size. Either decrease the block size or use different keys." 
) encrypted_blocks = [str(i) for i in encrypt_message(message, (n, e), block_size)] @@ -102,11 +100,10 @@ def read_from_file_and_decrypt(message_filename: str, key_filename: str) -> str: if key_size < block_size * 8: sys.exit( - "ERROR: Block size is {} bits and key size is {} bits. The RSA cipher " - "requires the block size to be equal to or greater than the key size. " - "Did you specify the correct key file and encrypted file?".format( - block_size * 8, key_size - ) + f"ERROR: Block size is {block_size * 8} bits and key size is {key_size} " + "bits. The RSA cipher requires the block size to be equal to or greater " + "than the key size. Did you specify the correct key file and encrypted " + "file?" ) encrypted_blocks = [] diff --git a/computer_vision/cnn_classification.py.DISABLED.txt b/computer_vision/cnn_classification.py similarity index 100% rename from computer_vision/cnn_classification.py.DISABLED.txt rename to computer_vision/cnn_classification.py diff --git a/dynamic_programming/k_means_clustering_tensorflow.py.DISABLED.txt b/dynamic_programming/k_means_clustering_tensorflow.py similarity index 100% rename from dynamic_programming/k_means_clustering_tensorflow.py.DISABLED.txt rename to dynamic_programming/k_means_clustering_tensorflow.py diff --git a/machine_learning/lstm/lstm_prediction.py.DISABLED.txt b/machine_learning/lstm/lstm_prediction.py similarity index 90% rename from machine_learning/lstm/lstm_prediction.py.DISABLED.txt rename to machine_learning/lstm/lstm_prediction.py index 16530e935ea7..ecbd451266ad 100644 --- a/machine_learning/lstm/lstm_prediction.py.DISABLED.txt +++ b/machine_learning/lstm/lstm_prediction.py @@ -17,11 +17,11 @@ make sure you set the price column on line number 21. Here we use a dataset which have the price on 3rd column. 
""" - df = pd.read_csv("sample_data.csv", header=None) - len_data = df.shape[:1][0] + sample_data = pd.read_csv("sample_data.csv", header=None) + len_data = sample_data.shape[:1][0] # If you're using some other dataset input the target column - actual_data = df.iloc[:, 1:2] - actual_data = actual_data.values.reshape(len_data, 1) + actual_data = sample_data.iloc[:, 1:2] + actual_data = actual_data.to_numpy().reshape(len_data, 1) actual_data = MinMaxScaler().fit_transform(actual_data) look_back = 10 forward_days = 5 diff --git a/neural_network/input_data.py.DEPRECATED.txt b/neural_network/input_data.py similarity index 98% rename from neural_network/input_data.py.DEPRECATED.txt rename to neural_network/input_data.py index a58e64907e45..2128449c03e9 100644 --- a/neural_network/input_data.py.DEPRECATED.txt +++ b/neural_network/input_data.py @@ -18,9 +18,9 @@ """ -import collections import gzip import os +import typing import urllib import numpy @@ -28,7 +28,12 @@ from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated -_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"]) + +class _Datasets(typing.NamedTuple): + train: "_DataSet" + validation: "_DataSet" + test: "_DataSet" + # CVDF mirror of http://yann.lecun.com/exdb/mnist/ DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/" diff --git a/other/lfu_cache.py b/other/lfu_cache.py index b68ba3a4605c..788fdf19bb60 100644 --- a/other/lfu_cache.py +++ b/other/lfu_cache.py @@ -24,8 +24,9 @@ def __init__(self, key: T | None, val: U | None): self.prev: DoubleLinkedListNode[T, U] | None = None def __repr__(self) -> str: - return "Node: key: {}, val: {}, freq: {}, has next: {}, has prev: {}".format( - self.key, self.val, self.freq, self.next is not None, self.prev is not None + return ( + f"Node: key: {self.key}, val: {self.val}, freq: {self.freq}, " + f"has next: {self.next is not None}, has prev: {self.prev is not None}" ) 
diff --git a/requirements.txt b/requirements.txt index 8937f6bb0dae..bb3d671393b9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -17,7 +17,7 @@ rich scikit-learn statsmodels sympy -tensorflow ; python_version < '3.12' +tensorflow tweepy # yulewalker # uncomment once audio_filters/equal_loudness_filter.py is fixed typing_extensions From 5f95d6f805088aa7e21849b1ba97cdcf059333a9 Mon Sep 17 00:00:00 2001 From: guangwu Date: Tue, 12 Mar 2024 16:40:32 +0800 Subject: [PATCH 032/260] fix: function name typo (#11319) * fix: function name typo Signed-off-by: guoguangwu * lfu_cache.py: Use f-strings * rsa_cipher.py: Use f-strings --------- Signed-off-by: guoguangwu Co-authored-by: Christian Clauss --- ciphers/rsa_cipher.py | 3 +-- machine_learning/astar.py | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/ciphers/rsa_cipher.py b/ciphers/rsa_cipher.py index 3bc2ebe5fc74..ac9782a49fff 100644 --- a/ciphers/rsa_cipher.py +++ b/ciphers/rsa_cipher.py @@ -102,8 +102,7 @@ def read_from_file_and_decrypt(message_filename: str, key_filename: str) -> str: sys.exit( f"ERROR: Block size is {block_size * 8} bits and key size is {key_size} " "bits. The RSA cipher requires the block size to be equal to or greater " - "than the key size. Did you specify the correct key file and encrypted " - "file?" + "than the key size. Were the correct key file and encrypted file specified?" 
) encrypted_blocks = [] diff --git a/machine_learning/astar.py b/machine_learning/astar.py index 7a60ed225a2d..ff5208266343 100644 --- a/machine_learning/astar.py +++ b/machine_learning/astar.py @@ -57,7 +57,7 @@ def __init__(self, world_size=(5, 5)): def show(self): print(self.w) - def get_neigbours(self, cell): + def get_neighbours(self, cell): """ Return the neighbours of cell """ @@ -110,7 +110,7 @@ def astar(world, start, goal): _closed.append(_open.pop(min_f)) if current == goal: break - for n in world.get_neigbours(current): + for n in world.get_neighbours(current): for c in _closed: if c == n: continue From bc8df6de3143b417c4d174200fd7edd0dbba4ce3 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 13 Mar 2024 07:52:41 +0100 Subject: [PATCH 033/260] [pre-commit.ci] pre-commit autoupdate (#11322) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.2.2 → v0.3.2](https://github.com/astral-sh/ruff-pre-commit/compare/v0.2.2...v0.3.2) - [github.com/pre-commit/mirrors-mypy: v1.8.0 → v1.9.0](https://github.com/pre-commit/mirrors-mypy/compare/v1.8.0...v1.9.0) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 +- backtracking/all_combinations.py | 7 +- backtracking/all_permutations.py | 9 ++- backtracking/all_subsequences.py | 1 + backtracking/coloring.py | 8 +- backtracking/hamiltonian_cycle.py | 10 +-- backtracking/minimax.py | 1 + backtracking/n_queens.py | 11 +-- backtracking/n_queens_math.py | 1 + backtracking/sudoku.py | 1 + backtracking/sum_of_subsets.py | 11 +-- boolean_algebra/nor_gate.py | 1 + cellular_automata/conways_game_of_life.py | 1 + cellular_automata/game_of_life.py | 3 +- 
cellular_automata/nagel_schrekenberg.py | 1 + ciphers/a1z26.py | 1 + ciphers/atbash.py | 3 +- ciphers/base32.py | 1 + ciphers/enigma_machine2.py | 1 + ciphers/fractionated_morse_cipher.py | 1 + ciphers/hill_cipher.py | 1 + ciphers/permutation_cipher.py | 1 + ciphers/rail_fence_cipher.py | 2 +- ciphers/rsa_factorization.py | 1 + ciphers/xor_cipher.py | 33 ++++---- compression/burrows_wheeler.py | 1 + compression/lempel_ziv.py | 4 +- compression/lempel_ziv_decompress.py | 4 +- compression/lz77.py | 1 - computer_vision/haralick_descriptors.py | 1 + computer_vision/horn_schunck.py | 16 ++-- conversions/decimal_to_hexadecimal.py | 2 +- conversions/prefix_conversions.py | 1 + conversions/temperature_conversions.py | 2 +- .../arrays/pairs_with_given_sum.py | 1 + data_structures/arrays/sparse_table.py | 15 ++-- data_structures/arrays/sudoku_solver.py | 1 + data_structures/binary_tree/avl_tree.py | 1 + .../binary_tree/binary_search_tree.py | 1 + .../binary_search_tree_recursive.py | 1 + .../binary_tree/binary_tree_node_sum.py | 1 - .../binary_tree/diameter_of_binary_tree.py | 1 + .../flatten_binarytree_to_linkedlist.py | 1 + .../binary_tree/floor_and_ceiling.py | 1 + data_structures/binary_tree/is_sorted.py | 1 + data_structures/binary_tree/is_sum_tree.py | 1 + .../binary_tree/merge_two_binary_trees.py | 1 + .../binary_tree/mirror_binary_tree.py | 1 + .../binary_tree/non_recursive_segment_tree.py | 1 + .../number_of_possible_binary_trees.py | 1 + data_structures/binary_tree/red_black_tree.py | 1 + .../binary_tree/segment_tree_other.py | 1 + data_structures/binary_tree/symmetric_tree.py | 1 + data_structures/binary_tree/wavelet_tree.py | 1 + data_structures/disjoint_set/disjoint_set.py | 4 +- data_structures/hashing/bloom_filter.py | 1 + data_structures/hashing/double_hash.py | 1 + data_structures/hashing/hash_map.py | 1 + .../hashing/number_theory/prime_numbers.py | 2 +- data_structures/linked_list/__init__.py | 1 + .../linked_list/merge_two_lists.py | 1 + 
data_structures/linked_list/skip_list.py | 1 + data_structures/queue/double_ended_queue.py | 1 + data_structures/queue/linked_queue.py | 3 +- .../queue/queue_on_pseudo_stack.py | 1 + .../stacks/dijkstras_two_stack_algorithm.py | 1 + .../stacks/stack_with_singly_linked_list.py | 3 +- .../convert_to_negative.py | 3 +- digital_image_processing/dithering/burkes.py | 1 + .../filters/bilateral_filter.py | 1 + .../filters/gaussian_filter.py | 1 + .../filters/median_filter.py | 1 + .../histogram_stretch.py | 1 + digital_image_processing/resize/resize.py | 3 +- digital_image_processing/sepia.py | 3 +- .../test_digital_image_processing.py | 1 + divide_and_conquer/convex_hull.py | 1 + divide_and_conquer/kth_order_statistic.py | 1 + divide_and_conquer/max_subarray.py | 1 + divide_and_conquer/peak.py | 1 + dynamic_programming/all_construct.py | 1 + dynamic_programming/bitmask.py | 1 + dynamic_programming/fast_fibonacci.py | 1 + .../iterating_through_submasks.py | 1 + .../longest_increasing_subsequence.py | 1 + .../matrix_chain_multiplication.py | 1 + dynamic_programming/max_subarray_sum.py | 1 + electronics/charging_capacitor.py | 1 + electronics/charging_inductor.py | 1 + electronics/resistor_color_code.py | 1 + financial/exponential_moving_average.py | 16 ++-- financial/simple_moving_average.py | 1 + fractals/koch_snowflake.py | 1 - fractals/mandelbrot.py | 1 - fractals/sierpinski_triangle.py | 1 + graphs/bi_directional_dijkstra.py | 1 - graphs/bidirectional_a_star.py | 1 + graphs/bidirectional_breadth_first_search.py | 1 + graphs/boruvka.py | 37 ++++----- graphs/breadth_first_search.py | 3 +- graphs/breadth_first_search_2.py | 1 + graphs/breadth_first_search_shortest_path.py | 1 + .../breadth_first_search_shortest_path_2.py | 9 ++- ...dth_first_search_zero_one_shortest_path.py | 1 + graphs/deep_clone_graph.py | 1 + graphs/depth_first_search.py | 1 + graphs/depth_first_search_2.py | 2 +- graphs/dijkstra.py | 1 + graphs/even_tree.py | 1 + graphs/frequent_pattern_graph_miner.py 
| 1 + graphs/graph_adjacency_list.py | 1 + graphs/graph_adjacency_matrix.py | 1 + graphs/graphs_floyd_warshall.py | 4 +- graphs/minimum_spanning_tree_prims2.py | 1 + graphs/page_rank.py | 1 + graphs/prim.py | 4 +- greedy_methods/gas_station.py | 1 + hashes/adler32.py | 12 +-- hashes/hamming_code.py | 76 +++++++++---------- hashes/luhn.py | 3 +- hashes/sdbm.py | 30 ++++---- hashes/sha1.py | 1 + knapsack/knapsack.py | 5 +- knapsack/tests/test_knapsack.py | 1 + linear_algebra/gaussian_elimination.py | 1 - linear_algebra/jacobi_iteration_method.py | 1 + linear_algebra/lu_decomposition.py | 1 + linear_algebra/src/conjugate_gradient.py | 1 + linear_algebra/src/lib.py | 13 ++-- linear_algebra/src/rayleigh_quotient.py | 1 + linear_algebra/src/test_linear_algebra.py | 1 + linear_algebra/src/transformations_2d.py | 1 + linear_programming/simplex.py | 1 + machine_learning/apriori_algorithm.py | 1 + machine_learning/astar.py | 1 + machine_learning/automatic_differentiation.py | 1 + machine_learning/data_transformations.py | 1 + machine_learning/decision_tree.py | 1 + machine_learning/frequent_pattern_growth.py | 1 + machine_learning/gradient_descent.py | 1 + machine_learning/k_means_clust.py | 1 + .../linear_discriminant_analysis.py | 65 ++++++++-------- machine_learning/linear_regression.py | 1 + machine_learning/logistic_regression.py | 1 + machine_learning/lstm/lstm_prediction.py | 9 ++- machine_learning/mfcc.py | 1 - machine_learning/self_organizing_map.py | 1 + .../sequential_minimum_optimization.py | 1 - machine_learning/similarity_search.py | 1 + maths/allocation_number.py | 1 + maths/area.py | 1 + maths/area_under_curve.py | 1 + maths/basic_maths.py | 1 + maths/binomial_distribution.py | 3 +- maths/chinese_remainder_theorem.py | 1 + maths/continued_fraction.py | 1 - maths/entropy.py | 1 + maths/gamma.py | 1 + maths/gaussian.py | 1 + maths/interquartile_range.py | 1 + maths/is_square_free.py | 1 + maths/karatsuba.py | 2 +- maths/lucas_lehmer_primality_test.py | 14 ++-- 
maths/maclaurin_series.py | 1 + maths/max_sum_sliding_window.py | 1 + maths/modular_exponential.py | 8 +- maths/monte_carlo.py | 1 + maths/numerical_analysis/adams_bashforth.py | 1 + maths/numerical_analysis/nevilles_method.py | 14 ++-- maths/numerical_analysis/newton_raphson.py | 1 + .../numerical_integration.py | 1 + maths/numerical_analysis/runge_kutta_gills.py | 1 + maths/numerical_analysis/secant_method.py | 1 + maths/prime_factors.py | 1 + maths/series/geometric_series.py | 1 - maths/series/p_series.py | 1 - maths/sieve_of_eratosthenes.py | 1 + maths/solovay_strassen_primality_test.py | 1 - maths/special_numbers/armstrong_numbers.py | 1 + maths/special_numbers/weird_number.py | 1 + maths/tanh.py | 1 + maths/triplet_sum.py | 1 + maths/two_pointer.py | 1 + maths/two_sum.py | 1 + maths/volume.py | 1 + matrix/matrix_multiplication_recursion.py | 1 + networking_flow/ford_fulkerson.py | 1 + .../activation_functions/binary_step.py | 1 - .../rectified_linear_unit.py | 1 + .../soboleva_modified_hyperbolic_tangent.py | 1 - .../back_propagation_neural_network.py | 1 + neural_network/convolution_neural_network.py | 27 +++---- neural_network/input_data.py | 1 - other/davis_putnam_logemann_loveland.py | 1 + other/fischer_yates_shuffle.py | 1 + other/gauss_easter.py | 1 + other/majority_vote_algorithm.py | 1 + other/quine.py | 1 + other/word_search.py | 1 - .../archimedes_principle_of_buoyant_force.py | 1 - physics/center_of_mass.py | 1 + physics/in_static_equilibrium.py | 1 + physics/n_body_simulation.py | 1 - physics/rms_speed_of_molecule.py | 1 - project_euler/problem_002/sol4.py | 1 + project_euler/problem_003/sol1.py | 1 + project_euler/problem_006/sol3.py | 1 + project_euler/problem_007/sol2.py | 1 + project_euler/problem_007/sol3.py | 1 + project_euler/problem_008/sol2.py | 1 + project_euler/problem_008/sol3.py | 1 + project_euler/problem_010/sol2.py | 1 + project_euler/problem_013/sol1.py | 1 + project_euler/problem_014/sol2.py | 1 + 
project_euler/problem_015/sol1.py | 1 + project_euler/problem_018/solution.py | 1 + project_euler/problem_020/sol2.py | 1 + project_euler/problem_020/sol3.py | 1 + project_euler/problem_021/sol1.py | 1 + project_euler/problem_022/sol1.py | 1 + project_euler/problem_022/sol2.py | 1 + project_euler/problem_024/sol1.py | 1 + project_euler/problem_025/sol2.py | 1 + project_euler/problem_030/sol1.py | 3 +- project_euler/problem_032/sol32.py | 1 + project_euler/problem_033/sol1.py | 1 + project_euler/problem_035/sol1.py | 1 + project_euler/problem_036/sol1.py | 1 + project_euler/problem_038/sol1.py | 1 + project_euler/problem_041/sol1.py | 1 + project_euler/problem_042/solution42.py | 1 + project_euler/problem_043/sol1.py | 1 - project_euler/problem_050/sol1.py | 1 + project_euler/problem_051/sol1.py | 1 + project_euler/problem_053/sol1.py | 1 + project_euler/problem_054/sol1.py | 1 + project_euler/problem_058/sol1.py | 1 + project_euler/problem_059/sol1.py | 1 + project_euler/problem_067/sol1.py | 1 + project_euler/problem_067/sol2.py | 1 + project_euler/problem_070/sol1.py | 1 + project_euler/problem_074/sol1.py | 1 - project_euler/problem_074/sol2.py | 1 + project_euler/problem_077/sol1.py | 1 + project_euler/problem_079/sol1.py | 1 + project_euler/problem_080/sol1.py | 1 + project_euler/problem_081/sol1.py | 1 + project_euler/problem_085/sol1.py | 1 + project_euler/problem_086/sol1.py | 1 - project_euler/problem_091/sol1.py | 1 - project_euler/problem_101/sol1.py | 1 + project_euler/problem_102/sol1.py | 1 + project_euler/problem_107/sol1.py | 1 + project_euler/problem_123/sol1.py | 1 + project_euler/problem_144/sol1.py | 1 - project_euler/problem_145/sol1.py | 1 + project_euler/problem_173/sol1.py | 1 - project_euler/problem_180/sol1.py | 1 + project_euler/problem_191/sol1.py | 1 - project_euler/problem_203/sol1.py | 1 + project_euler/problem_551/sol1.py | 1 - scheduling/highest_response_ratio_next.py | 1 + scheduling/job_sequence_with_deadline.py | 1 + 
.../non_preemptive_shortest_job_first.py | 1 - scheduling/round_robin.py | 1 + scheduling/shortest_job_first.py | 1 + searches/binary_search.py | 1 + searches/binary_tree_traversal.py | 1 + searches/fibonacci_search.py | 1 + searches/jump_search.py | 3 +- searches/quick_select.py | 1 + searches/simple_binary_search.py | 1 + searches/tabu_search.py | 1 + searches/ternary_search.py | 1 + sorts/bitonic_sort.py | 1 + sorts/bucket_sort.py | 1 + sorts/dutch_national_flag_sort.py | 1 - sorts/insertion_sort.py | 3 +- sorts/intro_sort.py | 1 + sorts/msd_radix_sort.py | 1 + sorts/odd_even_transposition_parallel.py | 1 + sorts/pigeon_sort.py | 15 ++-- sorts/quick_sort.py | 1 + sorts/radix_sort.py | 1 + sorts/recursive_insertion_sort.py | 1 + sorts/slowsort.py | 1 + sorts/tree_sort.py | 1 + strings/boyer_moore_search.py | 1 + strings/check_anagrams.py | 1 + strings/top_k_frequent_words.py | 1 - web_programming/co2_emission.py | 1 + web_programming/emails_from_url.py | 1 + web_programming/fetch_github_info.py | 1 + web_programming/fetch_jobs.py | 1 + web_programming/get_amazon_product_data.py | 1 - web_programming/recaptcha_verification.py | 1 + web_programming/search_books_by_isbn.py | 1 + 297 files changed, 498 insertions(+), 295 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index be8364a7fc0b..a17c4c323c30 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.2.2 + rev: v0.3.2 hooks: - id: ruff - id: ruff-format @@ -47,7 +47,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.8.0 + rev: v1.9.0 hooks: - id: mypy args: diff --git a/backtracking/all_combinations.py b/backtracking/all_combinations.py index 407304948c39..390decf3a05b 100644 --- a/backtracking/all_combinations.py +++ b/backtracking/all_combinations.py @@ -1,9 +1,10 @@ """ - In this problem, we want to determine all 
possible combinations of k - numbers out of 1 ... n. We use backtracking to solve this problem. +In this problem, we want to determine all possible combinations of k +numbers out of 1 ... n. We use backtracking to solve this problem. - Time complexity: O(C(n,k)) which is O(n choose k) = O((n!/(k! * (n - k)!))), +Time complexity: O(C(n,k)) which is O(n choose k) = O((n!/(k! * (n - k)!))), """ + from __future__ import annotations from itertools import combinations diff --git a/backtracking/all_permutations.py b/backtracking/all_permutations.py index ff8a53e0dd0e..c483cd62c99b 100644 --- a/backtracking/all_permutations.py +++ b/backtracking/all_permutations.py @@ -1,10 +1,11 @@ """ - In this problem, we want to determine all possible permutations - of the given sequence. We use backtracking to solve this problem. +In this problem, we want to determine all possible permutations +of the given sequence. We use backtracking to solve this problem. - Time complexity: O(n! * n), - where n denotes the length of the given sequence. +Time complexity: O(n! * n), +where n denotes the length of the given sequence. """ + from __future__ import annotations diff --git a/backtracking/all_subsequences.py b/backtracking/all_subsequences.py index c465fc542407..7844a829d046 100644 --- a/backtracking/all_subsequences.py +++ b/backtracking/all_subsequences.py @@ -5,6 +5,7 @@ Time complexity: O(2^n), where n denotes the length of the given sequence. 
""" + from __future__ import annotations from typing import Any diff --git a/backtracking/coloring.py b/backtracking/coloring.py index 9d539de8a3c4..f10cdbcf9d26 100644 --- a/backtracking/coloring.py +++ b/backtracking/coloring.py @@ -1,9 +1,9 @@ """ - Graph Coloring also called "m coloring problem" - consists of coloring a given graph with at most m colors - such that no adjacent vertices are assigned the same color +Graph Coloring also called "m coloring problem" +consists of coloring a given graph with at most m colors +such that no adjacent vertices are assigned the same color - Wikipedia: https://en.wikipedia.org/wiki/Graph_coloring +Wikipedia: https://en.wikipedia.org/wiki/Graph_coloring """ diff --git a/backtracking/hamiltonian_cycle.py b/backtracking/hamiltonian_cycle.py index e9916f83f861..f6e4212e47f4 100644 --- a/backtracking/hamiltonian_cycle.py +++ b/backtracking/hamiltonian_cycle.py @@ -1,10 +1,10 @@ """ - A Hamiltonian cycle (Hamiltonian circuit) is a graph cycle - through a graph that visits each node exactly once. - Determining whether such paths and cycles exist in graphs - is the 'Hamiltonian path problem', which is NP-complete. +A Hamiltonian cycle (Hamiltonian circuit) is a graph cycle +through a graph that visits each node exactly once. +Determining whether such paths and cycles exist in graphs +is the 'Hamiltonian path problem', which is NP-complete. 
- Wikipedia: https://en.wikipedia.org/wiki/Hamiltonian_path +Wikipedia: https://en.wikipedia.org/wiki/Hamiltonian_path """ diff --git a/backtracking/minimax.py b/backtracking/minimax.py index 6dece2990a1c..4eef90b75483 100644 --- a/backtracking/minimax.py +++ b/backtracking/minimax.py @@ -7,6 +7,7 @@ leaves of game tree is stored in scores[] height is maximum height of Game tree """ + from __future__ import annotations import math diff --git a/backtracking/n_queens.py b/backtracking/n_queens.py index 2cd8c703fc72..81668b17a0ac 100644 --- a/backtracking/n_queens.py +++ b/backtracking/n_queens.py @@ -1,12 +1,13 @@ """ - The nqueens problem is of placing N queens on a N * N - chess board such that no queen can attack any other queens placed - on that chess board. - This means that one queen cannot have any other queen on its horizontal, vertical and - diagonal lines. +The nqueens problem is of placing N queens on a N * N +chess board such that no queen can attack any other queens placed +on that chess board. +This means that one queen cannot have any other queen on its horizontal, vertical and +diagonal lines. """ + from __future__ import annotations solution = [] diff --git a/backtracking/n_queens_math.py b/backtracking/n_queens_math.py index f3b08ab0a05f..287d1f090373 100644 --- a/backtracking/n_queens_math.py +++ b/backtracking/n_queens_math.py @@ -75,6 +75,7 @@ for another one or vice versa. """ + from __future__ import annotations diff --git a/backtracking/sudoku.py b/backtracking/sudoku.py index 6e4e3e8780f2..8f5459c76d45 100644 --- a/backtracking/sudoku.py +++ b/backtracking/sudoku.py @@ -9,6 +9,7 @@ have solved the puzzle. else, we backtrack and place another number in that cell and repeat this process. 
""" + from __future__ import annotations Matrix = list[list[int]] diff --git a/backtracking/sum_of_subsets.py b/backtracking/sum_of_subsets.py index c5e23321cb0c..f34d3ca34339 100644 --- a/backtracking/sum_of_subsets.py +++ b/backtracking/sum_of_subsets.py @@ -1,11 +1,12 @@ """ - The sum-of-subsetsproblem states that a set of non-negative integers, and a - value M, determine all possible subsets of the given set whose summation sum - equal to given M. +The sum-of-subsetsproblem states that a set of non-negative integers, and a +value M, determine all possible subsets of the given set whose summation sum +equal to given M. - Summation of the chosen numbers must be equal to given number M and one number - can be used only once. +Summation of the chosen numbers must be equal to given number M and one number +can be used only once. """ + from __future__ import annotations diff --git a/boolean_algebra/nor_gate.py b/boolean_algebra/nor_gate.py index 0c8ab1c0af61..d4d6f0da23ea 100644 --- a/boolean_algebra/nor_gate.py +++ b/boolean_algebra/nor_gate.py @@ -12,6 +12,7 @@ Code provided by Akshaj Vishwanathan https://www.geeksforgeeks.org/logic-gates-in-python """ + from collections.abc import Callable diff --git a/cellular_automata/conways_game_of_life.py b/cellular_automata/conways_game_of_life.py index 84f4d5be40da..364a34c3aba6 100644 --- a/cellular_automata/conways_game_of_life.py +++ b/cellular_automata/conways_game_of_life.py @@ -2,6 +2,7 @@ Conway's Game of Life implemented in Python. https://en.wikipedia.org/wiki/Conway%27s_Game_of_Life """ + from __future__ import annotations from PIL import Image diff --git a/cellular_automata/game_of_life.py b/cellular_automata/game_of_life.py index d691a2b73af0..67e647d6475b 100644 --- a/cellular_automata/game_of_life.py +++ b/cellular_automata/game_of_life.py @@ -26,7 +26,8 @@ 4. Any dead cell with exactly three live neighbours be- comes a live cell, as if by reproduction. 
- """ +""" + import random import sys diff --git a/cellular_automata/nagel_schrekenberg.py b/cellular_automata/nagel_schrekenberg.py index 3fd6afca0153..bcdca902afee 100644 --- a/cellular_automata/nagel_schrekenberg.py +++ b/cellular_automata/nagel_schrekenberg.py @@ -24,6 +24,7 @@ >>> simulate(construct_highway(5, 2, -2), 3, 0, 2) [[0, -1, 0, -1, 0], [0, -1, 0, -1, -1], [0, -1, -1, 1, -1], [-1, 1, -1, 0, -1]] """ + from random import randint, random diff --git a/ciphers/a1z26.py b/ciphers/a1z26.py index 0f0eb7c5c083..a1377ea6d397 100644 --- a/ciphers/a1z26.py +++ b/ciphers/a1z26.py @@ -5,6 +5,7 @@ https://www.dcode.fr/letter-number-cipher http://bestcodes.weebly.com/a1z26.html """ + from __future__ import annotations diff --git a/ciphers/atbash.py b/ciphers/atbash.py index 0a86a800c51a..4e8f663ed02d 100644 --- a/ciphers/atbash.py +++ b/ciphers/atbash.py @@ -1,4 +1,5 @@ -""" https://en.wikipedia.org/wiki/Atbash """ +"""https://en.wikipedia.org/wiki/Atbash""" + import string diff --git a/ciphers/base32.py b/ciphers/base32.py index 1924d1e185d7..911afa2452c0 100644 --- a/ciphers/base32.py +++ b/ciphers/base32.py @@ -3,6 +3,7 @@ https://en.wikipedia.org/wiki/Base32 """ + B32_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567" diff --git a/ciphers/enigma_machine2.py b/ciphers/enigma_machine2.py index ec0d44e4a6c6..163aa7172c11 100644 --- a/ciphers/enigma_machine2.py +++ b/ciphers/enigma_machine2.py @@ -14,6 +14,7 @@ Created by TrapinchO """ + from __future__ import annotations RotorPositionT = tuple[int, int, int] diff --git a/ciphers/fractionated_morse_cipher.py b/ciphers/fractionated_morse_cipher.py index c1d5dc6d50aa..6c4c415abac1 100644 --- a/ciphers/fractionated_morse_cipher.py +++ b/ciphers/fractionated_morse_cipher.py @@ -8,6 +8,7 @@ http://practicalcryptography.com/ciphers/fractionated-morse-cipher/ """ + import string MORSE_CODE_DICT = { diff --git a/ciphers/hill_cipher.py b/ciphers/hill_cipher.py index 1201fda901e5..ea337a72dc04 100644 --- 
a/ciphers/hill_cipher.py +++ b/ciphers/hill_cipher.py @@ -35,6 +35,7 @@ https://www.youtube.com/watch?v=4RhLNDqcjpA """ + import string import numpy diff --git a/ciphers/permutation_cipher.py b/ciphers/permutation_cipher.py index c3f3fd1f7f94..9e1c64a7b4ea 100644 --- a/ciphers/permutation_cipher.py +++ b/ciphers/permutation_cipher.py @@ -7,6 +7,7 @@ For more info: https://www.nku.edu/~christensen/1402%20permutation%20ciphers.pdf """ + import random diff --git a/ciphers/rail_fence_cipher.py b/ciphers/rail_fence_cipher.py index 47ee7db89831..5b2311a115e4 100644 --- a/ciphers/rail_fence_cipher.py +++ b/ciphers/rail_fence_cipher.py @@ -1,4 +1,4 @@ -""" https://en.wikipedia.org/wiki/Rail_fence_cipher """ +"""https://en.wikipedia.org/wiki/Rail_fence_cipher""" def encrypt(input_string: str, key: int) -> str: diff --git a/ciphers/rsa_factorization.py b/ciphers/rsa_factorization.py index 9ee52777ed83..0a358a4fc2d4 100644 --- a/ciphers/rsa_factorization.py +++ b/ciphers/rsa_factorization.py @@ -7,6 +7,7 @@ More readable source: https://www.di-mgt.com.au/rsa_factorize_n.html large number can take minutes to factor, therefore are not included in doctest. """ + from __future__ import annotations import math diff --git a/ciphers/xor_cipher.py b/ciphers/xor_cipher.py index e30955d41ff1..24d88a0fd588 100644 --- a/ciphers/xor_cipher.py +++ b/ciphers/xor_cipher.py @@ -1,21 +1,22 @@ """ - author: Christian Bender - date: 21.12.2017 - class: XORCipher - - This class implements the XOR-cipher algorithm and provides - some useful methods for encrypting and decrypting strings and - files. - - Overview about methods - - - encrypt : list of char - - decrypt : list of char - - encrypt_string : str - - decrypt_string : str - - encrypt_file : boolean - - decrypt_file : boolean +author: Christian Bender +date: 21.12.2017 +class: XORCipher + +This class implements the XOR-cipher algorithm and provides +some useful methods for encrypting and decrypting strings and +files. 
+ +Overview about methods + +- encrypt : list of char +- decrypt : list of char +- encrypt_string : str +- decrypt_string : str +- encrypt_file : boolean +- decrypt_file : boolean """ + from __future__ import annotations diff --git a/compression/burrows_wheeler.py b/compression/burrows_wheeler.py index 52bb045d9398..ce493a70c8f9 100644 --- a/compression/burrows_wheeler.py +++ b/compression/burrows_wheeler.py @@ -10,6 +10,7 @@ original character. The BWT is thus a "free" method of improving the efficiency of text compression algorithms, costing only some extra computation. """ + from __future__ import annotations from typing import TypedDict diff --git a/compression/lempel_ziv.py b/compression/lempel_ziv.py index ea6f33944a91..ac3f0c6cfc06 100644 --- a/compression/lempel_ziv.py +++ b/compression/lempel_ziv.py @@ -1,6 +1,6 @@ """ - One of the several implementations of Lempel–Ziv–Welch compression algorithm - https://en.wikipedia.org/wiki/Lempel%E2%80%93Ziv%E2%80%93Welch +One of the several implementations of Lempel–Ziv–Welch compression algorithm +https://en.wikipedia.org/wiki/Lempel%E2%80%93Ziv%E2%80%93Welch """ import math diff --git a/compression/lempel_ziv_decompress.py b/compression/lempel_ziv_decompress.py index ddedc3d6d32a..0e49c83fb790 100644 --- a/compression/lempel_ziv_decompress.py +++ b/compression/lempel_ziv_decompress.py @@ -1,6 +1,6 @@ """ - One of the several implementations of Lempel–Ziv–Welch decompression algorithm - https://en.wikipedia.org/wiki/Lempel%E2%80%93Ziv%E2%80%93Welch +One of the several implementations of Lempel–Ziv–Welch decompression algorithm +https://en.wikipedia.org/wiki/Lempel%E2%80%93Ziv%E2%80%93Welch """ import math diff --git a/compression/lz77.py b/compression/lz77.py index 1b201c59f186..09b8b021e9d5 100644 --- a/compression/lz77.py +++ b/compression/lz77.py @@ -28,7 +28,6 @@ en.wikipedia.org/wiki/LZ77_and_LZ78 """ - from dataclasses import dataclass __version__ = "0.1" diff --git a/computer_vision/haralick_descriptors.py 
b/computer_vision/haralick_descriptors.py index 007421e34263..712bd49668f8 100644 --- a/computer_vision/haralick_descriptors.py +++ b/computer_vision/haralick_descriptors.py @@ -2,6 +2,7 @@ https://en.wikipedia.org/wiki/Image_texture https://en.wikipedia.org/wiki/Co-occurrence_matrix#Application_to_image_analysis """ + import imageio.v2 as imageio import numpy as np diff --git a/computer_vision/horn_schunck.py b/computer_vision/horn_schunck.py index b63e0268294c..f33b5b1c794b 100644 --- a/computer_vision/horn_schunck.py +++ b/computer_vision/horn_schunck.py @@ -1,12 +1,12 @@ """ - The Horn-Schunck method estimates the optical flow for every single pixel of - a sequence of images. - It works by assuming brightness constancy between two consecutive frames - and smoothness in the optical flow. - - Useful resources: - Wikipedia: https://en.wikipedia.org/wiki/Horn%E2%80%93Schunck_method - Paper: http://image.diku.dk/imagecanon/material/HornSchunckOptical_Flow.pdf +The Horn-Schunck method estimates the optical flow for every single pixel of +a sequence of images. +It works by assuming brightness constancy between two consecutive frames +and smoothness in the optical flow. 
+ +Useful resources: +Wikipedia: https://en.wikipedia.org/wiki/Horn%E2%80%93Schunck_method +Paper: http://image.diku.dk/imagecanon/material/HornSchunckOptical_Flow.pdf """ from typing import SupportsIndex diff --git a/conversions/decimal_to_hexadecimal.py b/conversions/decimal_to_hexadecimal.py index b1fb4f082242..ee79592de5ca 100644 --- a/conversions/decimal_to_hexadecimal.py +++ b/conversions/decimal_to_hexadecimal.py @@ -1,4 +1,4 @@ -""" Convert Base 10 (Decimal) Values to Hexadecimal Representations """ +"""Convert Base 10 (Decimal) Values to Hexadecimal Representations""" # set decimal value for each hexadecimal digit values = { diff --git a/conversions/prefix_conversions.py b/conversions/prefix_conversions.py index 06b759e355a7..714677f3b242 100644 --- a/conversions/prefix_conversions.py +++ b/conversions/prefix_conversions.py @@ -1,6 +1,7 @@ """ Convert International System of Units (SI) and Binary prefixes """ + from __future__ import annotations from enum import Enum diff --git a/conversions/temperature_conversions.py b/conversions/temperature_conversions.py index f7af6c8f1e2b..dde1d2f0f166 100644 --- a/conversions/temperature_conversions.py +++ b/conversions/temperature_conversions.py @@ -1,4 +1,4 @@ -""" Convert between different units of temperature """ +"""Convert between different units of temperature""" def celsius_to_fahrenheit(celsius: float, ndigits: int = 2) -> float: diff --git a/data_structures/arrays/pairs_with_given_sum.py b/data_structures/arrays/pairs_with_given_sum.py index c4a5ceeae456..b27bd78e1e0f 100644 --- a/data_structures/arrays/pairs_with_given_sum.py +++ b/data_structures/arrays/pairs_with_given_sum.py @@ -6,6 +6,7 @@ https://practice.geeksforgeeks.org/problems/count-pairs-with-given-sum5022/0 """ + from itertools import combinations diff --git a/data_structures/arrays/sparse_table.py b/data_structures/arrays/sparse_table.py index a15d5649e712..4606fe908607 100644 --- a/data_structures/arrays/sparse_table.py +++ 
b/data_structures/arrays/sparse_table.py @@ -1,15 +1,16 @@ """ - Sparse table is a data structure that allows answering range queries on - a static number list, i.e. the elements do not change throughout all the queries. +Sparse table is a data structure that allows answering range queries on +a static number list, i.e. the elements do not change throughout all the queries. - The implementation below will solve the problem of Range Minimum Query: - Finding the minimum value of a subset [L..R] of a static number list. +The implementation below will solve the problem of Range Minimum Query: +Finding the minimum value of a subset [L..R] of a static number list. - Overall time complexity: O(nlogn) - Overall space complexity: O(nlogn) +Overall time complexity: O(nlogn) +Overall space complexity: O(nlogn) - Wikipedia link: https://en.wikipedia.org/wiki/Range_minimum_query +Wikipedia link: https://en.wikipedia.org/wiki/Range_minimum_query """ + from math import log2 diff --git a/data_structures/arrays/sudoku_solver.py b/data_structures/arrays/sudoku_solver.py index 20ac32e3b071..c9dffcde2379 100644 --- a/data_structures/arrays/sudoku_solver.py +++ b/data_structures/arrays/sudoku_solver.py @@ -3,6 +3,7 @@ only minimal changes to work with modern versions of Python. If you have improvements, please make them in a separate file. 
""" + import random import time diff --git a/data_structures/binary_tree/avl_tree.py b/data_structures/binary_tree/avl_tree.py index 4c1fb17afe86..041ed7e36d16 100644 --- a/data_structures/binary_tree/avl_tree.py +++ b/data_structures/binary_tree/avl_tree.py @@ -5,6 +5,7 @@ For testing run: python avl_tree.py """ + from __future__ import annotations import math diff --git a/data_structures/binary_tree/binary_search_tree.py b/data_structures/binary_tree/binary_search_tree.py index 9071f03dcc8c..08a60a12065d 100644 --- a/data_structures/binary_tree/binary_search_tree.py +++ b/data_structures/binary_tree/binary_search_tree.py @@ -88,6 +88,7 @@ >>> not t True """ + from __future__ import annotations from collections.abc import Iterable, Iterator diff --git a/data_structures/binary_tree/binary_search_tree_recursive.py b/data_structures/binary_tree/binary_search_tree_recursive.py index 13b9b392175c..6af1b053f42c 100644 --- a/data_structures/binary_tree/binary_search_tree_recursive.py +++ b/data_structures/binary_tree/binary_search_tree_recursive.py @@ -7,6 +7,7 @@ To run an example: python binary_search_tree_recursive.py """ + from __future__ import annotations import unittest diff --git a/data_structures/binary_tree/binary_tree_node_sum.py b/data_structures/binary_tree/binary_tree_node_sum.py index 5a13e74e3c9f..066617b616c4 100644 --- a/data_structures/binary_tree/binary_tree_node_sum.py +++ b/data_structures/binary_tree/binary_tree_node_sum.py @@ -8,7 +8,6 @@ frames that could be in memory is `n` """ - from __future__ import annotations from collections.abc import Iterator diff --git a/data_structures/binary_tree/diameter_of_binary_tree.py b/data_structures/binary_tree/diameter_of_binary_tree.py index bbe70b028d24..75e5e7373323 100644 --- a/data_structures/binary_tree/diameter_of_binary_tree.py +++ b/data_structures/binary_tree/diameter_of_binary_tree.py @@ -2,6 +2,7 @@ The diameter/width of a tree is defined as the number of nodes on the longest path between two end 
nodes. """ + from __future__ import annotations from dataclasses import dataclass diff --git a/data_structures/binary_tree/flatten_binarytree_to_linkedlist.py b/data_structures/binary_tree/flatten_binarytree_to_linkedlist.py index 8820a509ecba..9b2c7b9af24b 100644 --- a/data_structures/binary_tree/flatten_binarytree_to_linkedlist.py +++ b/data_structures/binary_tree/flatten_binarytree_to_linkedlist.py @@ -10,6 +10,7 @@ Author: Arunkumar A Date: 04/09/2023 """ + from __future__ import annotations diff --git a/data_structures/binary_tree/floor_and_ceiling.py b/data_structures/binary_tree/floor_and_ceiling.py index f8a1adbd967b..b464aefad3a2 100644 --- a/data_structures/binary_tree/floor_and_ceiling.py +++ b/data_structures/binary_tree/floor_and_ceiling.py @@ -9,6 +9,7 @@ Author : Arunkumar Date : 14th October 2023 """ + from __future__ import annotations from collections.abc import Iterator diff --git a/data_structures/binary_tree/is_sorted.py b/data_structures/binary_tree/is_sorted.py index 5876c5a9c96a..509a426611e5 100644 --- a/data_structures/binary_tree/is_sorted.py +++ b/data_structures/binary_tree/is_sorted.py @@ -13,6 +13,7 @@ Runtime: O(n) Space: O(1) """ + from __future__ import annotations from collections.abc import Iterator diff --git a/data_structures/binary_tree/is_sum_tree.py b/data_structures/binary_tree/is_sum_tree.py index 3f9cf1d560a6..846bea0fe0f2 100644 --- a/data_structures/binary_tree/is_sum_tree.py +++ b/data_structures/binary_tree/is_sum_tree.py @@ -3,6 +3,7 @@ of the values of its left and right subtrees? 
https://www.geeksforgeeks.org/check-if-a-given-binary-tree-is-sumtree """ + from __future__ import annotations from collections.abc import Iterator diff --git a/data_structures/binary_tree/merge_two_binary_trees.py b/data_structures/binary_tree/merge_two_binary_trees.py index 3380f8c5fb31..6bbb30428704 100644 --- a/data_structures/binary_tree/merge_two_binary_trees.py +++ b/data_structures/binary_tree/merge_two_binary_trees.py @@ -5,6 +5,7 @@ both nodes to the new value of the merged node. Otherwise, the NOT null node will be used as the node of new tree. """ + from __future__ import annotations diff --git a/data_structures/binary_tree/mirror_binary_tree.py b/data_structures/binary_tree/mirror_binary_tree.py index 39305c2a9da2..62e2f08dd4e0 100644 --- a/data_structures/binary_tree/mirror_binary_tree.py +++ b/data_structures/binary_tree/mirror_binary_tree.py @@ -3,6 +3,7 @@ Leetcode problem reference: https://leetcode.com/problems/mirror-binary-tree/ """ + from __future__ import annotations from collections.abc import Iterator diff --git a/data_structures/binary_tree/non_recursive_segment_tree.py b/data_structures/binary_tree/non_recursive_segment_tree.py index 04164e5cba4e..42c78a3a1be0 100644 --- a/data_structures/binary_tree/non_recursive_segment_tree.py +++ b/data_structures/binary_tree/non_recursive_segment_tree.py @@ -35,6 +35,7 @@ >>> st.query(0, 2) [1, 2, 3] """ + from __future__ import annotations from collections.abc import Callable diff --git a/data_structures/binary_tree/number_of_possible_binary_trees.py b/data_structures/binary_tree/number_of_possible_binary_trees.py index 684c518b1eb6..1c3dff37e7d9 100644 --- a/data_structures/binary_tree/number_of_possible_binary_trees.py +++ b/data_structures/binary_tree/number_of_possible_binary_trees.py @@ -6,6 +6,7 @@ Further details at Wikipedia: https://en.wikipedia.org/wiki/Catalan_number """ + """ Our Contribution: Basically we Create the 2 function: diff --git a/data_structures/binary_tree/red_black_tree.py 
b/data_structures/binary_tree/red_black_tree.py index fc299301da8a..3b5845cd957b 100644 --- a/data_structures/binary_tree/red_black_tree.py +++ b/data_structures/binary_tree/red_black_tree.py @@ -2,6 +2,7 @@ psf/black : true ruff : passed """ + from __future__ import annotations from collections.abc import Iterator diff --git a/data_structures/binary_tree/segment_tree_other.py b/data_structures/binary_tree/segment_tree_other.py index cc77c4951f1a..95f21ddd4777 100644 --- a/data_structures/binary_tree/segment_tree_other.py +++ b/data_structures/binary_tree/segment_tree_other.py @@ -3,6 +3,7 @@ allowing queries to be done later in log(N) time function takes 2 values and returns a same type value """ + from collections.abc import Sequence from queue import Queue diff --git a/data_structures/binary_tree/symmetric_tree.py b/data_structures/binary_tree/symmetric_tree.py index 331a25849c1c..98a766cab988 100644 --- a/data_structures/binary_tree/symmetric_tree.py +++ b/data_structures/binary_tree/symmetric_tree.py @@ -4,6 +4,7 @@ Leetcode reference: https://leetcode.com/problems/symmetric-tree/ """ + from __future__ import annotations from dataclasses import dataclass diff --git a/data_structures/binary_tree/wavelet_tree.py b/data_structures/binary_tree/wavelet_tree.py index 041e140f5b15..2da571e8d326 100644 --- a/data_structures/binary_tree/wavelet_tree.py +++ b/data_structures/binary_tree/wavelet_tree.py @@ -7,6 +7,7 @@ 2. https://www.youtube.com/watch?v=4aSv9PcecDw&t=811s 3. https://www.youtube.com/watch?v=CybAgVF-MMc&t=1178s """ + from __future__ import annotations test_array = [2, 1, 4, 5, 6, 0, 8, 9, 1, 2, 0, 6, 4, 2, 0, 6, 5, 3, 2, 7] diff --git a/data_structures/disjoint_set/disjoint_set.py b/data_structures/disjoint_set/disjoint_set.py index 12dafb2d935e..edc4736b6132 100644 --- a/data_structures/disjoint_set/disjoint_set.py +++ b/data_structures/disjoint_set/disjoint_set.py @@ -1,6 +1,6 @@ """ - Disjoint set. 
- Reference: https://en.wikipedia.org/wiki/Disjoint-set_data_structure +Disjoint set. +Reference: https://en.wikipedia.org/wiki/Disjoint-set_data_structure """ diff --git a/data_structures/hashing/bloom_filter.py b/data_structures/hashing/bloom_filter.py index 7fd0985bdc33..eb2cb4b79c46 100644 --- a/data_structures/hashing/bloom_filter.py +++ b/data_structures/hashing/bloom_filter.py @@ -58,6 +58,7 @@ >>> bloom.bitstring '01100101' """ + from hashlib import md5, sha256 HASH_FUNCTIONS = (sha256, md5) diff --git a/data_structures/hashing/double_hash.py b/data_structures/hashing/double_hash.py index 76c6c86814ec..324282cbfd8d 100644 --- a/data_structures/hashing/double_hash.py +++ b/data_structures/hashing/double_hash.py @@ -11,6 +11,7 @@ Reference: https://en.wikipedia.org/wiki/Double_hashing """ + from .hash_table import HashTable from .number_theory.prime_numbers import is_prime, next_prime diff --git a/data_structures/hashing/hash_map.py b/data_structures/hashing/hash_map.py index 6a6f8e54d5e9..9213d6930f67 100644 --- a/data_structures/hashing/hash_map.py +++ b/data_structures/hashing/hash_map.py @@ -7,6 +7,7 @@ Modern Dictionaries by Raymond Hettinger https://www.youtube.com/watch?v=p33CVV29OG8 """ + from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar diff --git a/data_structures/hashing/number_theory/prime_numbers.py b/data_structures/hashing/number_theory/prime_numbers.py index 0c25896f9880..2549a1477b2b 100644 --- a/data_structures/hashing/number_theory/prime_numbers.py +++ b/data_structures/hashing/number_theory/prime_numbers.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 """ - module to operations with prime numbers +module to operations with prime numbers """ import math diff --git a/data_structures/linked_list/__init__.py b/data_structures/linked_list/__init__.py index 225113f72cee..00ef337a1211 100644 --- a/data_structures/linked_list/__init__.py +++ 
b/data_structures/linked_list/__init__.py @@ -5,6 +5,7 @@ head node gives us access of the complete list - Last node: points to null """ + from __future__ import annotations from typing import Any diff --git a/data_structures/linked_list/merge_two_lists.py b/data_structures/linked_list/merge_two_lists.py index ca0d3bb48540..e47dbdadcf39 100644 --- a/data_structures/linked_list/merge_two_lists.py +++ b/data_structures/linked_list/merge_two_lists.py @@ -1,6 +1,7 @@ """ Algorithm that merges two sorted linked lists into one sorted linked list. """ + from __future__ import annotations from collections.abc import Iterable, Iterator diff --git a/data_structures/linked_list/skip_list.py b/data_structures/linked_list/skip_list.py index 4413c53e520e..88d3e0daddf0 100644 --- a/data_structures/linked_list/skip_list.py +++ b/data_structures/linked_list/skip_list.py @@ -2,6 +2,7 @@ Based on "Skip Lists: A Probabilistic Alternative to Balanced Trees" by William Pugh https://epaperpress.com/sortsearch/download/skiplist.pdf """ + from __future__ import annotations from random import random diff --git a/data_structures/queue/double_ended_queue.py b/data_structures/queue/double_ended_queue.py index 17a23038d288..607d0bda3df4 100644 --- a/data_structures/queue/double_ended_queue.py +++ b/data_structures/queue/double_ended_queue.py @@ -1,6 +1,7 @@ """ Implementation of double ended queue. 
""" + from __future__ import annotations from collections.abc import Iterable diff --git a/data_structures/queue/linked_queue.py b/data_structures/queue/linked_queue.py index 3af97d28e4f7..80f6d309af9a 100644 --- a/data_structures/queue/linked_queue.py +++ b/data_structures/queue/linked_queue.py @@ -1,4 +1,5 @@ -""" A Queue using a linked list like structure """ +"""A Queue using a linked list like structure""" + from __future__ import annotations from collections.abc import Iterator diff --git a/data_structures/queue/queue_on_pseudo_stack.py b/data_structures/queue/queue_on_pseudo_stack.py index d9845100008e..2da67ecc263c 100644 --- a/data_structures/queue/queue_on_pseudo_stack.py +++ b/data_structures/queue/queue_on_pseudo_stack.py @@ -1,4 +1,5 @@ """Queue represented by a pseudo stack (represented by a list with pop and append)""" + from typing import Any diff --git a/data_structures/stacks/dijkstras_two_stack_algorithm.py b/data_structures/stacks/dijkstras_two_stack_algorithm.py index 976c9a53c931..94d19156f1c3 100644 --- a/data_structures/stacks/dijkstras_two_stack_algorithm.py +++ b/data_structures/stacks/dijkstras_two_stack_algorithm.py @@ -29,6 +29,7 @@ NOTE: It only works with whole numbers. 
""" + __author__ = "Alexander Joslin" import operator as op diff --git a/data_structures/stacks/stack_with_singly_linked_list.py b/data_structures/stacks/stack_with_singly_linked_list.py index f5ce83b863ce..8e77c2b967ef 100644 --- a/data_structures/stacks/stack_with_singly_linked_list.py +++ b/data_structures/stacks/stack_with_singly_linked_list.py @@ -1,4 +1,5 @@ -""" A Stack using a linked list like structure """ +"""A Stack using a linked list like structure""" + from __future__ import annotations from collections.abc import Iterator diff --git a/digital_image_processing/convert_to_negative.py b/digital_image_processing/convert_to_negative.py index 7df44138973c..9bf2d8f2c075 100644 --- a/digital_image_processing/convert_to_negative.py +++ b/digital_image_processing/convert_to_negative.py @@ -1,6 +1,7 @@ """ - Implemented an algorithm using opencv to convert a colored image into its negative +Implemented an algorithm using opencv to convert a colored image into its negative """ + from cv2 import destroyAllWindows, imread, imshow, waitKey diff --git a/digital_image_processing/dithering/burkes.py b/digital_image_processing/dithering/burkes.py index 35aedc16d404..4b59356d8f08 100644 --- a/digital_image_processing/dithering/burkes.py +++ b/digital_image_processing/dithering/burkes.py @@ -1,6 +1,7 @@ """ Implementation Burke's algorithm (dithering) """ + import numpy as np from cv2 import destroyAllWindows, imread, imshow, waitKey diff --git a/digital_image_processing/filters/bilateral_filter.py b/digital_image_processing/filters/bilateral_filter.py index 199ac4d9939a..6ef4434d959c 100644 --- a/digital_image_processing/filters/bilateral_filter.py +++ b/digital_image_processing/filters/bilateral_filter.py @@ -9,6 +9,7 @@ Output: img:A 2d zero padded image with values in between 0 and 1 """ + import math import sys diff --git a/digital_image_processing/filters/gaussian_filter.py b/digital_image_processing/filters/gaussian_filter.py index 634d836e5edc..0c34e59fafe5 
100644 --- a/digital_image_processing/filters/gaussian_filter.py +++ b/digital_image_processing/filters/gaussian_filter.py @@ -1,6 +1,7 @@ """ Implementation of gaussian filter algorithm """ + from itertools import product from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey diff --git a/digital_image_processing/filters/median_filter.py b/digital_image_processing/filters/median_filter.py index 174018569d62..fc8b582ef67a 100644 --- a/digital_image_processing/filters/median_filter.py +++ b/digital_image_processing/filters/median_filter.py @@ -1,6 +1,7 @@ """ Implementation of median filter algorithm """ + from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey from numpy import divide, int8, multiply, ravel, sort, zeros_like diff --git a/digital_image_processing/histogram_equalization/histogram_stretch.py b/digital_image_processing/histogram_equalization/histogram_stretch.py index 5ea7773e32d9..1270c964dee6 100644 --- a/digital_image_processing/histogram_equalization/histogram_stretch.py +++ b/digital_image_processing/histogram_equalization/histogram_stretch.py @@ -3,6 +3,7 @@ @author: Binish125 """ + import copy import os diff --git a/digital_image_processing/resize/resize.py b/digital_image_processing/resize/resize.py index 4836521f9f58..7bde118da69b 100644 --- a/digital_image_processing/resize/resize.py +++ b/digital_image_processing/resize/resize.py @@ -1,4 +1,5 @@ -""" Multiple image resizing techniques """ +"""Multiple image resizing techniques""" + import numpy as np from cv2 import destroyAllWindows, imread, imshow, waitKey diff --git a/digital_image_processing/sepia.py b/digital_image_processing/sepia.py index e9dd2c06066d..1924a80451e5 100644 --- a/digital_image_processing/sepia.py +++ b/digital_image_processing/sepia.py @@ -1,6 +1,7 @@ """ - Implemented an algorithm using opencv to tone an image with sepia technique +Implemented an algorithm using opencv to tone an image with sepia technique """ + from cv2 import destroyAllWindows, 
imread, imshow, waitKey diff --git a/digital_image_processing/test_digital_image_processing.py b/digital_image_processing/test_digital_image_processing.py index 7993110d6bdd..d1200f4d65ca 100644 --- a/digital_image_processing/test_digital_image_processing.py +++ b/digital_image_processing/test_digital_image_processing.py @@ -1,6 +1,7 @@ """ PyTest's for Digital Image Processing """ + import numpy as np from cv2 import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uint8 diff --git a/divide_and_conquer/convex_hull.py b/divide_and_conquer/convex_hull.py index 1d1bf301def5..a5d8b713bdbc 100644 --- a/divide_and_conquer/convex_hull.py +++ b/divide_and_conquer/convex_hull.py @@ -12,6 +12,7 @@ which have not been implemented here, yet. """ + from __future__ import annotations from collections.abc import Iterable diff --git a/divide_and_conquer/kth_order_statistic.py b/divide_and_conquer/kth_order_statistic.py index 666ad1a39b8a..23fd8be5ea47 100644 --- a/divide_and_conquer/kth_order_statistic.py +++ b/divide_and_conquer/kth_order_statistic.py @@ -8,6 +8,7 @@ For more information of this algorithm: https://web.stanford.edu/class/archive/cs/cs161/cs161.1138/lectures/08/Small08.pdf """ + from __future__ import annotations from random import choice diff --git a/divide_and_conquer/max_subarray.py b/divide_and_conquer/max_subarray.py index 851ef621a24c..0fad7ab5d920 100644 --- a/divide_and_conquer/max_subarray.py +++ b/divide_and_conquer/max_subarray.py @@ -6,6 +6,7 @@ This divide-and-conquer algorithm finds the maximum subarray in O(n log n) time. """ + from __future__ import annotations import time diff --git a/divide_and_conquer/peak.py b/divide_and_conquer/peak.py index e60f28bfbe29..71ab5ac86574 100644 --- a/divide_and_conquer/peak.py +++ b/divide_and_conquer/peak.py @@ -7,6 +7,7 @@ (From Kleinberg and Tardos. Algorithm Design. 
Addison Wesley 2006: Chapter 5 Solved Exercise 1) """ + from __future__ import annotations diff --git a/dynamic_programming/all_construct.py b/dynamic_programming/all_construct.py index 6e53a702cbb1..5d585fc7fcec 100644 --- a/dynamic_programming/all_construct.py +++ b/dynamic_programming/all_construct.py @@ -2,6 +2,7 @@ Program to list all the ways a target string can be constructed from the given list of substrings """ + from __future__ import annotations diff --git a/dynamic_programming/bitmask.py b/dynamic_programming/bitmask.py index 56bb8e96ba02..a6e6a0cda7bf 100644 --- a/dynamic_programming/bitmask.py +++ b/dynamic_programming/bitmask.py @@ -8,6 +8,7 @@ a person can do only one task and a task is performed only by one person. Find the total no of ways in which the tasks can be distributed. """ + from collections import defaultdict diff --git a/dynamic_programming/fast_fibonacci.py b/dynamic_programming/fast_fibonacci.py index f48186a34c25..9f956ca2f979 100644 --- a/dynamic_programming/fast_fibonacci.py +++ b/dynamic_programming/fast_fibonacci.py @@ -4,6 +4,7 @@ This program calculates the nth Fibonacci number in O(log(n)). It's possible to calculate F(1_000_000) in less than a second. """ + from __future__ import annotations import sys diff --git a/dynamic_programming/iterating_through_submasks.py b/dynamic_programming/iterating_through_submasks.py index 4d0a250e8dfe..372dd2c74a71 100644 --- a/dynamic_programming/iterating_through_submasks.py +++ b/dynamic_programming/iterating_through_submasks.py @@ -5,6 +5,7 @@ its submasks. 
The mask s is submask of m if only bits that were included in bitmask are set """ + from __future__ import annotations diff --git a/dynamic_programming/longest_increasing_subsequence.py b/dynamic_programming/longest_increasing_subsequence.py index d827893763c5..2a78e2e7ad1d 100644 --- a/dynamic_programming/longest_increasing_subsequence.py +++ b/dynamic_programming/longest_increasing_subsequence.py @@ -10,6 +10,7 @@ Example: [10, 22, 9, 33, 21, 50, 41, 60, 80] as input will return [10, 22, 33, 41, 60, 80] as output """ + from __future__ import annotations diff --git a/dynamic_programming/matrix_chain_multiplication.py b/dynamic_programming/matrix_chain_multiplication.py index 084254a61f6c..da6e525ce816 100644 --- a/dynamic_programming/matrix_chain_multiplication.py +++ b/dynamic_programming/matrix_chain_multiplication.py @@ -38,6 +38,7 @@ arr = [40, 20, 30, 10, 30] output: 26000 """ + from collections.abc import Iterator from contextlib import contextmanager from functools import cache diff --git a/dynamic_programming/max_subarray_sum.py b/dynamic_programming/max_subarray_sum.py index c76943472b97..8c1dc0889a85 100644 --- a/dynamic_programming/max_subarray_sum.py +++ b/dynamic_programming/max_subarray_sum.py @@ -9,6 +9,7 @@ Reference: https://en.wikipedia.org/wiki/Maximum_subarray_problem """ + from collections.abc import Sequence diff --git a/electronics/charging_capacitor.py b/electronics/charging_capacitor.py index 4029b0ecf267..0021e4e345e0 100644 --- a/electronics/charging_capacitor.py +++ b/electronics/charging_capacitor.py @@ -14,6 +14,7 @@ time 't' from the initiation of charging a capacitor with the help of the exponential function containing RC. Both at charging and discharging of a capacitor. 
""" + from math import exp # value of exp = 2.718281828459… diff --git a/electronics/charging_inductor.py b/electronics/charging_inductor.py index e5c0126c248a..8a3bbc0bbfcd 100644 --- a/electronics/charging_inductor.py +++ b/electronics/charging_inductor.py @@ -25,6 +25,7 @@ in its 'magnetic field'.with the help 'RL-time-constant' we can find current at any time in inductor while it is charging. """ + from math import exp # value of exp = 2.718281828459… diff --git a/electronics/resistor_color_code.py b/electronics/resistor_color_code.py index b0534b813def..189d19946d9d 100644 --- a/electronics/resistor_color_code.py +++ b/electronics/resistor_color_code.py @@ -58,6 +58,7 @@ https://learn.parallax.com/support/reference/resistor-color-codes https://byjus.com/physics/resistor-colour-codes/ """ + valid_colors: list = [ "Black", "Brown", diff --git a/financial/exponential_moving_average.py b/financial/exponential_moving_average.py index 0b6cea3b4c91..b56eb2712415 100644 --- a/financial/exponential_moving_average.py +++ b/financial/exponential_moving_average.py @@ -1,12 +1,12 @@ """ - Calculate the exponential moving average (EMA) on the series of stock prices. - Wikipedia Reference: https://en.wikipedia.org/wiki/Exponential_smoothing - https://www.investopedia.com/terms/e/ema.asp#toc-what-is-an-exponential - -moving-average-ema - - Exponential moving average is used in finance to analyze changes stock prices. - EMA is used in conjunction with Simple moving average (SMA), EMA reacts to the - changes in the value quicker than SMA, which is one of the advantages of using EMA. +Calculate the exponential moving average (EMA) on the series of stock prices. +Wikipedia Reference: https://en.wikipedia.org/wiki/Exponential_smoothing +https://www.investopedia.com/terms/e/ema.asp#toc-what-is-an-exponential +-moving-average-ema + +Exponential moving average is used in finance to analyze changes stock prices. 
+EMA is used in conjunction with Simple moving average (SMA), EMA reacts to the +changes in the value quicker than SMA, which is one of the advantages of using EMA. """ from collections.abc import Iterator diff --git a/financial/simple_moving_average.py b/financial/simple_moving_average.py index d5d68ffd3dab..f5ae444fd027 100644 --- a/financial/simple_moving_average.py +++ b/financial/simple_moving_average.py @@ -6,6 +6,7 @@ Reference: https://en.wikipedia.org/wiki/Moving_average """ + from collections.abc import Sequence diff --git a/fractals/koch_snowflake.py b/fractals/koch_snowflake.py index b0aaa86b11d8..30cd4b39c7c1 100644 --- a/fractals/koch_snowflake.py +++ b/fractals/koch_snowflake.py @@ -20,7 +20,6 @@ - numpy """ - from __future__ import annotations import matplotlib.pyplot as plt # type: ignore diff --git a/fractals/mandelbrot.py b/fractals/mandelbrot.py index 84dbda997562..5eb9af0aafe1 100644 --- a/fractals/mandelbrot.py +++ b/fractals/mandelbrot.py @@ -15,7 +15,6 @@ (see also https://en.wikipedia.org/wiki/Plotting_algorithms_for_the_Mandelbrot_set ) """ - import colorsys from PIL import Image # type: ignore diff --git a/fractals/sierpinski_triangle.py b/fractals/sierpinski_triangle.py index 45f7ab84cfff..ceb2001b681d 100644 --- a/fractals/sierpinski_triangle.py +++ b/fractals/sierpinski_triangle.py @@ -22,6 +22,7 @@ This code was written by editing the code from https://www.riannetrujillo.com/blog/python-fractal/ """ + import sys import turtle diff --git a/graphs/bi_directional_dijkstra.py b/graphs/bi_directional_dijkstra.py index 529a235db625..7b9eac6c8587 100644 --- a/graphs/bi_directional_dijkstra.py +++ b/graphs/bi_directional_dijkstra.py @@ -10,7 +10,6 @@ # Author: Swayam Singh (https://github.com/practice404) - from queue import PriorityQueue from typing import Any diff --git a/graphs/bidirectional_a_star.py b/graphs/bidirectional_a_star.py index 373d67142aa9..00f623de3493 100644 --- a/graphs/bidirectional_a_star.py +++ 
b/graphs/bidirectional_a_star.py @@ -1,6 +1,7 @@ """ https://en.wikipedia.org/wiki/Bidirectional_search """ + from __future__ import annotations import time diff --git a/graphs/bidirectional_breadth_first_search.py b/graphs/bidirectional_breadth_first_search.py index 511b080a9add..71c5a9aff08f 100644 --- a/graphs/bidirectional_breadth_first_search.py +++ b/graphs/bidirectional_breadth_first_search.py @@ -1,6 +1,7 @@ """ https://en.wikipedia.org/wiki/Bidirectional_search """ + from __future__ import annotations import time diff --git a/graphs/boruvka.py b/graphs/boruvka.py index 2715a3085948..3dc059ff6a62 100644 --- a/graphs/boruvka.py +++ b/graphs/boruvka.py @@ -1,29 +1,30 @@ """Borůvka's algorithm. - Determines the minimum spanning tree (MST) of a graph using the Borůvka's algorithm. - Borůvka's algorithm is a greedy algorithm for finding a minimum spanning tree in a - connected graph, or a minimum spanning forest if a graph that is not connected. +Determines the minimum spanning tree (MST) of a graph using the Borůvka's algorithm. +Borůvka's algorithm is a greedy algorithm for finding a minimum spanning tree in a +connected graph, or a minimum spanning forest if a graph that is not connected. - The time complexity of this algorithm is O(ELogV), where E represents the number - of edges, while V represents the number of nodes. - O(number_of_edges Log number_of_nodes) +The time complexity of this algorithm is O(ELogV), where E represents the number +of edges, while V represents the number of nodes. +O(number_of_edges Log number_of_nodes) - The space complexity of this algorithm is O(V + E), since we have to keep a couple - of lists whose sizes are equal to the number of nodes, as well as keep all the - edges of a graph inside of the data structure itself. 
+The space complexity of this algorithm is O(V + E), since we have to keep a couple +of lists whose sizes are equal to the number of nodes, as well as keep all the +edges of a graph inside of the data structure itself. - Borůvka's algorithm gives us pretty much the same result as other MST Algorithms - - they all find the minimum spanning tree, and the time complexity is approximately - the same. +Borůvka's algorithm gives us pretty much the same result as other MST Algorithms - +they all find the minimum spanning tree, and the time complexity is approximately +the same. - One advantage that Borůvka's algorithm has compared to the alternatives is that it - doesn't need to presort the edges or maintain a priority queue in order to find the - minimum spanning tree. - Even though that doesn't help its complexity, since it still passes the edges logE - times, it is a bit simpler to code. +One advantage that Borůvka's algorithm has compared to the alternatives is that it +doesn't need to presort the edges or maintain a priority queue in order to find the +minimum spanning tree. +Even though that doesn't help its complexity, since it still passes the edges logE +times, it is a bit simpler to code. 
- Details: https://en.wikipedia.org/wiki/Bor%C5%AFvka%27s_algorithm +Details: https://en.wikipedia.org/wiki/Bor%C5%AFvka%27s_algorithm """ + from __future__ import annotations from typing import Any diff --git a/graphs/breadth_first_search.py b/graphs/breadth_first_search.py index 171d3875f3c5..cab79be39ed3 100644 --- a/graphs/breadth_first_search.py +++ b/graphs/breadth_first_search.py @@ -1,6 +1,7 @@ #!/usr/bin/python -""" Author: OMKAR PATHAK """ +"""Author: OMKAR PATHAK""" + from __future__ import annotations from queue import Queue diff --git a/graphs/breadth_first_search_2.py b/graphs/breadth_first_search_2.py index a0b92b90b456..ccadfa346bf1 100644 --- a/graphs/breadth_first_search_2.py +++ b/graphs/breadth_first_search_2.py @@ -12,6 +12,7 @@ mark w as explored add w to Q (at the end) """ + from __future__ import annotations from collections import deque diff --git a/graphs/breadth_first_search_shortest_path.py b/graphs/breadth_first_search_shortest_path.py index d489b110b3a7..c06440bccef3 100644 --- a/graphs/breadth_first_search_shortest_path.py +++ b/graphs/breadth_first_search_shortest_path.py @@ -1,6 +1,7 @@ """Breath First Search (BFS) can be used when finding the shortest path from a given source node to a target node in an unweighted graph. """ + from __future__ import annotations graph = { diff --git a/graphs/breadth_first_search_shortest_path_2.py b/graphs/breadth_first_search_shortest_path_2.py index b0c8d353ba04..4f9b6e65bdf3 100644 --- a/graphs/breadth_first_search_shortest_path_2.py +++ b/graphs/breadth_first_search_shortest_path_2.py @@ -1,9 +1,10 @@ """Breadth-first search shortest path implementations. 
- doctest: - python -m doctest -v bfs_shortest_path.py - Manual test: - python bfs_shortest_path.py +doctest: +python -m doctest -v bfs_shortest_path.py +Manual test: +python bfs_shortest_path.py """ + demo_graph = { "A": ["B", "C", "E"], "B": ["A", "D", "E"], diff --git a/graphs/breadth_first_search_zero_one_shortest_path.py b/graphs/breadth_first_search_zero_one_shortest_path.py index 78047c5d2237..d3a255bac1ef 100644 --- a/graphs/breadth_first_search_zero_one_shortest_path.py +++ b/graphs/breadth_first_search_zero_one_shortest_path.py @@ -3,6 +3,7 @@ 0-1-graph is the weighted graph with the weights equal to 0 or 1. Link: https://codeforces.com/blog/entry/22276 """ + from __future__ import annotations from collections import deque diff --git a/graphs/deep_clone_graph.py b/graphs/deep_clone_graph.py index 55678b4c01ec..18ea99c6a52d 100644 --- a/graphs/deep_clone_graph.py +++ b/graphs/deep_clone_graph.py @@ -9,6 +9,7 @@ Each node in the graph contains a value (int) and a list (List[Node]) of its neighbors. """ + from dataclasses import dataclass diff --git a/graphs/depth_first_search.py b/graphs/depth_first_search.py index f20a503ca395..a666e74ce607 100644 --- a/graphs/depth_first_search.py +++ b/graphs/depth_first_search.py @@ -1,4 +1,5 @@ """Non recursive implementation of a DFS algorithm.""" + from __future__ import annotations diff --git a/graphs/depth_first_search_2.py b/graphs/depth_first_search_2.py index 5ff13af33168..8fe48b7f2b42 100644 --- a/graphs/depth_first_search_2.py +++ b/graphs/depth_first_search_2.py @@ -1,6 +1,6 @@ #!/usr/bin/python -""" Author: OMKAR PATHAK """ +"""Author: OMKAR PATHAK""" class Graph: diff --git a/graphs/dijkstra.py b/graphs/dijkstra.py index b0bdfab60649..87e9d2233bb2 100644 --- a/graphs/dijkstra.py +++ b/graphs/dijkstra.py @@ -30,6 +30,7 @@ distance between each vertex that makes up the path from start vertex to target vertex. 
""" + import heapq diff --git a/graphs/even_tree.py b/graphs/even_tree.py index 92ffb4b232f7..7d47899527a7 100644 --- a/graphs/even_tree.py +++ b/graphs/even_tree.py @@ -12,6 +12,7 @@ Note: The tree input will be such that it can always be decomposed into components containing an even number of nodes. """ + # pylint: disable=invalid-name from collections import defaultdict diff --git a/graphs/frequent_pattern_graph_miner.py b/graphs/frequent_pattern_graph_miner.py index 208e57f9b32f..f8da73f3438e 100644 --- a/graphs/frequent_pattern_graph_miner.py +++ b/graphs/frequent_pattern_graph_miner.py @@ -8,6 +8,7 @@ URL: https://www.researchgate.net/publication/235255851 """ + # fmt: off edge_array = [ ['ab-e1', 'ac-e3', 'ad-e5', 'bc-e4', 'bd-e2', 'be-e6', 'bh-e12', 'cd-e2', 'ce-e4', diff --git a/graphs/graph_adjacency_list.py b/graphs/graph_adjacency_list.py index d0b94f03e9b4..abc75311cd60 100644 --- a/graphs/graph_adjacency_list.py +++ b/graphs/graph_adjacency_list.py @@ -15,6 +15,7 @@ - Make edge weights and vertex values customizable to store whatever the client wants - Support multigraph functionality if the client wants it """ + from __future__ import annotations import random diff --git a/graphs/graph_adjacency_matrix.py b/graphs/graph_adjacency_matrix.py index cdef388d9098..059a6aa9ffb5 100644 --- a/graphs/graph_adjacency_matrix.py +++ b/graphs/graph_adjacency_matrix.py @@ -15,6 +15,7 @@ - Make edge weights and vertex values customizable to store whatever the client wants - Support multigraph functionality if the client wants it """ + from __future__ import annotations import random diff --git a/graphs/graphs_floyd_warshall.py b/graphs/graphs_floyd_warshall.py index 56cf8b9e382b..aaed9ac5df8b 100644 --- a/graphs/graphs_floyd_warshall.py +++ b/graphs/graphs_floyd_warshall.py @@ -1,7 +1,7 @@ # floyd_warshall.py """ - The problem is to find the shortest distance between all pairs of vertices in a - weighted directed graph that can have negative edge weights. 
+The problem is to find the shortest distance between all pairs of vertices in a +weighted directed graph that can have negative edge weights. """ diff --git a/graphs/minimum_spanning_tree_prims2.py b/graphs/minimum_spanning_tree_prims2.py index 81f30ef615fe..cc918f81dfe8 100644 --- a/graphs/minimum_spanning_tree_prims2.py +++ b/graphs/minimum_spanning_tree_prims2.py @@ -6,6 +6,7 @@ at a time, from an arbitrary starting vertex, at each step adding the cheapest possible connection from the tree to another vertex. """ + from __future__ import annotations from sys import maxsize diff --git a/graphs/page_rank.py b/graphs/page_rank.py index b9e4c4a72a93..c0ce3a94c76b 100644 --- a/graphs/page_rank.py +++ b/graphs/page_rank.py @@ -1,6 +1,7 @@ """ Author: https://github.com/bhushan-borole """ + """ The input graph for the algorithm is: diff --git a/graphs/prim.py b/graphs/prim.py index 6cb1a6def359..5b3ce04441ec 100644 --- a/graphs/prim.py +++ b/graphs/prim.py @@ -1,8 +1,8 @@ """Prim's Algorithm. - Determines the minimum spanning tree(MST) of a graph using the Prim's Algorithm. +Determines the minimum spanning tree(MST) of a graph using the Prim's Algorithm. - Details: https://en.wikipedia.org/wiki/Prim%27s_algorithm +Details: https://en.wikipedia.org/wiki/Prim%27s_algorithm """ import heapq as hq diff --git a/greedy_methods/gas_station.py b/greedy_methods/gas_station.py index 2427375d2664..6391ce379329 100644 --- a/greedy_methods/gas_station.py +++ b/greedy_methods/gas_station.py @@ -23,6 +23,7 @@ start checking from the next station. """ + from dataclasses import dataclass diff --git a/hashes/adler32.py b/hashes/adler32.py index 611ebc88b80f..38d76ab12aa0 100644 --- a/hashes/adler32.py +++ b/hashes/adler32.py @@ -1,11 +1,11 @@ """ - Adler-32 is a checksum algorithm which was invented by Mark Adler in 1995. - Compared to a cyclic redundancy check of the same length, it trades reliability for - speed (preferring the latter). 
- Adler-32 is more reliable than Fletcher-16, and slightly less reliable than - Fletcher-32.[2] +Adler-32 is a checksum algorithm which was invented by Mark Adler in 1995. +Compared to a cyclic redundancy check of the same length, it trades reliability for +speed (preferring the latter). +Adler-32 is more reliable than Fletcher-16, and slightly less reliable than +Fletcher-32.[2] - source: https://en.wikipedia.org/wiki/Adler-32 +source: https://en.wikipedia.org/wiki/Adler-32 """ MOD_ADLER = 65521 diff --git a/hashes/hamming_code.py b/hashes/hamming_code.py index b34fdd4c7a74..b3095852ac51 100644 --- a/hashes/hamming_code.py +++ b/hashes/hamming_code.py @@ -4,44 +4,44 @@ # Black: True """ - * This code implement the Hamming code: - https://en.wikipedia.org/wiki/Hamming_code - In telecommunication, - Hamming codes are a family of linear error-correcting codes. Hamming - codes can detect up to two-bit errors or correct one-bit errors - without detection of uncorrected errors. By contrast, the simple - parity code cannot correct errors, and can detect only an odd number - of bits in error. Hamming codes are perfect codes, that is, they - achieve the highest possible rate for codes with their block length - and minimum distance of three. - - * the implemented code consists of: - * a function responsible for encoding the message (emitterConverter) - * return the encoded message - * a function responsible for decoding the message (receptorConverter) - * return the decoded message and a ack of data integrity - - * how to use: - to be used you must declare how many parity bits (sizePari) - you want to include in the message. - it is desired (for test purposes) to select a bit to be set - as an error. This serves to check whether the code is working correctly. - Lastly, the variable of the message/word that must be desired to be - encoded (text). 
- - * how this work: - declaration of variables (sizePari, be, text) - - converts the message/word (text) to binary using the - text_to_bits function - encodes the message using the rules of hamming encoding - decodes the message using the rules of hamming encoding - print the original message, the encoded message and the - decoded message - - forces an error in the coded text variable - decodes the message that was forced the error - print the original message, the encoded message, the bit changed - message and the decoded message +* This code implement the Hamming code: + https://en.wikipedia.org/wiki/Hamming_code - In telecommunication, +Hamming codes are a family of linear error-correcting codes. Hamming +codes can detect up to two-bit errors or correct one-bit errors +without detection of uncorrected errors. By contrast, the simple +parity code cannot correct errors, and can detect only an odd number +of bits in error. Hamming codes are perfect codes, that is, they +achieve the highest possible rate for codes with their block length +and minimum distance of three. + +* the implemented code consists of: + * a function responsible for encoding the message (emitterConverter) + * return the encoded message + * a function responsible for decoding the message (receptorConverter) + * return the decoded message and a ack of data integrity + +* how to use: + to be used you must declare how many parity bits (sizePari) + you want to include in the message. + it is desired (for test purposes) to select a bit to be set + as an error. This serves to check whether the code is working correctly. + Lastly, the variable of the message/word that must be desired to be + encoded (text). 
+ +* how this work: + declaration of variables (sizePari, be, text) + + converts the message/word (text) to binary using the + text_to_bits function + encodes the message using the rules of hamming encoding + decodes the message using the rules of hamming encoding + print the original message, the encoded message and the + decoded message + + forces an error in the coded text variable + decodes the message that was forced the error + print the original message, the encoded message, the bit changed + message and the decoded message """ # Imports diff --git a/hashes/luhn.py b/hashes/luhn.py index bb77fd05c556..a29bf39e3d82 100644 --- a/hashes/luhn.py +++ b/hashes/luhn.py @@ -1,4 +1,5 @@ -""" Luhn Algorithm """ +"""Luhn Algorithm""" + from __future__ import annotations diff --git a/hashes/sdbm.py b/hashes/sdbm.py index a5432874ba7d..a5abc6f3185b 100644 --- a/hashes/sdbm.py +++ b/hashes/sdbm.py @@ -1,21 +1,21 @@ """ - This algorithm was created for sdbm (a public-domain reimplementation of ndbm) - database library. - It was found to do well in scrambling bits, causing better distribution of the keys - and fewer splits. - It also happens to be a good general hashing function with good distribution. - The actual function (pseudo code) is: - for i in i..len(str): - hash(i) = hash(i - 1) * 65599 + str[i]; +This algorithm was created for sdbm (a public-domain reimplementation of ndbm) +database library. +It was found to do well in scrambling bits, causing better distribution of the keys +and fewer splits. +It also happens to be a good general hashing function with good distribution. +The actual function (pseudo code) is: + for i in i..len(str): + hash(i) = hash(i - 1) * 65599 + str[i]; - What is included below is the faster version used in gawk. [there is even a faster, - duff-device version] - The magic constant 65599 was picked out of thin air while experimenting with - different constants. - It turns out to be a prime. 
- This is one of the algorithms used in berkeley db (see sleepycat) and elsewhere. +What is included below is the faster version used in gawk. [there is even a faster, +duff-device version] +The magic constant 65599 was picked out of thin air while experimenting with +different constants. +It turns out to be a prime. +This is one of the algorithms used in berkeley db (see sleepycat) and elsewhere. - source: http://www.cse.yorku.ca/~oz/hash.html +source: http://www.cse.yorku.ca/~oz/hash.html """ diff --git a/hashes/sha1.py b/hashes/sha1.py index a0fa688f863e..75a1423e9b5f 100644 --- a/hashes/sha1.py +++ b/hashes/sha1.py @@ -25,6 +25,7 @@ Reference: https://deadhacker.com/2006/02/21/sha-1-illustrated/ """ + import argparse import hashlib # hashlib is only used inside the Test class import struct diff --git a/knapsack/knapsack.py b/knapsack/knapsack.py index 18a36c3bcdda..bb507be1ba3c 100644 --- a/knapsack/knapsack.py +++ b/knapsack/knapsack.py @@ -1,6 +1,7 @@ -""" A naive recursive implementation of 0-1 Knapsack Problem - https://en.wikipedia.org/wiki/Knapsack_problem +"""A naive recursive implementation of 0-1 Knapsack Problem +https://en.wikipedia.org/wiki/Knapsack_problem """ + from __future__ import annotations diff --git a/knapsack/tests/test_knapsack.py b/knapsack/tests/test_knapsack.py index 6932bbb3536b..7bfb8780627b 100644 --- a/knapsack/tests/test_knapsack.py +++ b/knapsack/tests/test_knapsack.py @@ -6,6 +6,7 @@ This file contains the test-suite for the knapsack problem. 
""" + import unittest from knapsack import knapsack as k diff --git a/linear_algebra/gaussian_elimination.py b/linear_algebra/gaussian_elimination.py index a1a35131b157..724773c0db98 100644 --- a/linear_algebra/gaussian_elimination.py +++ b/linear_algebra/gaussian_elimination.py @@ -3,7 +3,6 @@ Gaussian elimination - https://en.wikipedia.org/wiki/Gaussian_elimination """ - import numpy as np from numpy import float64 from numpy.typing import NDArray diff --git a/linear_algebra/jacobi_iteration_method.py b/linear_algebra/jacobi_iteration_method.py index 8c91a19ef1b0..2cc9c103018b 100644 --- a/linear_algebra/jacobi_iteration_method.py +++ b/linear_algebra/jacobi_iteration_method.py @@ -1,6 +1,7 @@ """ Jacobi Iteration Method - https://en.wikipedia.org/wiki/Jacobi_method """ + from __future__ import annotations import numpy as np diff --git a/linear_algebra/lu_decomposition.py b/linear_algebra/lu_decomposition.py index 094b20abfecc..1d364163d9a7 100644 --- a/linear_algebra/lu_decomposition.py +++ b/linear_algebra/lu_decomposition.py @@ -15,6 +15,7 @@ Reference: https://en.wikipedia.org/wiki/LU_decomposition """ + from __future__ import annotations import numpy as np diff --git a/linear_algebra/src/conjugate_gradient.py b/linear_algebra/src/conjugate_gradient.py index 4cf566ec9e36..4c0b58deb978 100644 --- a/linear_algebra/src/conjugate_gradient.py +++ b/linear_algebra/src/conjugate_gradient.py @@ -3,6 +3,7 @@ - https://en.wikipedia.org/wiki/Conjugate_gradient_method - https://en.wikipedia.org/wiki/Definite_symmetric_matrix """ + from typing import Any import numpy as np diff --git a/linear_algebra/src/lib.py b/linear_algebra/src/lib.py index 5074faf31d1d..5af6c62e3ad4 100644 --- a/linear_algebra/src/lib.py +++ b/linear_algebra/src/lib.py @@ -18,6 +18,7 @@ - function square_zero_matrix(N) - function random_matrix(W, H, a, b) """ + from __future__ import annotations import math @@ -96,12 +97,10 @@ def __sub__(self, other: Vector) -> Vector: raise Exception("must have the 
same size") @overload - def __mul__(self, other: float) -> Vector: - ... + def __mul__(self, other: float) -> Vector: ... @overload - def __mul__(self, other: Vector) -> float: - ... + def __mul__(self, other: Vector) -> float: ... def __mul__(self, other: float | Vector) -> float | Vector: """ @@ -309,12 +308,10 @@ def __sub__(self, other: Matrix) -> Matrix: raise Exception("matrices must have the same dimension!") @overload - def __mul__(self, other: float) -> Matrix: - ... + def __mul__(self, other: float) -> Matrix: ... @overload - def __mul__(self, other: Vector) -> Vector: - ... + def __mul__(self, other: Vector) -> Vector: ... def __mul__(self, other: float | Vector) -> Vector | Matrix: """ diff --git a/linear_algebra/src/rayleigh_quotient.py b/linear_algebra/src/rayleigh_quotient.py index 4773429cbf1b..46bf1671d2b1 100644 --- a/linear_algebra/src/rayleigh_quotient.py +++ b/linear_algebra/src/rayleigh_quotient.py @@ -1,6 +1,7 @@ """ https://en.wikipedia.org/wiki/Rayleigh_quotient """ + from typing import Any import numpy as np diff --git a/linear_algebra/src/test_linear_algebra.py b/linear_algebra/src/test_linear_algebra.py index 95ab408b3d86..fc5f90fd5cbe 100644 --- a/linear_algebra/src/test_linear_algebra.py +++ b/linear_algebra/src/test_linear_algebra.py @@ -6,6 +6,7 @@ This file contains the test-suite for the linear algebra library. 
""" + import unittest import pytest diff --git a/linear_algebra/src/transformations_2d.py b/linear_algebra/src/transformations_2d.py index cdf42100d5d9..b4185cd2848f 100644 --- a/linear_algebra/src/transformations_2d.py +++ b/linear_algebra/src/transformations_2d.py @@ -11,6 +11,7 @@ reflection(45) = [[0.05064397763545947, 0.893996663600558], [0.893996663600558, 0.7018070490682369]] """ + from math import cos, sin diff --git a/linear_programming/simplex.py b/linear_programming/simplex.py index bbc97d8e22bf..dc171bacd3a2 100644 --- a/linear_programming/simplex.py +++ b/linear_programming/simplex.py @@ -12,6 +12,7 @@ https://en.wikipedia.org/wiki/Simplex_algorithm https://tinyurl.com/simplex4beginners """ + from typing import Any import numpy as np diff --git a/machine_learning/apriori_algorithm.py b/machine_learning/apriori_algorithm.py index d9fd1f82ea3c..09a89ac236bd 100644 --- a/machine_learning/apriori_algorithm.py +++ b/machine_learning/apriori_algorithm.py @@ -10,6 +10,7 @@ WIKI: https://en.wikipedia.org/wiki/Apriori_algorithm Examples: https://www.kaggle.com/code/earthian/apriori-association-rules-mining """ + from itertools import combinations diff --git a/machine_learning/astar.py b/machine_learning/astar.py index ff5208266343..a5859e51fe70 100644 --- a/machine_learning/astar.py +++ b/machine_learning/astar.py @@ -12,6 +12,7 @@ https://en.wikipedia.org/wiki/A*_search_algorithm """ + import numpy as np diff --git a/machine_learning/automatic_differentiation.py b/machine_learning/automatic_differentiation.py index cd2e5cdaa782..5c2708247c21 100644 --- a/machine_learning/automatic_differentiation.py +++ b/machine_learning/automatic_differentiation.py @@ -6,6 +6,7 @@ Author: Poojan Smart Email: smrtpoojan@gmail.com """ + from __future__ import annotations from collections import defaultdict diff --git a/machine_learning/data_transformations.py b/machine_learning/data_transformations.py index ecfd3b9e27c2..a1c28d514fd5 100644 --- 
a/machine_learning/data_transformations.py +++ b/machine_learning/data_transformations.py @@ -25,6 +25,7 @@ 2. non-gaussian (non-normal) distributions work better with normalization 3. If a column or list of values has extreme values / outliers, use standardization """ + from statistics import mean, stdev diff --git a/machine_learning/decision_tree.py b/machine_learning/decision_tree.py index c67e09c7f114..7f129919a3ce 100644 --- a/machine_learning/decision_tree.py +++ b/machine_learning/decision_tree.py @@ -3,6 +3,7 @@ Input data set: The input data set must be 1-dimensional with continuous labels. Output: The decision tree maps a real number input to a real number output. """ + import numpy as np diff --git a/machine_learning/frequent_pattern_growth.py b/machine_learning/frequent_pattern_growth.py index 205d598464a1..6b9870f5e1d2 100644 --- a/machine_learning/frequent_pattern_growth.py +++ b/machine_learning/frequent_pattern_growth.py @@ -9,6 +9,7 @@ Examples: https://www.javatpoint.com/fp-growth-algorithm-in-data-mining """ + from __future__ import annotations from dataclasses import dataclass, field diff --git a/machine_learning/gradient_descent.py b/machine_learning/gradient_descent.py index 9ffc02bbc284..db38b3c95b52 100644 --- a/machine_learning/gradient_descent.py +++ b/machine_learning/gradient_descent.py @@ -2,6 +2,7 @@ Implementation of gradient descent algorithm for minimizing cost of a linear hypothesis function. """ + import numpy # List of input, output pairs diff --git a/machine_learning/k_means_clust.py b/machine_learning/k_means_clust.py index 4a219edc3bb1..9f6646944458 100644 --- a/machine_learning/k_means_clust.py +++ b/machine_learning/k_means_clust.py @@ -40,6 +40,7 @@ 5. Transfers Dataframe into excel format it must have feature called 'Clust' with k means clustering numbers in it. 
""" + import warnings import numpy as np diff --git a/machine_learning/linear_discriminant_analysis.py b/machine_learning/linear_discriminant_analysis.py index 88c047157893..606e11f3698e 100644 --- a/machine_learning/linear_discriminant_analysis.py +++ b/machine_learning/linear_discriminant_analysis.py @@ -1,47 +1,48 @@ """ - Linear Discriminant Analysis +Linear Discriminant Analysis - Assumptions About Data : - 1. The input variables has a gaussian distribution. - 2. The variance calculated for each input variables by class grouping is the - same. - 3. The mix of classes in your training set is representative of the problem. +Assumptions About Data : + 1. The input variables has a gaussian distribution. + 2. The variance calculated for each input variables by class grouping is the + same. + 3. The mix of classes in your training set is representative of the problem. - Learning The Model : - The LDA model requires the estimation of statistics from the training data : - 1. Mean of each input value for each class. - 2. Probability of an instance belong to each class. - 3. Covariance for the input data for each class +Learning The Model : + The LDA model requires the estimation of statistics from the training data : + 1. Mean of each input value for each class. + 2. Probability of an instance belong to each class. + 3. Covariance for the input data for each class - Calculate the class means : - mean(x) = 1/n ( for i = 1 to i = n --> sum(xi)) + Calculate the class means : + mean(x) = 1/n ( for i = 1 to i = n --> sum(xi)) - Calculate the class probabilities : - P(y = 0) = count(y = 0) / (count(y = 0) + count(y = 1)) - P(y = 1) = count(y = 1) / (count(y = 0) + count(y = 1)) + Calculate the class probabilities : + P(y = 0) = count(y = 0) / (count(y = 0) + count(y = 1)) + P(y = 1) = count(y = 1) / (count(y = 0) + count(y = 1)) - Calculate the variance : - We can calculate the variance for dataset in two steps : - 1. 
Calculate the squared difference for each input variable from the - group mean. - 2. Calculate the mean of the squared difference. - ------------------------------------------------ - Squared_Difference = (x - mean(k)) ** 2 - Variance = (1 / (count(x) - count(classes))) * - (for i = 1 to i = n --> sum(Squared_Difference(xi))) + Calculate the variance : + We can calculate the variance for dataset in two steps : + 1. Calculate the squared difference for each input variable from the + group mean. + 2. Calculate the mean of the squared difference. + ------------------------------------------------ + Squared_Difference = (x - mean(k)) ** 2 + Variance = (1 / (count(x) - count(classes))) * + (for i = 1 to i = n --> sum(Squared_Difference(xi))) - Making Predictions : - discriminant(x) = x * (mean / variance) - - ((mean ** 2) / (2 * variance)) + Ln(probability) - --------------------------------------------------------------------------- - After calculating the discriminant value for each class, the class with the - largest discriminant value is taken as the prediction. +Making Predictions : + discriminant(x) = x * (mean / variance) - + ((mean ** 2) / (2 * variance)) + Ln(probability) + --------------------------------------------------------------------------- + After calculating the discriminant value for each class, the class with the + largest discriminant value is taken as the prediction. - Author: @EverLookNeverSee +Author: @EverLookNeverSee """ + from collections.abc import Callable from math import log from os import name, system diff --git a/machine_learning/linear_regression.py b/machine_learning/linear_regression.py index 0847112ad538..39bee5712c16 100644 --- a/machine_learning/linear_regression.py +++ b/machine_learning/linear_regression.py @@ -7,6 +7,7 @@ fit our dataset. In this particular code, I had used a CSGO dataset (ADR vs Rating). We try to best fit a line through dataset and estimate the parameters. 
""" + import numpy as np import requests diff --git a/machine_learning/logistic_regression.py b/machine_learning/logistic_regression.py index 59a70fd65cf9..090af5382185 100644 --- a/machine_learning/logistic_regression.py +++ b/machine_learning/logistic_regression.py @@ -14,6 +14,7 @@ Coursera ML course https://medium.com/@martinpella/logistic-regression-from-scratch-in-python-124c5636b8ac """ + import numpy as np from matplotlib import pyplot as plt from sklearn import datasets diff --git a/machine_learning/lstm/lstm_prediction.py b/machine_learning/lstm/lstm_prediction.py index ecbd451266ad..f0fd12c9de7f 100644 --- a/machine_learning/lstm/lstm_prediction.py +++ b/machine_learning/lstm/lstm_prediction.py @@ -1,9 +1,10 @@ """ - Create a Long Short Term Memory (LSTM) network model - An LSTM is a type of Recurrent Neural Network (RNN) as discussed at: - * https://colah.github.io/posts/2015-08-Understanding-LSTMs - * https://en.wikipedia.org/wiki/Long_short-term_memory +Create a Long Short Term Memory (LSTM) network model +An LSTM is a type of Recurrent Neural Network (RNN) as discussed at: +* https://colah.github.io/posts/2015-08-Understanding-LSTMs +* https://en.wikipedia.org/wiki/Long_short-term_memory """ + import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler diff --git a/machine_learning/mfcc.py b/machine_learning/mfcc.py index 7ce8ceb50ff2..a1e99ce4ad40 100644 --- a/machine_learning/mfcc.py +++ b/machine_learning/mfcc.py @@ -57,7 +57,6 @@ Author: Amir Lavasani """ - import logging import numpy as np diff --git a/machine_learning/self_organizing_map.py b/machine_learning/self_organizing_map.py index 32fdf1d2b41d..fb9d0074e791 100644 --- a/machine_learning/self_organizing_map.py +++ b/machine_learning/self_organizing_map.py @@ -1,6 +1,7 @@ """ https://en.wikipedia.org/wiki/Self-organizing_map """ + import math diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py 
index 9ee8c52fb2e9..be16baca1a4c 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -30,7 +30,6 @@ https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/tr-98-14.pdf """ - import os import sys import urllib.request diff --git a/machine_learning/similarity_search.py b/machine_learning/similarity_search.py index 7a23ec463c8f..0bc3b17d7e5a 100644 --- a/machine_learning/similarity_search.py +++ b/machine_learning/similarity_search.py @@ -7,6 +7,7 @@ 1. the nearest vector 2. distance between the vector and the nearest vector (float) """ + from __future__ import annotations import math diff --git a/maths/allocation_number.py b/maths/allocation_number.py index d419e74d01ff..52f1ac4bdb23 100644 --- a/maths/allocation_number.py +++ b/maths/allocation_number.py @@ -5,6 +5,7 @@ for i in allocation_list: requests.get(url,headers={'Range':f'bytes={i}'}) """ + from __future__ import annotations diff --git a/maths/area.py b/maths/area.py index ea7216c8fe3f..31a654206977 100644 --- a/maths/area.py +++ b/maths/area.py @@ -2,6 +2,7 @@ Find the area of various geometric shapes Wikipedia reference: https://en.wikipedia.org/wiki/Area """ + from math import pi, sqrt, tan diff --git a/maths/area_under_curve.py b/maths/area_under_curve.py index 0da6546b2e36..10aec768fa09 100644 --- a/maths/area_under_curve.py +++ b/maths/area_under_curve.py @@ -1,6 +1,7 @@ """ Approximates the area under the curve using the trapezoidal rule """ + from __future__ import annotations from collections.abc import Callable diff --git a/maths/basic_maths.py b/maths/basic_maths.py index c9e3d00fa23b..833f31c18b9e 100644 --- a/maths/basic_maths.py +++ b/maths/basic_maths.py @@ -1,4 +1,5 @@ """Implementation of Basic Math in Python.""" + import math diff --git a/maths/binomial_distribution.py b/maths/binomial_distribution.py index 5b56f2d59244..eabcaea0d1b2 100644 --- a/maths/binomial_distribution.py +++ 
b/maths/binomial_distribution.py @@ -1,5 +1,6 @@ """For more information about the Binomial Distribution - - https://en.wikipedia.org/wiki/Binomial_distribution""" +https://en.wikipedia.org/wiki/Binomial_distribution""" + from math import factorial diff --git a/maths/chinese_remainder_theorem.py b/maths/chinese_remainder_theorem.py index d3e75e77922a..18af63d106e8 100644 --- a/maths/chinese_remainder_theorem.py +++ b/maths/chinese_remainder_theorem.py @@ -11,6 +11,7 @@ 1. Use extended euclid algorithm to find x,y such that a*x + b*y = 1 2. Take n = ra*by + rb*ax """ + from __future__ import annotations diff --git a/maths/continued_fraction.py b/maths/continued_fraction.py index 04ff0b6ff0d2..2c38bf88b1e9 100644 --- a/maths/continued_fraction.py +++ b/maths/continued_fraction.py @@ -4,7 +4,6 @@ https://en.wikipedia.org/wiki/Continued_fraction """ - from fractions import Fraction from math import floor diff --git a/maths/entropy.py b/maths/entropy.py index 23753d884484..76fac4ee717d 100644 --- a/maths/entropy.py +++ b/maths/entropy.py @@ -4,6 +4,7 @@ Implementation of entropy of information https://en.wikipedia.org/wiki/Entropy_(information_theory) """ + from __future__ import annotations import math diff --git a/maths/gamma.py b/maths/gamma.py index 822bbc74456f..e328cd8b22b7 100644 --- a/maths/gamma.py +++ b/maths/gamma.py @@ -8,6 +8,7 @@ the non-positive integers Python's Standard Library math.gamma() function overflows around gamma(171.624). 
""" + import math from numpy import inf diff --git a/maths/gaussian.py b/maths/gaussian.py index 51ebc2e25849..0e02010a9c67 100644 --- a/maths/gaussian.py +++ b/maths/gaussian.py @@ -1,6 +1,7 @@ """ Reference: https://en.wikipedia.org/wiki/Gaussian_function """ + from numpy import exp, pi, sqrt diff --git a/maths/interquartile_range.py b/maths/interquartile_range.py index d4d72e73ef49..e91a651647d4 100644 --- a/maths/interquartile_range.py +++ b/maths/interquartile_range.py @@ -7,6 +7,7 @@ Script inspired by this Wikipedia article: https://en.wikipedia.org/wiki/Interquartile_range """ + from __future__ import annotations diff --git a/maths/is_square_free.py b/maths/is_square_free.py index 08c70dc32c38..a336c37e8dbc 100644 --- a/maths/is_square_free.py +++ b/maths/is_square_free.py @@ -3,6 +3,7 @@ psf/black : True ruff : True """ + from __future__ import annotations diff --git a/maths/karatsuba.py b/maths/karatsuba.py index 3d29e31d2107..0e063fb44b83 100644 --- a/maths/karatsuba.py +++ b/maths/karatsuba.py @@ -1,4 +1,4 @@ -""" Multiply two numbers using Karatsuba algorithm """ +"""Multiply two numbers using Karatsuba algorithm""" def karatsuba(a: int, b: int) -> int: diff --git a/maths/lucas_lehmer_primality_test.py b/maths/lucas_lehmer_primality_test.py index 0a5621aacd79..292387414dee 100644 --- a/maths/lucas_lehmer_primality_test.py +++ b/maths/lucas_lehmer_primality_test.py @@ -1,13 +1,13 @@ """ - In mathematics, the Lucas–Lehmer test (LLT) is a primality test for Mersenne - numbers. https://en.wikipedia.org/wiki/Lucas%E2%80%93Lehmer_primality_test +In mathematics, the Lucas–Lehmer test (LLT) is a primality test for Mersenne +numbers. https://en.wikipedia.org/wiki/Lucas%E2%80%93Lehmer_primality_test - A Mersenne number is a number that is one less than a power of two. - That is M_p = 2^p - 1 - https://en.wikipedia.org/wiki/Mersenne_prime +A Mersenne number is a number that is one less than a power of two. 
+That is M_p = 2^p - 1 +https://en.wikipedia.org/wiki/Mersenne_prime - The Lucas–Lehmer test is the primality test used by the - Great Internet Mersenne Prime Search (GIMPS) to locate large primes. +The Lucas–Lehmer test is the primality test used by the +Great Internet Mersenne Prime Search (GIMPS) to locate large primes. """ diff --git a/maths/maclaurin_series.py b/maths/maclaurin_series.py index d5c3c3ab958b..6ec5551a5e6e 100644 --- a/maths/maclaurin_series.py +++ b/maths/maclaurin_series.py @@ -1,6 +1,7 @@ """ https://en.wikipedia.org/wiki/Taylor_series#Trigonometric_functions """ + from math import factorial, pi diff --git a/maths/max_sum_sliding_window.py b/maths/max_sum_sliding_window.py index c6f9b4ed0ad7..090117429604 100644 --- a/maths/max_sum_sliding_window.py +++ b/maths/max_sum_sliding_window.py @@ -6,6 +6,7 @@ called 'Window sliding technique' where the nested loops can be converted to a single loop to reduce time complexity. """ + from __future__ import annotations diff --git a/maths/modular_exponential.py b/maths/modular_exponential.py index 42987dbf3a24..a27e29ebc02a 100644 --- a/maths/modular_exponential.py +++ b/maths/modular_exponential.py @@ -1,8 +1,8 @@ """ - Modular Exponential. - Modular exponentiation is a type of exponentiation performed over a modulus. - For more explanation, please check - https://en.wikipedia.org/wiki/Modular_exponentiation +Modular Exponential. +Modular exponentiation is a type of exponentiation performed over a modulus. 
+For more explanation, please check +https://en.wikipedia.org/wiki/Modular_exponentiation """ """Calculate Modular Exponential.""" diff --git a/maths/monte_carlo.py b/maths/monte_carlo.py index 474f1f65deb4..d174a0b188a2 100644 --- a/maths/monte_carlo.py +++ b/maths/monte_carlo.py @@ -1,6 +1,7 @@ """ @author: MatteoRaso """ + from collections.abc import Callable from math import pi, sqrt from random import uniform diff --git a/maths/numerical_analysis/adams_bashforth.py b/maths/numerical_analysis/adams_bashforth.py index d61f022a413d..fb406171098a 100644 --- a/maths/numerical_analysis/adams_bashforth.py +++ b/maths/numerical_analysis/adams_bashforth.py @@ -4,6 +4,7 @@ https://en.wikipedia.org/wiki/Linear_multistep_method Author : Ravi Kumar """ + from collections.abc import Callable from dataclasses import dataclass diff --git a/maths/numerical_analysis/nevilles_method.py b/maths/numerical_analysis/nevilles_method.py index 1f48b43fbd22..256b61f5f218 100644 --- a/maths/numerical_analysis/nevilles_method.py +++ b/maths/numerical_analysis/nevilles_method.py @@ -1,11 +1,11 @@ """ - Python program to show how to interpolate and evaluate a polynomial - using Neville's method. - Neville’s method evaluates a polynomial that passes through a - given set of x and y points for a particular x value (x0) using the - Newton polynomial form. - Reference: - https://rpubs.com/aaronsc32/nevilles-method-polynomial-interpolation +Python program to show how to interpolate and evaluate a polynomial +using Neville's method. +Neville’s method evaluates a polynomial that passes through a +given set of x and y points for a particular x value (x0) using the +Newton polynomial form. 
+Reference: + https://rpubs.com/aaronsc32/nevilles-method-polynomial-interpolation """ diff --git a/maths/numerical_analysis/newton_raphson.py b/maths/numerical_analysis/newton_raphson.py index feee38f905dd..10fb244bf426 100644 --- a/maths/numerical_analysis/newton_raphson.py +++ b/maths/numerical_analysis/newton_raphson.py @@ -9,6 +9,7 @@ Reference: https://en.wikipedia.org/wiki/Newton%27s_method """ + from collections.abc import Callable RealFunc = Callable[[float], float] diff --git a/maths/numerical_analysis/numerical_integration.py b/maths/numerical_analysis/numerical_integration.py index 4ac562644a07..f64436ec48c1 100644 --- a/maths/numerical_analysis/numerical_integration.py +++ b/maths/numerical_analysis/numerical_integration.py @@ -1,6 +1,7 @@ """ Approximates the area under the curve using the trapezoidal rule """ + from __future__ import annotations from collections.abc import Callable diff --git a/maths/numerical_analysis/runge_kutta_gills.py b/maths/numerical_analysis/runge_kutta_gills.py index 2bd9cd6129b8..451cde4cb935 100644 --- a/maths/numerical_analysis/runge_kutta_gills.py +++ b/maths/numerical_analysis/runge_kutta_gills.py @@ -4,6 +4,7 @@ https://www.geeksforgeeks.org/gills-4th-order-method-to-solve-differential-equations/ Author : Ravi Kumar """ + from collections.abc import Callable from math import sqrt diff --git a/maths/numerical_analysis/secant_method.py b/maths/numerical_analysis/secant_method.py index d39cb0ff30ef..9fff8222cdde 100644 --- a/maths/numerical_analysis/secant_method.py +++ b/maths/numerical_analysis/secant_method.py @@ -2,6 +2,7 @@ Implementing Secant method in Python Author: dimgrichr """ + from math import exp diff --git a/maths/prime_factors.py b/maths/prime_factors.py index e520ae3a6d04..47abcf10e618 100644 --- a/maths/prime_factors.py +++ b/maths/prime_factors.py @@ -1,6 +1,7 @@ """ python/black : True """ + from __future__ import annotations diff --git a/maths/series/geometric_series.py 
b/maths/series/geometric_series.py index b8d6a86206be..55c42fd90e99 100644 --- a/maths/series/geometric_series.py +++ b/maths/series/geometric_series.py @@ -9,7 +9,6 @@ python3 geometric_series.py """ - from __future__ import annotations diff --git a/maths/series/p_series.py b/maths/series/p_series.py index a091a6f3fecf..93812f443857 100644 --- a/maths/series/p_series.py +++ b/maths/series/p_series.py @@ -9,7 +9,6 @@ python3 p_series.py """ - from __future__ import annotations diff --git a/maths/sieve_of_eratosthenes.py b/maths/sieve_of_eratosthenes.py index a0520aa5cf50..3923dc3e1612 100644 --- a/maths/sieve_of_eratosthenes.py +++ b/maths/sieve_of_eratosthenes.py @@ -10,6 +10,7 @@ doctest provider: Bruno Simas Hadlich (https://github.com/brunohadlich) Also thanks to Dmitry (https://github.com/LizardWizzard) for finding the problem """ + from __future__ import annotations import math diff --git a/maths/solovay_strassen_primality_test.py b/maths/solovay_strassen_primality_test.py index 1d11d458369a..b2d905b07bed 100644 --- a/maths/solovay_strassen_primality_test.py +++ b/maths/solovay_strassen_primality_test.py @@ -9,7 +9,6 @@ https://en.wikipedia.org/wiki/Solovay%E2%80%93Strassen_primality_test """ - import random diff --git a/maths/special_numbers/armstrong_numbers.py b/maths/special_numbers/armstrong_numbers.py index b037aacb16c3..b2b4010a8f5b 100644 --- a/maths/special_numbers/armstrong_numbers.py +++ b/maths/special_numbers/armstrong_numbers.py @@ -8,6 +8,7 @@ On-Line Encyclopedia of Integer Sequences entry: https://oeis.org/A005188 """ + PASSING = (1, 153, 370, 371, 1634, 24678051, 115132219018763992565095597973971522401) FAILING: tuple = (-153, -1, 0, 1.2, 200, "A", [], {}, None) diff --git a/maths/special_numbers/weird_number.py b/maths/special_numbers/weird_number.py index 2834a9fee31e..5c9240d0ea4e 100644 --- a/maths/special_numbers/weird_number.py +++ b/maths/special_numbers/weird_number.py @@ -3,6 +3,7 @@ Fun fact: The set of weird numbers has positive 
asymptotic density. """ + from math import sqrt diff --git a/maths/tanh.py b/maths/tanh.py index 38a369d9118d..011d6f17e22b 100644 --- a/maths/tanh.py +++ b/maths/tanh.py @@ -9,6 +9,7 @@ Script inspired from its corresponding Wikipedia article https://en.wikipedia.org/wiki/Activation_function """ + import numpy as np diff --git a/maths/triplet_sum.py b/maths/triplet_sum.py index af77ed145bce..e74f67daad47 100644 --- a/maths/triplet_sum.py +++ b/maths/triplet_sum.py @@ -3,6 +3,7 @@ we are required to find a triplet from the array such that it's sum is equal to the target. """ + from __future__ import annotations from itertools import permutations diff --git a/maths/two_pointer.py b/maths/two_pointer.py index d0fb0fc9c2f1..8a6d8eb7aff0 100644 --- a/maths/two_pointer.py +++ b/maths/two_pointer.py @@ -17,6 +17,7 @@ [1]: https://github.com/TheAlgorithms/Python/blob/master/other/two_sum.py """ + from __future__ import annotations diff --git a/maths/two_sum.py b/maths/two_sum.py index 12ad332d6c4e..58c933a5078a 100644 --- a/maths/two_sum.py +++ b/maths/two_sum.py @@ -11,6 +11,7 @@ Because nums[0] + nums[1] = 2 + 7 = 9, return [0, 1]. """ + from __future__ import annotations diff --git a/maths/volume.py b/maths/volume.py index b4df4e475783..33be9bdd131a 100644 --- a/maths/volume.py +++ b/maths/volume.py @@ -3,6 +3,7 @@ * https://en.wikipedia.org/wiki/Volume * https://en.wikipedia.org/wiki/Spherical_cap """ + from __future__ import annotations from math import pi, pow diff --git a/matrix/matrix_multiplication_recursion.py b/matrix/matrix_multiplication_recursion.py index 287142480ce7..57c4d80de017 100644 --- a/matrix/matrix_multiplication_recursion.py +++ b/matrix/matrix_multiplication_recursion.py @@ -7,6 +7,7 @@ Perform matrix multiplication using a recursive algorithm. 
https://en.wikipedia.org/wiki/Matrix_multiplication """ + # type Matrix = list[list[int]] # psf/black currenttly fails on this line Matrix = list[list[int]] diff --git a/networking_flow/ford_fulkerson.py b/networking_flow/ford_fulkerson.py index 7d5fb522e012..b47d3b68f3d1 100644 --- a/networking_flow/ford_fulkerson.py +++ b/networking_flow/ford_fulkerson.py @@ -6,6 +6,7 @@ (1) Start with initial flow as 0 (2) Choose the augmenting path from source to sink and add the path to flow """ + graph = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], diff --git a/neural_network/activation_functions/binary_step.py b/neural_network/activation_functions/binary_step.py index 8f8f4d405fd2..d3d774602182 100644 --- a/neural_network/activation_functions/binary_step.py +++ b/neural_network/activation_functions/binary_step.py @@ -8,7 +8,6 @@ https://en.wikipedia.org/wiki/Activation_function """ - import numpy as np diff --git a/neural_network/activation_functions/rectified_linear_unit.py b/neural_network/activation_functions/rectified_linear_unit.py index 458c6bd5c391..2d5cf96fd387 100644 --- a/neural_network/activation_functions/rectified_linear_unit.py +++ b/neural_network/activation_functions/rectified_linear_unit.py @@ -9,6 +9,7 @@ Script inspired from its corresponding Wikipedia article https://en.wikipedia.org/wiki/Rectifier_(neural_networks) """ + from __future__ import annotations import numpy as np diff --git a/neural_network/activation_functions/soboleva_modified_hyperbolic_tangent.py b/neural_network/activation_functions/soboleva_modified_hyperbolic_tangent.py index 603ac0b7e120..a053e690ba44 100644 --- a/neural_network/activation_functions/soboleva_modified_hyperbolic_tangent.py +++ b/neural_network/activation_functions/soboleva_modified_hyperbolic_tangent.py @@ -8,7 +8,6 @@ https://en.wikipedia.org/wiki/Soboleva_modified_hyperbolic_tangent """ - import numpy as np diff --git a/neural_network/back_propagation_neural_network.py 
b/neural_network/back_propagation_neural_network.py index bdd096b3f653..7e0bdbbe2857 100644 --- a/neural_network/back_propagation_neural_network.py +++ b/neural_network/back_propagation_neural_network.py @@ -17,6 +17,7 @@ Date: 2017.11.23 """ + import numpy as np from matplotlib import pyplot as plt diff --git a/neural_network/convolution_neural_network.py b/neural_network/convolution_neural_network.py index e9726a0cb4a7..07cc456b7466 100644 --- a/neural_network/convolution_neural_network.py +++ b/neural_network/convolution_neural_network.py @@ -1,18 +1,19 @@ """ - - - - - - -- - - - - - - - - - - - - - - - - - - - - - - - Name - - CNN - Convolution Neural Network For Photo Recognizing - Goal - - Recognize Handing Writing Word Photo - Detail: Total 5 layers neural network - * Convolution layer - * Pooling layer - * Input layer layer of BP - * Hidden layer of BP - * Output layer of BP - Author: Stephen Lee - Github: 245885195@qq.com - Date: 2017.9.20 - - - - - - -- - - - - - - - - - - - - - - - - - - - - - - + - - - - - -- - - - - - - - - - - - - - - - - - - - - - - +Name - - CNN - Convolution Neural Network For Photo Recognizing +Goal - - Recognize Handing Writing Word Photo +Detail: Total 5 layers neural network + * Convolution layer + * Pooling layer + * Input layer layer of BP + * Hidden layer of BP + * Output layer of BP +Author: Stephen Lee +Github: 245885195@qq.com +Date: 2017.9.20 +- - - - - -- - - - - - - - - - - - - - - - - - - - - - - """ + import pickle import numpy as np diff --git a/neural_network/input_data.py b/neural_network/input_data.py index 2128449c03e9..f7ae86b48e65 100644 --- a/neural_network/input_data.py +++ b/neural_network/input_data.py @@ -17,7 +17,6 @@ This module and all its submodules are deprecated. 
""" - import gzip import os import typing diff --git a/other/davis_putnam_logemann_loveland.py b/other/davis_putnam_logemann_loveland.py index f5fb103ba528..436577eb5b5d 100644 --- a/other/davis_putnam_logemann_loveland.py +++ b/other/davis_putnam_logemann_loveland.py @@ -8,6 +8,7 @@ For more information about the algorithm: https://en.wikipedia.org/wiki/DPLL_algorithm """ + from __future__ import annotations import random diff --git a/other/fischer_yates_shuffle.py b/other/fischer_yates_shuffle.py index fa2f4dce9db0..37e11479a4c9 100644 --- a/other/fischer_yates_shuffle.py +++ b/other/fischer_yates_shuffle.py @@ -5,6 +5,7 @@ For more details visit wikipedia/Fischer-Yates-Shuffle. """ + import random from typing import Any diff --git a/other/gauss_easter.py b/other/gauss_easter.py index 4447d4ab86af..d1c525593f79 100644 --- a/other/gauss_easter.py +++ b/other/gauss_easter.py @@ -1,6 +1,7 @@ """ https://en.wikipedia.org/wiki/Computus#Gauss'_Easter_algorithm """ + import math from datetime import datetime, timedelta diff --git a/other/majority_vote_algorithm.py b/other/majority_vote_algorithm.py index ab8b386dd2e5..8d3b56707d06 100644 --- a/other/majority_vote_algorithm.py +++ b/other/majority_vote_algorithm.py @@ -4,6 +4,7 @@ We have to solve in O(n) time and O(1) Space. 
URL : https://en.wikipedia.org/wiki/Boyer%E2%80%93Moore_majority_vote_algorithm """ + from collections import Counter diff --git a/other/quine.py b/other/quine.py index 500a351d38dc..08e885bc1ce7 100644 --- a/other/quine.py +++ b/other/quine.py @@ -8,4 +8,5 @@ More info on: https://en.wikipedia.org/wiki/Quine_(computing) """ + print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))")) diff --git a/other/word_search.py b/other/word_search.py index a4796e220c7c..9e8acadbd9a4 100644 --- a/other/word_search.py +++ b/other/word_search.py @@ -5,7 +5,6 @@ @ https://en.wikipedia.org/wiki/Word_search """ - from random import choice, randint, shuffle # The words to display on the word search - diff --git a/physics/archimedes_principle_of_buoyant_force.py b/physics/archimedes_principle_of_buoyant_force.py index 5f569837220f..71043e0e1111 100644 --- a/physics/archimedes_principle_of_buoyant_force.py +++ b/physics/archimedes_principle_of_buoyant_force.py @@ -8,7 +8,6 @@ https://en.wikipedia.org/wiki/Archimedes%27_principle """ - # Acceleration Constant on Earth (unit m/s^2) g = 9.80665 # Also available in scipy.constants.g diff --git a/physics/center_of_mass.py b/physics/center_of_mass.py index bd9ba2480584..59c3b807f401 100644 --- a/physics/center_of_mass.py +++ b/physics/center_of_mass.py @@ -24,6 +24,7 @@ Reference: https://en.wikipedia.org/wiki/Center_of_mass """ + from collections import namedtuple Particle = namedtuple("Particle", "x y z mass") # noqa: PYI024 diff --git a/physics/in_static_equilibrium.py b/physics/in_static_equilibrium.py index d56299f60858..e3c2f9d07aed 100644 --- a/physics/in_static_equilibrium.py +++ b/physics/in_static_equilibrium.py @@ -1,6 +1,7 @@ """ Checks if a system of forces is in static equilibrium. 
""" + from __future__ import annotations from numpy import array, cos, cross, float64, radians, sin diff --git a/physics/n_body_simulation.py b/physics/n_body_simulation.py index ec008784ba62..4d555716199a 100644 --- a/physics/n_body_simulation.py +++ b/physics/n_body_simulation.py @@ -11,7 +11,6 @@ (See also http://www.shodor.org/refdesk/Resources/Algorithms/EulersMethod/ ) """ - from __future__ import annotations import random diff --git a/physics/rms_speed_of_molecule.py b/physics/rms_speed_of_molecule.py index 478cee01c7fd..fb23eb8a21cf 100644 --- a/physics/rms_speed_of_molecule.py +++ b/physics/rms_speed_of_molecule.py @@ -20,7 +20,6 @@ alternative method. """ - UNIVERSAL_GAS_CONSTANT = 8.3144598 diff --git a/project_euler/problem_002/sol4.py b/project_euler/problem_002/sol4.py index 70b7d6a80a1d..3a2e4fce341c 100644 --- a/project_euler/problem_002/sol4.py +++ b/project_euler/problem_002/sol4.py @@ -14,6 +14,7 @@ References: - https://en.wikipedia.org/wiki/Fibonacci_number """ + import math from decimal import Decimal, getcontext diff --git a/project_euler/problem_003/sol1.py b/project_euler/problem_003/sol1.py index a7d01bb041ba..d1c0e61cf1a6 100644 --- a/project_euler/problem_003/sol1.py +++ b/project_euler/problem_003/sol1.py @@ -10,6 +10,7 @@ References: - https://en.wikipedia.org/wiki/Prime_number#Unique_factorization """ + import math diff --git a/project_euler/problem_006/sol3.py b/project_euler/problem_006/sol3.py index 529f233c9f8e..16445258c2b7 100644 --- a/project_euler/problem_006/sol3.py +++ b/project_euler/problem_006/sol3.py @@ -15,6 +15,7 @@ Find the difference between the sum of the squares of the first one hundred natural numbers and the square of the sum. 
""" + import math diff --git a/project_euler/problem_007/sol2.py b/project_euler/problem_007/sol2.py index 75d351889ea8..fd99453c1100 100644 --- a/project_euler/problem_007/sol2.py +++ b/project_euler/problem_007/sol2.py @@ -11,6 +11,7 @@ References: - https://en.wikipedia.org/wiki/Prime_number """ + import math diff --git a/project_euler/problem_007/sol3.py b/project_euler/problem_007/sol3.py index 774260db99a0..39db51a93427 100644 --- a/project_euler/problem_007/sol3.py +++ b/project_euler/problem_007/sol3.py @@ -11,6 +11,7 @@ References: - https://en.wikipedia.org/wiki/Prime_number """ + import itertools import math diff --git a/project_euler/problem_008/sol2.py b/project_euler/problem_008/sol2.py index 889c3a3143c2..f83cb1db30b6 100644 --- a/project_euler/problem_008/sol2.py +++ b/project_euler/problem_008/sol2.py @@ -30,6 +30,7 @@ Find the thirteen adjacent digits in the 1000-digit number that have the greatest product. What is the value of this product? """ + from functools import reduce N = ( diff --git a/project_euler/problem_008/sol3.py b/project_euler/problem_008/sol3.py index c6081aa05e2c..bf3bcb05b7e9 100644 --- a/project_euler/problem_008/sol3.py +++ b/project_euler/problem_008/sol3.py @@ -30,6 +30,7 @@ Find the thirteen adjacent digits in the 1000-digit number that have the greatest product. What is the value of this product? 
""" + import sys N = ( diff --git a/project_euler/problem_010/sol2.py b/project_euler/problem_010/sol2.py index 245cca1d1720..1a1fc0f33cb3 100644 --- a/project_euler/problem_010/sol2.py +++ b/project_euler/problem_010/sol2.py @@ -10,6 +10,7 @@ References: - https://en.wikipedia.org/wiki/Prime_number """ + import math from collections.abc import Iterator from itertools import takewhile diff --git a/project_euler/problem_013/sol1.py b/project_euler/problem_013/sol1.py index 7a414a9379e0..87d0e0a60e9b 100644 --- a/project_euler/problem_013/sol1.py +++ b/project_euler/problem_013/sol1.py @@ -5,6 +5,7 @@ Work out the first ten digits of the sum of the following one-hundred 50-digit numbers. """ + import os diff --git a/project_euler/problem_014/sol2.py b/project_euler/problem_014/sol2.py index 2448e652ce5b..797b0f9886fe 100644 --- a/project_euler/problem_014/sol2.py +++ b/project_euler/problem_014/sol2.py @@ -25,6 +25,7 @@ Which starting number, under one million, produces the longest chain? """ + from __future__ import annotations COLLATZ_SEQUENCE_LENGTHS = {1: 1} diff --git a/project_euler/problem_015/sol1.py b/project_euler/problem_015/sol1.py index fb2020d6179f..fd9014a406f6 100644 --- a/project_euler/problem_015/sol1.py +++ b/project_euler/problem_015/sol1.py @@ -5,6 +5,7 @@ the right and down, there are exactly 6 routes to the bottom right corner. How many such routes are there through a 20×20 grid? 
""" + from math import factorial diff --git a/project_euler/problem_018/solution.py b/project_euler/problem_018/solution.py index 70306148bb9e..cbe8743be15f 100644 --- a/project_euler/problem_018/solution.py +++ b/project_euler/problem_018/solution.py @@ -27,6 +27,7 @@ 63 66 04 68 89 53 67 30 73 16 69 87 40 31 04 62 98 27 23 09 70 98 73 93 38 53 60 04 23 """ + import os diff --git a/project_euler/problem_020/sol2.py b/project_euler/problem_020/sol2.py index 676e96e7836a..a1d56ade7708 100644 --- a/project_euler/problem_020/sol2.py +++ b/project_euler/problem_020/sol2.py @@ -8,6 +8,7 @@ Find the sum of the digits in the number 100! """ + from math import factorial diff --git a/project_euler/problem_020/sol3.py b/project_euler/problem_020/sol3.py index 4f28ac5fcfde..1886e05463f4 100644 --- a/project_euler/problem_020/sol3.py +++ b/project_euler/problem_020/sol3.py @@ -8,6 +8,7 @@ Find the sum of the digits in the number 100! """ + from math import factorial diff --git a/project_euler/problem_021/sol1.py b/project_euler/problem_021/sol1.py index 353510ae8f94..f6dbfa8864db 100644 --- a/project_euler/problem_021/sol1.py +++ b/project_euler/problem_021/sol1.py @@ -13,6 +13,7 @@ Evaluate the sum of all the amicable numbers under 10000. """ + from math import sqrt diff --git a/project_euler/problem_022/sol1.py b/project_euler/problem_022/sol1.py index 982906245e87..b6386186e7df 100644 --- a/project_euler/problem_022/sol1.py +++ b/project_euler/problem_022/sol1.py @@ -14,6 +14,7 @@ What is the total of all the name scores in the file? """ + import os diff --git a/project_euler/problem_022/sol2.py b/project_euler/problem_022/sol2.py index 5ae41c84686e..f7092ea1cd12 100644 --- a/project_euler/problem_022/sol2.py +++ b/project_euler/problem_022/sol2.py @@ -14,6 +14,7 @@ What is the total of all the name scores in the file? 
""" + import os diff --git a/project_euler/problem_024/sol1.py b/project_euler/problem_024/sol1.py index 1c6378b38260..3fb1bd4ec582 100644 --- a/project_euler/problem_024/sol1.py +++ b/project_euler/problem_024/sol1.py @@ -9,6 +9,7 @@ What is the millionth lexicographic permutation of the digits 0, 1, 2, 3, 4, 5, 6, 7, 8 and 9? """ + from itertools import permutations diff --git a/project_euler/problem_025/sol2.py b/project_euler/problem_025/sol2.py index 6f49e89fb465..9e950b355f7a 100644 --- a/project_euler/problem_025/sol2.py +++ b/project_euler/problem_025/sol2.py @@ -23,6 +23,7 @@ What is the index of the first term in the Fibonacci sequence to contain 1000 digits? """ + from collections.abc import Generator diff --git a/project_euler/problem_030/sol1.py b/project_euler/problem_030/sol1.py index 2c6b4e4e85d5..7d83e314523f 100644 --- a/project_euler/problem_030/sol1.py +++ b/project_euler/problem_030/sol1.py @@ -1,4 +1,4 @@ -""" Problem Statement (Digit Fifth Powers): https://projecteuler.net/problem=30 +"""Problem Statement (Digit Fifth Powers): https://projecteuler.net/problem=30 Surprisingly there are only three numbers that can be written as the sum of fourth powers of their digits: @@ -21,7 +21,6 @@ and hence a number between 1000 and 1000000 """ - DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)} diff --git a/project_euler/problem_032/sol32.py b/project_euler/problem_032/sol32.py index c4d11e86c877..a402b5584061 100644 --- a/project_euler/problem_032/sol32.py +++ b/project_euler/problem_032/sol32.py @@ -12,6 +12,7 @@ HINT: Some products can be obtained in more than one way so be sure to only include it once in your sum. 
""" + import itertools diff --git a/project_euler/problem_033/sol1.py b/project_euler/problem_033/sol1.py index 32be424b6a7b..187fd61bde6c 100644 --- a/project_euler/problem_033/sol1.py +++ b/project_euler/problem_033/sol1.py @@ -14,6 +14,7 @@ If the product of these four fractions is given in its lowest common terms, find the value of the denominator. """ + from __future__ import annotations from fractions import Fraction diff --git a/project_euler/problem_035/sol1.py b/project_euler/problem_035/sol1.py index 644c992ed8a5..cf9f6821d798 100644 --- a/project_euler/problem_035/sol1.py +++ b/project_euler/problem_035/sol1.py @@ -15,6 +15,7 @@ we will rule out the numbers which contain an even digit. After this we will generate each circular combination of the number and check if all are prime. """ + from __future__ import annotations sieve = [True] * 1000001 diff --git a/project_euler/problem_036/sol1.py b/project_euler/problem_036/sol1.py index 1d27356ec51e..3865b2a39ea9 100644 --- a/project_euler/problem_036/sol1.py +++ b/project_euler/problem_036/sol1.py @@ -14,6 +14,7 @@ (Please note that the palindromic number, in either base, may not include leading zeros.) """ + from __future__ import annotations diff --git a/project_euler/problem_038/sol1.py b/project_euler/problem_038/sol1.py index e4a6d09f8f7d..5bef273ea2a9 100644 --- a/project_euler/problem_038/sol1.py +++ b/project_euler/problem_038/sol1.py @@ -37,6 +37,7 @@ => 100 <= a < 334, candidate = a * 10^6 + 2a * 10^3 + 3a = 1002003 * a """ + from __future__ import annotations diff --git a/project_euler/problem_041/sol1.py b/project_euler/problem_041/sol1.py index 2ef0120684c3..0c37f5469a6c 100644 --- a/project_euler/problem_041/sol1.py +++ b/project_euler/problem_041/sol1.py @@ -10,6 +10,7 @@ So we will check only 7 digit pandigital numbers to obtain the largest possible pandigital prime. 
""" + from __future__ import annotations import math diff --git a/project_euler/problem_042/solution42.py b/project_euler/problem_042/solution42.py index f8a54e40eaab..f678bcdef710 100644 --- a/project_euler/problem_042/solution42.py +++ b/project_euler/problem_042/solution42.py @@ -13,6 +13,7 @@ containing nearly two-thousand common English words, how many are triangle words? """ + import os # Precomputes a list of the 100 first triangular numbers diff --git a/project_euler/problem_043/sol1.py b/project_euler/problem_043/sol1.py index c533f40da9c9..f3a2c71edc4e 100644 --- a/project_euler/problem_043/sol1.py +++ b/project_euler/problem_043/sol1.py @@ -18,7 +18,6 @@ Find the sum of all 0 to 9 pandigital numbers with this property. """ - from itertools import permutations diff --git a/project_euler/problem_050/sol1.py b/project_euler/problem_050/sol1.py index fc6e6f2b9a5d..0a5f861f0ef0 100644 --- a/project_euler/problem_050/sol1.py +++ b/project_euler/problem_050/sol1.py @@ -15,6 +15,7 @@ Which prime, below one-million, can be written as the sum of the most consecutive primes? """ + from __future__ import annotations diff --git a/project_euler/problem_051/sol1.py b/project_euler/problem_051/sol1.py index 921704bc4455..dc740c8b947d 100644 --- a/project_euler/problem_051/sol1.py +++ b/project_euler/problem_051/sol1.py @@ -15,6 +15,7 @@ Find the smallest prime which, by replacing part of the number (not necessarily adjacent digits) with the same digit, is part of an eight prime value family. """ + from __future__ import annotations from collections import Counter diff --git a/project_euler/problem_053/sol1.py b/project_euler/problem_053/sol1.py index 0692bbe0ebb8..a32b73c545d6 100644 --- a/project_euler/problem_053/sol1.py +++ b/project_euler/problem_053/sol1.py @@ -16,6 +16,7 @@ How many, not necessarily distinct, values of nCr, for 1 ≤ n ≤ 100, are greater than one-million? 
""" + from math import factorial diff --git a/project_euler/problem_054/sol1.py b/project_euler/problem_054/sol1.py index 86dfa5edd2f5..66aa3a0826f5 100644 --- a/project_euler/problem_054/sol1.py +++ b/project_euler/problem_054/sol1.py @@ -40,6 +40,7 @@ https://www.codewars.com/kata/ranking-poker-hands https://www.codewars.com/kata/sortable-poker-hands """ + from __future__ import annotations import os diff --git a/project_euler/problem_058/sol1.py b/project_euler/problem_058/sol1.py index 6a991c58b6b8..1d2f406eafdb 100644 --- a/project_euler/problem_058/sol1.py +++ b/project_euler/problem_058/sol1.py @@ -33,6 +33,7 @@ count of current primes. """ + import math diff --git a/project_euler/problem_059/sol1.py b/project_euler/problem_059/sol1.py index b795dd243b08..65bfd3f0b0fb 100644 --- a/project_euler/problem_059/sol1.py +++ b/project_euler/problem_059/sol1.py @@ -25,6 +25,7 @@ must contain common English words, decrypt the message and find the sum of the ASCII values in the original text. """ + from __future__ import annotations import string diff --git a/project_euler/problem_067/sol1.py b/project_euler/problem_067/sol1.py index 2b41fedc6784..171ff8c268f6 100644 --- a/project_euler/problem_067/sol1.py +++ b/project_euler/problem_067/sol1.py @@ -11,6 +11,7 @@ 'Save Link/Target As...'), a 15K text file containing a triangle with one-hundred rows. """ + import os diff --git a/project_euler/problem_067/sol2.py b/project_euler/problem_067/sol2.py index 2e88a57170a8..4fb093d49956 100644 --- a/project_euler/problem_067/sol2.py +++ b/project_euler/problem_067/sol2.py @@ -11,6 +11,7 @@ 'Save Link/Target As...'), a 15K text file containing a triangle with one-hundred rows. 
""" + import os diff --git a/project_euler/problem_070/sol1.py b/project_euler/problem_070/sol1.py index f1114a280a31..9874b7418559 100644 --- a/project_euler/problem_070/sol1.py +++ b/project_euler/problem_070/sol1.py @@ -28,6 +28,7 @@ Finding totients https://en.wikipedia.org/wiki/Euler's_totient_function#Euler's_product_formula """ + from __future__ import annotations import numpy as np diff --git a/project_euler/problem_074/sol1.py b/project_euler/problem_074/sol1.py index a257d4d94fa8..91440b3fd02b 100644 --- a/project_euler/problem_074/sol1.py +++ b/project_euler/problem_074/sol1.py @@ -27,7 +27,6 @@ non-repeating terms? """ - DIGIT_FACTORIALS = { "0": 1, "1": 1, diff --git a/project_euler/problem_074/sol2.py b/project_euler/problem_074/sol2.py index b54bc023e387..52a996bfa51d 100644 --- a/project_euler/problem_074/sol2.py +++ b/project_euler/problem_074/sol2.py @@ -33,6 +33,7 @@ is greater then the desired one. After generating each chain, the length is checked and the counter increases. """ + from math import factorial DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)} diff --git a/project_euler/problem_077/sol1.py b/project_euler/problem_077/sol1.py index 6098ea9e50a6..e8f4e979a625 100644 --- a/project_euler/problem_077/sol1.py +++ b/project_euler/problem_077/sol1.py @@ -12,6 +12,7 @@ What is the first value which can be written as the sum of primes in over five thousand different ways? """ + from __future__ import annotations from functools import lru_cache diff --git a/project_euler/problem_079/sol1.py b/project_euler/problem_079/sol1.py index d34adcd243b0..74392e9bd094 100644 --- a/project_euler/problem_079/sol1.py +++ b/project_euler/problem_079/sol1.py @@ -13,6 +13,7 @@ Given that the three characters are always asked for in order, analyse the file so as to determine the shortest possible secret passcode of unknown length. 
""" + import itertools from pathlib import Path diff --git a/project_euler/problem_080/sol1.py b/project_euler/problem_080/sol1.py index 916998bdd8ad..8cfcbd41b588 100644 --- a/project_euler/problem_080/sol1.py +++ b/project_euler/problem_080/sol1.py @@ -6,6 +6,7 @@ square roots. Time: 5 October 2020, 18:30 """ + import decimal diff --git a/project_euler/problem_081/sol1.py b/project_euler/problem_081/sol1.py index aef6106b54df..293027bddd0e 100644 --- a/project_euler/problem_081/sol1.py +++ b/project_euler/problem_081/sol1.py @@ -13,6 +13,7 @@ and down in matrix.txt (https://projecteuler.net/project/resources/p081_matrix.txt), a 31K text file containing an 80 by 80 matrix. """ + import os diff --git a/project_euler/problem_085/sol1.py b/project_euler/problem_085/sol1.py index d0f29796498c..d0b361ee750d 100644 --- a/project_euler/problem_085/sol1.py +++ b/project_euler/problem_085/sol1.py @@ -44,6 +44,7 @@ Reference: https://en.wikipedia.org/wiki/Triangular_number https://en.wikipedia.org/wiki/Quadratic_formula """ + from __future__ import annotations from math import ceil, floor, sqrt diff --git a/project_euler/problem_086/sol1.py b/project_euler/problem_086/sol1.py index 064af215c049..cbd2b648e0ac 100644 --- a/project_euler/problem_086/sol1.py +++ b/project_euler/problem_086/sol1.py @@ -66,7 +66,6 @@ """ - from math import sqrt diff --git a/project_euler/problem_091/sol1.py b/project_euler/problem_091/sol1.py index 6c9aa3fa6c70..7db98fca0049 100644 --- a/project_euler/problem_091/sol1.py +++ b/project_euler/problem_091/sol1.py @@ -11,7 +11,6 @@ Given that 0 ≤ x1, y1, x2, y2 ≤ 50, how many right triangles can be formed? """ - from itertools import combinations, product diff --git a/project_euler/problem_101/sol1.py b/project_euler/problem_101/sol1.py index d5c503af796a..2d209333cf31 100644 --- a/project_euler/problem_101/sol1.py +++ b/project_euler/problem_101/sol1.py @@ -41,6 +41,7 @@ Find the sum of FITs for the BOPs. 
""" + from __future__ import annotations from collections.abc import Callable diff --git a/project_euler/problem_102/sol1.py b/project_euler/problem_102/sol1.py index 4f6e6361e3e8..85fe5eac1e22 100644 --- a/project_euler/problem_102/sol1.py +++ b/project_euler/problem_102/sol1.py @@ -18,6 +18,7 @@ NOTE: The first two examples in the file represent the triangles in the example given above. """ + from __future__ import annotations from pathlib import Path diff --git a/project_euler/problem_107/sol1.py b/project_euler/problem_107/sol1.py index 4659eac24bd3..3fe75909e2ea 100644 --- a/project_euler/problem_107/sol1.py +++ b/project_euler/problem_107/sol1.py @@ -27,6 +27,7 @@ We use Prim's algorithm to find a Minimum Spanning Tree. Reference: https://en.wikipedia.org/wiki/Prim%27s_algorithm """ + from __future__ import annotations import os diff --git a/project_euler/problem_123/sol1.py b/project_euler/problem_123/sol1.py index f74cdd999401..7239e13a51e9 100644 --- a/project_euler/problem_123/sol1.py +++ b/project_euler/problem_123/sol1.py @@ -37,6 +37,7 @@ r = 2pn when n is odd r = 2 when n is even. """ + from __future__ import annotations from collections.abc import Generator diff --git a/project_euler/problem_144/sol1.py b/project_euler/problem_144/sol1.py index b5f103b64ff5..bc16bf985f41 100644 --- a/project_euler/problem_144/sol1.py +++ b/project_euler/problem_144/sol1.py @@ -29,7 +29,6 @@ How many times does the beam hit the internal surface of the white cell before exiting? """ - from math import isclose, sqrt diff --git a/project_euler/problem_145/sol1.py b/project_euler/problem_145/sol1.py index 71b851178fdb..ce4438289722 100644 --- a/project_euler/problem_145/sol1.py +++ b/project_euler/problem_145/sol1.py @@ -13,6 +13,7 @@ How many reversible numbers are there below one-billion (10^9)? 
""" + EVEN_DIGITS = [0, 2, 4, 6, 8] ODD_DIGITS = [1, 3, 5, 7, 9] diff --git a/project_euler/problem_173/sol1.py b/project_euler/problem_173/sol1.py index 5416e25462cc..9235d00e1752 100644 --- a/project_euler/problem_173/sol1.py +++ b/project_euler/problem_173/sol1.py @@ -11,7 +11,6 @@ Using up to one million tiles how many different square laminae can be formed? """ - from math import ceil, sqrt diff --git a/project_euler/problem_180/sol1.py b/project_euler/problem_180/sol1.py index 12e34dcaa76b..72baed42b99e 100644 --- a/project_euler/problem_180/sol1.py +++ b/project_euler/problem_180/sol1.py @@ -44,6 +44,7 @@ Reference: https://en.wikipedia.org/wiki/Fermat%27s_Last_Theorem """ + from __future__ import annotations from fractions import Fraction diff --git a/project_euler/problem_191/sol1.py b/project_euler/problem_191/sol1.py index 6bff9d54eeca..efb2a5d086ad 100644 --- a/project_euler/problem_191/sol1.py +++ b/project_euler/problem_191/sol1.py @@ -25,7 +25,6 @@ https://projecteuler.net/problem=191 """ - cache: dict[tuple[int, int, int], int] = {} diff --git a/project_euler/problem_203/sol1.py b/project_euler/problem_203/sol1.py index da9436246a7c..8ad089ec09aa 100644 --- a/project_euler/problem_203/sol1.py +++ b/project_euler/problem_203/sol1.py @@ -27,6 +27,7 @@ References: - https://en.wikipedia.org/wiki/Pascal%27s_triangle """ + from __future__ import annotations diff --git a/project_euler/problem_551/sol1.py b/project_euler/problem_551/sol1.py index 2cd75efbb68d..100e9d41dd31 100644 --- a/project_euler/problem_551/sol1.py +++ b/project_euler/problem_551/sol1.py @@ -12,7 +12,6 @@ Find a(10^15) """ - ks = range(2, 20 + 1) base = [10**k for k in range(ks[-1] + 1)] memo: dict[int, dict[int, list[list[int]]]] = {} diff --git a/scheduling/highest_response_ratio_next.py b/scheduling/highest_response_ratio_next.py index 057bd64cc729..112c2a85220f 100644 --- a/scheduling/highest_response_ratio_next.py +++ b/scheduling/highest_response_ratio_next.py @@ -4,6 +4,7 @@ to 
mitigate the problem of process starvation. https://en.wikipedia.org/wiki/Highest_response_ratio_next """ + from statistics import mean import numpy as np diff --git a/scheduling/job_sequence_with_deadline.py b/scheduling/job_sequence_with_deadline.py index fccb49cd88e8..ee1fdbd0e55c 100644 --- a/scheduling/job_sequence_with_deadline.py +++ b/scheduling/job_sequence_with_deadline.py @@ -13,6 +13,7 @@ Time Complexity - O(n log n) https://medium.com/@nihardudhat2000/job-sequencing-with-deadline-17ddbb5890b5 """ + from dataclasses import dataclass from operator import attrgetter diff --git a/scheduling/non_preemptive_shortest_job_first.py b/scheduling/non_preemptive_shortest_job_first.py index 69c974b0044d..cb7ffd3abd9c 100644 --- a/scheduling/non_preemptive_shortest_job_first.py +++ b/scheduling/non_preemptive_shortest_job_first.py @@ -5,7 +5,6 @@ https://en.wikipedia.org/wiki/Shortest_job_next """ - from __future__ import annotations from statistics import mean diff --git a/scheduling/round_robin.py b/scheduling/round_robin.py index e8d54dd9a553..5f6c7f341baa 100644 --- a/scheduling/round_robin.py +++ b/scheduling/round_robin.py @@ -3,6 +3,7 @@ In Round Robin each process is assigned a fixed time slot in a cyclic way. https://en.wikipedia.org/wiki/Round-robin_scheduling """ + from __future__ import annotations from statistics import mean diff --git a/scheduling/shortest_job_first.py b/scheduling/shortest_job_first.py index 871de8207308..cfd0417ea62d 100644 --- a/scheduling/shortest_job_first.py +++ b/scheduling/shortest_job_first.py @@ -3,6 +3,7 @@ Please note arrival time and burst Please use spaces to separate times entered. 
""" + from __future__ import annotations import pandas as pd diff --git a/searches/binary_search.py b/searches/binary_search.py index 586be39c9a0d..2e66b672d5b4 100644 --- a/searches/binary_search.py +++ b/searches/binary_search.py @@ -9,6 +9,7 @@ For manual testing run: python3 binary_search.py """ + from __future__ import annotations import bisect diff --git a/searches/binary_tree_traversal.py b/searches/binary_tree_traversal.py index 6fb841af4294..4897ef17299c 100644 --- a/searches/binary_tree_traversal.py +++ b/searches/binary_tree_traversal.py @@ -1,6 +1,7 @@ """ This is pure Python implementation of tree traversal algorithms """ + from __future__ import annotations import queue diff --git a/searches/fibonacci_search.py b/searches/fibonacci_search.py index 55fc05d39eeb..ec3dfa7f30f6 100644 --- a/searches/fibonacci_search.py +++ b/searches/fibonacci_search.py @@ -10,6 +10,7 @@ For manual testing run: python3 fibonacci_search.py """ + from functools import lru_cache diff --git a/searches/jump_search.py b/searches/jump_search.py index 3bc3c37809a1..e72d85e8a868 100644 --- a/searches/jump_search.py +++ b/searches/jump_search.py @@ -14,8 +14,7 @@ class Comparable(Protocol): - def __lt__(self, other: Any, /) -> bool: - ... + def __lt__(self, other: Any, /) -> bool: ... 
T = TypeVar("T", bound=Comparable) diff --git a/searches/quick_select.py b/searches/quick_select.py index 5ede8c4dd07f..c8282e1fa5fc 100644 --- a/searches/quick_select.py +++ b/searches/quick_select.py @@ -4,6 +4,7 @@ sorted, even if it is not already sorted https://en.wikipedia.org/wiki/Quickselect """ + import random diff --git a/searches/simple_binary_search.py b/searches/simple_binary_search.py index ff043d7369af..00e83ff9e4a3 100644 --- a/searches/simple_binary_search.py +++ b/searches/simple_binary_search.py @@ -7,6 +7,7 @@ For manual testing run: python3 simple_binary_search.py """ + from __future__ import annotations diff --git a/searches/tabu_search.py b/searches/tabu_search.py index d998ddc55976..fd482a81224c 100644 --- a/searches/tabu_search.py +++ b/searches/tabu_search.py @@ -24,6 +24,7 @@ -s size_of_tabu_search e.g. python tabu_search.py -f tabudata2.txt -i 4 -s 3 """ + import argparse import copy diff --git a/searches/ternary_search.py b/searches/ternary_search.py index cb36e72faac6..8dcd6b5bde2e 100644 --- a/searches/ternary_search.py +++ b/searches/ternary_search.py @@ -6,6 +6,7 @@ Time Complexity : O(log3 N) Space Complexity : O(1) """ + from __future__ import annotations # This is the precision for this function which can be altered. diff --git a/sorts/bitonic_sort.py b/sorts/bitonic_sort.py index b65f877a45e3..600f8139603a 100644 --- a/sorts/bitonic_sort.py +++ b/sorts/bitonic_sort.py @@ -3,6 +3,7 @@ Note that this program works only when size of input is a power of 2. 
""" + from __future__ import annotations diff --git a/sorts/bucket_sort.py b/sorts/bucket_sort.py index c016e9e26e73..1c1320a58a7d 100644 --- a/sorts/bucket_sort.py +++ b/sorts/bucket_sort.py @@ -27,6 +27,7 @@ Source: https://en.wikipedia.org/wiki/Bucket_sort """ + from __future__ import annotations diff --git a/sorts/dutch_national_flag_sort.py b/sorts/dutch_national_flag_sort.py index 758e3a887b84..b4f1665cea00 100644 --- a/sorts/dutch_national_flag_sort.py +++ b/sorts/dutch_national_flag_sort.py @@ -23,7 +23,6 @@ python dnf_sort.py """ - # Python program to sort a sequence containing only 0, 1 and 2 in a single pass. red = 0 # The first color of the flag. white = 1 # The second color of the flag. diff --git a/sorts/insertion_sort.py b/sorts/insertion_sort.py index f11ddac349a0..46b263d84a33 100644 --- a/sorts/insertion_sort.py +++ b/sorts/insertion_sort.py @@ -18,8 +18,7 @@ class Comparable(Protocol): - def __lt__(self, other: Any, /) -> bool: - ... + def __lt__(self, other: Any, /) -> bool: ... T = TypeVar("T", bound=Comparable) diff --git a/sorts/intro_sort.py b/sorts/intro_sort.py index 5a5741dc8375..1184b381b05d 100644 --- a/sorts/intro_sort.py +++ b/sorts/intro_sort.py @@ -3,6 +3,7 @@ if the size of the list is under 16, use insertion sort https://en.wikipedia.org/wiki/Introsort """ + import math diff --git a/sorts/msd_radix_sort.py b/sorts/msd_radix_sort.py index 03f84c75b9d8..6aba4263663a 100644 --- a/sorts/msd_radix_sort.py +++ b/sorts/msd_radix_sort.py @@ -4,6 +4,7 @@ them. https://en.wikipedia.org/wiki/Radix_sort """ + from __future__ import annotations diff --git a/sorts/odd_even_transposition_parallel.py b/sorts/odd_even_transposition_parallel.py index b8ab46df1e59..9d2bcdbd7576 100644 --- a/sorts/odd_even_transposition_parallel.py +++ b/sorts/odd_even_transposition_parallel.py @@ -10,6 +10,7 @@ They are synchronized with locks and message passing but other forms of synchronization could be used. 
""" + from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time diff --git a/sorts/pigeon_sort.py b/sorts/pigeon_sort.py index 3e6d4c09c46f..fdfa692f4680 100644 --- a/sorts/pigeon_sort.py +++ b/sorts/pigeon_sort.py @@ -1,14 +1,15 @@ """ - This is an implementation of Pigeon Hole Sort. - For doctests run following command: +This is an implementation of Pigeon Hole Sort. +For doctests run following command: - python3 -m doctest -v pigeon_sort.py - or - python -m doctest -v pigeon_sort.py +python3 -m doctest -v pigeon_sort.py +or +python -m doctest -v pigeon_sort.py - For manual testing run: - python pigeon_sort.py +For manual testing run: +python pigeon_sort.py """ + from __future__ import annotations diff --git a/sorts/quick_sort.py b/sorts/quick_sort.py index 6b95fc144426..374d52e75c81 100644 --- a/sorts/quick_sort.py +++ b/sorts/quick_sort.py @@ -7,6 +7,7 @@ For manual testing run: python3 quick_sort.py """ + from __future__ import annotations from random import randrange diff --git a/sorts/radix_sort.py b/sorts/radix_sort.py index 832b6162f349..1dbf5fbd1365 100644 --- a/sorts/radix_sort.py +++ b/sorts/radix_sort.py @@ -3,6 +3,7 @@ Source: https://en.wikipedia.org/wiki/Radix_sort """ + from __future__ import annotations RADIX = 10 diff --git a/sorts/recursive_insertion_sort.py b/sorts/recursive_insertion_sort.py index 297dbe9457e6..93465350bee2 100644 --- a/sorts/recursive_insertion_sort.py +++ b/sorts/recursive_insertion_sort.py @@ -1,6 +1,7 @@ """ A recursive implementation of the insertion sort algorithm """ + from __future__ import annotations diff --git a/sorts/slowsort.py b/sorts/slowsort.py index a5f4e873ebb2..394e6eed50b1 100644 --- a/sorts/slowsort.py +++ b/sorts/slowsort.py @@ -8,6 +8,7 @@ Source: https://en.wikipedia.org/wiki/Slowsort """ + from __future__ import annotations diff --git a/sorts/tree_sort.py b/sorts/tree_sort.py index dc95856f44c8..056864957d4d 100644 --- 
a/sorts/tree_sort.py +++ b/sorts/tree_sort.py @@ -3,6 +3,7 @@ Build a Binary Search Tree and then iterate thru it to get a sorted list. """ + from __future__ import annotations from collections.abc import Iterator diff --git a/strings/boyer_moore_search.py b/strings/boyer_moore_search.py index 117305d32fd3..9615d2fd659b 100644 --- a/strings/boyer_moore_search.py +++ b/strings/boyer_moore_search.py @@ -17,6 +17,7 @@ n=length of main string m=length of pattern string """ + from __future__ import annotations diff --git a/strings/check_anagrams.py b/strings/check_anagrams.py index 9dcdffcfb921..d747368b2373 100644 --- a/strings/check_anagrams.py +++ b/strings/check_anagrams.py @@ -1,6 +1,7 @@ """ wiki: https://en.wikipedia.org/wiki/Anagram """ + from collections import defaultdict diff --git a/strings/top_k_frequent_words.py b/strings/top_k_frequent_words.py index f3d1e0cd5ca7..40fa7fc85cd1 100644 --- a/strings/top_k_frequent_words.py +++ b/strings/top_k_frequent_words.py @@ -13,7 +13,6 @@ def top_k_frequent_words(words, k_value): return [x[0] for x in Counter(words).most_common(k_value)] """ - from collections import Counter from functools import total_ordering diff --git a/web_programming/co2_emission.py b/web_programming/co2_emission.py index 97927e7ef541..88a426cb976d 100644 --- a/web_programming/co2_emission.py +++ b/web_programming/co2_emission.py @@ -1,6 +1,7 @@ """ Get CO2 emission data from the UK CarbonIntensity API """ + from datetime import date import requests diff --git a/web_programming/emails_from_url.py b/web_programming/emails_from_url.py index 074ef878c0d7..6b4bacfe7d5a 100644 --- a/web_programming/emails_from_url.py +++ b/web_programming/emails_from_url.py @@ -1,4 +1,5 @@ """Get the site emails from URL.""" + from __future__ import annotations __author__ = "Muhammad Umer Farooq" diff --git a/web_programming/fetch_github_info.py b/web_programming/fetch_github_info.py index aa4e1d7b1963..7a4985b68841 100644 --- a/web_programming/fetch_github_info.py 
+++ b/web_programming/fetch_github_info.py @@ -17,6 +17,7 @@ #!/usr/bin/env bash export USER_TOKEN="" """ + from __future__ import annotations import os diff --git a/web_programming/fetch_jobs.py b/web_programming/fetch_jobs.py index 5af90a0bb239..49abd3c88eec 100644 --- a/web_programming/fetch_jobs.py +++ b/web_programming/fetch_jobs.py @@ -1,6 +1,7 @@ """ Scraping jobs given job title and location from indeed website """ + from __future__ import annotations from collections.abc import Generator diff --git a/web_programming/get_amazon_product_data.py b/web_programming/get_amazon_product_data.py index a16175688667..c2f2ac5ab291 100644 --- a/web_programming/get_amazon_product_data.py +++ b/web_programming/get_amazon_product_data.py @@ -4,7 +4,6 @@ information will include title, URL, price, ratings, and the discount available. """ - from itertools import zip_longest import requests diff --git a/web_programming/recaptcha_verification.py b/web_programming/recaptcha_verification.py index 47c6c42f2ad0..b03afb28ec53 100644 --- a/web_programming/recaptcha_verification.py +++ b/web_programming/recaptcha_verification.py @@ -31,6 +31,7 @@ Below a Django function for the views.py file contains a login form for demonstrating recaptcha verification. 
""" + import requests try: diff --git a/web_programming/search_books_by_isbn.py b/web_programming/search_books_by_isbn.py index d5d4cfe92f20..07429e9a9678 100644 --- a/web_programming/search_books_by_isbn.py +++ b/web_programming/search_books_by_isbn.py @@ -3,6 +3,7 @@ ISBN: https://en.wikipedia.org/wiki/International_Standard_Book_Number """ + from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests From 435309a61aa70303133306c9fe06a3df118c9a5c Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 18 Mar 2024 20:46:32 +0100 Subject: [PATCH 034/260] [pre-commit.ci] pre-commit autoupdate (#11325) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.3.2 → v0.3.3](https://github.com/astral-sh/ruff-pre-commit/compare/v0.3.2...v0.3.3) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a17c4c323c30..c4b30f29a5b5 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.3.2 + rev: v0.3.3 hooks: - id: ruff - id: ruff-format From 8faf823e83a1b7a036e2f2569c0c185924c05307 Mon Sep 17 00:00:00 2001 From: Margaret <62753112+meg-1@users.noreply.github.com> Date: Wed, 20 Mar 2024 15:33:40 +0200 Subject: [PATCH 035/260] adding a proper fractions algorithm (#11224) * adding a proper fractions algorithm * Implementing suggestions in maths/numerical_analysis/proper_fractions.py Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Implementing suggestions to proper_fractions.py * Fixing ruff errors in 
proper_fractions.py * Apply suggestions from code review * ruff check --output-format=github . * Update maths/numerical_analysis/proper_fractions.py * Update proper_fractions.py --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/workflows/ruff.yml | 2 +- maths/numerical_analysis/proper_fractions.py | 40 ++++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) create mode 100644 maths/numerical_analysis/proper_fractions.py diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml index 9ebabed3600a..d354eba672ae 100644 --- a/.github/workflows/ruff.yml +++ b/.github/workflows/ruff.yml @@ -13,4 +13,4 @@ jobs: steps: - uses: actions/checkout@v4 - run: pip install --user ruff - - run: ruff --output-format=github . + - run: ruff check --output-format=github . diff --git a/maths/numerical_analysis/proper_fractions.py b/maths/numerical_analysis/proper_fractions.py new file mode 100644 index 000000000000..774ce9a24876 --- /dev/null +++ b/maths/numerical_analysis/proper_fractions.py @@ -0,0 +1,40 @@ +from math import gcd + + +def proper_fractions(denominator: int) -> list[str]: + """ + this algorithm returns a list of proper fractions, in the + range between 0 and 1, which can be formed with the given denominator + https://en.wikipedia.org/wiki/Fraction#Proper_and_improper_fractions + + >>> proper_fractions(10) + ['1/10', '3/10', '7/10', '9/10'] + >>> proper_fractions(5) + ['1/5', '2/5', '3/5', '4/5'] + >>> proper_fractions(-15) + Traceback (most recent call last): + ... + ValueError: The Denominator Cannot be less than 0 + >>> proper_fractions(0) + [] + >>> proper_fractions(1.2) + Traceback (most recent call last): + ... 
+ ValueError: The Denominator must be an integer + """ + + if denominator < 0: + raise ValueError("The Denominator Cannot be less than 0") + elif isinstance(denominator, float): + raise ValueError("The Denominator must be an integer") + return [ + f"{numerator}/{denominator}" + for numerator in range(1, denominator) + if gcd(numerator, denominator) == 1 + ] + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From a936e94704b09841784358a4ac002401f3faceed Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Wed, 20 Mar 2024 17:00:17 +0300 Subject: [PATCH 036/260] Enable ruff ARG001 rule (#11321) * Enable ruff ARG001 rule * Fix dynamic_programming/combination_sum_iv.py * Fix machine_learning/frequent_pattern_growth.py * Fix other/davis_putnam_logemann_loveland.py * Fix other/password.py * Fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix physics/n_body_simulation.py * Fix project_euler/problem_145/sol1.py * Fix project_euler/problem_174/sol1.py * Fix scheduling/highest_response_ratio_next.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix * Fix * Fix scheduling/job_sequencing_with_deadline.py * Fix scheduling/job_sequencing_with_deadline.py * Fix * Fix --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- dynamic_programming/combination_sum_iv.py | 11 +++++------ machine_learning/frequent_pattern_growth.py | 4 ++-- other/davis_putnam_logemann_loveland.py | 3 ++- other/password.py | 12 ------------ physics/n_body_simulation.py | 2 +- project_euler/problem_145/sol1.py | 2 +- project_euler/problem_174/sol1.py | 4 +++- pyproject.toml | 1 - scheduling/highest_response_ratio_next.py | 5 ++++- scheduling/job_sequencing_with_deadline.py | 7 +++---- web_programming/nasa_data.py | 2 +- 11 files changed, 22 insertions(+), 31 deletions(-) diff --git 
a/dynamic_programming/combination_sum_iv.py b/dynamic_programming/combination_sum_iv.py index b2aeb0824f64..4526729b70b7 100644 --- a/dynamic_programming/combination_sum_iv.py +++ b/dynamic_programming/combination_sum_iv.py @@ -22,12 +22,12 @@ """ -def combination_sum_iv(n: int, array: list[int], target: int) -> int: +def combination_sum_iv(array: list[int], target: int) -> int: """ Function checks the all possible combinations, and returns the count of possible combination in exponential Time Complexity. - >>> combination_sum_iv(3, [1,2,5], 5) + >>> combination_sum_iv([1,2,5], 5) 9 """ @@ -41,13 +41,13 @@ def count_of_possible_combinations(target: int) -> int: return count_of_possible_combinations(target) -def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int: +def combination_sum_iv_dp_array(array: list[int], target: int) -> int: """ Function checks the all possible combinations, and returns the count of possible combination in O(N^2) Time Complexity as we are using Dynamic programming array here. 
- >>> combination_sum_iv_dp_array(3, [1,2,5], 5) + >>> combination_sum_iv_dp_array([1,2,5], 5) 9 """ @@ -96,7 +96,6 @@ def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int: import doctest doctest.testmod() - n = 3 target = 5 array = [1, 2, 5] - print(combination_sum_iv(n, array, target)) + print(combination_sum_iv(array, target)) diff --git a/machine_learning/frequent_pattern_growth.py b/machine_learning/frequent_pattern_growth.py index 6b9870f5e1d2..947f8692f298 100644 --- a/machine_learning/frequent_pattern_growth.py +++ b/machine_learning/frequent_pattern_growth.py @@ -240,7 +240,7 @@ def ascend_tree(leaf_node: TreeNode, prefix_path: list[str]) -> None: ascend_tree(leaf_node.parent, prefix_path) -def find_prefix_path(base_pat: frozenset, tree_node: TreeNode | None) -> dict: +def find_prefix_path(base_pat: frozenset, tree_node: TreeNode | None) -> dict: # noqa: ARG001 """ Find the conditional pattern base for a given base pattern. @@ -277,7 +277,7 @@ def find_prefix_path(base_pat: frozenset, tree_node: TreeNode | None) -> dict: def mine_tree( - in_tree: TreeNode, + in_tree: TreeNode, # noqa: ARG001 header_table: dict, min_sup: int, pre_fix: set, diff --git a/other/davis_putnam_logemann_loveland.py b/other/davis_putnam_logemann_loveland.py index 436577eb5b5d..5c6e2d9ffd5e 100644 --- a/other/davis_putnam_logemann_loveland.py +++ b/other/davis_putnam_logemann_loveland.py @@ -227,7 +227,8 @@ def find_pure_symbols( def find_unit_clauses( - clauses: list[Clause], model: dict[str, bool | None] + clauses: list[Clause], + model: dict[str, bool | None], # noqa: ARG001 ) -> tuple[list[str], dict[str, bool | None]]: """ Returns the unit symbols and their values to satisfy clause. 
diff --git a/other/password.py b/other/password.py index 1ce0d52316e6..dff1316c049c 100644 --- a/other/password.py +++ b/other/password.py @@ -51,18 +51,6 @@ def random(chars_incl: str, i: int) -> str: return "".join(secrets.choice(chars_incl) for _ in range(i)) -def random_number(chars_incl, i): - pass # Put your code here... - - -def random_letters(chars_incl, i): - pass # Put your code here... - - -def random_characters(chars_incl, i): - pass # Put your code here... - - def is_strong_password(password: str, min_length: int = 8) -> bool: """ This will check whether a given password is strong or not. The password must be at diff --git a/physics/n_body_simulation.py b/physics/n_body_simulation.py index 4d555716199a..9bfb6b3c6864 100644 --- a/physics/n_body_simulation.py +++ b/physics/n_body_simulation.py @@ -239,7 +239,7 @@ def plot( ax.add_patch(patch) # Function called at each step of the animation - def update(frame: int) -> list[plt.Circle]: + def update(frame: int) -> list[plt.Circle]: # noqa: ARG001 update_step(body_system, DELTA_TIME, patches) return patches diff --git a/project_euler/problem_145/sol1.py b/project_euler/problem_145/sol1.py index ce4438289722..583bb03a0a90 100644 --- a/project_euler/problem_145/sol1.py +++ b/project_euler/problem_145/sol1.py @@ -110,7 +110,7 @@ def reversible_numbers( if (length - 1) % 4 == 0: return 0 - return slow_reversible_numbers(length, 0, [0] * length, length) + return slow_reversible_numbers(remaining_length, remainder, digits, length) def solution(max_power: int = 9) -> int: diff --git a/project_euler/problem_174/sol1.py b/project_euler/problem_174/sol1.py index cbc0df5a9d65..33c1b158adbb 100644 --- a/project_euler/problem_174/sol1.py +++ b/project_euler/problem_174/sol1.py @@ -26,6 +26,8 @@ def solution(t_limit: int = 1000000, n_limit: int = 10) -> int: Return the sum of N(n) for 1 <= n <= n_limit. 
>>> solution(1000,5) + 222 + >>> solution(1000,10) 249 >>> solution(10000,10) 2383 @@ -45,7 +47,7 @@ def solution(t_limit: int = 1000000, n_limit: int = 10) -> int: for hole_width in range(hole_width_lower_bound, outer_width - 1, 2): count[outer_width * outer_width - hole_width * hole_width] += 1 - return sum(1 for n in count.values() if 1 <= n <= 10) + return sum(1 for n in count.values() if 1 <= n <= n_limit) if __name__ == "__main__": diff --git a/pyproject.toml b/pyproject.toml index 2e7da519da8b..a69ab7aa6437 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,5 @@ [tool.ruff] lint.ignore = [ # `ruff rule S101` for a description of that rule - "ARG001", # Unused function argument `amount` -- FIX ME? "B904", # Within an `except` clause, raise exceptions with `raise ... from err` -- FIX ME "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME "DTZ001", # The use of `datetime.datetime()` without `tzinfo` argument is not allowed -- FIX ME diff --git a/scheduling/highest_response_ratio_next.py b/scheduling/highest_response_ratio_next.py index 112c2a85220f..b549835616bf 100644 --- a/scheduling/highest_response_ratio_next.py +++ b/scheduling/highest_response_ratio_next.py @@ -75,7 +75,10 @@ def calculate_turn_around_time( def calculate_waiting_time( - process_name: list, turn_around_time: list, burst_time: list, no_of_process: int + process_name: list, # noqa: ARG001 + turn_around_time: list, + burst_time: list, + no_of_process: int, ) -> list: """ Calculate the waiting time of each processes. 
diff --git a/scheduling/job_sequencing_with_deadline.py b/scheduling/job_sequencing_with_deadline.py index 7b23c0b3575f..13946948492f 100644 --- a/scheduling/job_sequencing_with_deadline.py +++ b/scheduling/job_sequencing_with_deadline.py @@ -1,9 +1,8 @@ -def job_sequencing_with_deadlines(num_jobs: int, jobs: list) -> list: +def job_sequencing_with_deadlines(jobs: list) -> list: """ Function to find the maximum profit by doing jobs in a given time frame Args: - num_jobs [int]: Number of jobs jobs [list]: A list of tuples of (job_id, deadline, profit) Returns: @@ -11,10 +10,10 @@ def job_sequencing_with_deadlines(num_jobs: int, jobs: list) -> list: in a given time frame Examples: - >>> job_sequencing_with_deadlines(4, + >>> job_sequencing_with_deadlines( ... [(1, 4, 20), (2, 1, 10), (3, 1, 40), (4, 1, 30)]) [2, 60] - >>> job_sequencing_with_deadlines(5, + >>> job_sequencing_with_deadlines( ... [(1, 2, 100), (2, 1, 19), (3, 2, 27), (4, 1, 25), (5, 1, 15)]) [2, 127] """ diff --git a/web_programming/nasa_data.py b/web_programming/nasa_data.py index c0a2c4fdd1a7..81125e0a4f05 100644 --- a/web_programming/nasa_data.py +++ b/web_programming/nasa_data.py @@ -3,7 +3,7 @@ import requests -def get_apod_data(api_key: str, download: bool = False, path: str = ".") -> dict: +def get_apod_data(api_key: str) -> dict: """ Get the APOD(Astronomical Picture of the day) data Get your API Key from: https://api.nasa.gov/ From 481c071e8423ed3b17ddff96b905da3d27d4f7b4 Mon Sep 17 00:00:00 2001 From: Mehdi Oudghiri <144174136+PAxitoo@users.noreply.github.com> Date: Wed, 20 Mar 2024 15:07:55 +0100 Subject: [PATCH 037/260] add vicsek to fractals (#11306) Co-authored-by: BastosLaG --- fractals/vicsek.py | 76 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 76 insertions(+) create mode 100644 fractals/vicsek.py diff --git a/fractals/vicsek.py b/fractals/vicsek.py new file mode 100644 index 000000000000..290fe95b79b4 --- /dev/null +++ b/fractals/vicsek.py @@ -0,0 +1,76 @@ 
+"""Authors Bastien Capiaux & Mehdi Oudghiri + +The Vicsek fractal algorithm is a recursive algorithm that creates a +pattern known as the Vicsek fractal or the Vicsek square. +It is based on the concept of self-similarity, where the pattern at each +level of recursion resembles the overall pattern. +The algorithm involves dividing a square into 9 equal smaller squares, +removing the center square, and then repeating this process on the remaining 8 squares. +This results in a pattern that exhibits self-similarity and has a +square-shaped outline with smaller squares within it. + +Source: https://en.wikipedia.org/wiki/Vicsek_fractal +""" + +import turtle + + +def draw_cross(x: float, y: float, length: float): + """ + Draw a cross at the specified position and with the specified length. + """ + turtle.up() + turtle.goto(x - length / 2, y - length / 6) + turtle.down() + turtle.seth(0) + turtle.begin_fill() + for _ in range(4): + turtle.fd(length / 3) + turtle.right(90) + turtle.fd(length / 3) + turtle.left(90) + turtle.fd(length / 3) + turtle.left(90) + turtle.end_fill() + + +def draw_fractal_recursive(x: float, y: float, length: float, depth: float): + """ + Recursively draw the Vicsek fractal at the specified position, with the + specified length and depth. + """ + if depth == 0: + draw_cross(x, y, length) + return + + draw_fractal_recursive(x, y, length / 3, depth - 1) + draw_fractal_recursive(x + length / 3, y, length / 3, depth - 1) + draw_fractal_recursive(x - length / 3, y, length / 3, depth - 1) + draw_fractal_recursive(x, y + length / 3, length / 3, depth - 1) + draw_fractal_recursive(x, y - length / 3, length / 3, depth - 1) + + +def set_color(rgb: str): + turtle.color(rgb) + + +def draw_vicsek_fractal(x: float, y: float, length: float, depth: float, color="blue"): + """ + Draw the Vicsek fractal at the specified position, with the specified + length and depth. 
+ """ + turtle.speed(0) + turtle.hideturtle() + set_color(color) + draw_fractal_recursive(x, y, length, depth) + turtle.Screen().update() + + +def main(): + draw_vicsek_fractal(0, 0, 800, 4) + + turtle.done() + + +if __name__ == "__main__": + main() From 102e9a31b673e5444678fd55640a0038b6a16a9d Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 25 Mar 2024 10:43:24 +0300 Subject: [PATCH 038/260] Enable ruff DTZ001 rule (#11326) * updating DIRECTORY.md * Enable ruff DTZ001 rule * Fix other/gauss_easter.py * Fix * Fix * Fix * Fix * Fix * Fix --------- Co-authored-by: MaximSmolskiy --- DIRECTORY.md | 2 ++ other/gauss_easter.py | 16 ++++++++-------- pyproject.toml | 1 - 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 2f828aa512a9..01667c9feee8 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -419,6 +419,7 @@ * [Koch Snowflake](fractals/koch_snowflake.py) * [Mandelbrot](fractals/mandelbrot.py) * [Sierpinski Triangle](fractals/sierpinski_triangle.py) + * [Vicsek](fractals/vicsek.py) ## Fuzzy Logic * [Fuzzy Operations](fuzzy_logic/fuzzy_operations.py) @@ -678,6 +679,7 @@ * [Newton Forward Interpolation](maths/numerical_analysis/newton_forward_interpolation.py) * [Newton Raphson](maths/numerical_analysis/newton_raphson.py) * [Numerical Integration](maths/numerical_analysis/numerical_integration.py) + * [Proper Fractions](maths/numerical_analysis/proper_fractions.py) * [Runge Kutta](maths/numerical_analysis/runge_kutta.py) * [Runge Kutta Fehlberg 45](maths/numerical_analysis/runge_kutta_fehlberg_45.py) * [Runge Kutta Gills](maths/numerical_analysis/runge_kutta_gills.py) diff --git a/other/gauss_easter.py b/other/gauss_easter.py index d1c525593f79..7ccea7f5bbf0 100644 --- a/other/gauss_easter.py +++ b/other/gauss_easter.py @@ -3,7 +3,7 @@ """ import math -from datetime import datetime, timedelta +from datetime import UTC, datetime, timedelta def gauss_easter(year: int) -> datetime: @@ -11,16 +11,16 @@ def 
gauss_easter(year: int) -> datetime: Calculation Gregorian easter date for given year >>> gauss_easter(2007) - datetime.datetime(2007, 4, 8, 0, 0) + datetime.datetime(2007, 4, 8, 0, 0, tzinfo=datetime.timezone.utc) >>> gauss_easter(2008) - datetime.datetime(2008, 3, 23, 0, 0) + datetime.datetime(2008, 3, 23, 0, 0, tzinfo=datetime.timezone.utc) >>> gauss_easter(2020) - datetime.datetime(2020, 4, 12, 0, 0) + datetime.datetime(2020, 4, 12, 0, 0, tzinfo=datetime.timezone.utc) >>> gauss_easter(2021) - datetime.datetime(2021, 4, 4, 0, 0) + datetime.datetime(2021, 4, 4, 0, 0, tzinfo=datetime.timezone.utc) """ metonic_cycle = year % 19 julian_leap_year = year % 4 @@ -45,11 +45,11 @@ def gauss_easter(year: int) -> datetime: ) % 7 if days_to_add == 29 and days_from_phm_to_sunday == 6: - return datetime(year, 4, 19) + return datetime(year, 4, 19, tzinfo=UTC) elif days_to_add == 28 and days_from_phm_to_sunday == 6: - return datetime(year, 4, 18) + return datetime(year, 4, 18, tzinfo=UTC) else: - return datetime(year, 3, 22) + timedelta( + return datetime(year, 3, 22, tzinfo=UTC) + timedelta( days=int(days_to_add + days_from_phm_to_sunday) ) diff --git a/pyproject.toml b/pyproject.toml index a69ab7aa6437..09093433a47a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "B904", # Within an `except` clause, raise exceptions with `raise ... 
from err` -- FIX ME "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME - "DTZ001", # The use of `datetime.datetime()` without `tzinfo` argument is not allowed -- FIX ME "DTZ005", # The use of `datetime.datetime.now()` without `tzinfo` argument is not allowed -- FIX ME "E741", # Ambiguous variable name 'l' -- FIX ME "EM101", # Exception must not use a string literal, assign to variable first From ead54314f26615769ce8b055b25e25f9dbbb1f83 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 25 Mar 2024 20:21:21 +0100 Subject: [PATCH 039/260] [pre-commit.ci] pre-commit autoupdate (#11328) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.3.3 → v0.3.4](https://github.com/astral-sh/ruff-pre-commit/compare/v0.3.3...v0.3.4) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c4b30f29a5b5..8b101207d5ff 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.3.3 + rev: v0.3.4 hooks: - id: ruff - id: ruff-format From b5cb1fba0debb5df7e5aea6bb069c6e3f130dba5 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 25 Mar 2024 23:54:11 +0300 Subject: [PATCH 040/260] Enable ruff DTZ005 rule (#11327) * Enable ruff DTZ005 rule * Fix other/gauss_easter.py * Fix * Fix web_programming/instagram_pic.py * Fix web_programming/instagram_video.py * Apply suggestions from code review * Update instagram_pic.py * datetime.now(tz=UTC).astimezone() * .astimezone() * Fix --------- Co-authored-by: Christian Clauss --- other/gauss_easter.py | 4 ++-- pyproject.toml | 1 - web_programming/instagram_pic.py | 4 ++-- 
web_programming/instagram_video.py | 4 ++-- 4 files changed, 6 insertions(+), 7 deletions(-) diff --git a/other/gauss_easter.py b/other/gauss_easter.py index 7ccea7f5bbf0..8c8c37c92796 100644 --- a/other/gauss_easter.py +++ b/other/gauss_easter.py @@ -55,6 +55,6 @@ def gauss_easter(year: int) -> datetime: if __name__ == "__main__": - for year in (1994, 2000, 2010, 2021, 2023): - tense = "will be" if year > datetime.now().year else "was" + for year in (1994, 2000, 2010, 2021, 2023, 2032, 2100): + tense = "will be" if year > datetime.now(tz=UTC).year else "was" print(f"Easter in {year} {tense} {gauss_easter(year)}") diff --git a/pyproject.toml b/pyproject.toml index 09093433a47a..5187491e5ee7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "B904", # Within an `except` clause, raise exceptions with `raise ... from err` -- FIX ME "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME - "DTZ005", # The use of `datetime.datetime.now()` without `tzinfo` argument is not allowed -- FIX ME "E741", # Ambiguous variable name 'l' -- FIX ME "EM101", # Exception must not use a string literal, assign to variable first "EXE001", # Shebang is present but file is not executable" -- FIX ME diff --git a/web_programming/instagram_pic.py b/web_programming/instagram_pic.py index 2630c8659232..2d987c1766dc 100644 --- a/web_programming/instagram_pic.py +++ b/web_programming/instagram_pic.py @@ -1,4 +1,4 @@ -from datetime import datetime +from datetime import UTC, datetime import requests from bs4 import BeautifulSoup @@ -36,7 +36,7 @@ def download_image(url: str) -> str: if not image_data: return f"Failed to download the image from {image_url}." 
- file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg" + file_name = f"{datetime.now(tz=UTC).astimezone():%Y-%m-%d_%H:%M:%S}.jpg" with open(file_name, "wb") as out_file: out_file.write(image_data) return f"Image downloaded and saved in the file {file_name}" diff --git a/web_programming/instagram_video.py b/web_programming/instagram_video.py index 243cece1a50e..1f1b0e297034 100644 --- a/web_programming/instagram_video.py +++ b/web_programming/instagram_video.py @@ -1,4 +1,4 @@ -from datetime import datetime +from datetime import UTC, datetime import requests @@ -11,7 +11,7 @@ def download_video(url: str) -> bytes: if __name__ == "__main__": url = input("Enter Video/IGTV url: ").strip() - file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4" + file_name = f"{datetime.now(tz=UTC).astimezone():%Y-%m-%d_%H:%M:%S}.mp4" with open(file_name, "wb") as fp: fp.write(download_video(url)) print(f"Done. Video saved to disk as {file_name}.") From 19fd435042a3191f6a5787a6eaf58e9c47920845 Mon Sep 17 00:00:00 2001 From: MrBubb1es <63935943+MrBubb1es@users.noreply.github.com> Date: Thu, 28 Mar 2024 12:19:51 -0500 Subject: [PATCH 041/260] Improved doctests for some functions (#11334) --- .../binary_tree/binary_tree_traversals.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/data_structures/binary_tree/binary_tree_traversals.py b/data_structures/binary_tree/binary_tree_traversals.py index 2b33cdca4fed..49c208335b2c 100644 --- a/data_structures/binary_tree/binary_tree_traversals.py +++ b/data_structures/binary_tree/binary_tree_traversals.py @@ -97,6 +97,8 @@ def level_order(root: Node | None) -> Generator[int, None, None]: """ Returns a list of nodes value from a whole binary tree in Level Order Traverse. Level Order traverse: Visit nodes of the tree level-by-level. 
+ >>> list(level_order(make_tree())) + [1, 2, 3, 4, 5] """ if root is None: @@ -120,6 +122,10 @@ def get_nodes_from_left_to_right( """ Returns a list of nodes value from a particular level: Left to right direction of the binary tree. + >>> list(get_nodes_from_left_to_right(make_tree(), 1)) + [1] + >>> list(get_nodes_from_left_to_right(make_tree(), 2)) + [2, 3] """ def populate_output(root: Node | None, level: int) -> Generator[int, None, None]: @@ -140,10 +146,14 @@ def get_nodes_from_right_to_left( """ Returns a list of nodes value from a particular level: Right to left direction of the binary tree. + >>> list(get_nodes_from_right_to_left(make_tree(), 1)) + [1] + >>> list(get_nodes_from_right_to_left(make_tree(), 2)) + [3, 2] """ def populate_output(root: Node | None, level: int) -> Generator[int, None, None]: - if root is None: + if not root: return if level == 1: yield root.data @@ -158,6 +168,8 @@ def zigzag(root: Node | None) -> Generator[int, None, None]: """ ZigZag traverse: Returns a list of nodes value from left to right and right to left, alternatively. 
+ >>> list(zigzag(make_tree())) + [1, 3, 2, 4, 5] """ if root is None: return From 516a3028d1f6b6e7e11ae4501fdaee50a0965464 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Thu, 28 Mar 2024 20:25:41 +0300 Subject: [PATCH 042/260] Enable ruff PLR5501 rule (#11332) * Enable ruff PLR5501 rule * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- backtracking/crossword_puzzle_solver.py | 5 +- cellular_automata/game_of_life.py | 5 +- ciphers/decrypt_caesar_with_chi_squared.py | 21 +++--- data_structures/binary_tree/avl_tree.py | 10 +-- .../binary_tree/binary_search_tree.py | 9 ++- .../binary_search_tree_recursive.py | 22 +++---- data_structures/binary_tree/red_black_tree.py | 66 +++++++++---------- data_structures/binary_tree/treap.py | 29 ++++---- data_structures/heap/max_heap.py | 7 +- .../stacks/infix_to_prefix_conversion.py | 13 ++-- data_structures/trie/radix_tree.py | 45 +++++++------ divide_and_conquer/convex_hull.py | 15 ++--- graphs/graph_list.py | 46 ++++++------- graphs/minimum_spanning_tree_prims.py | 7 +- graphs/multi_heuristic_astar.py | 33 +++++----- machine_learning/forecasting/run.py | 7 +- maths/largest_of_very_large_numbers.py | 9 ++- maths/pollard_rho.py | 13 ++-- matrix/cramers_rule_2x2.py | 15 ++--- project_euler/problem_019/sol1.py | 7 +- pyproject.toml | 1 - searches/hill_climbing.py | 7 +- searches/interpolation_search.py | 35 +++++----- strings/min_cost_string_conversion.py | 23 ++++--- 24 files changed, 211 insertions(+), 239 deletions(-) diff --git a/backtracking/crossword_puzzle_solver.py b/backtracking/crossword_puzzle_solver.py index b9c01c4efea9..e702c7e52153 100644 --- a/backtracking/crossword_puzzle_solver.py +++ b/backtracking/crossword_puzzle_solver.py @@ -28,9 +28,8 @@ def is_valid( if vertical: if row + i >= len(puzzle) or puzzle[row + i][col] != "": return False - else: - if col 
+ i >= len(puzzle[0]) or puzzle[row][col + i] != "": - return False + elif col + i >= len(puzzle[0]) or puzzle[row][col + i] != "": + return False return True diff --git a/cellular_automata/game_of_life.py b/cellular_automata/game_of_life.py index 67e647d6475b..76276b272d65 100644 --- a/cellular_automata/game_of_life.py +++ b/cellular_automata/game_of_life.py @@ -101,9 +101,8 @@ def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool: state = True elif alive > 3: state = False - else: - if alive == 3: - state = True + elif alive == 3: + state = True return state diff --git a/ciphers/decrypt_caesar_with_chi_squared.py b/ciphers/decrypt_caesar_with_chi_squared.py index 6c36860207cd..10832203e531 100644 --- a/ciphers/decrypt_caesar_with_chi_squared.py +++ b/ciphers/decrypt_caesar_with_chi_squared.py @@ -206,20 +206,19 @@ def decrypt_caesar_with_chi_squared( # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value - else: - if letter.lower() in frequencies: - # Get the amount of times the letter occurs in the message - occurrences = decrypted_with_shift.count(letter) + elif letter.lower() in frequencies: + # Get the amount of times the letter occurs in the message + occurrences = decrypted_with_shift.count(letter) - # Get the excepcted amount of times the letter should appear based - # on letter frequencies - expected = frequencies[letter] * occurrences + # Get the excepcted amount of times the letter should appear based + # on letter frequencies + expected = frequencies[letter] * occurrences - # Complete the chi squared statistic formula - chi_letter_value = ((occurrences - expected) ** 2) / expected + # Complete the chi squared statistic formula + chi_letter_value = ((occurrences - expected) ** 2) / expected - # Add the margin of error to the total chi squared statistic - chi_squared_statistic += chi_letter_value + # Add the margin of error to the total chi squared statistic + chi_squared_statistic += 
chi_letter_value # Add the data to the chi_squared_statistic_values dictionary chi_squared_statistic_values[shift] = ( diff --git a/data_structures/binary_tree/avl_tree.py b/data_structures/binary_tree/avl_tree.py index 041ed7e36d16..9fca7237404c 100644 --- a/data_structures/binary_tree/avl_tree.py +++ b/data_structures/binary_tree/avl_tree.py @@ -215,11 +215,11 @@ def del_node(root: MyNode, data: Any) -> MyNode | None: return root else: root.set_left(del_node(left_child, data)) - else: # root.get_data() < data - if right_child is None: - return root - else: - root.set_right(del_node(right_child, data)) + # root.get_data() < data + elif right_child is None: + return root + else: + root.set_right(del_node(right_child, data)) if get_height(right_child) - get_height(left_child) == 2: assert right_child is not None diff --git a/data_structures/binary_tree/binary_search_tree.py b/data_structures/binary_tree/binary_search_tree.py index 08a60a12065d..090e3e25fe6d 100644 --- a/data_structures/binary_tree/binary_search_tree.py +++ b/data_structures/binary_tree/binary_search_tree.py @@ -185,12 +185,11 @@ def __insert(self, value) -> None: break else: parent_node = parent_node.left + elif parent_node.right is None: + parent_node.right = new_node + break else: - if parent_node.right is None: - parent_node.right = new_node - break - else: - parent_node = parent_node.right + parent_node = parent_node.right new_node.parent = parent_node def insert(self, *values) -> Self: diff --git a/data_structures/binary_tree/binary_search_tree_recursive.py b/data_structures/binary_tree/binary_search_tree_recursive.py index 6af1b053f42c..d94ac5253360 100644 --- a/data_structures/binary_tree/binary_search_tree_recursive.py +++ b/data_structures/binary_tree/binary_search_tree_recursive.py @@ -74,14 +74,13 @@ def put(self, label: int) -> None: def _put(self, node: Node | None, label: int, parent: Node | None = None) -> Node: if node is None: node = Node(label, parent) + elif label < node.label: + 
node.left = self._put(node.left, label, node) + elif label > node.label: + node.right = self._put(node.right, label, node) else: - if label < node.label: - node.left = self._put(node.left, label, node) - elif label > node.label: - node.right = self._put(node.right, label, node) - else: - msg = f"Node with label {label} already exists" - raise ValueError(msg) + msg = f"Node with label {label} already exists" + raise ValueError(msg) return node @@ -106,11 +105,10 @@ def _search(self, node: Node | None, label: int) -> Node: if node is None: msg = f"Node with label {label} does not exist" raise ValueError(msg) - else: - if label < node.label: - node = self._search(node.left, label) - elif label > node.label: - node = self._search(node.right, label) + elif label < node.label: + node = self._search(node.left, label) + elif label > node.label: + node = self._search(node.right, label) return node diff --git a/data_structures/binary_tree/red_black_tree.py b/data_structures/binary_tree/red_black_tree.py index 3b5845cd957b..bdd808c828e0 100644 --- a/data_structures/binary_tree/red_black_tree.py +++ b/data_structures/binary_tree/red_black_tree.py @@ -107,12 +107,11 @@ def insert(self, label: int) -> RedBlackTree: else: self.left = RedBlackTree(label, 1, self) self.left._insert_repair() + elif self.right: + self.right.insert(label) else: - if self.right: - self.right.insert(label) - else: - self.right = RedBlackTree(label, 1, self) - self.right._insert_repair() + self.right = RedBlackTree(label, 1, self) + self.right._insert_repair() return self.parent or self def _insert_repair(self) -> None: @@ -178,36 +177,34 @@ def remove(self, label: int) -> RedBlackTree: # noqa: PLR0912 self.parent.left = None else: self.parent.right = None - else: - # The node is black - if child is None: - # This node and its child are black - if self.parent is None: - # The tree is now empty - return RedBlackTree(None) - else: - self._remove_repair() - if self.is_left(): - self.parent.left = None - 
else: - self.parent.right = None - self.parent = None + # The node is black + elif child is None: + # This node and its child are black + if self.parent is None: + # The tree is now empty + return RedBlackTree(None) else: - # This node is black and its child is red - # Move the child node here and make it black - self.label = child.label - self.left = child.left - self.right = child.right - if self.left: - self.left.parent = self - if self.right: - self.right.parent = self + self._remove_repair() + if self.is_left(): + self.parent.left = None + else: + self.parent.right = None + self.parent = None + else: + # This node is black and its child is red + # Move the child node here and make it black + self.label = child.label + self.left = child.left + self.right = child.right + if self.left: + self.left.parent = self + if self.right: + self.right.parent = self elif self.label is not None and self.label > label: if self.left: self.left.remove(label) - else: - if self.right: - self.right.remove(label) + elif self.right: + self.right.remove(label) return self.parent or self def _remove_repair(self) -> None: @@ -369,11 +366,10 @@ def search(self, label: int) -> RedBlackTree | None: return None else: return self.right.search(label) + elif self.left is None: + return None else: - if self.left is None: - return None - else: - return self.left.search(label) + return self.left.search(label) def floor(self, label: int) -> int | None: """Returns the largest element in this tree which is at most label. diff --git a/data_structures/binary_tree/treap.py b/data_structures/binary_tree/treap.py index a53ac566ed54..e7ddf931b83a 100644 --- a/data_structures/binary_tree/treap.py +++ b/data_structures/binary_tree/treap.py @@ -43,22 +43,21 @@ def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]: return None, None elif root.value is None: return None, None + elif value < root.value: + """ + Right tree's root will be current node. 
+ Now we split(with the same value) current node's left son + Left tree: left part of that split + Right tree's left son: right part of that split + """ + left, root.left = split(root.left, value) + return left, root else: - if value < root.value: - """ - Right tree's root will be current node. - Now we split(with the same value) current node's left son - Left tree: left part of that split - Right tree's left son: right part of that split - """ - left, root.left = split(root.left, value) - return left, root - else: - """ - Just symmetric to previous case - """ - root.right, right = split(root.right, value) - return root, right + """ + Just symmetric to previous case + """ + root.right, right = split(root.right, value) + return root, right def merge(left: Node | None, right: Node | None) -> Node | None: diff --git a/data_structures/heap/max_heap.py b/data_structures/heap/max_heap.py index fbc8eed09226..5a9f9cf88433 100644 --- a/data_structures/heap/max_heap.py +++ b/data_structures/heap/max_heap.py @@ -40,11 +40,10 @@ def __swap_down(self, i: int) -> None: while self.__size >= 2 * i: if 2 * i + 1 > self.__size: bigger_child = 2 * i + elif self.__heap[2 * i] > self.__heap[2 * i + 1]: + bigger_child = 2 * i else: - if self.__heap[2 * i] > self.__heap[2 * i + 1]: - bigger_child = 2 * i - else: - bigger_child = 2 * i + 1 + bigger_child = 2 * i + 1 temporary = self.__heap[i] if self.__heap[i] < self.__heap[bigger_child]: self.__heap[i] = self.__heap[bigger_child] diff --git a/data_structures/stacks/infix_to_prefix_conversion.py b/data_structures/stacks/infix_to_prefix_conversion.py index beff421c0cfa..878473b93c19 100644 --- a/data_structures/stacks/infix_to_prefix_conversion.py +++ b/data_structures/stacks/infix_to_prefix_conversion.py @@ -95,13 +95,12 @@ def infix_2_postfix(infix: str) -> str: while stack[-1] != "(": post_fix.append(stack.pop()) # Pop stack & add the content to Postfix stack.pop() - else: - if len(stack) == 0: - stack.append(x) # If stack is empty, 
push x to stack - else: # while priority of x is not > priority of element in the stack - while stack and stack[-1] != "(" and priority[x] <= priority[stack[-1]]: - post_fix.append(stack.pop()) # pop stack & add to Postfix - stack.append(x) # push x to stack + elif len(stack) == 0: + stack.append(x) # If stack is empty, push x to stack + else: # while priority of x is not > priority of element in the stack + while stack and stack[-1] != "(" and priority[x] <= priority[stack[-1]]: + post_fix.append(stack.pop()) # pop stack & add to Postfix + stack.append(x) # push x to stack print( x.center(8), diff --git a/data_structures/trie/radix_tree.py b/data_structures/trie/radix_tree.py index fadc50cb49a7..caf566a6ce30 100644 --- a/data_structures/trie/radix_tree.py +++ b/data_structures/trie/radix_tree.py @@ -153,31 +153,30 @@ def delete(self, word: str) -> bool: # We have word remaining so we check the next node elif remaining_word != "": return incoming_node.delete(remaining_word) + # If it is not a leaf, we don't have to delete + elif not incoming_node.is_leaf: + return False else: - # If it is not a leaf, we don't have to delete - if not incoming_node.is_leaf: - return False + # We delete the nodes if no edges go from it + if len(incoming_node.nodes) == 0: + del self.nodes[word[0]] + # We merge the current node with its only child + if len(self.nodes) == 1 and not self.is_leaf: + merging_node = next(iter(self.nodes.values())) + self.is_leaf = merging_node.is_leaf + self.prefix += merging_node.prefix + self.nodes = merging_node.nodes + # If there is more than 1 edge, we just mark it as non-leaf + elif len(incoming_node.nodes) > 1: + incoming_node.is_leaf = False + # If there is 1 edge, we merge it with its child else: - # We delete the nodes if no edges go from it - if len(incoming_node.nodes) == 0: - del self.nodes[word[0]] - # We merge the current node with its only child - if len(self.nodes) == 1 and not self.is_leaf: - merging_node = next(iter(self.nodes.values())) - 
self.is_leaf = merging_node.is_leaf - self.prefix += merging_node.prefix - self.nodes = merging_node.nodes - # If there is more than 1 edge, we just mark it as non-leaf - elif len(incoming_node.nodes) > 1: - incoming_node.is_leaf = False - # If there is 1 edge, we merge it with its child - else: - merging_node = next(iter(incoming_node.nodes.values())) - incoming_node.is_leaf = merging_node.is_leaf - incoming_node.prefix += merging_node.prefix - incoming_node.nodes = merging_node.nodes - - return True + merging_node = next(iter(incoming_node.nodes.values())) + incoming_node.is_leaf = merging_node.is_leaf + incoming_node.prefix += merging_node.prefix + incoming_node.nodes = merging_node.nodes + + return True def print_tree(self, height: int = 0) -> None: """Print the tree diff --git a/divide_and_conquer/convex_hull.py b/divide_and_conquer/convex_hull.py index a5d8b713bdbc..93f6daf1f88c 100644 --- a/divide_and_conquer/convex_hull.py +++ b/divide_and_conquer/convex_hull.py @@ -274,14 +274,13 @@ def convex_hull_bf(points: list[Point]) -> list[Point]: points_left_of_ij = True elif det_k < 0: points_right_of_ij = True - else: - # point[i], point[j], point[k] all lie on a straight line - # if point[k] is to the left of point[i] or it's to the - # right of point[j], then point[i], point[j] cannot be - # part of the convex hull of A - if points[k] < points[i] or points[k] > points[j]: - ij_part_of_convex_hull = False - break + # point[i], point[j], point[k] all lie on a straight line + # if point[k] is to the left of point[i] or it's to the + # right of point[j], then point[i], point[j] cannot be + # part of the convex hull of A + elif points[k] < points[i] or points[k] > points[j]: + ij_part_of_convex_hull = False + break if points_left_of_ij and points_right_of_ij: ij_part_of_convex_hull = False diff --git a/graphs/graph_list.py b/graphs/graph_list.py index e871f3b8a9d6..6563cbb76132 100644 --- a/graphs/graph_list.py +++ b/graphs/graph_list.py @@ -120,29 +120,29 @@ def 
add_edge( else: self.adj_list[source_vertex] = [destination_vertex] self.adj_list[destination_vertex] = [source_vertex] - else: # For directed graphs - # if both source vertex and destination vertex are present in adjacency - # list, add destination vertex to source vertex list of adjacent vertices. - if source_vertex in self.adj_list and destination_vertex in self.adj_list: - self.adj_list[source_vertex].append(destination_vertex) - # if only source vertex is present in adjacency list, add destination - # vertex to source vertex list of adjacent vertices and create a new vertex - # with destination vertex as key, which has no adjacent vertex - elif source_vertex in self.adj_list: - self.adj_list[source_vertex].append(destination_vertex) - self.adj_list[destination_vertex] = [] - # if only destination vertex is present in adjacency list, create a new - # vertex with source vertex as key and assign a list containing destination - # vertex as first adjacent vertex - elif destination_vertex in self.adj_list: - self.adj_list[source_vertex] = [destination_vertex] - # if both source vertex and destination vertex are not present in adjacency - # list, create a new vertex with source vertex as key and a list containing - # destination vertex as it's first adjacent vertex. Then create a new vertex - # with destination vertex as key, which has no adjacent vertex - else: - self.adj_list[source_vertex] = [destination_vertex] - self.adj_list[destination_vertex] = [] + # For directed graphs + # if both source vertex and destination vertex are present in adjacency + # list, add destination vertex to source vertex list of adjacent vertices. 
+ elif source_vertex in self.adj_list and destination_vertex in self.adj_list: + self.adj_list[source_vertex].append(destination_vertex) + # if only source vertex is present in adjacency list, add destination + # vertex to source vertex list of adjacent vertices and create a new vertex + # with destination vertex as key, which has no adjacent vertex + elif source_vertex in self.adj_list: + self.adj_list[source_vertex].append(destination_vertex) + self.adj_list[destination_vertex] = [] + # if only destination vertex is present in adjacency list, create a new + # vertex with source vertex as key and assign a list containing destination + # vertex as first adjacent vertex + elif destination_vertex in self.adj_list: + self.adj_list[source_vertex] = [destination_vertex] + # if both source vertex and destination vertex are not present in adjacency + # list, create a new vertex with source vertex as key and a list containing + # destination vertex as it's first adjacent vertex. Then create a new vertex + # with destination vertex as key, which has no adjacent vertex + else: + self.adj_list[source_vertex] = [destination_vertex] + self.adj_list[destination_vertex] = [] return self diff --git a/graphs/minimum_spanning_tree_prims.py b/graphs/minimum_spanning_tree_prims.py index 5a08ec57ff4d..90c9f4c91e86 100644 --- a/graphs/minimum_spanning_tree_prims.py +++ b/graphs/minimum_spanning_tree_prims.py @@ -18,11 +18,10 @@ def top_to_bottom(self, heap, start, size, positions): else: if 2 * start + 2 >= size: smallest_child = 2 * start + 1 + elif heap[2 * start + 1] < heap[2 * start + 2]: + smallest_child = 2 * start + 1 else: - if heap[2 * start + 1] < heap[2 * start + 2]: - smallest_child = 2 * start + 1 - else: - smallest_child = 2 * start + 2 + smallest_child = 2 * start + 2 if heap[smallest_child] < heap[start]: temp, temp1 = heap[smallest_child], positions[smallest_child] heap[smallest_child], positions[smallest_child] = ( diff --git a/graphs/multi_heuristic_astar.py 
b/graphs/multi_heuristic_astar.py index 0a18ede6ed41..6af9a187a4e9 100644 --- a/graphs/multi_heuristic_astar.py +++ b/graphs/multi_heuristic_astar.py @@ -270,24 +270,23 @@ def multi_a_star(start: TPos, goal: TPos, n_heuristic: int): back_pointer, ) close_list_inad.append(get_s) + elif g_function[goal] <= open_list[0].minkey(): + if g_function[goal] < float("inf"): + do_something(back_pointer, goal, start) else: - if g_function[goal] <= open_list[0].minkey(): - if g_function[goal] < float("inf"): - do_something(back_pointer, goal, start) - else: - get_s = open_list[0].top_show() - visited.add(get_s) - expand_state( - get_s, - 0, - visited, - g_function, - close_list_anchor, - close_list_inad, - open_list, - back_pointer, - ) - close_list_anchor.append(get_s) + get_s = open_list[0].top_show() + visited.add(get_s) + expand_state( + get_s, + 0, + visited, + g_function, + close_list_anchor, + close_list_inad, + open_list, + back_pointer, + ) + close_list_anchor.append(get_s) print("No path found to goal") print() for i in range(n - 1, -1, -1): diff --git a/machine_learning/forecasting/run.py b/machine_learning/forecasting/run.py index 64e719daacc2..dbb86caf8568 100644 --- a/machine_learning/forecasting/run.py +++ b/machine_learning/forecasting/run.py @@ -113,11 +113,10 @@ def data_safety_checker(list_vote: list, actual_result: float) -> bool: for i in list_vote: if i > actual_result: safe = not_safe + 1 + elif abs(abs(i) - abs(actual_result)) <= 0.1: + safe += 1 else: - if abs(abs(i) - abs(actual_result)) <= 0.1: - safe += 1 - else: - not_safe += 1 + not_safe += 1 return safe > not_safe diff --git a/maths/largest_of_very_large_numbers.py b/maths/largest_of_very_large_numbers.py index eb5c121fd262..edee50371e02 100644 --- a/maths/largest_of_very_large_numbers.py +++ b/maths/largest_of_very_large_numbers.py @@ -20,11 +20,10 @@ def res(x, y): if 0 not in (x, y): # We use the relation x^y = y*log10(x), where 10 is the base. 
return y * math.log10(x) - else: - if x == 0: # 0 raised to any number is 0 - return 0 - elif y == 0: - return 1 # any number raised to 0 is 1 + elif x == 0: # 0 raised to any number is 0 + return 0 + elif y == 0: + return 1 # any number raised to 0 is 1 raise AssertionError("This should never happen") diff --git a/maths/pollard_rho.py b/maths/pollard_rho.py index 5082f54f71a8..e8bc89cef6c5 100644 --- a/maths/pollard_rho.py +++ b/maths/pollard_rho.py @@ -94,14 +94,13 @@ def rand_fn(value: int, step: int, modulus: int) -> int: if divisor == 1: # No common divisor yet, just keep searching. continue + # We found a common divisor! + elif divisor == num: + # Unfortunately, the divisor is ``num`` itself and is useless. + break else: - # We found a common divisor! - if divisor == num: - # Unfortunately, the divisor is ``num`` itself and is useless. - break - else: - # The divisor is a nontrivial factor of ``num``! - return divisor + # The divisor is a nontrivial factor of ``num``! + return divisor # If we made it here, then this attempt failed. # We need to pick a new starting seed for the tortoise and hare diff --git a/matrix/cramers_rule_2x2.py b/matrix/cramers_rule_2x2.py index 4f52dbe646ad..081035bec002 100644 --- a/matrix/cramers_rule_2x2.py +++ b/matrix/cramers_rule_2x2.py @@ -73,12 +73,11 @@ def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, raise ValueError("Infinite solutions. (Consistent system)") else: raise ValueError("No solution. 
(Inconsistent system)") + elif determinant_x == determinant_y == 0: + # Trivial solution (Inconsistent system) + return (0.0, 0.0) else: - if determinant_x == determinant_y == 0: - # Trivial solution (Inconsistent system) - return (0.0, 0.0) - else: - x = determinant_x / determinant - y = determinant_y / determinant - # Non-Trivial Solution (Consistent system) - return (x, y) + x = determinant_x / determinant + y = determinant_y / determinant + # Non-Trivial Solution (Consistent system) + return (x, y) diff --git a/project_euler/problem_019/sol1.py b/project_euler/problem_019/sol1.py index 0e38137d4f01..656f104c390d 100644 --- a/project_euler/problem_019/sol1.py +++ b/project_euler/problem_019/sol1.py @@ -46,10 +46,9 @@ def solution(): elif day > 29 and month == 2: month += 1 day = day - 29 - else: - if day > days_per_month[month - 1]: - month += 1 - day = day - days_per_month[month - 2] + elif day > days_per_month[month - 1]: + month += 1 + day = day - days_per_month[month - 2] if month > 12: year += 1 diff --git a/pyproject.toml b/pyproject.toml index 5187491e5ee7..290a6b7599be 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -12,7 +12,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "NPY002", # Replace legacy `np.random.choice` call with `np.random.Generator` -- FIX ME "PGH003", # Use specific rule codes when ignoring type issues -- FIX ME "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey - "PLR5501", # Consider using `elif` instead of `else` -- FIX ME "PLW0120", # `else` clause on loop without a `break` statement -- FIX ME "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX "PLW2901", # PLW2901: Redefined loop variable -- FIX ME diff --git a/searches/hill_climbing.py b/searches/hill_climbing.py index 83a3b8b74e27..689b7e5cca8f 100644 --- a/searches/hill_climbing.py +++ b/searches/hill_climbing.py @@ -137,11 +137,10 @@ def hill_climbing( if change > max_change and change > 0: max_change = 
change next_state = neighbor - else: # finding min + elif change < min_change and change < 0: # finding min # to direction with greatest descent - if change < min_change and change < 0: - min_change = change - next_state = neighbor + min_change = change + next_state = neighbor if next_state is not None: # we found at least one neighbor which improved the current state current_state = next_state diff --git a/searches/interpolation_search.py b/searches/interpolation_search.py index 49194c2600a0..0591788aa40b 100644 --- a/searches/interpolation_search.py +++ b/searches/interpolation_search.py @@ -33,18 +33,16 @@ def interpolation_search(sorted_collection, item): current_item = sorted_collection[point] if current_item == item: return point + elif point < left: + right = left + left = point + elif point > right: + left = right + right = point + elif item < current_item: + right = point - 1 else: - if point < left: - right = left - left = point - elif point > right: - left = right - right = point - else: - if item < current_item: - right = point - 1 - else: - left = point + 1 + left = point + 1 return None @@ -79,15 +77,14 @@ def interpolation_search_by_recursion(sorted_collection, item, left, right): return interpolation_search_by_recursion(sorted_collection, item, point, left) elif point > right: return interpolation_search_by_recursion(sorted_collection, item, right, left) + elif sorted_collection[point] > item: + return interpolation_search_by_recursion( + sorted_collection, item, left, point - 1 + ) else: - if sorted_collection[point] > item: - return interpolation_search_by_recursion( - sorted_collection, item, left, point - 1 - ) - else: - return interpolation_search_by_recursion( - sorted_collection, item, point + 1, right - ) + return interpolation_search_by_recursion( + sorted_collection, item, point + 1, right + ) def __assert_sorted(collection): diff --git a/strings/min_cost_string_conversion.py b/strings/min_cost_string_conversion.py index 
0fad0b88c370..d147a9d7954c 100644 --- a/strings/min_cost_string_conversion.py +++ b/strings/min_cost_string_conversion.py @@ -60,19 +60,18 @@ def compute_transform_tables( def assemble_transformation(ops: list[list[str]], i: int, j: int) -> list[str]: if i == 0 and j == 0: return [] + elif ops[i][j][0] in {"C", "R"}: + seq = assemble_transformation(ops, i - 1, j - 1) + seq.append(ops[i][j]) + return seq + elif ops[i][j][0] == "D": + seq = assemble_transformation(ops, i - 1, j) + seq.append(ops[i][j]) + return seq else: - if ops[i][j][0] in {"C", "R"}: - seq = assemble_transformation(ops, i - 1, j - 1) - seq.append(ops[i][j]) - return seq - elif ops[i][j][0] == "D": - seq = assemble_transformation(ops, i - 1, j) - seq.append(ops[i][j]) - return seq - else: - seq = assemble_transformation(ops, i, j - 1) - seq.append(ops[i][j]) - return seq + seq = assemble_transformation(ops, i, j - 1) + seq.append(ops[i][j]) + return seq if __name__ == "__main__": From da47d5c88ccf18e27c5b8f10830376031ad1792a Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Thu, 28 Mar 2024 20:26:41 +0300 Subject: [PATCH 043/260] Enable ruff N999 rule (#11331) * Enable ruff N999 rule * updating DIRECTORY.md --------- Co-authored-by: MaximSmolskiy --- DIRECTORY.md | 6 +++--- ...(nlogn).py => longest_increasing_subsequence_o_nlogn.py} | 0 ...)_graph.py => directed_and_undirected_weighted_graph.py} | 0 ...eural_network.py => two_hidden_layers_neural_network.py} | 0 pyproject.toml | 1 - 5 files changed, 3 insertions(+), 4 deletions(-) rename dynamic_programming/{longest_increasing_subsequence_o(nlogn).py => longest_increasing_subsequence_o_nlogn.py} (100%) rename graphs/{directed_and_undirected_(weighted)_graph.py => directed_and_undirected_weighted_graph.py} (100%) rename neural_network/{2_hidden_layers_neural_network.py => two_hidden_layers_neural_network.py} (100%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 01667c9feee8..f6d6cb463faa 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -351,7 
+351,7 @@ * [Longest Common Subsequence](dynamic_programming/longest_common_subsequence.py) * [Longest Common Substring](dynamic_programming/longest_common_substring.py) * [Longest Increasing Subsequence](dynamic_programming/longest_increasing_subsequence.py) - * [Longest Increasing Subsequence O(Nlogn)](dynamic_programming/longest_increasing_subsequence_o(nlogn).py) + * [Longest Increasing Subsequence O Nlogn](dynamic_programming/longest_increasing_subsequence_o_nlogn.py) * [Longest Palindromic Subsequence](dynamic_programming/longest_palindromic_subsequence.py) * [Matrix Chain Multiplication](dynamic_programming/matrix_chain_multiplication.py) * [Matrix Chain Order](dynamic_programming/matrix_chain_order.py) @@ -465,7 +465,7 @@ * [Dijkstra Alternate](graphs/dijkstra_alternate.py) * [Dijkstra Binary Grid](graphs/dijkstra_binary_grid.py) * [Dinic](graphs/dinic.py) - * [Directed And Undirected (Weighted) Graph](graphs/directed_and_undirected_(weighted)_graph.py) + * [Directed And Undirected Weighted Graph](graphs/directed_and_undirected_weighted_graph.py) * [Edmonds Karp Multiple Source And Sink](graphs/edmonds_karp_multiple_source_and_sink.py) * [Eulerian Path And Circuit For Undirected Graph](graphs/eulerian_path_and_circuit_for_undirected_graph.py) * [Even Tree](graphs/even_tree.py) @@ -792,7 +792,6 @@ * [Minimum Cut](networking_flow/minimum_cut.py) ## Neural Network - * [2 Hidden Layers Neural Network](neural_network/2_hidden_layers_neural_network.py) * Activation Functions * [Binary Step](neural_network/activation_functions/binary_step.py) * [Exponential Linear Unit](neural_network/activation_functions/exponential_linear_unit.py) @@ -809,6 +808,7 @@ * [Convolution Neural Network](neural_network/convolution_neural_network.py) * [Input Data](neural_network/input_data.py) * [Simple Neural Network](neural_network/simple_neural_network.py) + * [Two Hidden Layers Neural Network](neural_network/two_hidden_layers_neural_network.py) ## Other * [Activity 
Selection](other/activity_selection.py) diff --git a/dynamic_programming/longest_increasing_subsequence_o(nlogn).py b/dynamic_programming/longest_increasing_subsequence_o_nlogn.py similarity index 100% rename from dynamic_programming/longest_increasing_subsequence_o(nlogn).py rename to dynamic_programming/longest_increasing_subsequence_o_nlogn.py diff --git a/graphs/directed_and_undirected_(weighted)_graph.py b/graphs/directed_and_undirected_weighted_graph.py similarity index 100% rename from graphs/directed_and_undirected_(weighted)_graph.py rename to graphs/directed_and_undirected_weighted_graph.py diff --git a/neural_network/2_hidden_layers_neural_network.py b/neural_network/two_hidden_layers_neural_network.py similarity index 100% rename from neural_network/2_hidden_layers_neural_network.py rename to neural_network/two_hidden_layers_neural_network.py diff --git a/pyproject.toml b/pyproject.toml index 290a6b7599be..5b2eb07b4555 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -8,7 +8,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "G004", # Logging statement uses f-string "ICN001", # `matplotlib.pyplot` should be imported as `plt` -- FIX ME "INP001", # File `x/y/z.py` is part of an implicit namespace package. Add an `__init__.py`. 
-- FIX ME - "N999", # Invalid module name -- FIX ME "NPY002", # Replace legacy `np.random.choice` call with `np.random.Generator` -- FIX ME "PGH003", # Use specific rule codes when ignoring type issues -- FIX ME "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey From efb7463cde48305cfebb8a547273c93edbdaaee5 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Thu, 28 Mar 2024 20:28:54 +0300 Subject: [PATCH 044/260] Enable ruff PLW0120 rule (#11330) Co-authored-by: Christian Clauss --- pyproject.toml | 1 - searches/fibonacci_search.py | 3 +-- searches/ternary_search.py | 3 +-- 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 5b2eb07b4555..b9f3115df92a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,7 +11,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "NPY002", # Replace legacy `np.random.choice` call with `np.random.Generator` -- FIX ME "PGH003", # Use specific rule codes when ignoring type issues -- FIX ME "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey - "PLW0120", # `else` clause on loop without a `break` statement -- FIX ME "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX "PLW2901", # PLW2901: Redefined loop variable -- FIX ME "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception diff --git a/searches/fibonacci_search.py b/searches/fibonacci_search.py index ec3dfa7f30f6..7b2252a68be2 100644 --- a/searches/fibonacci_search.py +++ b/searches/fibonacci_search.py @@ -123,8 +123,7 @@ def fibonacci_search(arr: list, val: int) -> int: elif val > item_k_1: offset += fibonacci(fibb_k - 1) fibb_k -= 2 - else: - return -1 + return -1 if __name__ == "__main__": diff --git a/searches/ternary_search.py b/searches/ternary_search.py index 8dcd6b5bde2e..73e4b1ddc68b 100644 --- a/searches/ternary_search.py +++ b/searches/ternary_search.py @@ -106,8 +106,7 @@ def 
ite_ternary_search(array: list[int], target: int) -> int: else: left = one_third + 1 right = two_third - 1 - else: - return -1 + return -1 def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int: From f2246ce7fd539d94fd9299bd2fe42469dafab03f Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Thu, 28 Mar 2024 21:03:23 +0300 Subject: [PATCH 045/260] Enable ruff ICN001 rule (#11329) * Enable ruff ICN001 rule * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- ciphers/hill_cipher.py | 38 ++++----- fractals/julia_sets.py | 54 ++++++------ fractals/koch_snowflake.py | 34 ++++---- graphics/bezier_curve.py | 2 +- machine_learning/gradient_descent.py | 4 +- neural_network/input_data.py | 32 +++---- .../two_hidden_layers_neural_network.py | 84 +++++++++---------- pyproject.toml | 1 - 8 files changed, 121 insertions(+), 128 deletions(-) diff --git a/ciphers/hill_cipher.py b/ciphers/hill_cipher.py index ea337a72dc04..33b2529f017b 100644 --- a/ciphers/hill_cipher.py +++ b/ciphers/hill_cipher.py @@ -38,7 +38,7 @@ import string -import numpy +import numpy as np from maths.greatest_common_divisor import greatest_common_divisor @@ -49,11 +49,11 @@ class HillCipher: # i.e. 
a total of 36 characters # take x and return x % len(key_string) - modulus = numpy.vectorize(lambda x: x % 36) + modulus = np.vectorize(lambda x: x % 36) - to_int = numpy.vectorize(round) + to_int = np.vectorize(round) - def __init__(self, encrypt_key: numpy.ndarray) -> None: + def __init__(self, encrypt_key: np.ndarray) -> None: """ encrypt_key is an NxN numpy array """ @@ -63,7 +63,7 @@ def __init__(self, encrypt_key: numpy.ndarray) -> None: def replace_letters(self, letter: str) -> int: """ - >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]])) + >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.replace_letters('T') 19 >>> hill_cipher.replace_letters('0') @@ -73,7 +73,7 @@ def replace_letters(self, letter: str) -> int: def replace_digits(self, num: int) -> str: """ - >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]])) + >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.replace_digits(19) 'T' >>> hill_cipher.replace_digits(26) @@ -83,10 +83,10 @@ def replace_digits(self, num: int) -> str: def check_determinant(self) -> None: """ - >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]])) + >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.check_determinant() """ - det = round(numpy.linalg.det(self.encrypt_key)) + det = round(np.linalg.det(self.encrypt_key)) if det < 0: det = det % len(self.key_string) @@ -101,7 +101,7 @@ def check_determinant(self) -> None: def process_text(self, text: str) -> str: """ - >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]])) + >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.process_text('Testing Hill Cipher') 'TESTINGHILLCIPHERR' >>> hill_cipher.process_text('hello') @@ -117,7 +117,7 @@ def process_text(self, text: str) -> str: def encrypt(self, text: str) -> str: """ - >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]])) + >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> 
hill_cipher.encrypt('testing hill cipher') 'WHXYJOLM9C6XT085LL' >>> hill_cipher.encrypt('hello') @@ -129,7 +129,7 @@ def encrypt(self, text: str) -> str: for i in range(0, len(text) - self.break_key + 1, self.break_key): batch = text[i : i + self.break_key] vec = [self.replace_letters(char) for char in batch] - batch_vec = numpy.array([vec]).T + batch_vec = np.array([vec]).T batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[ 0 ] @@ -140,14 +140,14 @@ def encrypt(self, text: str) -> str: return encrypted - def make_decrypt_key(self) -> numpy.ndarray: + def make_decrypt_key(self) -> np.ndarray: """ - >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]])) + >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.make_decrypt_key() array([[ 6, 25], [ 5, 26]]) """ - det = round(numpy.linalg.det(self.encrypt_key)) + det = round(np.linalg.det(self.encrypt_key)) if det < 0: det = det % len(self.key_string) @@ -158,16 +158,14 @@ def make_decrypt_key(self) -> numpy.ndarray: break inv_key = ( - det_inv - * numpy.linalg.det(self.encrypt_key) - * numpy.linalg.inv(self.encrypt_key) + det_inv * np.linalg.det(self.encrypt_key) * np.linalg.inv(self.encrypt_key) ) return self.to_int(self.modulus(inv_key)) def decrypt(self, text: str) -> str: """ - >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]])) + >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.decrypt('WHXYJOLM9C6XT085LL') 'TESTINGHILLCIPHERR' >>> hill_cipher.decrypt('85FF00') @@ -180,7 +178,7 @@ def decrypt(self, text: str) -> str: for i in range(0, len(text) - self.break_key + 1, self.break_key): batch = text[i : i + self.break_key] vec = [self.replace_letters(char) for char in batch] - batch_vec = numpy.array([vec]).T + batch_vec = np.array([vec]).T batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0] decrypted_batch = "".join( self.replace_digits(num) for num in batch_decrypted @@ -199,7 +197,7 @@ def main() -> None: row = 
[int(x) for x in input().split()] hill_matrix.append(row) - hc = HillCipher(numpy.array(hill_matrix)) + hc = HillCipher(np.array(hill_matrix)) print("Would you like to encrypt or decrypt some text? (1 or 2)") option = input("\n1. Encrypt\n2. Decrypt\n") diff --git a/fractals/julia_sets.py b/fractals/julia_sets.py index 482e1eddfecc..1eef4573ba19 100644 --- a/fractals/julia_sets.py +++ b/fractals/julia_sets.py @@ -25,8 +25,8 @@ from collections.abc import Callable from typing import Any -import numpy -from matplotlib import pyplot +import matplotlib.pyplot as plt +import numpy as np c_cauliflower = 0.25 + 0.0j c_polynomial_1 = -0.4 + 0.6j @@ -37,22 +37,20 @@ nb_pixels = 666 -def eval_exponential(c_parameter: complex, z_values: numpy.ndarray) -> numpy.ndarray: +def eval_exponential(c_parameter: complex, z_values: np.ndarray) -> np.ndarray: """ Evaluate $e^z + c$. >>> eval_exponential(0, 0) 1.0 - >>> abs(eval_exponential(1, numpy.pi*1.j)) < 1e-15 + >>> abs(eval_exponential(1, np.pi*1.j)) < 1e-15 True >>> abs(eval_exponential(1.j, 0)-1-1.j) < 1e-15 True """ - return numpy.exp(z_values) + c_parameter + return np.exp(z_values) + c_parameter -def eval_quadratic_polynomial( - c_parameter: complex, z_values: numpy.ndarray -) -> numpy.ndarray: +def eval_quadratic_polynomial(c_parameter: complex, z_values: np.ndarray) -> np.ndarray: """ >>> eval_quadratic_polynomial(0, 2) 4 @@ -66,7 +64,7 @@ def eval_quadratic_polynomial( return z_values * z_values + c_parameter -def prepare_grid(window_size: float, nb_pixels: int) -> numpy.ndarray: +def prepare_grid(window_size: float, nb_pixels: int) -> np.ndarray: """ Create a grid of complex values of size nb_pixels*nb_pixels with real and imaginary parts ranging from -window_size to window_size (inclusive). 
@@ -77,20 +75,20 @@ def prepare_grid(window_size: float, nb_pixels: int) -> numpy.ndarray: [ 0.-1.j, 0.+0.j, 0.+1.j], [ 1.-1.j, 1.+0.j, 1.+1.j]]) """ - x = numpy.linspace(-window_size, window_size, nb_pixels) + x = np.linspace(-window_size, window_size, nb_pixels) x = x.reshape((nb_pixels, 1)) - y = numpy.linspace(-window_size, window_size, nb_pixels) + y = np.linspace(-window_size, window_size, nb_pixels) y = y.reshape((1, nb_pixels)) return x + 1.0j * y def iterate_function( - eval_function: Callable[[Any, numpy.ndarray], numpy.ndarray], + eval_function: Callable[[Any, np.ndarray], np.ndarray], function_params: Any, nb_iterations: int, - z_0: numpy.ndarray, + z_0: np.ndarray, infinity: float | None = None, -) -> numpy.ndarray: +) -> np.ndarray: """ Iterate the function "eval_function" exactly nb_iterations times. The first argument of the function is a parameter which is contained in @@ -98,22 +96,22 @@ def iterate_function( values to iterate from. This function returns the final iterates. - >>> iterate_function(eval_quadratic_polynomial, 0, 3, numpy.array([0,1,2])).shape + >>> iterate_function(eval_quadratic_polynomial, 0, 3, np.array([0,1,2])).shape (3,) - >>> numpy.round(iterate_function(eval_quadratic_polynomial, + >>> np.round(iterate_function(eval_quadratic_polynomial, ... 0, ... 3, - ... numpy.array([0,1,2]))[0]) + ... np.array([0,1,2]))[0]) 0j - >>> numpy.round(iterate_function(eval_quadratic_polynomial, + >>> np.round(iterate_function(eval_quadratic_polynomial, ... 0, ... 3, - ... numpy.array([0,1,2]))[1]) + ... np.array([0,1,2]))[1]) (1+0j) - >>> numpy.round(iterate_function(eval_quadratic_polynomial, + >>> np.round(iterate_function(eval_quadratic_polynomial, ... 0, ... 3, - ... numpy.array([0,1,2]))[2]) + ... 
np.array([0,1,2]))[2]) (256+0j) """ @@ -121,8 +119,8 @@ def iterate_function( for _ in range(nb_iterations): z_n = eval_function(function_params, z_n) if infinity is not None: - numpy.nan_to_num(z_n, copy=False, nan=infinity) - z_n[abs(z_n) == numpy.inf] = infinity + np.nan_to_num(z_n, copy=False, nan=infinity) + z_n[abs(z_n) == np.inf] = infinity return z_n @@ -130,21 +128,21 @@ def show_results( function_label: str, function_params: Any, escape_radius: float, - z_final: numpy.ndarray, + z_final: np.ndarray, ) -> None: """ Plots of whether the absolute value of z_final is greater than the value of escape_radius. Adds the function_label and function_params to the title. - >>> show_results('80', 0, 1, numpy.array([[0,1,.5],[.4,2,1.1],[.2,1,1.3]])) + >>> show_results('80', 0, 1, np.array([[0,1,.5],[.4,2,1.1],[.2,1,1.3]])) """ abs_z_final = (abs(z_final)).transpose() abs_z_final[:, :] = abs_z_final[::-1, :] - pyplot.matshow(abs_z_final < escape_radius) - pyplot.title(f"Julia set of ${function_label}$, $c={function_params}$") - pyplot.show() + plt.matshow(abs_z_final < escape_radius) + plt.title(f"Julia set of ${function_label}$, $c={function_params}$") + plt.show() def ignore_overflow_warnings() -> None: diff --git a/fractals/koch_snowflake.py b/fractals/koch_snowflake.py index 30cd4b39c7c1..724b78f41a69 100644 --- a/fractals/koch_snowflake.py +++ b/fractals/koch_snowflake.py @@ -22,25 +22,25 @@ from __future__ import annotations -import matplotlib.pyplot as plt # type: ignore -import numpy +import matplotlib.pyplot as plt +import numpy as np # initial triangle of Koch snowflake -VECTOR_1 = numpy.array([0, 0]) -VECTOR_2 = numpy.array([0.5, 0.8660254]) -VECTOR_3 = numpy.array([1, 0]) +VECTOR_1 = np.array([0, 0]) +VECTOR_2 = np.array([0.5, 0.8660254]) +VECTOR_3 = np.array([1, 0]) INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1] # uncomment for simple Koch curve instead of Koch snowflake # INITIAL_VECTORS = [VECTOR_1, VECTOR_3] -def iterate(initial_vectors: 
list[numpy.ndarray], steps: int) -> list[numpy.ndarray]: +def iterate(initial_vectors: list[np.ndarray], steps: int) -> list[np.ndarray]: """ Go through the number of iterations determined by the argument "steps". Be careful with high values (above 5) since the time to calculate increases exponentially. - >>> iterate([numpy.array([0, 0]), numpy.array([1, 0])], 1) + >>> iterate([np.array([0, 0]), np.array([1, 0])], 1) [array([0, 0]), array([0.33333333, 0. ]), array([0.5 , \ 0.28867513]), array([0.66666667, 0. ]), array([1, 0])] """ @@ -50,13 +50,13 @@ def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndar return vectors -def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]: +def iteration_step(vectors: list[np.ndarray]) -> list[np.ndarray]: """ Loops through each pair of adjacent vectors. Each line between two adjacent vectors is divided into 4 segments by adding 3 additional vectors in-between the original two vectors. The vector in the middle is constructed through a 60 degree rotation so it is bent outwards. - >>> iteration_step([numpy.array([0, 0]), numpy.array([1, 0])]) + >>> iteration_step([np.array([0, 0]), np.array([1, 0])]) [array([0, 0]), array([0.33333333, 0. ]), array([0.5 , \ 0.28867513]), array([0.66666667, 0. 
]), array([1, 0])] """ @@ -74,22 +74,22 @@ def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]: return new_vectors -def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray: +def rotate(vector: np.ndarray, angle_in_degrees: float) -> np.ndarray: """ Standard rotation of a 2D vector with a rotation matrix (see https://en.wikipedia.org/wiki/Rotation_matrix ) - >>> rotate(numpy.array([1, 0]), 60) + >>> rotate(np.array([1, 0]), 60) array([0.5 , 0.8660254]) - >>> rotate(numpy.array([1, 0]), 90) + >>> rotate(np.array([1, 0]), 90) array([6.123234e-17, 1.000000e+00]) """ - theta = numpy.radians(angle_in_degrees) - c, s = numpy.cos(theta), numpy.sin(theta) - rotation_matrix = numpy.array(((c, -s), (s, c))) - return numpy.dot(rotation_matrix, vector) + theta = np.radians(angle_in_degrees) + c, s = np.cos(theta), np.sin(theta) + rotation_matrix = np.array(((c, -s), (s, c))) + return np.dot(rotation_matrix, vector) -def plot(vectors: list[numpy.ndarray]) -> None: +def plot(vectors: list[np.ndarray]) -> None: """ Utility function to plot the vectors using matplotlib.pyplot No doctest was implemented since this function does not have a return value diff --git a/graphics/bezier_curve.py b/graphics/bezier_curve.py index 7c22329ad8b4..6eeb89da6bdf 100644 --- a/graphics/bezier_curve.py +++ b/graphics/bezier_curve.py @@ -78,7 +78,7 @@ def plot_curve(self, step_size: float = 0.01): step_size: defines the step(s) at which to evaluate the Bezier curve. The smaller the step size, the finer the curve produced. 
""" - from matplotlib import pyplot as plt # type: ignore + from matplotlib import pyplot as plt to_plot_x: list[float] = [] # x coordinates of points to plot to_plot_y: list[float] = [] # y coordinates of points to plot diff --git a/machine_learning/gradient_descent.py b/machine_learning/gradient_descent.py index db38b3c95b52..95463faf5635 100644 --- a/machine_learning/gradient_descent.py +++ b/machine_learning/gradient_descent.py @@ -3,7 +3,7 @@ function. """ -import numpy +import numpy as np # List of input, output pairs train_data = ( @@ -116,7 +116,7 @@ def run_gradient_descent(): temp_parameter_vector[i] = ( parameter_vector[i] - LEARNING_RATE * cost_derivative ) - if numpy.allclose( + if np.allclose( parameter_vector, temp_parameter_vector, atol=absolute_error_limit, diff --git a/neural_network/input_data.py b/neural_network/input_data.py index f7ae86b48e65..9d4195487dbb 100644 --- a/neural_network/input_data.py +++ b/neural_network/input_data.py @@ -22,7 +22,7 @@ import typing import urllib -import numpy +import numpy as np from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated @@ -39,8 +39,8 @@ class _Datasets(typing.NamedTuple): def _read32(bytestream): - dt = numpy.dtype(numpy.uint32).newbyteorder(">") - return numpy.frombuffer(bytestream.read(4), dtype=dt)[0] + dt = np.dtype(np.uint32).newbyteorder(">") + return np.frombuffer(bytestream.read(4), dtype=dt)[0] @deprecated(None, "Please use tf.data to implement this functionality.") @@ -68,7 +68,7 @@ def _extract_images(f): rows = _read32(bytestream) cols = _read32(bytestream) buf = bytestream.read(rows * cols * num_images) - data = numpy.frombuffer(buf, dtype=numpy.uint8) + data = np.frombuffer(buf, dtype=np.uint8) data = data.reshape(num_images, rows, cols, 1) return data @@ -77,8 +77,8 @@ def _extract_images(f): def _dense_to_one_hot(labels_dense, num_classes): """Convert class labels from scalars 
to one-hot vectors.""" num_labels = labels_dense.shape[0] - index_offset = numpy.arange(num_labels) * num_classes - labels_one_hot = numpy.zeros((num_labels, num_classes)) + index_offset = np.arange(num_labels) * num_classes + labels_one_hot = np.zeros((num_labels, num_classes)) labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1 return labels_one_hot @@ -107,7 +107,7 @@ def _extract_labels(f, one_hot=False, num_classes=10): ) num_items = _read32(bytestream) buf = bytestream.read(num_items) - labels = numpy.frombuffer(buf, dtype=numpy.uint8) + labels = np.frombuffer(buf, dtype=np.uint8) if one_hot: return _dense_to_one_hot(labels, num_classes) return labels @@ -153,7 +153,7 @@ def __init__( """ seed1, seed2 = random_seed.get_seed(seed) # If op level seed is not set, use whatever graph level seed is returned - numpy.random.seed(seed1 if seed is None else seed2) + np.random.seed(seed1 if seed is None else seed2) dtype = dtypes.as_dtype(dtype).base_dtype if dtype not in (dtypes.uint8, dtypes.float32): raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype) @@ -175,8 +175,8 @@ def __init__( ) if dtype == dtypes.float32: # Convert from [0, 255] -> [0.0, 1.0]. 
- images = images.astype(numpy.float32) - images = numpy.multiply(images, 1.0 / 255.0) + images = images.astype(np.float32) + images = np.multiply(images, 1.0 / 255.0) self._images = images self._labels = labels self._epochs_completed = 0 @@ -210,8 +210,8 @@ def next_batch(self, batch_size, fake_data=False, shuffle=True): start = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: - perm0 = numpy.arange(self._num_examples) - numpy.random.shuffle(perm0) + perm0 = np.arange(self._num_examples) + np.random.shuffle(perm0) self._images = self.images[perm0] self._labels = self.labels[perm0] # Go to the next epoch @@ -224,8 +224,8 @@ def next_batch(self, batch_size, fake_data=False, shuffle=True): labels_rest_part = self._labels[start : self._num_examples] # Shuffle the data if shuffle: - perm = numpy.arange(self._num_examples) - numpy.random.shuffle(perm) + perm = np.arange(self._num_examples) + np.random.shuffle(perm) self._images = self.images[perm] self._labels = self.labels[perm] # Start next epoch @@ -235,8 +235,8 @@ def next_batch(self, batch_size, fake_data=False, shuffle=True): images_new_part = self._images[start:end] labels_new_part = self._labels[start:end] return ( - numpy.concatenate((images_rest_part, images_new_part), axis=0), - numpy.concatenate((labels_rest_part, labels_new_part), axis=0), + np.concatenate((images_rest_part, images_new_part), axis=0), + np.concatenate((labels_rest_part, labels_new_part), axis=0), ) else: self._index_in_epoch += batch_size diff --git a/neural_network/two_hidden_layers_neural_network.py b/neural_network/two_hidden_layers_neural_network.py index 7b374a93d039..dea7e2342d9f 100644 --- a/neural_network/two_hidden_layers_neural_network.py +++ b/neural_network/two_hidden_layers_neural_network.py @@ -5,11 +5,11 @@ - https://en.wikipedia.org/wiki/Feedforward_neural_network (Feedforward) """ -import numpy +import numpy as np class TwoHiddenLayerNeuralNetwork: - def 
__init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None: + def __init__(self, input_array: np.ndarray, output_array: np.ndarray) -> None: """ This function initializes the TwoHiddenLayerNeuralNetwork class with random weights for every layer and initializes predicted output with zeroes. @@ -28,30 +28,28 @@ def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> N # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. - self.input_layer_and_first_hidden_layer_weights = numpy.random.rand( + self.input_layer_and_first_hidden_layer_weights = np.random.rand( self.input_array.shape[1], 4 ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. - self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand( - 4, 3 - ) + self.first_hidden_layer_and_second_hidden_layer_weights = np.random.rand(4, 3) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. - self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1) + self.second_hidden_layer_and_output_layer_weights = np.random.rand(3, 1) # Real output values provided. self.output_array = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. - self.predicted_output = numpy.zeros(output_array.shape) + self.predicted_output = np.zeros(output_array.shape) - def feedforward(self) -> numpy.ndarray: + def feedforward(self) -> np.ndarray: """ The information moves in only one direction i.e. forward from the input nodes, through the two hidden nodes and to the output nodes. @@ -60,24 +58,24 @@ def feedforward(self) -> numpy.ndarray: Return layer_between_second_hidden_layer_and_output (i.e the last layer of the neural network). 
- >>> input_val = numpy.array(([0, 0, 0], [0, 0, 0], [0, 0, 0]), dtype=float) - >>> output_val = numpy.array(([0], [0], [0]), dtype=float) + >>> input_val = np.array(([0, 0, 0], [0, 0, 0], [0, 0, 0]), dtype=float) + >>> output_val = np.array(([0], [0], [0]), dtype=float) >>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val) >>> res = nn.feedforward() - >>> array_sum = numpy.sum(res) - >>> numpy.isnan(array_sum) + >>> array_sum = np.sum(res) + >>> np.isnan(array_sum) False """ # Layer_between_input_and_first_hidden_layer is the layer connecting the # input nodes with the first hidden layer nodes. self.layer_between_input_and_first_hidden_layer = sigmoid( - numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights) + np.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights) ) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid( - numpy.dot( + np.dot( self.layer_between_input_and_first_hidden_layer, self.first_hidden_layer_and_second_hidden_layer_weights, ) @@ -86,7 +84,7 @@ def feedforward(self) -> numpy.ndarray: # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. self.layer_between_second_hidden_layer_and_output = sigmoid( - numpy.dot( + np.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer, self.second_hidden_layer_and_output_layer_weights, ) @@ -100,8 +98,8 @@ def back_propagation(self) -> None: error rate obtained in the previous epoch (i.e., iteration). Updation is done using derivative of sogmoid activation function. 
- >>> input_val = numpy.array(([0, 0, 0], [0, 0, 0], [0, 0, 0]), dtype=float) - >>> output_val = numpy.array(([0], [0], [0]), dtype=float) + >>> input_val = np.array(([0, 0, 0], [0, 0, 0], [0, 0, 0]), dtype=float) + >>> output_val = np.array(([0], [0], [0]), dtype=float) >>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val) >>> res = nn.feedforward() >>> nn.back_propagation() @@ -110,15 +108,15 @@ def back_propagation(self) -> None: False """ - updated_second_hidden_layer_and_output_layer_weights = numpy.dot( + updated_second_hidden_layer_and_output_layer_weights = np.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T, 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output), ) - updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot( + updated_first_hidden_layer_and_second_hidden_layer_weights = np.dot( self.layer_between_input_and_first_hidden_layer.T, - numpy.dot( + np.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output), @@ -128,10 +126,10 @@ def back_propagation(self) -> None: self.layer_between_first_hidden_layer_and_second_hidden_layer ), ) - updated_input_layer_and_first_hidden_layer_weights = numpy.dot( + updated_input_layer_and_first_hidden_layer_weights = np.dot( self.input_array.T, - numpy.dot( - numpy.dot( + np.dot( + np.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output), @@ -155,7 +153,7 @@ def back_propagation(self) -> None: updated_second_hidden_layer_and_output_layer_weights ) - def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None: + def train(self, output: np.ndarray, iterations: int, give_loss: bool) -> None: """ Performs the feedforwarding and back propagation process for the given number of iterations. 
@@ -166,8 +164,8 @@ def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None give_loss : boolean value, If True then prints loss for each iteration, If False then nothing is printed - >>> input_val = numpy.array(([0, 0, 0], [0, 1, 0], [0, 0, 1]), dtype=float) - >>> output_val = numpy.array(([0], [1], [1]), dtype=float) + >>> input_val = np.array(([0, 0, 0], [0, 1, 0], [0, 0, 1]), dtype=float) + >>> output_val = np.array(([0], [1], [1]), dtype=float) >>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val) >>> first_iteration_weights = nn.feedforward() >>> nn.back_propagation() @@ -179,10 +177,10 @@ def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None self.output = self.feedforward() self.back_propagation() if give_loss: - loss = numpy.mean(numpy.square(output - self.feedforward())) + loss = np.mean(np.square(output - self.feedforward())) print(f"Iteration {iteration} Loss: {loss}") - def predict(self, input_arr: numpy.ndarray) -> int: + def predict(self, input_arr: np.ndarray) -> int: """ Predict's the output for the given input values using the trained neural network. @@ -192,8 +190,8 @@ def predict(self, input_arr: numpy.ndarray) -> int: than the threshold value else returns 0, as the real output values are in binary. 
- >>> input_val = numpy.array(([0, 0, 0], [0, 1, 0], [0, 0, 1]), dtype=float) - >>> output_val = numpy.array(([0], [1], [1]), dtype=float) + >>> input_val = np.array(([0, 0, 0], [0, 1, 0], [0, 0, 1]), dtype=float) + >>> output_val = np.array(([0], [1], [1]), dtype=float) >>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val) >>> nn.train(output_val, 1000, False) >>> nn.predict([0, 1, 0]) in (0, 1) @@ -204,18 +202,18 @@ def predict(self, input_arr: numpy.ndarray) -> int: self.array = input_arr self.layer_between_input_and_first_hidden_layer = sigmoid( - numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights) + np.dot(self.array, self.input_layer_and_first_hidden_layer_weights) ) self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid( - numpy.dot( + np.dot( self.layer_between_input_and_first_hidden_layer, self.first_hidden_layer_and_second_hidden_layer_weights, ) ) self.layer_between_second_hidden_layer_and_output = sigmoid( - numpy.dot( + np.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer, self.second_hidden_layer_and_output_layer_weights, ) @@ -224,26 +222,26 @@ def predict(self, input_arr: numpy.ndarray) -> int: return int((self.layer_between_second_hidden_layer_and_output > 0.6)[0]) -def sigmoid(value: numpy.ndarray) -> numpy.ndarray: +def sigmoid(value: np.ndarray) -> np.ndarray: """ Applies sigmoid activation function. return normalized values - >>> sigmoid(numpy.array(([1, 0, 2], [1, 0, 0]), dtype=numpy.float64)) + >>> sigmoid(np.array(([1, 0, 2], [1, 0, 0]), dtype=np.float64)) array([[0.73105858, 0.5 , 0.88079708], [0.73105858, 0.5 , 0.5 ]]) """ - return 1 / (1 + numpy.exp(-value)) + return 1 / (1 + np.exp(-value)) -def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray: +def sigmoid_derivative(value: np.ndarray) -> np.ndarray: """ Provides the derivative value of the sigmoid function. 
returns derivative of the sigmoid value - >>> sigmoid_derivative(numpy.array(([1, 0, 2], [1, 0, 0]), dtype=numpy.float64)) + >>> sigmoid_derivative(np.array(([1, 0, 2], [1, 0, 0]), dtype=np.float64)) array([[ 0., 0., -2.], [ 0., 0., 0.]]) """ @@ -264,7 +262,7 @@ def example() -> int: True """ # Input values. - test_input = numpy.array( + test_input = np.array( ( [0, 0, 0], [0, 0, 1], @@ -275,11 +273,11 @@ def example() -> int: [1, 1, 0], [1, 1, 1], ), - dtype=numpy.float64, + dtype=np.float64, ) # True output values for the given input values. - output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64) + output = np.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=np.float64) # Calling neural network class. neural_network = TwoHiddenLayerNeuralNetwork( @@ -290,7 +288,7 @@ def example() -> int: # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=output, iterations=10, give_loss=False) - return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64)) + return neural_network.predict(np.array(([1, 1, 1]), dtype=np.float64)) if __name__ == "__main__": diff --git a/pyproject.toml b/pyproject.toml index b9f3115df92a..22da7cb777b5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,7 +6,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "EM101", # Exception must not use a string literal, assign to variable first "EXE001", # Shebang is present but file is not executable" -- FIX ME "G004", # Logging statement uses f-string - "ICN001", # `matplotlib.pyplot` should be imported as `plt` -- FIX ME "INP001", # File `x/y/z.py` is part of an implicit namespace package. Add an `__init__.py`. 
-- FIX ME "NPY002", # Replace legacy `np.random.choice` call with `np.random.Generator` -- FIX ME "PGH003", # Use specific rule codes when ignoring type issues -- FIX ME From c328b000ecdd4ad08d029999144e7ec702022390 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 1 Apr 2024 21:35:37 +0200 Subject: [PATCH 046/260] [pre-commit.ci] pre-commit autoupdate (#11339) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.3.4 → v0.3.5](https://github.com/astral-sh/ruff-pre-commit/compare/v0.3.4...v0.3.5) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8b101207d5ff..e6b1b0442c04 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.3.4 + rev: v0.3.5 hooks: - id: ruff - id: ruff-format From 39daaf8248b37404f69e8459d0378d77b59c6c0f Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 1 Apr 2024 22:36:41 +0300 Subject: [PATCH 047/260] Enable ruff RUF100 rule (#11337) --- audio_filters/butterworth_filter.py | 14 +++++++------- data_structures/binary_tree/basic_binary_tree.py | 2 +- .../binary_tree/non_recursive_segment_tree.py | 2 +- data_structures/binary_tree/red_black_tree.py | 2 +- data_structures/binary_tree/segment_tree.py | 6 +++--- data_structures/heap/min_heap.py | 2 +- dynamic_programming/longest_common_subsequence.py | 2 +- .../longest_increasing_subsequence_o_nlogn.py | 4 ++-- graphs/articulation_points.py | 2 +- graphs/dinic.py | 2 +- other/sdes.py | 4 ++-- project_euler/problem_011/sol2.py | 2 +- pyproject.toml | 1 - strings/manacher.py | 2 +- 14 files changed, 23 insertions(+), 24 deletions(-) diff 
--git a/audio_filters/butterworth_filter.py b/audio_filters/butterworth_filter.py index 6449bc3f3dce..4e6ea1b18fb4 100644 --- a/audio_filters/butterworth_filter.py +++ b/audio_filters/butterworth_filter.py @@ -13,7 +13,7 @@ def make_lowpass( frequency: int, samplerate: int, - q_factor: float = 1 / sqrt(2), # noqa: B008 + q_factor: float = 1 / sqrt(2), ) -> IIRFilter: """ Creates a low-pass filter @@ -43,7 +43,7 @@ def make_lowpass( def make_highpass( frequency: int, samplerate: int, - q_factor: float = 1 / sqrt(2), # noqa: B008 + q_factor: float = 1 / sqrt(2), ) -> IIRFilter: """ Creates a high-pass filter @@ -73,7 +73,7 @@ def make_highpass( def make_bandpass( frequency: int, samplerate: int, - q_factor: float = 1 / sqrt(2), # noqa: B008 + q_factor: float = 1 / sqrt(2), ) -> IIRFilter: """ Creates a band-pass filter @@ -104,7 +104,7 @@ def make_bandpass( def make_allpass( frequency: int, samplerate: int, - q_factor: float = 1 / sqrt(2), # noqa: B008 + q_factor: float = 1 / sqrt(2), ) -> IIRFilter: """ Creates an all-pass filter @@ -132,7 +132,7 @@ def make_peak( frequency: int, samplerate: int, gain_db: float, - q_factor: float = 1 / sqrt(2), # noqa: B008 + q_factor: float = 1 / sqrt(2), ) -> IIRFilter: """ Creates a peak filter @@ -164,7 +164,7 @@ def make_lowshelf( frequency: int, samplerate: int, gain_db: float, - q_factor: float = 1 / sqrt(2), # noqa: B008 + q_factor: float = 1 / sqrt(2), ) -> IIRFilter: """ Creates a low-shelf filter @@ -201,7 +201,7 @@ def make_highshelf( frequency: int, samplerate: int, gain_db: float, - q_factor: float = 1 / sqrt(2), # noqa: B008 + q_factor: float = 1 / sqrt(2), ) -> IIRFilter: """ Creates a high-shelf filter diff --git a/data_structures/binary_tree/basic_binary_tree.py b/data_structures/binary_tree/basic_binary_tree.py index 0439413d95b5..9d4c1bdbb57a 100644 --- a/data_structures/binary_tree/basic_binary_tree.py +++ b/data_structures/binary_tree/basic_binary_tree.py @@ -85,7 +85,7 @@ def depth(self) -> int: """ return 
self._depth(self.root) - def _depth(self, node: Node | None) -> int: # noqa: UP007 + def _depth(self, node: Node | None) -> int: if not node: return 0 return 1 + max(self._depth(node.left), self._depth(node.right)) diff --git a/data_structures/binary_tree/non_recursive_segment_tree.py b/data_structures/binary_tree/non_recursive_segment_tree.py index 42c78a3a1be0..45c476701d79 100644 --- a/data_structures/binary_tree/non_recursive_segment_tree.py +++ b/data_structures/binary_tree/non_recursive_segment_tree.py @@ -87,7 +87,7 @@ def update(self, p: int, v: T) -> None: p = p // 2 self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1]) - def query(self, l: int, r: int) -> T | None: # noqa: E741 + def query(self, l: int, r: int) -> T | None: """ Get range query value in log(N) time :param l: left element index diff --git a/data_structures/binary_tree/red_black_tree.py b/data_structures/binary_tree/red_black_tree.py index bdd808c828e0..e68d8d1e3735 100644 --- a/data_structures/binary_tree/red_black_tree.py +++ b/data_structures/binary_tree/red_black_tree.py @@ -152,7 +152,7 @@ def _insert_repair(self) -> None: self.grandparent.color = 1 self.grandparent._insert_repair() - def remove(self, label: int) -> RedBlackTree: # noqa: PLR0912 + def remove(self, label: int) -> RedBlackTree: """Remove label from this tree.""" if self.label == label: if self.left and self.right: diff --git a/data_structures/binary_tree/segment_tree.py b/data_structures/binary_tree/segment_tree.py index 3b0b32946f6e..bb9c1ae2268b 100644 --- a/data_structures/binary_tree/segment_tree.py +++ b/data_structures/binary_tree/segment_tree.py @@ -35,7 +35,7 @@ def right(self, idx): """ return idx * 2 + 1 - def build(self, idx, l, r): # noqa: E741 + def build(self, idx, l, r): if l == r: self.st[idx] = self.A[l] else: @@ -56,7 +56,7 @@ def update(self, a, b, val): """ return self.update_recursive(1, 0, self.N - 1, a - 1, b - 1, val) - def update_recursive(self, idx, l, r, a, b, val): # noqa: E741 + def 
update_recursive(self, idx, l, r, a, b, val): """ update(1, 1, N, a, b, v) for update val v to [a,b] """ @@ -83,7 +83,7 @@ def query(self, a, b): """ return self.query_recursive(1, 0, self.N - 1, a - 1, b - 1) - def query_recursive(self, idx, l, r, a, b): # noqa: E741 + def query_recursive(self, idx, l, r, a, b): """ query(1, 1, N, a, b) for query max of [a,b] """ diff --git a/data_structures/heap/min_heap.py b/data_structures/heap/min_heap.py index ecb1876493b0..39f6d99e8a4c 100644 --- a/data_structures/heap/min_heap.py +++ b/data_structures/heap/min_heap.py @@ -66,7 +66,7 @@ def build_heap(self, array): # this is min-heapify method def sift_down(self, idx, array): while True: - l = self.get_left_child_idx(idx) # noqa: E741 + l = self.get_left_child_idx(idx) r = self.get_right_child_idx(idx) smallest = idx diff --git a/dynamic_programming/longest_common_subsequence.py b/dynamic_programming/longest_common_subsequence.py index 178b4169b213..22f50a166ae4 100644 --- a/dynamic_programming/longest_common_subsequence.py +++ b/dynamic_programming/longest_common_subsequence.py @@ -38,7 +38,7 @@ def longest_common_subsequence(x: str, y: str): n = len(y) # declaring the array for storing the dp values - l = [[0] * (n + 1) for _ in range(m + 1)] # noqa: E741 + l = [[0] * (n + 1) for _ in range(m + 1)] for i in range(1, m + 1): for j in range(1, n + 1): diff --git a/dynamic_programming/longest_increasing_subsequence_o_nlogn.py b/dynamic_programming/longest_increasing_subsequence_o_nlogn.py index 5e11d729f395..44e333e97779 100644 --- a/dynamic_programming/longest_increasing_subsequence_o_nlogn.py +++ b/dynamic_programming/longest_increasing_subsequence_o_nlogn.py @@ -7,13 +7,13 @@ from __future__ import annotations -def ceil_index(v, l, r, key): # noqa: E741 +def ceil_index(v, l, r, key): while r - l > 1: m = (l + r) // 2 if v[m] >= key: r = m else: - l = m # noqa: E741 + l = m return r diff --git a/graphs/articulation_points.py b/graphs/articulation_points.py index 
d28045282425..3fcaffd73725 100644 --- a/graphs/articulation_points.py +++ b/graphs/articulation_points.py @@ -1,5 +1,5 @@ # Finding Articulation Points in Undirected Graph -def compute_ap(l): # noqa: E741 +def compute_ap(l): n = len(l) out_edge_count = 0 low = [0] * n diff --git a/graphs/dinic.py b/graphs/dinic.py index aaf3a119525c..4f5e81236984 100644 --- a/graphs/dinic.py +++ b/graphs/dinic.py @@ -37,7 +37,7 @@ def depth_first_search(self, vertex, sink, flow): # Here we calculate the flow that reaches the sink def max_flow(self, source, sink): flow, self.q[0] = 0, source - for l in range(31): # noqa: E741 l = 30 maybe faster for random data + for l in range(31): # l = 30 maybe faster for random data while True: self.lvl, self.ptr = [0] * len(self.q), [0] * len(self.q) qi, qe, self.lvl[source] = 0, 1, 1 diff --git a/other/sdes.py b/other/sdes.py index 31105984b9bb..a69add3430c3 100644 --- a/other/sdes.py +++ b/other/sdes.py @@ -44,9 +44,9 @@ def function(expansion, s0, s1, key, message): right = message[4:] temp = apply_table(right, expansion) temp = xor(temp, key) - l = apply_sbox(s0, temp[:4]) # noqa: E741 + l = apply_sbox(s0, temp[:4]) r = apply_sbox(s1, temp[4:]) - l = "0" * (2 - len(l)) + l # noqa: E741 + l = "0" * (2 - len(l)) + l r = "0" * (2 - len(r)) + r temp = apply_table(l + r, p4_table) temp = xor(left, temp) diff --git a/project_euler/problem_011/sol2.py b/project_euler/problem_011/sol2.py index 9ea0db991aaf..2958305331a9 100644 --- a/project_euler/problem_011/sol2.py +++ b/project_euler/problem_011/sol2.py @@ -35,7 +35,7 @@ def solution(): 70600674 """ with open(os.path.dirname(__file__) + "/grid.txt") as f: - l = [] # noqa: E741 + l = [] for _ in range(20): l.append([int(x) for x in f.readline().split()]) diff --git a/pyproject.toml b/pyproject.toml index 22da7cb777b5..c8a8744abc83 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,7 +15,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "PT011", # 
`pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception "PT018", # Assertion should be broken down into multiple parts "RUF00", # Ambiguous unicode character and other rules - "RUF100", # Unused `noqa` directive -- FIX ME "S101", # Use of `assert` detected -- DO NOT FIX "S105", # Possible hardcoded password: 'password' "S113", # Probable use of requests call without timeout -- FIX ME diff --git a/strings/manacher.py b/strings/manacher.py index c58c7c19ec44..ca546e533acd 100644 --- a/strings/manacher.py +++ b/strings/manacher.py @@ -50,7 +50,7 @@ def palindromic_string(input_string: str) -> str: # does this string is ending after the previously explored end (that is r) ? # if yes the update the new r to the last index of this if j + k - 1 > r: - l = j - k + 1 # noqa: E741 + l = j - k + 1 r = j + k - 1 # update max_length and start position From f8a948914b928d9fd3c0e32c034bd90315caa389 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 1 Apr 2024 22:39:31 +0300 Subject: [PATCH 048/260] Enable ruff NPY002 rule (#11336) --- linear_algebra/src/conjugate_gradient.py | 6 ++++-- machine_learning/decision_tree.py | 3 ++- machine_learning/k_means_clust.py | 6 +++--- machine_learning/sequential_minimum_optimization.py | 5 +++-- neural_network/back_propagation_neural_network.py | 8 +++++--- neural_network/convolution_neural_network.py | 13 +++++++------ neural_network/input_data.py | 6 +++--- neural_network/two_hidden_layers_neural_network.py | 9 +++++---- pyproject.toml | 1 - 9 files changed, 32 insertions(+), 25 deletions(-) diff --git a/linear_algebra/src/conjugate_gradient.py b/linear_algebra/src/conjugate_gradient.py index 4c0b58deb978..45da35813978 100644 --- a/linear_algebra/src/conjugate_gradient.py +++ b/linear_algebra/src/conjugate_gradient.py @@ -61,7 +61,8 @@ def _create_spd_matrix(dimension: int) -> Any: >>> _is_matrix_spd(spd_matrix) True """ - random_matrix = np.random.randn(dimension, dimension) + rng = 
np.random.default_rng() + random_matrix = rng.normal(size=(dimension, dimension)) spd_matrix = np.dot(random_matrix, random_matrix.T) assert _is_matrix_spd(spd_matrix) return spd_matrix @@ -157,7 +158,8 @@ def test_conjugate_gradient() -> None: # Create linear system with SPD matrix and known solution x_true. dimension = 3 spd_matrix = _create_spd_matrix(dimension) - x_true = np.random.randn(dimension, 1) + rng = np.random.default_rng() + x_true = rng.normal(size=(dimension, 1)) b = np.dot(spd_matrix, x_true) # Numpy solution. diff --git a/machine_learning/decision_tree.py b/machine_learning/decision_tree.py index 7f129919a3ce..e48905eeac6a 100644 --- a/machine_learning/decision_tree.py +++ b/machine_learning/decision_tree.py @@ -187,7 +187,8 @@ def main(): tree = DecisionTree(depth=10, min_leaf_size=10) tree.train(x, y) - test_cases = (np.random.rand(10) * 2) - 1 + rng = np.random.default_rng() + test_cases = (rng.random(10) * 2) - 1 predictions = np.array([tree.predict(x) for x in test_cases]) avg_error = np.mean((predictions - test_cases) ** 2) diff --git a/machine_learning/k_means_clust.py b/machine_learning/k_means_clust.py index 9f6646944458..a926362fc18b 100644 --- a/machine_learning/k_means_clust.py +++ b/machine_learning/k_means_clust.py @@ -55,12 +55,12 @@ def get_initial_centroids(data, k, seed=None): """Randomly choose k data points as initial centroids""" - if seed is not None: # useful for obtaining consistent results - np.random.seed(seed) + # useful for obtaining consistent results + rng = np.random.default_rng(seed) n = data.shape[0] # number of data points # Pick K indices from range [0, N). - rand_indices = np.random.randint(0, n, k) + rand_indices = rng.integers(0, n, k) # Keep centroids as dense format, as many entries will be nonzero due to averaging. 
# As long as at least one document in a cluster contains a word, diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index be16baca1a4c..408d59ab5d29 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -289,12 +289,13 @@ def _choose_a2(self, i1): if cmd is None: return - for i2 in np.roll(self.unbound, np.random.choice(self.length)): + rng = np.random.default_rng() + for i2 in np.roll(self.unbound, rng.choice(self.length)): cmd = yield i1, i2 if cmd is None: return - for i2 in np.roll(self._all_samples, np.random.choice(self.length)): + for i2 in np.roll(self._all_samples, rng.choice(self.length)): cmd = yield i1, i2 if cmd is None: return diff --git a/neural_network/back_propagation_neural_network.py b/neural_network/back_propagation_neural_network.py index 7e0bdbbe2857..6131a13e945e 100644 --- a/neural_network/back_propagation_neural_network.py +++ b/neural_network/back_propagation_neural_network.py @@ -51,8 +51,9 @@ def __init__( self.is_input_layer = is_input_layer def initializer(self, back_units): - self.weight = np.asmatrix(np.random.normal(0, 0.5, (self.units, back_units))) - self.bias = np.asmatrix(np.random.normal(0, 0.5, self.units)).T + rng = np.random.default_rng() + self.weight = np.asmatrix(rng.normal(0, 0.5, (self.units, back_units))) + self.bias = np.asmatrix(rng.normal(0, 0.5, self.units)).T if self.activation is None: self.activation = sigmoid @@ -174,7 +175,8 @@ def plot_loss(self): def example(): - x = np.random.randn(10, 10) + rng = np.random.default_rng() + x = rng.normal(size=(10, 10)) y = np.asarray( [ [0.8, 0.4], diff --git a/neural_network/convolution_neural_network.py b/neural_network/convolution_neural_network.py index 07cc456b7466..3c551924442d 100644 --- a/neural_network/convolution_neural_network.py +++ b/neural_network/convolution_neural_network.py @@ -41,15 +41,16 @@ def __init__( 
self.size_pooling1 = size_p1 self.rate_weight = rate_w self.rate_thre = rate_t + rng = np.random.default_rng() self.w_conv1 = [ - np.asmatrix(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5) + np.asmatrix(-1 * rng.random((self.conv1[0], self.conv1[0])) + 0.5) for i in range(self.conv1[1]) ] - self.wkj = np.asmatrix(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5) - self.vji = np.asmatrix(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5) - self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1 - self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1 - self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1 + self.wkj = np.asmatrix(-1 * rng.random((self.num_bp3, self.num_bp2)) + 0.5) + self.vji = np.asmatrix(-1 * rng.random((self.num_bp2, self.num_bp1)) + 0.5) + self.thre_conv1 = -2 * rng.random(self.conv1[1]) + 1 + self.thre_bp2 = -2 * rng.random(self.num_bp2) + 1 + self.thre_bp3 = -2 * rng.random(self.num_bp3) + 1 def save_model(self, save_path): # save model dict with pickle diff --git a/neural_network/input_data.py b/neural_network/input_data.py index 9d4195487dbb..d189e3f9e0d9 100644 --- a/neural_network/input_data.py +++ b/neural_network/input_data.py @@ -153,7 +153,7 @@ def __init__( """ seed1, seed2 = random_seed.get_seed(seed) # If op level seed is not set, use whatever graph level seed is returned - np.random.seed(seed1 if seed is None else seed2) + self._rng = np.random.default_rng(seed1 if seed is None else seed2) dtype = dtypes.as_dtype(dtype).base_dtype if dtype not in (dtypes.uint8, dtypes.float32): raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype) @@ -211,7 +211,7 @@ def next_batch(self, batch_size, fake_data=False, shuffle=True): # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: perm0 = np.arange(self._num_examples) - np.random.shuffle(perm0) + self._rng.shuffle(perm0) self._images = self.images[perm0] self._labels = self.labels[perm0] # Go to the next epoch @@ -225,7 
+225,7 @@ def next_batch(self, batch_size, fake_data=False, shuffle=True): # Shuffle the data if shuffle: perm = np.arange(self._num_examples) - np.random.shuffle(perm) + self._rng.shuffle(perm) self._images = self.images[perm] self._labels = self.labels[perm] # Start next epoch diff --git a/neural_network/two_hidden_layers_neural_network.py b/neural_network/two_hidden_layers_neural_network.py index dea7e2342d9f..d488de590cc2 100644 --- a/neural_network/two_hidden_layers_neural_network.py +++ b/neural_network/two_hidden_layers_neural_network.py @@ -28,19 +28,20 @@ def __init__(self, input_array: np.ndarray, output_array: np.ndarray) -> None: # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. - self.input_layer_and_first_hidden_layer_weights = np.random.rand( - self.input_array.shape[1], 4 + rng = np.random.default_rng() + self.input_layer_and_first_hidden_layer_weights = rng.random( + (self.input_array.shape[1], 4) ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. - self.first_hidden_layer_and_second_hidden_layer_weights = np.random.rand(4, 3) + self.first_hidden_layer_and_second_hidden_layer_weights = rng.random((4, 3)) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. - self.second_hidden_layer_and_output_layer_weights = np.random.rand(3, 1) + self.second_hidden_layer_and_output_layer_weights = rng.random((3, 1)) # Real output values provided. 
self.output_array = output_array diff --git a/pyproject.toml b/pyproject.toml index c8a8744abc83..50cd38005f09 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "EXE001", # Shebang is present but file is not executable" -- FIX ME "G004", # Logging statement uses f-string "INP001", # File `x/y/z.py` is part of an implicit namespace package. Add an `__init__.py`. -- FIX ME - "NPY002", # Replace legacy `np.random.choice` call with `np.random.Generator` -- FIX ME "PGH003", # Use specific rule codes when ignoring type issues -- FIX ME "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX From 93fb555e0a97096f62a122e73cfdc6f0579cefbe Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 2 Apr 2024 04:27:56 +0300 Subject: [PATCH 049/260] Enable ruff SIM102 rule (#11341) * Enable ruff SIM102 rule * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- data_structures/arrays/sudoku_solver.py | 7 +++---- .../stacks/balanced_parentheses.py | 7 ++++--- graphs/a_star.py | 20 ++++++++++++------- graphs/bi_directional_dijkstra.py | 8 +++++--- other/davis_putnam_logemann_loveland.py | 7 +++---- project_euler/problem_033/sol1.py | 10 +++++++--- project_euler/problem_037/sol1.py | 7 ++++--- project_euler/problem_107/sol1.py | 9 +++++---- project_euler/problem_207/sol1.py | 8 +++++--- pyproject.toml | 1 - scheduling/shortest_job_first.py | 13 +++++++----- scripts/validate_solutions.py | 11 ++++++---- web_programming/emails_from_url.py | 15 ++++++++------ 13 files changed, 73 insertions(+), 50 deletions(-) diff --git 
a/data_structures/arrays/sudoku_solver.py b/data_structures/arrays/sudoku_solver.py index c9dffcde2379..5c1cff06f9d4 100644 --- a/data_structures/arrays/sudoku_solver.py +++ b/data_structures/arrays/sudoku_solver.py @@ -92,10 +92,9 @@ def eliminate(values, s, d): dplaces = [s for s in u if d in values[s]] if len(dplaces) == 0: return False ## Contradiction: no place for this value - elif len(dplaces) == 1: - # d can only be in one place in unit; assign it there - if not assign(values, dplaces[0], d): - return False + # d can only be in one place in unit; assign it there + elif len(dplaces) == 1 and not assign(values, dplaces[0], d): + return False return values diff --git a/data_structures/stacks/balanced_parentheses.py b/data_structures/stacks/balanced_parentheses.py index 3c036c220e5c..928815bb2111 100644 --- a/data_structures/stacks/balanced_parentheses.py +++ b/data_structures/stacks/balanced_parentheses.py @@ -19,9 +19,10 @@ def balanced_parentheses(parentheses: str) -> bool: for bracket in parentheses: if bracket in bracket_pairs: stack.push(bracket) - elif bracket in (")", "]", "}"): - if stack.is_empty() or bracket_pairs[stack.pop()] != bracket: - return False + elif bracket in (")", "]", "}") and ( + stack.is_empty() or bracket_pairs[stack.pop()] != bracket + ): + return False return stack.is_empty() diff --git a/graphs/a_star.py b/graphs/a_star.py index 06da3b5cd863..1d7063ccc55a 100644 --- a/graphs/a_star.py +++ b/graphs/a_star.py @@ -75,13 +75,19 @@ def search( for i in range(len(DIRECTIONS)): # to try out different valid actions x2 = x + DIRECTIONS[i][0] y2 = y + DIRECTIONS[i][1] - if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]): - if closed[x2][y2] == 0 and grid[x2][y2] == 0: - g2 = g + cost - f2 = g2 + heuristic[x2][y2] - cell.append([f2, g2, x2, y2]) - closed[x2][y2] = 1 - action[x2][y2] = i + if ( + x2 >= 0 + and x2 < len(grid) + and y2 >= 0 + and y2 < len(grid[0]) + and closed[x2][y2] == 0 + and grid[x2][y2] == 0 + ): + g2 = g + 
cost + f2 = g2 + heuristic[x2][y2] + cell.append([f2, g2, x2, y2]) + closed[x2][y2] = 1 + action[x2][y2] = i invpath = [] x = goal[0] y = goal[1] diff --git a/graphs/bi_directional_dijkstra.py b/graphs/bi_directional_dijkstra.py index 7b9eac6c8587..d2c4030b921b 100644 --- a/graphs/bi_directional_dijkstra.py +++ b/graphs/bi_directional_dijkstra.py @@ -36,9 +36,11 @@ def pass_and_relaxation( queue.put((new_cost_f, nxt)) cst_fwd[nxt] = new_cost_f parent[nxt] = v - if nxt in visited_backward: - if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: - shortest_distance = cst_fwd[v] + d + cst_bwd[nxt] + if ( + nxt in visited_backward + and cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance + ): + shortest_distance = cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance diff --git a/other/davis_putnam_logemann_loveland.py b/other/davis_putnam_logemann_loveland.py index 5c6e2d9ffd5e..3a76f3dfef08 100644 --- a/other/davis_putnam_logemann_loveland.py +++ b/other/davis_putnam_logemann_loveland.py @@ -64,10 +64,9 @@ def assign(self, model: dict[str, bool | None]) -> None: value = model[symbol] else: continue - if value is not None: - # Complement assignment if literal is in complemented form - if literal.endswith("'"): - value = not value + # Complement assignment if literal is in complemented form + if value is not None and literal.endswith("'"): + value = not value self.literals[literal] = value def evaluate(self, model: dict[str, bool | None]) -> bool | None: diff --git a/project_euler/problem_033/sol1.py b/project_euler/problem_033/sol1.py index 187fd61bde6c..71790d34fbed 100644 --- a/project_euler/problem_033/sol1.py +++ b/project_euler/problem_033/sol1.py @@ -44,9 +44,13 @@ def fraction_list(digit_len: int) -> list[str]: last_digit = int("1" + "0" * digit_len) for num in range(den, last_digit): while den <= 99: - if (num != den) and (num % 10 == den // 10) and (den % 10 != 0): - if is_digit_cancelling(num, den): - solutions.append(f"{num}/{den}") + if ( + (num != den) + 
and (num % 10 == den // 10) + and (den % 10 != 0) + and is_digit_cancelling(num, den) + ): + solutions.append(f"{num}/{den}") den += 1 num += 1 den = 10 diff --git a/project_euler/problem_037/sol1.py b/project_euler/problem_037/sol1.py index ef7686cbcb96..9c09065f4bd0 100644 --- a/project_euler/problem_037/sol1.py +++ b/project_euler/problem_037/sol1.py @@ -85,9 +85,10 @@ def validate(n: int) -> bool: >>> validate(3797) True """ - if len(str(n)) > 3: - if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])): - return False + if len(str(n)) > 3 and ( + not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])) + ): + return False return True diff --git a/project_euler/problem_107/sol1.py b/project_euler/problem_107/sol1.py index 3fe75909e2ea..79cdd937042e 100644 --- a/project_euler/problem_107/sol1.py +++ b/project_euler/problem_107/sol1.py @@ -81,10 +81,11 @@ def prims_algorithm(self) -> Graph: while len(subgraph.vertices) < len(self.vertices): min_weight = max(self.edges.values()) + 1 for edge, weight in self.edges.items(): - if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): - if weight < min_weight: - min_edge = edge - min_weight = weight + if (edge[0] in subgraph.vertices) ^ ( + edge[1] in subgraph.vertices + ) and weight < min_weight: + min_edge = edge + min_weight = weight subgraph.add_edge(min_edge, min_weight) diff --git a/project_euler/problem_207/sol1.py b/project_euler/problem_207/sol1.py index 2b3591f51cfa..c83dc1d4aaef 100644 --- a/project_euler/problem_207/sol1.py +++ b/project_euler/problem_207/sol1.py @@ -88,9 +88,11 @@ def solution(max_proportion: float = 1 / 12345) -> int: total_partitions += 1 if check_partition_perfect(partition_candidate): perfect_partitions += 1 - if perfect_partitions > 0: - if perfect_partitions / total_partitions < max_proportion: - return int(partition_candidate) + if ( + perfect_partitions > 0 + and perfect_partitions / total_partitions < max_proportion + ): + return 
int(partition_candidate) integer += 1 diff --git a/pyproject.toml b/pyproject.toml index 50cd38005f09..e3cf42c92c54 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -18,7 +18,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "S105", # Possible hardcoded password: 'password' "S113", # Probable use of requests call without timeout -- FIX ME "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME - "SIM102", # Use a single `if` statement instead of nested `if` statements -- FIX ME "SLF001", # Private member accessed: `_Iterator` -- FIX ME "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX ] diff --git a/scheduling/shortest_job_first.py b/scheduling/shortest_job_first.py index cfd0417ea62d..6899ec87c591 100644 --- a/scheduling/shortest_job_first.py +++ b/scheduling/shortest_job_first.py @@ -37,11 +37,14 @@ def calculate_waitingtime( # Process until all processes are completed while complete != no_of_processes: for j in range(no_of_processes): - if arrival_time[j] <= increment_time and remaining_time[j] > 0: - if remaining_time[j] < minm: - minm = remaining_time[j] - short = j - check = True + if ( + arrival_time[j] <= increment_time + and remaining_time[j] > 0 + and remaining_time[j] < minm + ): + minm = remaining_time[j] + short = j + check = True if not check: increment_time += 1 diff --git a/scripts/validate_solutions.py b/scripts/validate_solutions.py index ca4af5261a8f..0afbdde315c7 100755 --- a/scripts/validate_solutions.py +++ b/scripts/validate_solutions.py @@ -71,10 +71,13 @@ def added_solution_file_path() -> list[pathlib.Path]: def collect_solution_file_paths() -> list[pathlib.Path]: - if os.environ.get("CI") and os.environ.get("GITHUB_EVENT_NAME") == "pull_request": - # Return only if there are any, otherwise default to all solutions - if filepaths := added_solution_file_path(): - return filepaths + # Return only if there are any, otherwise default to all solutions + if ( + 
os.environ.get("CI") + and os.environ.get("GITHUB_EVENT_NAME") == "pull_request" + and (filepaths := added_solution_file_path()) + ): + return filepaths return all_solution_file_paths() diff --git a/web_programming/emails_from_url.py b/web_programming/emails_from_url.py index 6b4bacfe7d5a..26c88e1b13a5 100644 --- a/web_programming/emails_from_url.py +++ b/web_programming/emails_from_url.py @@ -30,12 +30,15 @@ def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None if tag == "a": # Check the list of defined attributes. for name, value in attrs: - # If href is defined, and not empty nor # print it. - if name == "href" and value != "#" and value != "": - # If not already in urls. - if value not in self.urls: - url = parse.urljoin(self.domain, value) - self.urls.append(url) + # If href is defined, not empty nor # print it and not already in urls. + if ( + name == "href" + and value != "#" + and value != "" + and value not in self.urls + ): + url = parse.urljoin(self.domain, value) + self.urls.append(url) # Get main domain name (example.com) From f8cdb3e9482ddca85cd1bffa96c038afc13f9c85 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 2 Apr 2024 19:44:37 +0300 Subject: [PATCH 050/260] Enable ruff S105 rule (#11343) * Enable ruff S105 rule * Update web_programming/recaptcha_verification.py --------- Co-authored-by: Christian Clauss --- pyproject.toml | 1 - web_programming/recaptcha_verification.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index e3cf42c92c54..65a0754d678c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,7 +15,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "PT018", # Assertion should be broken down into multiple parts "RUF00", # Ambiguous unicode character and other rules "S101", # Use of `assert` detected -- DO NOT FIX - "S105", # Possible hardcoded password: 'password' "S113", # Probable use of requests call without timeout -- FIX ME 
"S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME "SLF001", # Private member accessed: `_Iterator` -- FIX ME diff --git a/web_programming/recaptcha_verification.py b/web_programming/recaptcha_verification.py index b03afb28ec53..c9b691b28a8b 100644 --- a/web_programming/recaptcha_verification.py +++ b/web_programming/recaptcha_verification.py @@ -43,7 +43,7 @@ def login_using_recaptcha(request): # Enter your recaptcha secret key here - secret_key = "secretKey" + secret_key = "secretKey" # noqa: S105 url = "https://www.google.com/recaptcha/api/siteverify" # when method is not POST, direct user to login page From f437f922792b8c7e3fbb168a1ec6bfdf183a7304 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 2 Apr 2024 22:13:56 +0300 Subject: [PATCH 051/260] Enable ruff INP001 rule (#11346) * Enable ruff INP001 rule * Fix * Fix * Fix * Fix * Fix --- data_structures/arrays/__init__.py | 0 data_structures/hashing/tests/__init__.py | 0 digital_image_processing/morphological_operations/__init__.py | 0 electronics/__init__.py | 0 electronics/circular_convolution.py | 3 +-- fractals/__init__.py | 0 geometry/__init__.py | 0 greedy_methods/__init__.py | 0 linear_algebra/src/gaussian_elimination_pivoting/__init__.py | 0 linear_programming/__init__.py | 0 maths/numerical_analysis/__init__.py | 0 maths/special_numbers/__init__.py | 0 neural_network/activation_functions/__init__.py | 0 neural_network/activation_functions/mish.py | 3 ++- pyproject.toml | 1 - 15 files changed, 3 insertions(+), 4 deletions(-) create mode 100644 data_structures/arrays/__init__.py create mode 100644 data_structures/hashing/tests/__init__.py create mode 100644 digital_image_processing/morphological_operations/__init__.py create mode 100644 electronics/__init__.py create mode 100644 fractals/__init__.py create mode 100644 geometry/__init__.py create mode 100644 greedy_methods/__init__.py create mode 100644 
linear_algebra/src/gaussian_elimination_pivoting/__init__.py create mode 100644 linear_programming/__init__.py create mode 100644 maths/numerical_analysis/__init__.py create mode 100644 maths/special_numbers/__init__.py create mode 100644 neural_network/activation_functions/__init__.py diff --git a/data_structures/arrays/__init__.py b/data_structures/arrays/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/data_structures/hashing/tests/__init__.py b/data_structures/hashing/tests/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/digital_image_processing/morphological_operations/__init__.py b/digital_image_processing/morphological_operations/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/electronics/__init__.py b/electronics/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/electronics/circular_convolution.py b/electronics/circular_convolution.py index f2e35742e944..768f2ad941bc 100644 --- a/electronics/circular_convolution.py +++ b/electronics/circular_convolution.py @@ -37,8 +37,7 @@ def circular_convolution(self) -> list[float]: using matrix method Usage: - >>> import circular_convolution as cc - >>> convolution = cc.CircularConvolution() + >>> convolution = CircularConvolution() >>> convolution.circular_convolution() [10, 10, 6, 14] diff --git a/fractals/__init__.py b/fractals/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/geometry/__init__.py b/geometry/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/greedy_methods/__init__.py b/greedy_methods/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/linear_algebra/src/gaussian_elimination_pivoting/__init__.py b/linear_algebra/src/gaussian_elimination_pivoting/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/linear_programming/__init__.py b/linear_programming/__init__.py new file 
mode 100644 index 000000000000..e69de29bb2d1 diff --git a/maths/numerical_analysis/__init__.py b/maths/numerical_analysis/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/maths/special_numbers/__init__.py b/maths/special_numbers/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/neural_network/activation_functions/__init__.py b/neural_network/activation_functions/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/neural_network/activation_functions/mish.py b/neural_network/activation_functions/mish.py index e51655df8a3f..57a91413fe50 100644 --- a/neural_network/activation_functions/mish.py +++ b/neural_network/activation_functions/mish.py @@ -7,7 +7,8 @@ """ import numpy as np -from softplus import softplus + +from .softplus import softplus def mish(vector: np.ndarray) -> np.ndarray: diff --git a/pyproject.toml b/pyproject.toml index 65a0754d678c..9689cf2b37aa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,7 +6,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "EM101", # Exception must not use a string literal, assign to variable first "EXE001", # Shebang is present but file is not executable" -- FIX ME "G004", # Logging statement uses f-string - "INP001", # File `x/y/z.py` is part of an implicit namespace package. Add an `__init__.py`. 
-- FIX ME "PGH003", # Use specific rule codes when ignoring type issues -- FIX ME "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX From f5bbea3776a5038d0e428ce3c06c25086076e212 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 2 Apr 2024 22:18:47 +0300 Subject: [PATCH 052/260] Enable ruff RUF005 rule (#11344) --- data_structures/binary_tree/binary_search_tree.py | 2 +- dynamic_programming/subset_generation.py | 2 +- maths/odd_sieve.py | 2 +- pyproject.toml | 5 ++++- 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/data_structures/binary_tree/binary_search_tree.py b/data_structures/binary_tree/binary_search_tree.py index 090e3e25fe6d..32194ddc2043 100644 --- a/data_structures/binary_tree/binary_search_tree.py +++ b/data_structures/binary_tree/binary_search_tree.py @@ -336,7 +336,7 @@ def inorder(curr_node: Node | None) -> list[Node]: """ node_list = [] if curr_node is not None: - node_list = inorder(curr_node.left) + [curr_node] + inorder(curr_node.right) + node_list = [*inorder(curr_node.left), curr_node, *inorder(curr_node.right)] return node_list diff --git a/dynamic_programming/subset_generation.py b/dynamic_programming/subset_generation.py index 1be412b9374d..d490bca737ba 100644 --- a/dynamic_programming/subset_generation.py +++ b/dynamic_programming/subset_generation.py @@ -45,7 +45,7 @@ def subset_combinations(elements: list[int], n: int) -> list: for i in range(1, r + 1): for j in range(i, 0, -1): for prev_combination in dp[j - 1]: - dp[j].append(tuple(prev_combination) + (elements[i - 1],)) + dp[j].append((*prev_combination, elements[i - 1])) try: return sorted(dp[n]) diff --git a/maths/odd_sieve.py b/maths/odd_sieve.py index 60e92921a94c..06605ca54296 100644 --- a/maths/odd_sieve.py +++ b/maths/odd_sieve.py @@ -33,7 +33,7 @@ def odd_sieve(num: int) -> list[int]: 0, ceil((num - i_squared) / (i << 1)) ) - return [2] + 
list(compress(range(3, num, 2), sieve)) + return [2, *list(compress(range(3, num, 2), sieve))] if __name__ == "__main__": diff --git a/pyproject.toml b/pyproject.toml index 9689cf2b37aa..e1d7dc91b2b8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -12,7 +12,10 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "PLW2901", # PLW2901: Redefined loop variable -- FIX ME "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception "PT018", # Assertion should be broken down into multiple parts - "RUF00", # Ambiguous unicode character and other rules + "RUF001", # String contains ambiguous {}. Did you mean {}? + "RUF002", # Docstring contains ambiguous {}. Did you mean {}? + "RUF003", # Comment contains ambiguous {}. Did you mean {}? + "RUF007", # Prefer itertools.pairwise() over zip() when iterating over successive pairs "S101", # Use of `assert` detected -- DO NOT FIX "S113", # Probable use of requests call without timeout -- FIX ME "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME From 53b2926704f3ad3ec2134a114be3a338e755e28a Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 2 Apr 2024 22:29:34 +0300 Subject: [PATCH 053/260] Enable ruff PGH003 rule (#11345) * Enable ruff PGH003 rule * Fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- compression/huffman.py | 4 ++-- data_structures/binary_tree/binary_search_tree.py | 4 ++-- data_structures/linked_list/rotate_to_the_right.py | 2 +- fractals/mandelbrot.py | 2 +- graphics/bezier_curve.py | 2 +- maths/entropy.py | 4 ++-- matrix/spiral_print.py | 4 +++- matrix/tests/test_matrix_operation.py | 2 +- project_euler/problem_092/sol1.py | 2 +- project_euler/problem_104/sol1.py | 2 +- pyproject.toml | 1 - scripts/validate_filenames.py | 2 +- 
scripts/validate_solutions.py | 6 +++--- web_programming/covid_stats_via_xpath.py | 2 +- 14 files changed, 20 insertions(+), 19 deletions(-) diff --git a/compression/huffman.py b/compression/huffman.py index 65e5c2f25385..44eda6c03180 100644 --- a/compression/huffman.py +++ b/compression/huffman.py @@ -40,7 +40,7 @@ def build_tree(letters: list[Letter]) -> Letter | TreeNode: Run through the list of Letters and build the min heap for the Huffman Tree. """ - response: list[Letter | TreeNode] = letters # type: ignore + response: list[Letter | TreeNode] = list(letters) while len(response) > 1: left = response.pop(0) right = response.pop(0) @@ -59,7 +59,7 @@ def traverse_tree(root: Letter | TreeNode, bitstring: str) -> list[Letter]: if isinstance(root, Letter): root.bitstring[root.letter] = bitstring return [root] - treenode: TreeNode = root # type: ignore + treenode: TreeNode = root letters = [] letters += traverse_tree(treenode.left, bitstring + "0") letters += traverse_tree(treenode.right, bitstring + "1") diff --git a/data_structures/binary_tree/binary_search_tree.py b/data_structures/binary_tree/binary_search_tree.py index 32194ddc2043..3f214d0113a4 100644 --- a/data_structures/binary_tree/binary_search_tree.py +++ b/data_structures/binary_tree/binary_search_tree.py @@ -294,9 +294,9 @@ def remove(self, value: int) -> None: predecessor = self.get_max( node.left ) # Gets the max value of the left branch - self.remove(predecessor.value) # type: ignore + self.remove(predecessor.value) # type: ignore[union-attr] node.value = ( - predecessor.value # type: ignore + predecessor.value # type: ignore[union-attr] ) # Assigns the value to the node to delete and keep tree structure def preorder_traverse(self, node: Node | None) -> Iterable: diff --git a/data_structures/linked_list/rotate_to_the_right.py b/data_structures/linked_list/rotate_to_the_right.py index 51b10481c0ce..6b1c54f4be4d 100644 --- a/data_structures/linked_list/rotate_to_the_right.py +++ 
b/data_structures/linked_list/rotate_to_the_right.py @@ -63,7 +63,7 @@ def insert_node(head: Node | None, data: int) -> Node: while temp_node.next_node: temp_node = temp_node.next_node - temp_node.next_node = new_node # type: ignore + temp_node.next_node = new_node return head diff --git a/fractals/mandelbrot.py b/fractals/mandelbrot.py index 5eb9af0aafe1..359d965a882d 100644 --- a/fractals/mandelbrot.py +++ b/fractals/mandelbrot.py @@ -17,7 +17,7 @@ import colorsys -from PIL import Image # type: ignore +from PIL import Image def get_distance(x: float, y: float, max_step: int) -> float: diff --git a/graphics/bezier_curve.py b/graphics/bezier_curve.py index 6eeb89da6bdf..9d906f179c92 100644 --- a/graphics/bezier_curve.py +++ b/graphics/bezier_curve.py @@ -2,7 +2,7 @@ # https://www.tutorialspoint.com/computer_graphics/computer_graphics_curves.htm from __future__ import annotations -from scipy.special import comb # type: ignore +from scipy.special import comb class BezierCurve: diff --git a/maths/entropy.py b/maths/entropy.py index 76fac4ee717d..39ec67bea038 100644 --- a/maths/entropy.py +++ b/maths/entropy.py @@ -96,8 +96,8 @@ def analyze_text(text: str) -> tuple[dict, dict]: The first dictionary stores the frequency of single character strings. The second dictionary stores the frequency of two character strings. """ - single_char_strings = Counter() # type: ignore - two_char_strings = Counter() # type: ignore + single_char_strings = Counter() # type: ignore[var-annotated] + two_char_strings = Counter() # type: ignore[var-annotated] single_char_strings[text[-1]] += 1 # first case when we have space at start. 
diff --git a/matrix/spiral_print.py b/matrix/spiral_print.py index 7ba0a275157b..c16dde69cb56 100644 --- a/matrix/spiral_print.py +++ b/matrix/spiral_print.py @@ -116,7 +116,9 @@ def spiral_traversal(matrix: list[list]) -> list[int]: [1, 2, 3, 4, 8, 12, 11, 10, 9, 5, 6, 7] + spiral_traversal([]) """ if matrix: - return list(matrix.pop(0)) + spiral_traversal(list(zip(*matrix))[::-1]) # type: ignore + return list(matrix.pop(0)) + spiral_traversal( + [list(row) for row in zip(*matrix)][::-1] + ) else: return [] diff --git a/matrix/tests/test_matrix_operation.py b/matrix/tests/test_matrix_operation.py index 638f97daa2ed..addc870ca205 100644 --- a/matrix/tests/test_matrix_operation.py +++ b/matrix/tests/test_matrix_operation.py @@ -12,7 +12,7 @@ import sys import numpy as np -import pytest # type: ignore +import pytest # Custom/local libraries from matrix import matrix_operation as matop diff --git a/project_euler/problem_092/sol1.py b/project_euler/problem_092/sol1.py index 8d3f0c9ddd7b..3e45e82207a7 100644 --- a/project_euler/problem_092/sol1.py +++ b/project_euler/problem_092/sol1.py @@ -68,7 +68,7 @@ def chain(number: int) -> bool: """ if CHAINS[number - 1] is not None: - return CHAINS[number - 1] # type: ignore + return CHAINS[number - 1] # type: ignore[return-value] number_chain = chain(next_number(number)) CHAINS[number - 1] = number_chain diff --git a/project_euler/problem_104/sol1.py b/project_euler/problem_104/sol1.py index 60fd6fe99adb..d84dbcfc9c65 100644 --- a/project_euler/problem_104/sol1.py +++ b/project_euler/problem_104/sol1.py @@ -15,7 +15,7 @@ import sys -sys.set_int_max_str_digits(0) # type: ignore +sys.set_int_max_str_digits(0) def check(number: int) -> bool: diff --git a/pyproject.toml b/pyproject.toml index e1d7dc91b2b8..7eac811395ae 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,7 +6,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "EM101", # Exception must not use a string literal, assign to variable first 
"EXE001", # Shebang is present but file is not executable" -- FIX ME "G004", # Logging statement uses f-string - "PGH003", # Use specific rule codes when ignoring type issues -- FIX ME "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX "PLW2901", # PLW2901: Redefined loop variable -- FIX ME diff --git a/scripts/validate_filenames.py b/scripts/validate_filenames.py index ed23f3907114..0890024dd349 100755 --- a/scripts/validate_filenames.py +++ b/scripts/validate_filenames.py @@ -4,7 +4,7 @@ try: from .build_directory_md import good_file_paths except ImportError: - from build_directory_md import good_file_paths # type: ignore + from build_directory_md import good_file_paths # type: ignore[no-redef] filepaths = list(good_file_paths()) assert filepaths, "good_file_paths() failed!" diff --git a/scripts/validate_solutions.py b/scripts/validate_solutions.py index 0afbdde315c7..68dcd68b3947 100755 --- a/scripts/validate_solutions.py +++ b/scripts/validate_solutions.py @@ -21,8 +21,8 @@ def convert_path_to_module(file_path: pathlib.Path) -> ModuleType: """Converts a file path to a Python module""" spec = importlib.util.spec_from_file_location(file_path.name, str(file_path)) - module = importlib.util.module_from_spec(spec) # type: ignore - spec.loader.exec_module(module) # type: ignore + module = importlib.util.module_from_spec(spec) # type: ignore[arg-type] + spec.loader.exec_module(module) # type: ignore[union-attr] return module @@ -92,7 +92,7 @@ def test_project_euler(solution_path: pathlib.Path) -> None: problem_number: str = solution_path.parent.name[8:].zfill(3) expected: str = PROBLEM_ANSWERS[problem_number] solution_module = convert_path_to_module(solution_path) - answer = str(solution_module.solution()) # type: ignore + answer = str(solution_module.solution()) answer = hashlib.sha256(answer.encode()).hexdigest() assert ( answer == expected diff --git 
a/web_programming/covid_stats_via_xpath.py b/web_programming/covid_stats_via_xpath.py index a95130badad9..7011a02bffa8 100644 --- a/web_programming/covid_stats_via_xpath.py +++ b/web_programming/covid_stats_via_xpath.py @@ -7,7 +7,7 @@ from typing import NamedTuple import requests -from lxml import html # type: ignore +from lxml import html class CovidData(NamedTuple): From cc2f5b13088b8a98181983b5589f48749016d4ce Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 8 Apr 2024 14:22:54 +0300 Subject: [PATCH 054/260] Do not fix ruff EXE001 rule (#11350) * Do not fix ruff EXE001 rule * Fix --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 7eac811395ae..264f06d1f750 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME "E741", # Ambiguous variable name 'l' -- FIX ME "EM101", # Exception must not use a string literal, assign to variable first - "EXE001", # Shebang is present but file is not executable" -- FIX ME + "EXE001", # Shebang is present but file is not executable -- DO NOT FIX "G004", # Logging statement uses f-string "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX From 9e55c9d9845c07ce6390ab92a2d86be4816d4a69 Mon Sep 17 00:00:00 2001 From: Jiayou Qin <90779499+Jiayoqin@users.noreply.github.com> Date: Mon, 8 Apr 2024 07:35:22 -0400 Subject: [PATCH 055/260] Added documentations (#11352) * Added documentations * Update data_structures/queue/circular_queue.py --------- Co-authored-by: Christian Clauss --- data_structures/queue/circular_queue.py | 7 +++++-- data_structures/queue/circular_queue_linked_list.py | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/data_structures/queue/circular_queue.py 
b/data_structures/queue/circular_queue.py index 93a6ef805c7c..f2fb4c01e467 100644 --- a/data_structures/queue/circular_queue.py +++ b/data_structures/queue/circular_queue.py @@ -25,6 +25,7 @@ def __len__(self) -> int: def is_empty(self) -> bool: """ + Checks whether the queue is empty or not >>> cq = CircularQueue(5) >>> cq.is_empty() True @@ -35,6 +36,7 @@ def is_empty(self) -> bool: def first(self): """ + Returns the first element of the queue >>> cq = CircularQueue(5) >>> cq.first() False @@ -45,7 +47,8 @@ def first(self): def enqueue(self, data): """ - This function insert an element in the queue using self.rear value as an index + This function inserts an element at the end of the queue using self.rear value + as an index. >>> cq = CircularQueue(5) >>> cq.enqueue("A") # doctest: +ELLIPSIS >> cq = CircularQueue(5) >>> cq.dequeue() Traceback (most recent call last): diff --git a/data_structures/queue/circular_queue_linked_list.py b/data_structures/queue/circular_queue_linked_list.py index 62042c4bce96..da8629678e52 100644 --- a/data_structures/queue/circular_queue_linked_list.py +++ b/data_structures/queue/circular_queue_linked_list.py @@ -39,7 +39,7 @@ def create_linked_list(self, initial_capacity: int) -> None: def is_empty(self) -> bool: """ - Checks where the queue is empty or not + Checks whether the queue is empty or not >>> cq = CircularQueueLinkedList() >>> cq.is_empty() True From 14ca726951473dd1993b6b13993105ea3b077ac3 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 9 Apr 2024 07:23:51 +0200 Subject: [PATCH 056/260] [pre-commit.ci] pre-commit autoupdate (#11355) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/pre-commit/pre-commit-hooks: v4.5.0 → v4.6.0](https://github.com/pre-commit/pre-commit-hooks/compare/v4.5.0...v4.6.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- 
.pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e6b1b0442c04..d4b8d1136ed7 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.5.0 + rev: v4.6.0 hooks: - id: check-executables-have-shebangs - id: check-toml From 0a9a860eb1174a513b231db2cf1a3378ff7c5b33 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 15 Apr 2024 22:21:33 +0200 Subject: [PATCH 057/260] [pre-commit.ci] pre-commit autoupdate (#11364) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/MarcoGorelli/auto-walrus: v0.2.2 → 0.3.3](https://github.com/MarcoGorelli/auto-walrus/compare/v0.2.2...0.3.3) - [github.com/astral-sh/ruff-pre-commit: v0.3.5 → v0.3.7](https://github.com/astral-sh/ruff-pre-commit/compare/v0.3.5...v0.3.7) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d4b8d1136ed7..9472bcfa3e07 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -11,12 +11,12 @@ repos: - id: requirements-txt-fixer - repo: https://github.com/MarcoGorelli/auto-walrus - rev: v0.2.2 + rev: 0.3.3 hooks: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.3.5 + rev: v0.3.7 hooks: - id: ruff - id: ruff-format From a42eb357027328085f928a4ab6c7aa770aeb1d6b Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Fri, 19 Apr 2024 22:30:22 +0300 Subject: [PATCH 058/260] Enable ruff E741 rule (#11370) * Enable ruff E741 rule * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] 
<66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../binary_tree/non_recursive_segment_tree.py | 22 ++++++------ data_structures/binary_tree/segment_tree.py | 36 +++++++++---------- data_structures/heap/min_heap.py | 12 +++---- .../longest_common_subsequence.py | 10 +++--- .../longest_increasing_subsequence_o_nlogn.py | 14 ++++---- graphs/articulation_points.py | 10 +++--- graphs/dinic.py | 2 +- .../sequential_minimum_optimization.py | 4 +-- maths/pi_generator.py | 10 +++--- other/sdes.py | 10 +++--- project_euler/problem_011/sol2.py | 22 ++++++++---- pyproject.toml | 1 - strings/jaro_winkler.py | 8 ++--- strings/manacher.py | 33 ++++++++--------- 14 files changed, 102 insertions(+), 92 deletions(-) diff --git a/data_structures/binary_tree/non_recursive_segment_tree.py b/data_structures/binary_tree/non_recursive_segment_tree.py index 45c476701d79..ca0d5c111c4f 100644 --- a/data_structures/binary_tree/non_recursive_segment_tree.py +++ b/data_structures/binary_tree/non_recursive_segment_tree.py @@ -87,12 +87,12 @@ def update(self, p: int, v: T) -> None: p = p // 2 self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1]) - def query(self, l: int, r: int) -> T | None: + def query(self, left: int, right: int) -> T | None: """ Get range query value in log(N) time - :param l: left element index - :param r: right element index - :return: element combined in the range [l, r] + :param left: left element index + :param right: right element index + :return: element combined in the range [left, right] >>> st = SegmentTree([1, 2, 3, 4], lambda a, b: a + b) >>> st.query(0, 2) @@ -104,15 +104,15 @@ def query(self, l: int, r: int) -> T | None: >>> st.query(2, 3) 7 """ - l, r = l + self.N, r + self.N + left, right = left + self.N, right + self.N res: T | None = None - while l <= r: - if l % 2 == 1: - res = self.st[l] if res is None else self.fn(res, self.st[l]) - if r % 2 == 0: - res = self.st[r] if res is None else self.fn(res, self.st[r]) - l, r = (l + 1) // 2, (r - 1) 
// 2 + while left <= right: + if left % 2 == 1: + res = self.st[left] if res is None else self.fn(res, self.st[left]) + if right % 2 == 0: + res = self.st[right] if res is None else self.fn(res, self.st[right]) + left, right = (left + 1) // 2, (right - 1) // 2 return res diff --git a/data_structures/binary_tree/segment_tree.py b/data_structures/binary_tree/segment_tree.py index bb9c1ae2268b..c7069b3f6069 100644 --- a/data_structures/binary_tree/segment_tree.py +++ b/data_structures/binary_tree/segment_tree.py @@ -35,13 +35,13 @@ def right(self, idx): """ return idx * 2 + 1 - def build(self, idx, l, r): - if l == r: - self.st[idx] = self.A[l] + def build(self, idx, left, right): + if left == right: + self.st[idx] = self.A[left] else: - mid = (l + r) // 2 - self.build(self.left(idx), l, mid) - self.build(self.right(idx), mid + 1, r) + mid = (left + right) // 2 + self.build(self.left(idx), left, mid) + self.build(self.right(idx), mid + 1, right) self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)]) def update(self, a, b, val): @@ -56,18 +56,18 @@ def update(self, a, b, val): """ return self.update_recursive(1, 0, self.N - 1, a - 1, b - 1, val) - def update_recursive(self, idx, l, r, a, b, val): + def update_recursive(self, idx, left, right, a, b, val): """ update(1, 1, N, a, b, v) for update val v to [a,b] """ - if r < a or l > b: + if right < a or left > b: return True - if l == r: + if left == right: self.st[idx] = val return True - mid = (l + r) // 2 - self.update_recursive(self.left(idx), l, mid, a, b, val) - self.update_recursive(self.right(idx), mid + 1, r, a, b, val) + mid = (left + right) // 2 + self.update_recursive(self.left(idx), left, mid, a, b, val) + self.update_recursive(self.right(idx), mid + 1, right, a, b, val) self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)]) return True @@ -83,17 +83,17 @@ def query(self, a, b): """ return self.query_recursive(1, 0, self.N - 1, a - 1, b - 1) - def query_recursive(self, idx, l, r, 
a, b): + def query_recursive(self, idx, left, right, a, b): """ query(1, 1, N, a, b) for query max of [a,b] """ - if r < a or l > b: + if right < a or left > b: return -math.inf - if l >= a and r <= b: + if left >= a and right <= b: return self.st[idx] - mid = (l + r) // 2 - q1 = self.query_recursive(self.left(idx), l, mid, a, b) - q2 = self.query_recursive(self.right(idx), mid + 1, r, a, b) + mid = (left + right) // 2 + q1 = self.query_recursive(self.left(idx), left, mid, a, b) + q2 = self.query_recursive(self.right(idx), mid + 1, right, a, b) return max(q1, q2) def show_data(self): diff --git a/data_structures/heap/min_heap.py b/data_structures/heap/min_heap.py index 39f6d99e8a4c..ce7ed570a58d 100644 --- a/data_structures/heap/min_heap.py +++ b/data_structures/heap/min_heap.py @@ -66,14 +66,14 @@ def build_heap(self, array): # this is min-heapify method def sift_down(self, idx, array): while True: - l = self.get_left_child_idx(idx) - r = self.get_right_child_idx(idx) + left = self.get_left_child_idx(idx) + right = self.get_right_child_idx(idx) smallest = idx - if l < len(array) and array[l] < array[idx]: - smallest = l - if r < len(array) and array[r] < array[smallest]: - smallest = r + if left < len(array) and array[left] < array[idx]: + smallest = left + if right < len(array) and array[right] < array[smallest]: + smallest = right if smallest != idx: array[idx], array[smallest] = array[smallest], array[idx] diff --git a/dynamic_programming/longest_common_subsequence.py b/dynamic_programming/longest_common_subsequence.py index 22f50a166ae4..9a98b1736ed5 100644 --- a/dynamic_programming/longest_common_subsequence.py +++ b/dynamic_programming/longest_common_subsequence.py @@ -38,30 +38,30 @@ def longest_common_subsequence(x: str, y: str): n = len(y) # declaring the array for storing the dp values - l = [[0] * (n + 1) for _ in range(m + 1)] + dp = [[0] * (n + 1) for _ in range(m + 1)] for i in range(1, m + 1): for j in range(1, n + 1): match = 1 if x[i - 1] == y[j - 
1] else 0 - l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match) + dp[i][j] = max(dp[i - 1][j], dp[i][j - 1], dp[i - 1][j - 1] + match) seq = "" i, j = m, n while i > 0 and j > 0: match = 1 if x[i - 1] == y[j - 1] else 0 - if l[i][j] == l[i - 1][j - 1] + match: + if dp[i][j] == dp[i - 1][j - 1] + match: if match == 1: seq = x[i - 1] + seq i -= 1 j -= 1 - elif l[i][j] == l[i - 1][j]: + elif dp[i][j] == dp[i - 1][j]: i -= 1 else: j -= 1 - return l[m][n], seq + return dp[m][n], seq if __name__ == "__main__": diff --git a/dynamic_programming/longest_increasing_subsequence_o_nlogn.py b/dynamic_programming/longest_increasing_subsequence_o_nlogn.py index 44e333e97779..bbc7a62b6b5c 100644 --- a/dynamic_programming/longest_increasing_subsequence_o_nlogn.py +++ b/dynamic_programming/longest_increasing_subsequence_o_nlogn.py @@ -7,14 +7,14 @@ from __future__ import annotations -def ceil_index(v, l, r, key): - while r - l > 1: - m = (l + r) // 2 - if v[m] >= key: - r = m +def ceil_index(v, left, right, key): + while right - left > 1: + middle = (left + right) // 2 + if v[middle] >= key: + right = middle else: - l = m - return r + left = middle + return right def longest_increasing_subsequence_length(v: list[int]) -> int: diff --git a/graphs/articulation_points.py b/graphs/articulation_points.py index 3fcaffd73725..0bf16e55bc04 100644 --- a/graphs/articulation_points.py +++ b/graphs/articulation_points.py @@ -1,6 +1,6 @@ # Finding Articulation Points in Undirected Graph -def compute_ap(l): - n = len(l) +def compute_ap(graph): + n = len(graph) out_edge_count = 0 low = [0] * n visited = [False] * n @@ -12,7 +12,7 @@ def dfs(root, at, parent, out_edge_count): visited[at] = True low[at] = at - for to in l[at]: + for to in graph[at]: if to == parent: pass elif not visited[to]: @@ -41,7 +41,7 @@ def dfs(root, at, parent, out_edge_count): # Adjacency list of graph -data = { +graph = { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], @@ -52,4 +52,4 @@ def dfs(root, at, parent, 
out_edge_count): 7: [6, 8], 8: [5, 7], } -compute_ap(data) +compute_ap(graph) diff --git a/graphs/dinic.py b/graphs/dinic.py index 4f5e81236984..7919e6bc060a 100644 --- a/graphs/dinic.py +++ b/graphs/dinic.py @@ -37,7 +37,7 @@ def depth_first_search(self, vertex, sink, flow): # Here we calculate the flow that reaches the sink def max_flow(self, source, sink): flow, self.q[0] = 0, source - for l in range(31): # l = 30 maybe faster for random data + for l in range(31): # l = 30 maybe faster for random data # noqa: E741 while True: self.lvl, self.ptr = [0] * len(self.q), [0] * len(self.q) qi, qe, self.lvl[source] = 0, 1, 1 diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index 408d59ab5d29..3abdd6ccbed8 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -309,9 +309,9 @@ def _get_new_alpha(self, i1, i2, a1, a2, e1, e2, y1, y2): # calculate L and H which bound the new alpha2 s = y1 * y2 if s == -1: - l, h = max(0.0, a2 - a1), min(self._c, self._c + a2 - a1) + l, h = max(0.0, a2 - a1), min(self._c, self._c + a2 - a1) # noqa: E741 else: - l, h = max(0.0, a2 + a1 - self._c), min(self._c, a2 + a1) + l, h = max(0.0, a2 + a1 - self._c), min(self._c, a2 + a1) # noqa: E741 if l == h: return None, None diff --git a/maths/pi_generator.py b/maths/pi_generator.py index addd921747ba..97f2c540c1ce 100644 --- a/maths/pi_generator.py +++ b/maths/pi_generator.py @@ -41,7 +41,7 @@ def calculate_pi(limit: int) -> str: t = 1 k = 1 n = 3 - l = 3 + m = 3 decimal = limit counter = 0 @@ -65,11 +65,11 @@ def calculate_pi(limit: int) -> str: q *= 10 r = nr else: - nr = (2 * q + r) * l - nn = (q * (7 * k) + 2 + (r * l)) // (t * l) + nr = (2 * q + r) * m + nn = (q * (7 * k) + 2 + (r * m)) // (t * m) q *= k - t *= l - l += 2 + t *= m + m += 2 k += 1 n = nn r = nr diff --git a/other/sdes.py b/other/sdes.py index a69add3430c3..42186f453a3d 100644 --- 
a/other/sdes.py +++ b/other/sdes.py @@ -44,11 +44,11 @@ def function(expansion, s0, s1, key, message): right = message[4:] temp = apply_table(right, expansion) temp = xor(temp, key) - l = apply_sbox(s0, temp[:4]) - r = apply_sbox(s1, temp[4:]) - l = "0" * (2 - len(l)) + l - r = "0" * (2 - len(r)) + r - temp = apply_table(l + r, p4_table) + left_bin_str = apply_sbox(s0, temp[:4]) + right_bin_str = apply_sbox(s1, temp[4:]) + left_bin_str = "0" * (2 - len(left_bin_str)) + left_bin_str + right_bin_str = "0" * (2 - len(right_bin_str)) + right_bin_str + temp = apply_table(left_bin_str + right_bin_str, p4_table) temp = xor(left, temp) return temp + right diff --git a/project_euler/problem_011/sol2.py b/project_euler/problem_011/sol2.py index 2958305331a9..09bf315702c5 100644 --- a/project_euler/problem_011/sol2.py +++ b/project_euler/problem_011/sol2.py @@ -35,37 +35,47 @@ def solution(): 70600674 """ with open(os.path.dirname(__file__) + "/grid.txt") as f: - l = [] + grid = [] for _ in range(20): - l.append([int(x) for x in f.readline().split()]) + grid.append([int(x) for x in f.readline().split()]) maximum = 0 # right for i in range(20): for j in range(17): - temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3] + temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3] if temp > maximum: maximum = temp # down for i in range(17): for j in range(20): - temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j] + temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j] if temp > maximum: maximum = temp # diagonal 1 for i in range(17): for j in range(17): - temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3] + temp = ( + grid[i][j] + * grid[i + 1][j + 1] + * grid[i + 2][j + 2] + * grid[i + 3][j + 3] + ) if temp > maximum: maximum = temp # diagonal 2 for i in range(17): for j in range(3, 20): - temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3] + temp = ( + grid[i][j] + * grid[i + 1][j - 1] + * grid[i + 2][j - 2] + 
* grid[i + 3][j - 3] + ) if temp > maximum: maximum = temp return maximum diff --git a/pyproject.toml b/pyproject.toml index 264f06d1f750..1ac70b2fab93 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "B904", # Within an `except` clause, raise exceptions with `raise ... from err` -- FIX ME "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME - "E741", # Ambiguous variable name 'l' -- FIX ME "EM101", # Exception must not use a string literal, assign to variable first "EXE001", # Shebang is present but file is not executable -- DO NOT FIX "G004", # Logging statement uses f-string diff --git a/strings/jaro_winkler.py b/strings/jaro_winkler.py index f4a8fbad3ac8..c18f0d85d9f4 100644 --- a/strings/jaro_winkler.py +++ b/strings/jaro_winkler.py @@ -28,12 +28,12 @@ def jaro_winkler(str1: str, str2: str) -> float: def get_matched_characters(_str1: str, _str2: str) -> str: matched = [] limit = min(len(_str1), len(_str2)) // 2 - for i, l in enumerate(_str1): + for i, char in enumerate(_str1): left = int(max(0, i - limit)) right = int(min(i + limit + 1, len(_str2))) - if l in _str2[left:right]: - matched.append(l) - _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}" + if char in _str2[left:right]: + matched.append(char) + _str2 = f"{_str2[0:_str2.index(char)]} {_str2[_str2.index(char) + 1:]}" return "".join(matched) diff --git a/strings/manacher.py b/strings/manacher.py index ca546e533acd..fc8b01cd9c1c 100644 --- a/strings/manacher.py +++ b/strings/manacher.py @@ -9,9 +9,9 @@ def palindromic_string(input_string: str) -> str: 1. first this convert input_string("xyx") into new_string("x|y|x") where odd positions are actual input characters. - 2. for each character in new_string it find corresponding length and store the - length and l,r to store previously calculated info.(please look the explanation - for details) + 2. 
for each character in new_string it find corresponding length and + store the length and left,right to store previously calculated info. + (please look the explanation for details) 3. return corresponding output_string by removing all "|" """ @@ -29,7 +29,7 @@ def palindromic_string(input_string: str) -> str: # we will store the starting and ending of previous furthest ending palindromic # substring - l, r = 0, 0 + left, right = 0, 0 # length[i] shows the length of palindromic substring with center i length = [1 for i in range(len(new_input_string))] @@ -37,7 +37,7 @@ def palindromic_string(input_string: str) -> str: # for each character in new_string find corresponding palindromic string start = 0 for j in range(len(new_input_string)): - k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1) + k = 1 if j > right else min(length[left + right - j] // 2, right - j + 1) while ( j - k >= 0 and j + k < len(new_input_string) @@ -47,11 +47,11 @@ def palindromic_string(input_string: str) -> str: length[j] = 2 * k - 1 - # does this string is ending after the previously explored end (that is r) ? - # if yes the update the new r to the last index of this - if j + k - 1 > r: - l = j - k + 1 - r = j + k - 1 + # does this string is ending after the previously explored end (that is right) ? + # if yes the update the new right to the last index of this + if j + k - 1 > right: + left = j - k + 1 + right = j + k - 1 # update max_length and start position if max_length < length[j]: @@ -78,8 +78,9 @@ def palindromic_string(input_string: str) -> str: consider the string for which we are calculating the longest palindromic substring is shown above where ... 
are some characters in between and right now we are calculating the length of palindromic substring with center at a5 with following conditions : -i) we have stored the length of palindromic substring which has center at a3 (starts at - l ends at r) and it is the furthest ending till now, and it has ending after a6 +i) we have stored the length of palindromic substring which has center at a3 + (starts at left ends at right) and it is the furthest ending till now, + and it has ending after a6 ii) a2 and a4 are equally distant from a3 so char(a2) == char(a4) iii) a0 and a6 are equally distant from a3 so char(a0) == char(a6) iv) a1 is corresponding equal character of a5 in palindrome with center a3 (remember @@ -98,11 +99,11 @@ def palindromic_string(input_string: str) -> str: a1 but this only holds if a0 and a6 are inside the limits of palindrome centered at a3 so finally .. -len_of_palindrome__at(a5) = min(len_of_palindrome_at(a1), r-a5) -where a3 lies from l to r and we have to keep updating that +len_of_palindrome__at(a5) = min(len_of_palindrome_at(a1), right-a5) +where a3 lies from left to right and we have to keep updating that -and if the a5 lies outside of l,r boundary we calculate length of palindrome with -bruteforce and update l,r. +and if the a5 lies outside of left,right boundary we calculate length of palindrome with +bruteforce and update left,right. 
it gives the linear time complexity just like z-function """ From 42593489d974feff169cf4f3455e3f209d7bdfcf Mon Sep 17 00:00:00 2001 From: Kelvin Date: Sat, 20 Apr 2024 16:20:37 +0530 Subject: [PATCH 059/260] Add doctests in all functions in basic_string.py (#11374) * Add doctests in all functions in basic_string.py * Revert back to original basic_string.py * Add doctest in basic_string.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update genetic_algorithm/basic_string.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- genetic_algorithm/basic_string.py | 31 ++++++++++++++++++++++++++++--- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/genetic_algorithm/basic_string.py b/genetic_algorithm/basic_string.py index 089c5c99a1ec..a906ce85a779 100644 --- a/genetic_algorithm/basic_string.py +++ b/genetic_algorithm/basic_string.py @@ -33,7 +33,12 @@ def evaluate(item: str, main_target: str) -> tuple[str, float]: def crossover(parent_1: str, parent_2: str) -> tuple[str, str]: - """Slice and combine two string at a random point.""" + """ + Slice and combine two strings at a random point. + >>> random.seed(42) + >>> crossover("123456", "abcdef") + ('12345f', 'abcde6') + """ random_slice = random.randint(0, len(parent_1) - 1) child_1 = parent_1[:random_slice] + parent_2[random_slice:] child_2 = parent_2[:random_slice] + parent_1[random_slice:] @@ -41,7 +46,12 @@ def crossover(parent_1: str, parent_2: str) -> tuple[str, str]: def mutate(child: str, genes: list[str]) -> str: - """Mutate a random gene of a child with another one from the list.""" + """ + Mutate a random gene of a child with another one from the list. 
+ >>> random.seed(123) + >>> mutate("123456", list("ABCDEF")) + '12345A' + """ child_list = list(child) if random.uniform(0, 1) < MUTATION_PROBABILITY: child_list[random.randint(0, len(child)) - 1] = random.choice(genes) @@ -54,7 +64,22 @@ def select( population_score: list[tuple[str, float]], genes: list[str], ) -> list[str]: - """Select the second parent and generate new population""" + """ + Select the second parent and generate new population + + >>> random.seed(42) + >>> parent_1 = ("123456", 8.0) + >>> population_score = [("abcdef", 4.0), ("ghijkl", 5.0), ("mnopqr", 7.0)] + >>> genes = list("ABCDEF") + >>> child_n = int(min(parent_1[1] + 1, 10)) + >>> population = [] + >>> for _ in range(child_n): + ... parent_2 = population_score[random.randrange(len(population_score))][0] + ... child_1, child_2 = crossover(parent_1[0], parent_2) + ... population.extend((mutate(child_1, genes), mutate(child_2, genes))) + >>> len(population) == (int(parent_1[1]) + 1) * 2 + True + """ pop = [] # Generate more children proportionally to the fitness score. child_n = int(parent_1[1] * 100) + 1 From 7b88e15b1cc67c784872b0d16189e516474cf5a5 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sat, 20 Apr 2024 17:20:27 +0300 Subject: [PATCH 060/260] Enable ruff RUF007 rule (#11349) * Enable ruff RUF005 rule * Enable ruff RUF007 rule * Fix * Fix * Fix * Update sorts/bead_sort.py Co-authored-by: Christian Clauss * Update sorts/bead_sort.py * Revert "Update sorts/bead_sort.py" This reverts commit b10e5632e4479c2117c8b67113b5aa6545f127aa. * Revert "Update sorts/bead_sort.py" This reverts commit 2c1816bf102eeec5aa39cb2f1806afb64b672d14. 
* Update sorts/bead_sort.py --------- Co-authored-by: Christian Clauss --- data_structures/linked_list/skip_list.py | 3 ++- pyproject.toml | 1 - sorts/bead_sort.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/data_structures/linked_list/skip_list.py b/data_structures/linked_list/skip_list.py index 88d3e0daddf0..13e9a94a8698 100644 --- a/data_structures/linked_list/skip_list.py +++ b/data_structures/linked_list/skip_list.py @@ -5,6 +5,7 @@ from __future__ import annotations +from itertools import pairwise from random import random from typing import Generic, TypeVar @@ -389,7 +390,7 @@ def traverse_keys(node): def test_iter_always_yields_sorted_values(): def is_sorted(lst): - return all(next_item >= item for item, next_item in zip(lst, lst[1:])) + return all(next_item >= item for item, next_item in pairwise(lst)) skip_list = SkipList() for i in range(10): diff --git a/pyproject.toml b/pyproject.toml index 1ac70b2fab93..e46293a8d526 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -13,7 +13,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "RUF001", # String contains ambiguous {}. Did you mean {}? "RUF002", # Docstring contains ambiguous {}. Did you mean {}? "RUF003", # Comment contains ambiguous {}. Did you mean {}? 
- "RUF007", # Prefer itertools.pairwise() over zip() when iterating over successive pairs "S101", # Use of `assert` detected -- DO NOT FIX "S113", # Probable use of requests call without timeout -- FIX ME "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME diff --git a/sorts/bead_sort.py b/sorts/bead_sort.py index e51173643d81..8ce0619fd573 100644 --- a/sorts/bead_sort.py +++ b/sorts/bead_sort.py @@ -31,7 +31,7 @@ def bead_sort(sequence: list) -> list: if any(not isinstance(x, int) or x < 0 for x in sequence): raise TypeError("Sequence must be list of non-negative integers") for _ in range(len(sequence)): - for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])): + for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])): # noqa: RUF007 if rod_upper > rod_lower: sequence[i] -= rod_upper - rod_lower sequence[i + 1] += rod_upper - rod_lower From 2702bf9400faece97a1ebc76d0f91b9cfe9658f6 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sun, 21 Apr 2024 20:34:18 +0300 Subject: [PATCH 061/260] Enable ruff S113 rule (#11375) * Enable ruff S113 rule * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- machine_learning/linear_regression.py | 3 ++- pyproject.toml | 1 - scripts/validate_solutions.py | 2 +- web_programming/co2_emission.py | 4 ++-- web_programming/covid_stats_via_xpath.py | 4 +++- web_programming/crawl_google_results.py | 2 +- web_programming/crawl_google_scholar_citation.py | 4 +++- web_programming/currency_converter.py | 2 +- web_programming/current_stock_price.py | 4 +++- web_programming/current_weather.py | 4 ++-- web_programming/daily_horoscope.py | 2 +- web_programming/download_images_from_google_query.py | 4 +++- web_programming/emails_from_url.py | 4 ++-- web_programming/fetch_anime_and_play.py | 8 +++++--- 
web_programming/fetch_bbc_news.py | 2 +- web_programming/fetch_github_info.py | 2 +- web_programming/fetch_jobs.py | 4 +++- web_programming/fetch_quotes.py | 4 ++-- web_programming/fetch_well_rx_price.py | 2 +- web_programming/get_amazon_product_data.py | 4 +++- web_programming/get_imdb_top_250_movies_csv.py | 2 +- web_programming/get_ip_geolocation.py | 2 +- web_programming/get_top_billionaires.py | 2 +- web_programming/get_top_hn_posts.py | 4 ++-- web_programming/giphy.py | 2 +- web_programming/instagram_crawler.py | 2 +- web_programming/instagram_pic.py | 4 ++-- web_programming/instagram_video.py | 4 ++-- web_programming/nasa_data.py | 6 +++--- web_programming/open_google_results.py | 1 + web_programming/random_anime_character.py | 6 ++++-- web_programming/recaptcha_verification.py | 4 +++- web_programming/reddit.py | 1 + web_programming/search_books_by_isbn.py | 2 +- web_programming/slack_message.py | 4 +++- web_programming/world_covid19_stats.py | 2 +- 36 files changed, 68 insertions(+), 46 deletions(-) diff --git a/machine_learning/linear_regression.py b/machine_learning/linear_regression.py index 39bee5712c16..839a5366d1cc 100644 --- a/machine_learning/linear_regression.py +++ b/machine_learning/linear_regression.py @@ -19,7 +19,8 @@ def collect_dataset(): """ response = requests.get( "https://raw.githubusercontent.com/yashLadha/The_Math_of_Intelligence/" - "master/Week1/ADRvsRating.csv" + "master/Week1/ADRvsRating.csv", + timeout=10, ) lines = response.text.splitlines() data = [] diff --git a/pyproject.toml b/pyproject.toml index e46293a8d526..ff22fba81c8a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -14,7 +14,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "RUF002", # Docstring contains ambiguous {}. Did you mean {}? "RUF003", # Comment contains ambiguous {}. Did you mean {}?
"S101", # Use of `assert` detected -- DO NOT FIX - "S113", # Probable use of requests call without timeout -- FIX ME "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME "SLF001", # Private member accessed: `_Iterator` -- FIX ME "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX diff --git a/scripts/validate_solutions.py b/scripts/validate_solutions.py index 68dcd68b3947..325c245e0d77 100755 --- a/scripts/validate_solutions.py +++ b/scripts/validate_solutions.py @@ -57,7 +57,7 @@ def added_solution_file_path() -> list[pathlib.Path]: "Accept": "application/vnd.github.v3+json", "Authorization": "token " + os.environ["GITHUB_TOKEN"], } - files = requests.get(get_files_url(), headers=headers).json() + files = requests.get(get_files_url(), headers=headers, timeout=10).json() for file in files: filepath = pathlib.Path.cwd().joinpath(file["filename"]) if ( diff --git a/web_programming/co2_emission.py b/web_programming/co2_emission.py index 88a426cb976d..19af70489d1d 100644 --- a/web_programming/co2_emission.py +++ b/web_programming/co2_emission.py @@ -11,13 +11,13 @@ # Emission in the last half hour def fetch_last_half_hour() -> str: - last_half_hour = requests.get(BASE_URL).json()["data"][0] + last_half_hour = requests.get(BASE_URL, timeout=10).json()["data"][0] return last_half_hour["intensity"]["actual"] # Emissions in a specific date range def fetch_from_to(start, end) -> list: - return requests.get(f"{BASE_URL}/{start}/{end}").json()["data"] + return requests.get(f"{BASE_URL}/{start}/{end}", timeout=10).json()["data"] if __name__ == "__main__": diff --git a/web_programming/covid_stats_via_xpath.py b/web_programming/covid_stats_via_xpath.py index 7011a02bffa8..c27a5d12bb3f 100644 --- a/web_programming/covid_stats_via_xpath.py +++ b/web_programming/covid_stats_via_xpath.py @@ -18,7 +18,9 @@ class CovidData(NamedTuple): def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> 
CovidData: xpath_str = '//div[@class = "maincounter-number"]/span/text()' - return CovidData(*html.fromstring(requests.get(url).content).xpath(xpath_str)) + return CovidData( + *html.fromstring(requests.get(url, timeout=10).content).xpath(xpath_str) + ) fmt = """Total COVID-19 cases in the world: {} diff --git a/web_programming/crawl_google_results.py b/web_programming/crawl_google_results.py index 1f5e6d31992b..cb75d450ff82 100644 --- a/web_programming/crawl_google_results.py +++ b/web_programming/crawl_google_results.py @@ -8,7 +8,7 @@ if __name__ == "__main__": print("Googling.....") url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:]) - res = requests.get(url, headers={"UserAgent": UserAgent().random}) + res = requests.get(url, headers={"UserAgent": UserAgent().random}, timeout=10) # res.raise_for_status() with open("project1a.html", "wb") as out_file: # only for knowing the class for data in res.iter_content(10000): diff --git a/web_programming/crawl_google_scholar_citation.py b/web_programming/crawl_google_scholar_citation.py index f92a3d139520..5f2ccad5f414 100644 --- a/web_programming/crawl_google_scholar_citation.py +++ b/web_programming/crawl_google_scholar_citation.py @@ -11,7 +11,9 @@ def get_citation(base_url: str, params: dict) -> str: """ Return the citation number.
""" - soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser") + soup = BeautifulSoup( + requests.get(base_url, params=params, timeout=10).content, "html.parser" + ) div = soup.find("div", attrs={"class": "gs_ri"}) anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a") return anchors[2].get_text() diff --git a/web_programming/currency_converter.py b/web_programming/currency_converter.py index 3bbcafa8f89b..9623504b89ea 100644 --- a/web_programming/currency_converter.py +++ b/web_programming/currency_converter.py @@ -176,7 +176,7 @@ def convert_currency( params = locals() # from is a reserved keyword params["from"] = params.pop("from_") - res = requests.get(URL_BASE, params=params).json() + res = requests.get(URL_BASE, params=params, timeout=10).json() return str(res["amount"]) if res["error"] == 0 else res["error_message"] diff --git a/web_programming/current_stock_price.py b/web_programming/current_stock_price.py index 0c06354d8998..9567c05b0558 100644 --- a/web_programming/current_stock_price.py +++ b/web_programming/current_stock_price.py @@ -4,7 +4,9 @@ def stock_price(symbol: str = "AAPL") -> str: url = f"https://finance.yahoo.com/quote/%7Bsymbol%7D?p={symbol}" - yahoo_finance_source = requests.get(url, headers={"USER-AGENT": "Mozilla/5.0"}).text + yahoo_finance_source = requests.get( + url, headers={"USER-AGENT": "Mozilla/5.0"}, timeout=10 + ).text soup = BeautifulSoup(yahoo_finance_source, "html.parser") specific_fin_streamer_tag = soup.find("fin-streamer", {"data-test": "qsp-price"}) diff --git a/web_programming/current_weather.py b/web_programming/current_weather.py index 3b6cd177cdfb..4a8fa5e3c845 100644 --- a/web_programming/current_weather.py +++ b/web_programming/current_weather.py @@ -20,13 +20,13 @@ def current_weather(location: str) -> list[dict]: if OPENWEATHERMAP_API_KEY: params_openweathermap = {"q": location, "appid": OPENWEATHERMAP_API_KEY} response_openweathermap = requests.get( - 
OPENWEATHERMAP_URL_BASE, params=params_openweathermap + OPENWEATHERMAP_URL_BASE, params=params_openweathermap, timeout=10 ) weather_data.append({"OpenWeatherMap": response_openweathermap.json()}) if WEATHERSTACK_API_KEY: params_weatherstack = {"query": location, "access_key": WEATHERSTACK_API_KEY} response_weatherstack = requests.get( - WEATHERSTACK_URL_BASE, params=params_weatherstack + WEATHERSTACK_URL_BASE, params=params_weatherstack, timeout=10 ) weather_data.append({"Weatherstack": response_weatherstack.json()}) if not weather_data: diff --git a/web_programming/daily_horoscope.py b/web_programming/daily_horoscope.py index b0dd1cd65924..75e637d8e52c 100644 --- a/web_programming/daily_horoscope.py +++ b/web_programming/daily_horoscope.py @@ -7,7 +7,7 @@ def horoscope(zodiac_sign: int, day: str) -> str: "https://www.horoscope.com/us/horoscopes/general/" f"horoscope-general-daily-{day}.aspx?sign={zodiac_sign}" ) - soup = BeautifulSoup(requests.get(url).content, "html.parser") + soup = BeautifulSoup(requests.get(url, timeout=10).content, "html.parser") return soup.find("div", class_="main-horoscope").p.text diff --git a/web_programming/download_images_from_google_query.py b/web_programming/download_images_from_google_query.py index 441347459f8e..235cd35763ef 100644 --- a/web_programming/download_images_from_google_query.py +++ b/web_programming/download_images_from_google_query.py @@ -39,7 +39,9 @@ def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) "ijn": "0", } - html = requests.get("https://www.google.com/search", params=params, headers=headers) + html = requests.get( + "https://www.google.com/search", params=params, headers=headers, timeout=10 + ) soup = BeautifulSoup(html.text, "html.parser") matched_images_data = "".join( re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script"))) diff --git a/web_programming/emails_from_url.py b/web_programming/emails_from_url.py index
26c88e1b13a5..43fd78dcf5a4 100644 --- a/web_programming/emails_from_url.py +++ b/web_programming/emails_from_url.py @@ -77,7 +77,7 @@ def emails_from_url(url: str = "https://github.com/") -> list[str]: try: # Open URL - r = requests.get(url) + r = requests.get(url, timeout=10) # pass the raw HTML to the parser to get links parser.feed(r.text) @@ -88,7 +88,7 @@ def emails_from_url(url: str = "https://github.com/") -> list[str]: # open URL. # read = requests.get(link) try: - read = requests.get(link) + read = requests.get(link, timeout=10) # Get the valid email. emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text) # If not in list then append it. diff --git a/web_programming/fetch_anime_and_play.py b/web_programming/fetch_anime_and_play.py index 366807785e85..fd7c3a3a7381 100644 --- a/web_programming/fetch_anime_and_play.py +++ b/web_programming/fetch_anime_and_play.py @@ -28,7 +28,7 @@ def search_scraper(anime_name: str) -> list: search_url = f"{BASE_URL}/search/{anime_name}" response = requests.get( - search_url, headers={"UserAgent": UserAgent().chrome} + search_url, headers={"UserAgent": UserAgent().chrome}, timeout=10 ) # request the url. # Is the response ok?
@@ -82,7 +82,9 @@ def search_anime_episode_list(episode_endpoint: str) -> list: request_url = f"{BASE_URL}{episode_endpoint}" - response = requests.get(url=request_url, headers={"UserAgent": UserAgent().chrome}) + response = requests.get( + url=request_url, headers={"UserAgent": UserAgent().chrome}, timeout=10 + ) response.raise_for_status() soup = BeautifulSoup(response.text, "html.parser") @@ -132,7 +134,7 @@ def get_anime_episode(episode_endpoint: str) -> list: episode_page_url = f"{BASE_URL}{episode_endpoint}" response = requests.get( - url=episode_page_url, headers={"User-Agent": UserAgent().chrome} + url=episode_page_url, headers={"User-Agent": UserAgent().chrome}, timeout=10 ) response.raise_for_status() diff --git a/web_programming/fetch_bbc_news.py b/web_programming/fetch_bbc_news.py index 7f8bc57b69f5..e5cd864a9d83 100644 --- a/web_programming/fetch_bbc_news.py +++ b/web_programming/fetch_bbc_news.py @@ -7,7 +7,7 @@ def fetch_bbc_news(bbc_news_api_key: str) -> None: # fetching a list of articles in json format - bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json() + bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key, timeout=10).json() # each article in the list is a dict for i, article in enumerate(bbc_news_page["articles"], 1): print(f"{i}.) 
{article['title']}") diff --git a/web_programming/fetch_github_info.py b/web_programming/fetch_github_info.py index 7a4985b68841..25d44245bb58 100644 --- a/web_programming/fetch_github_info.py +++ b/web_programming/fetch_github_info.py @@ -42,7 +42,7 @@ def fetch_github_info(auth_token: str) -> dict[Any, Any]: "Authorization": f"token {auth_token}", "Accept": "application/vnd.github.v3+json", } - return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json() + return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers, timeout=10).json() if __name__ == "__main__": # pragma: no cover diff --git a/web_programming/fetch_jobs.py b/web_programming/fetch_jobs.py index 49abd3c88eec..0d89bf45de57 100644 --- a/web_programming/fetch_jobs.py +++ b/web_programming/fetch_jobs.py @@ -13,7 +13,9 @@ def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]: - soup = BeautifulSoup(requests.get(url + location).content, "html.parser") + soup = BeautifulSoup( + requests.get(url + location, timeout=10).content, "html.parser" + ) # This attribute finds out all the specifics listed in a job for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}): job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip() diff --git a/web_programming/fetch_quotes.py b/web_programming/fetch_quotes.py index d557e2d95e74..cf0add43f002 100644 --- a/web_programming/fetch_quotes.py +++ b/web_programming/fetch_quotes.py @@ -14,11 +14,11 @@ def quote_of_the_day() -> list: - return requests.get(API_ENDPOINT_URL + "/today").json() + return requests.get(API_ENDPOINT_URL + "/today", timeout=10).json() def random_quotes() -> list: - return requests.get(API_ENDPOINT_URL + "/random").json() + return requests.get(API_ENDPOINT_URL + "/random", timeout=10).json() if __name__ == "__main__": diff --git a/web_programming/fetch_well_rx_price.py b/web_programming/fetch_well_rx_price.py index ee51b9a5051b..93be2a9235d9 100644 --- 
a/web_programming/fetch_well_rx_price.py +++ b/web_programming/fetch_well_rx_price.py @@ -42,7 +42,7 @@ def fetch_pharmacy_and_price_list(drug_name: str, zip_code: str) -> list | None: return None request_url = BASE_URL.format(drug_name, zip_code) - response = get(request_url) + response = get(request_url, timeout=10) # Is the response ok? response.raise_for_status() diff --git a/web_programming/get_amazon_product_data.py b/web_programming/get_amazon_product_data.py index c2f2ac5ab291..b98ff2c030af 100644 --- a/web_programming/get_amazon_product_data.py +++ b/web_programming/get_amazon_product_data.py @@ -24,7 +24,9 @@ def get_amazon_product_data(product: str = "laptop") -> DataFrame: ), "Accept-Language": "en-US, en;q=0.5", } - soup = BeautifulSoup(requests.get(url, headers=header).text, features="lxml") + soup = BeautifulSoup( + requests.get(url, headers=header, timeout=10).text, features="lxml" + ) # Initialize a Pandas dataframe with the column titles data_frame = DataFrame( columns=[ diff --git a/web_programming/get_imdb_top_250_movies_csv.py b/web_programming/get_imdb_top_250_movies_csv.py index e54b076ebd94..c914b29cb3b3 100644 --- a/web_programming/get_imdb_top_250_movies_csv.py +++ b/web_programming/get_imdb_top_250_movies_csv.py @@ -8,7 +8,7 @@ def get_imdb_top_250_movies(url: str = "") -> dict[str, float]: url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250" - soup = BeautifulSoup(requests.get(url).text, "html.parser") + soup = BeautifulSoup(requests.get(url, timeout=10).text, "html.parser") titles = soup.find_all("td", attrs="titleColumn") ratings = soup.find_all("td", class_="ratingColumn imdbRating") return { diff --git a/web_programming/get_ip_geolocation.py b/web_programming/get_ip_geolocation.py index 62eaeafceb7e..574d287f0db1 100644 --- a/web_programming/get_ip_geolocation.py +++ b/web_programming/get_ip_geolocation.py @@ -8,7 +8,7 @@ def get_ip_geolocation(ip_address: str) -> str: url = 
f"https://ipinfo.io/%7Bip_address%7D/json" # Send a GET request to the API - response = requests.get(url) + response = requests.get(url, timeout=10) # Check if the HTTP request was successful response.raise_for_status() diff --git a/web_programming/get_top_billionaires.py b/web_programming/get_top_billionaires.py index 703b635eef82..24828b6d787c 100644 --- a/web_programming/get_top_billionaires.py +++ b/web_programming/get_top_billionaires.py @@ -57,7 +57,7 @@ def get_forbes_real_time_billionaires() -> list[dict[str, int | str]]: Returns: List of top 10 realtime billionaires data. """ - response_json = requests.get(API_URL).json() + response_json = requests.get(API_URL, timeout=10).json() return [ { "Name": person["personName"], diff --git a/web_programming/get_top_hn_posts.py b/web_programming/get_top_hn_posts.py index fbb7c051a88e..f5d4f874c6c6 100644 --- a/web_programming/get_top_hn_posts.py +++ b/web_programming/get_top_hn_posts.py @@ -5,7 +5,7 @@ def get_hackernews_story(story_id: str) -> dict: url = f"https://hacker-news.firebaseio.com/v0/item/%7Bstory_id%7D.json?print=pretty" - return requests.get(url).json() + return requests.get(url, timeout=10).json() def hackernews_top_stories(max_stories: int = 10) -> list[dict]: @@ -13,7 +13,7 @@ def hackernews_top_stories(max_stories: int = 10) -> list[dict]: Get the top max_stories posts from HackerNews - https://news.ycombinator.com/ """ url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty" - story_ids = requests.get(url).json()[:max_stories] + story_ids = requests.get(url, timeout=10).json()[:max_stories] return [get_hackernews_story(story_id) for story_id in story_ids] diff --git a/web_programming/giphy.py b/web_programming/giphy.py index a5c3f8f7493e..2bf3e3ea9c0b 100644 --- a/web_programming/giphy.py +++ b/web_programming/giphy.py @@ -11,7 +11,7 @@ def get_gifs(query: str, api_key: str = giphy_api_key) -> list: """ formatted_query = "+".join(query.split()) url = 
f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}" - gifs = requests.get(url).json()["data"] + gifs = requests.get(url, timeout=10).json()["data"] return [gif["url"] for gif in gifs] diff --git a/web_programming/instagram_crawler.py b/web_programming/instagram_crawler.py index 0816cd181051..df62735fb328 100644 --- a/web_programming/instagram_crawler.py +++ b/web_programming/instagram_crawler.py @@ -39,7 +39,7 @@ def get_json(self) -> dict: """ Return a dict of user information """ - html = requests.get(self.url, headers=headers).text + html = requests.get(self.url, headers=headers, timeout=10).text scripts = BeautifulSoup(html, "html.parser").find_all("script") try: return extract_user_profile(scripts[4]) diff --git a/web_programming/instagram_pic.py b/web_programming/instagram_pic.py index 2d987c1766dc..292cacc16c04 100644 --- a/web_programming/instagram_pic.py +++ b/web_programming/instagram_pic.py @@ -15,7 +15,7 @@ def download_image(url: str) -> str: A message indicating the result of the operation. """ try: - response = requests.get(url) + response = requests.get(url, timeout=10) response.raise_for_status() except requests.exceptions.RequestException as e: return f"An error occurred during the HTTP request to {url}: {e!r}" @@ -30,7 +30,7 @@ def download_image(url: str) -> str: return f"Image URL not found in meta tag {image_meta_tag}." 
try: - image_data = requests.get(image_url).content + image_data = requests.get(image_url, timeout=10).content except requests.exceptions.RequestException as e: return f"An error occurred during the HTTP request to {image_url}: {e!r}" if not image_data: diff --git a/web_programming/instagram_video.py b/web_programming/instagram_video.py index 1f1b0e297034..a4cddce25138 100644 --- a/web_programming/instagram_video.py +++ b/web_programming/instagram_video.py @@ -5,8 +5,8 @@ def download_video(url: str) -> bytes: base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url=" - video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"] - return requests.get(video_url).content + video_url = requests.get(base_url + url, timeout=10).json()[0]["urls"][0]["src"] + return requests.get(video_url, timeout=10).content if __name__ == "__main__": diff --git a/web_programming/nasa_data.py b/web_programming/nasa_data.py index 81125e0a4f05..33a6406c52a6 100644 --- a/web_programming/nasa_data.py +++ b/web_programming/nasa_data.py @@ -9,14 +9,14 @@ def get_apod_data(api_key: str) -> dict: Get your API Key from: https://api.nasa.gov/ """ url = "https://api.nasa.gov/planetary/apod" - return requests.get(url, params={"api_key": api_key}).json() + return requests.get(url, params={"api_key": api_key}, timeout=10).json() def save_apod(api_key: str, path: str = ".") -> dict: apod_data = get_apod_data(api_key) img_url = apod_data["url"] img_name = img_url.split("/")[-1] - response = requests.get(img_url, stream=True) + response = requests.get(img_url, stream=True, timeout=10) with open(f"{path}/{img_name}", "wb+") as img_file: shutil.copyfileobj(response.raw, img_file) @@ -29,7 +29,7 @@ def get_archive_data(query: str) -> dict: Get the data of a particular query from NASA archives """ url = "https://images-api.nasa.gov/search" - return requests.get(url, params={"q": query}).json() + return requests.get(url, params={"q": query}, 
timeout=10).json() if __name__ == "__main__": diff --git a/web_programming/open_google_results.py b/web_programming/open_google_results.py index f61e3666dd7e..52dd37d7b91a 100644 --- a/web_programming/open_google_results.py +++ b/web_programming/open_google_results.py @@ -16,6 +16,7 @@ res = requests.get( url, headers={"User-Agent": str(UserAgent().random)}, + timeout=10, ) try: diff --git a/web_programming/random_anime_character.py b/web_programming/random_anime_character.py index f15a9c05d9e5..aed932866258 100644 --- a/web_programming/random_anime_character.py +++ b/web_programming/random_anime_character.py @@ -12,7 +12,7 @@ def save_image(image_url: str, image_title: str) -> None: """ Saves the image of anime character """ - image = requests.get(image_url, headers=headers) + image = requests.get(image_url, headers=headers, timeout=10) with open(image_title, "wb") as file: file.write(image.content) @@ -21,7 +21,9 @@ def random_anime_character() -> tuple[str, str, str]: """ Returns the Title, Description, and Image Title of a random anime character . 
""" - soup = BeautifulSoup(requests.get(URL, headers=headers).text, "html.parser") + soup = BeautifulSoup( + requests.get(URL, headers=headers, timeout=10).text, "html.parser" + ) title = soup.find("meta", attrs={"property": "og:title"}).attrs["content"] image_url = soup.find("meta", attrs={"property": "og:image"}).attrs["content"] description = soup.find("p", id="description").get_text() diff --git a/web_programming/recaptcha_verification.py b/web_programming/recaptcha_verification.py index c9b691b28a8b..168862204fa9 100644 --- a/web_programming/recaptcha_verification.py +++ b/web_programming/recaptcha_verification.py @@ -56,7 +56,9 @@ def login_using_recaptcha(request): client_key = request.POST.get("g-recaptcha-response") # post recaptcha response to Google's recaptcha api - response = requests.post(url, data={"secret": secret_key, "response": client_key}) + response = requests.post( + url, data={"secret": secret_key, "response": client_key}, timeout=10 + ) # if the recaptcha api verified our keys if response.json().get("success", False): # authenticate the user diff --git a/web_programming/reddit.py b/web_programming/reddit.py index 1c165ecc49ec..6cc1a6b62009 100644 --- a/web_programming/reddit.py +++ b/web_programming/reddit.py @@ -31,6 +31,7 @@ def get_subreddit_data( response = requests.get( f"https://reddit.com/r/%7Bsubreddit%7D/%7Bage%7D.json?limit={limit}", headers={"User-agent": "A random string"}, + timeout=10, ) if response.status_code == 429: raise requests.HTTPError(response=response) diff --git a/web_programming/search_books_by_isbn.py b/web_programming/search_books_by_isbn.py index 07429e9a9678..6b69018e6639 100644 --- a/web_programming/search_books_by_isbn.py +++ b/web_programming/search_books_by_isbn.py @@ -25,7 +25,7 @@ def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict: if new_olid.count("/") != 1: msg = f"{olid} is not a valid Open Library olid" raise ValueError(msg) - return 
requests.get(f"https://openlibrary.org/%7Bnew_olid%7D.json").json() + return requests.get(f"https://openlibrary.org/%7Bnew_olid%7D.json", timeout=10).json() def summarize_book(ol_book_data: dict) -> dict: diff --git a/web_programming/slack_message.py b/web_programming/slack_message.py index 5e97d6b64c75..d4d5658898ac 100644 --- a/web_programming/slack_message.py +++ b/web_programming/slack_message.py @@ -5,7 +5,9 @@ def send_slack_message(message_body: str, slack_url: str) -> None: headers = {"Content-Type": "application/json"} - response = requests.post(slack_url, json={"text": message_body}, headers=headers) + response = requests.post( + slack_url, json={"text": message_body}, headers=headers, timeout=10 + ) if response.status_code != 200: msg = ( "Request to slack returned an error " diff --git a/web_programming/world_covid19_stats.py b/web_programming/world_covid19_stats.py index ca81abdc4ce9..4948d8cfd43c 100644 --- a/web_programming/world_covid19_stats.py +++ b/web_programming/world_covid19_stats.py @@ -13,7 +13,7 @@ def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") """ Return a dict of current worldwide COVID-19 statistics """ - soup = BeautifulSoup(requests.get(url).text, "html.parser") + soup = BeautifulSoup(requests.get(url, timeout=10).text, "html.parser") keys = soup.findAll("h1") values = soup.findAll("div", {"class": "maincounter-number"}) keys += soup.findAll("span", {"class": "panel-title"}) From dbfa21813ff6fe2d7b439dfd6daa60b14a64d24f Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 22 Apr 2024 21:43:19 +0200 Subject: [PATCH 062/260] [pre-commit.ci] pre-commit autoupdate (#11380) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.3.7 → 
v0.4.1](https://github.com/astral-sh/ruff-pre-commit/compare/v0.3.7...v0.4.1) - [github.com/tox-dev/pyproject-fmt: 1.7.0 → 1.8.0](https://github.com/tox-dev/pyproject-fmt/compare/1.7.0...1.8.0) * from keras import layers, models * Update lstm_prediction.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 4 ++-- computer_vision/cnn_classification.py | 2 +- machine_learning/lstm/lstm_prediction.py | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9472bcfa3e07..eedf6d939748 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.3.7 + rev: v0.4.1 hooks: - id: ruff - id: ruff-format @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "1.7.0" + rev: "1.8.0" hooks: - id: pyproject-fmt diff --git a/computer_vision/cnn_classification.py b/computer_vision/cnn_classification.py index b813b71033f3..115333eba0d1 100644 --- a/computer_vision/cnn_classification.py +++ b/computer_vision/cnn_classification.py @@ -25,7 +25,7 @@ # Importing the Keras libraries and packages import tensorflow as tf -from tensorflow.keras import layers, models +from keras import layers, models if __name__ == "__main__": # Initialising the CNN diff --git a/machine_learning/lstm/lstm_prediction.py b/machine_learning/lstm/lstm_prediction.py index f0fd12c9de7f..81ac5f01d3d6 100644 --- a/machine_learning/lstm/lstm_prediction.py +++ b/machine_learning/lstm/lstm_prediction.py @@ -7,9 +7,9 @@ import numpy as np import pandas as pd +from keras.layers import LSTM, Dense +from keras.models import Sequential from sklearn.preprocessing import MinMaxScaler -from tensorflow.keras.layers import LSTM, Dense -from tensorflow.keras.models import Sequential if __name__ == "__main__": 
""" From 79dc7c97acc492d657b5f2f50686cee5b0f64b30 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 22 Apr 2024 22:45:24 +0300 Subject: [PATCH 063/260] Enable ruff RUF001 rule (#11378) * Enable ruff RUF001 rule * Fix * Fix --- fuzzy_logic/fuzzy_operations.py | 6 +++--- physics/basic_orbital_capture.py | 6 +++--- physics/malus_law.py | 2 +- pyproject.toml | 1 - 4 files changed, 7 insertions(+), 8 deletions(-) diff --git a/fuzzy_logic/fuzzy_operations.py b/fuzzy_logic/fuzzy_operations.py index e41cd2120049..c5e4cbde019d 100644 --- a/fuzzy_logic/fuzzy_operations.py +++ b/fuzzy_logic/fuzzy_operations.py @@ -57,7 +57,7 @@ class FuzzySet: # Union Operations >>> siya.union(sheru) - FuzzySet(name='Siya ∪ Sheru', left_boundary=0.4, peak=0.7, right_boundary=1.0) + FuzzySet(name='Siya U Sheru', left_boundary=0.4, peak=0.7, right_boundary=1.0) """ name: str @@ -147,10 +147,10 @@ def union(self, other) -> FuzzySet: FuzzySet: A new fuzzy set representing the union. >>> FuzzySet("a", 0.1, 0.2, 0.3).union(FuzzySet("b", 0.4, 0.5, 0.6)) - FuzzySet(name='a ∪ b', left_boundary=0.1, peak=0.6, right_boundary=0.35) + FuzzySet(name='a U b', left_boundary=0.1, peak=0.6, right_boundary=0.35) """ return FuzzySet( - f"{self.name} ∪ {other.name}", + f"{self.name} U {other.name}", min(self.left_boundary, other.left_boundary), max(self.right_boundary, other.right_boundary), (self.peak + other.peak) / 2, diff --git a/physics/basic_orbital_capture.py b/physics/basic_orbital_capture.py index eeb45e60240c..a5434b5cb7cb 100644 --- a/physics/basic_orbital_capture.py +++ b/physics/basic_orbital_capture.py @@ -4,14 +4,14 @@ """ These two functions will return the radii of impact for a target object -of mass M and radius R as well as it's effective cross sectional area σ(sigma). -That is to say any projectile with velocity v passing within σ, will impact the +of mass M and radius R as well as it's effective cross sectional area sigma. 
+That is to say any projectile with velocity v passing within sigma, will impact the target object with mass M. The derivation of which is given at the bottom of this file. The derivation shows that a projectile does not need to aim directly at the target body in order to hit it, as R_capture>R_target. Astronomers refer to the effective -cross section for capture as σ=π*R_capture**2. +cross section for capture as sigma=π*R_capture**2. This algorithm does not account for an N-body problem. diff --git a/physics/malus_law.py b/physics/malus_law.py index ae77d45cf614..374b3423f8ff 100644 --- a/physics/malus_law.py +++ b/physics/malus_law.py @@ -31,7 +31,7 @@ Real polarizers are also not perfect blockers of the polarization orthogonal to their polarization axis; the ratio of the transmission of the unwanted component to the wanted component is called the extinction ratio, and varies from around -1:500 for Polaroid to about 1:106 for Glan–Taylor prism polarizers. +1:500 for Polaroid to about 1:106 for Glan-Taylor prism polarizers. Reference : "https://en.wikipedia.org/wiki/Polarizer#Malus's_law_and_other_properties" """ diff --git a/pyproject.toml b/pyproject.toml index ff22fba81c8a..0185f4d7b987 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,7 +10,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "PLW2901", # PLW2901: Redefined loop variable -- FIX ME "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception "PT018", # Assertion should be broken down into multiple parts - "RUF001", # String contains ambiguous {}. Did you mean {}? "RUF002", # Docstring contains ambiguous {}. Did you mean {}? "RUF003", # Comment contains ambiguous {}. Did you mean {}? 
"S101", # Use of `assert` detected -- DO NOT FIX From 4700297b3e332701eed1d0667f3afefc5b9b66be Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 22 Apr 2024 22:51:47 +0300 Subject: [PATCH 064/260] Enable ruff RUF002 rule (#11377) * Enable ruff RUF002 rule * Fix --------- Co-authored-by: Christian Clauss --- backtracking/sudoku.py | 4 ++-- .../single_bit_manipulation_operations.py | 14 +++++++------- compression/burrows_wheeler.py | 2 +- compression/lempel_ziv.py | 4 ++-- compression/lempel_ziv_decompress.py | 4 ++-- data_structures/binary_tree/red_black_tree.py | 2 +- digital_image_processing/edge_detection/canny.py | 4 ++-- digital_image_processing/index_calculation.py | 2 +- dynamic_programming/combination_sum_iv.py | 2 +- electronics/coulombs_law.py | 4 ++-- hashes/fletcher16.py | 2 +- linear_algebra/lu_decomposition.py | 2 +- linear_algebra/src/schur_complement.py | 2 +- machine_learning/polynomial_regression.py | 4 ++-- maths/chudnovsky_algorithm.py | 2 +- maths/entropy.py | 4 ++-- maths/lucas_lehmer_primality_test.py | 4 ++-- maths/modular_division.py | 2 +- maths/numerical_analysis/bisection_2.py | 2 +- maths/numerical_analysis/nevilles_method.py | 2 +- maths/simultaneous_linear_equation_solver.py | 6 +++--- matrix/largest_square_area_in_matrix.py | 4 ++-- matrix/spiral_print.py | 2 +- neural_network/back_propagation_neural_network.py | 4 ++-- other/davis_putnam_logemann_loveland.py | 2 +- other/fischer_yates_shuffle.py | 2 +- physics/archimedes_principle_of_buoyant_force.py | 2 +- physics/center_of_mass.py | 8 ++++---- physics/centripetal_force.py | 2 +- physics/lorentz_transformation_four_vector.py | 14 +++++++------- physics/reynolds_number.py | 4 ++-- physics/terminal_velocity.py | 4 ++-- project_euler/problem_004/sol1.py | 2 +- project_euler/problem_004/sol2.py | 2 +- project_euler/problem_008/sol1.py | 2 +- project_euler/problem_008/sol2.py | 2 +- project_euler/problem_008/sol3.py | 2 +- project_euler/problem_015/sol1.py | 4 ++-- 
project_euler/problem_020/sol1.py | 4 ++-- project_euler/problem_020/sol2.py | 4 ++-- project_euler/problem_020/sol3.py | 4 ++-- project_euler/problem_020/sol4.py | 4 ++-- project_euler/problem_022/sol1.py | 2 +- project_euler/problem_022/sol2.py | 2 +- project_euler/problem_025/sol1.py | 2 +- project_euler/problem_025/sol2.py | 2 +- project_euler/problem_025/sol3.py | 2 +- project_euler/problem_027/sol1.py | 8 ++++---- project_euler/problem_031/sol1.py | 10 +++++----- project_euler/problem_031/sol2.py | 12 ++++++------ project_euler/problem_032/sol32.py | 2 +- project_euler/problem_038/sol1.py | 6 +++--- project_euler/problem_040/sol1.py | 2 +- project_euler/problem_044/sol1.py | 6 +++--- project_euler/problem_045/sol1.py | 4 ++-- project_euler/problem_046/sol1.py | 12 ++++++------ project_euler/problem_047/sol1.py | 10 +++++----- project_euler/problem_053/sol1.py | 2 +- project_euler/problem_097/sol1.py | 4 ++-- project_euler/problem_104/sol1.py | 2 +- project_euler/problem_120/sol1.py | 2 +- project_euler/problem_123/sol1.py | 2 +- project_euler/problem_135/sol1.py | 4 ++-- project_euler/problem_144/sol1.py | 4 ++-- project_euler/problem_174/sol1.py | 2 +- pyproject.toml | 1 + strings/jaro_winkler.py | 2 +- strings/manacher.py | 2 +- strings/prefix_function.py | 2 +- 69 files changed, 132 insertions(+), 131 deletions(-) diff --git a/backtracking/sudoku.py b/backtracking/sudoku.py index 8f5459c76d45..cabeebb90433 100644 --- a/backtracking/sudoku.py +++ b/backtracking/sudoku.py @@ -1,7 +1,7 @@ """ -Given a partially filled 9×9 2D array, the objective is to fill a 9×9 +Given a partially filled 9x9 2D array, the objective is to fill a 9x9 square grid with digits numbered 1 to 9, so that every row, column, and -and each of the nine 3×3 sub-grids contains all of the digits. +and each of the nine 3x3 sub-grids contains all of the digits. This can be solved using Backtracking and is similar to n-queens. 
We check to see if a cell is safe or not and recursively call the diff --git a/bit_manipulation/single_bit_manipulation_operations.py b/bit_manipulation/single_bit_manipulation_operations.py index b43ff07b776f..fcbf033ccb24 100644 --- a/bit_manipulation/single_bit_manipulation_operations.py +++ b/bit_manipulation/single_bit_manipulation_operations.py @@ -8,8 +8,8 @@ def set_bit(number: int, position: int) -> int: Set the bit at position to 1. Details: perform bitwise or for given number and X. - Where X is a number with all the bits – zeroes and bit on given - position – one. + Where X is a number with all the bits - zeroes and bit on given + position - one. >>> set_bit(0b1101, 1) # 0b1111 15 @@ -26,8 +26,8 @@ def clear_bit(number: int, position: int) -> int: Set the bit at position to 0. Details: perform bitwise and for given number and X. - Where X is a number with all the bits – ones and bit on given - position – zero. + Where X is a number with all the bits - ones and bit on given + position - zero. >>> clear_bit(0b10010, 1) # 0b10000 16 @@ -42,8 +42,8 @@ def flip_bit(number: int, position: int) -> int: Flip the bit at position. Details: perform bitwise xor for given number and X. - Where X is a number with all the bits – zeroes and bit on given - position – one. + Where X is a number with all the bits - zeroes and bit on given + position - one. >>> flip_bit(0b101, 1) # 0b111 7 @@ -79,7 +79,7 @@ def get_bit(number: int, position: int) -> int: Get the bit at the given position Details: perform bitwise and for the given number and X, - Where X is a number with all the bits – zeroes and bit on given position – one. + Where X is a number with all the bits - zeroes and bit on given position - one. If the result is not equal to 0, then the bit on the given position is 1, else 0. 
>>> get_bit(0b1010, 0) diff --git a/compression/burrows_wheeler.py b/compression/burrows_wheeler.py index ce493a70c8f9..857d677c904e 100644 --- a/compression/burrows_wheeler.py +++ b/compression/burrows_wheeler.py @@ -1,7 +1,7 @@ """ https://en.wikipedia.org/wiki/Burrows%E2%80%93Wheeler_transform -The Burrows–Wheeler transform (BWT, also called block-sorting compression) +The Burrows-Wheeler transform (BWT, also called block-sorting compression) rearranges a character string into runs of similar characters. This is useful for compression, since it tends to be easy to compress a string that has runs of repeated characters by techniques such as move-to-front transform and diff --git a/compression/lempel_ziv.py b/compression/lempel_ziv.py index ac3f0c6cfc06..2751a0ebcdb6 100644 --- a/compression/lempel_ziv.py +++ b/compression/lempel_ziv.py @@ -1,5 +1,5 @@ """ -One of the several implementations of Lempel–Ziv–Welch compression algorithm +One of the several implementations of Lempel-Ziv-Welch compression algorithm https://en.wikipedia.org/wiki/Lempel%E2%80%93Ziv%E2%80%93Welch """ @@ -43,7 +43,7 @@ def add_key_to_lexicon( def compress_data(data_bits: str) -> str: """ - Compresses given data_bits using Lempel–Ziv–Welch compression algorithm + Compresses given data_bits using Lempel-Ziv-Welch compression algorithm and returns the result as a string """ lexicon = {"0": "0", "1": "1"} diff --git a/compression/lempel_ziv_decompress.py b/compression/lempel_ziv_decompress.py index 0e49c83fb790..225e96236c2c 100644 --- a/compression/lempel_ziv_decompress.py +++ b/compression/lempel_ziv_decompress.py @@ -1,5 +1,5 @@ """ -One of the several implementations of Lempel–Ziv–Welch decompression algorithm +One of the several implementations of Lempel-Ziv-Welch decompression algorithm https://en.wikipedia.org/wiki/Lempel%E2%80%93Ziv%E2%80%93Welch """ @@ -26,7 +26,7 @@ def read_file_binary(file_path: str) -> str: def decompress_data(data_bits: str) -> str: """ - Decompresses given 
data_bits using Lempel–Ziv–Welch compression algorithm + Decompresses given data_bits using Lempel-Ziv-Welch compression algorithm and returns the result as a string """ lexicon = {"0": "0", "1": "1"} diff --git a/data_structures/binary_tree/red_black_tree.py b/data_structures/binary_tree/red_black_tree.py index e68d8d1e3735..a9ecf897c701 100644 --- a/data_structures/binary_tree/red_black_tree.py +++ b/data_structures/binary_tree/red_black_tree.py @@ -17,7 +17,7 @@ class RedBlackTree: and slower for reading in the average case, though, because they're both balanced binary search trees, both will get the same asymptotic performance. - To read more about them, https://en.wikipedia.org/wiki/Red–black_tree + To read more about them, https://en.wikipedia.org/wiki/Red-black_tree Unless otherwise specified, all asymptotic runtimes are specified in terms of the size of the tree. """ diff --git a/digital_image_processing/edge_detection/canny.py b/digital_image_processing/edge_detection/canny.py index f8cbeedb3874..944161c31cfc 100644 --- a/digital_image_processing/edge_detection/canny.py +++ b/digital_image_processing/edge_detection/canny.py @@ -74,9 +74,9 @@ def detect_high_low_threshold( image_shape, destination, threshold_low, threshold_high, weak, strong ): """ - High-Low threshold detection. If an edge pixel’s gradient value is higher + High-Low threshold detection. If an edge pixel's gradient value is higher than the high threshold value, it is marked as a strong edge pixel. If an - edge pixel’s gradient value is smaller than the high threshold value and + edge pixel's gradient value is smaller than the high threshold value and larger than the low threshold value, it is marked as a weak edge pixel. If an edge pixel's value is smaller than the low threshold value, it will be suppressed. 
diff --git a/digital_image_processing/index_calculation.py b/digital_image_processing/index_calculation.py index 67830668b0da..988f8e72b9a8 100644 --- a/digital_image_processing/index_calculation.py +++ b/digital_image_processing/index_calculation.py @@ -182,7 +182,7 @@ def arv12(self): Atmospherically Resistant Vegetation Index 2 https://www.indexdatabase.de/db/i-single.php?id=396 :return: index - −0.18+1.17*(self.nir−self.red)/(self.nir+self.red) + -0.18+1.17*(self.nir-self.red)/(self.nir+self.red) """ return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red))) diff --git a/dynamic_programming/combination_sum_iv.py b/dynamic_programming/combination_sum_iv.py index 4526729b70b7..113c06a27a9e 100644 --- a/dynamic_programming/combination_sum_iv.py +++ b/dynamic_programming/combination_sum_iv.py @@ -18,7 +18,7 @@ The basic idea is to go over recursively to find the way such that the sum of chosen elements is “tar”. For every element, we have two choices 1. Include the element in our set of chosen elements. - 2. Don’t include the element in our set of chosen elements. + 2. Don't include the element in our set of chosen elements. """ diff --git a/electronics/coulombs_law.py b/electronics/coulombs_law.py index 18c1a8179eb6..74bbea5ea8ec 100644 --- a/electronics/coulombs_law.py +++ b/electronics/coulombs_law.py @@ -20,8 +20,8 @@ def couloumbs_law( Reference ---------- - Coulomb (1785) "Premier mémoire sur l’électricité et le magnétisme," - Histoire de l’Académie Royale des Sciences, pp. 569–577. + Coulomb (1785) "Premier mémoire sur l'électricité et le magnétisme," + Histoire de l'Académie Royale des Sciences, pp. 569-577. Parameters ---------- diff --git a/hashes/fletcher16.py b/hashes/fletcher16.py index 7c23c98d72c5..add8e185bc06 100644 --- a/hashes/fletcher16.py +++ b/hashes/fletcher16.py @@ -1,6 +1,6 @@ """ The Fletcher checksum is an algorithm for computing a position-dependent -checksum devised by John G. 
Fletcher (1934–2012) at Lawrence Livermore Labs +checksum devised by John G. Fletcher (1934-2012) at Lawrence Livermore Labs in the late 1970s.[1] The objective of the Fletcher checksum was to provide error-detection properties approaching those of a cyclic redundancy check but with the lower computational effort associated diff --git a/linear_algebra/lu_decomposition.py b/linear_algebra/lu_decomposition.py index 1d364163d9a7..3620674835cd 100644 --- a/linear_algebra/lu_decomposition.py +++ b/linear_algebra/lu_decomposition.py @@ -1,5 +1,5 @@ """ -Lower–upper (LU) decomposition factors a matrix as a product of a lower +Lower-upper (LU) decomposition factors a matrix as a product of a lower triangular matrix and an upper triangular matrix. A square matrix has an LU decomposition under the following conditions: - If the matrix is invertible, then it has an LU decomposition if and only diff --git a/linear_algebra/src/schur_complement.py b/linear_algebra/src/schur_complement.py index 1cc084043856..7c79bb70abfc 100644 --- a/linear_algebra/src/schur_complement.py +++ b/linear_algebra/src/schur_complement.py @@ -18,7 +18,7 @@ def schur_complement( the pseudo_inv argument. Link to Wiki: https://en.wikipedia.org/wiki/Schur_complement - See also Convex Optimization – Boyd and Vandenberghe, A.5.5 + See also Convex Optimization - Boyd and Vandenberghe, A.5.5 >>> import numpy as np >>> a = np.array([[1, 2], [2, 1]]) >>> b = np.array([[0, 3], [3, 0]]) diff --git a/machine_learning/polynomial_regression.py b/machine_learning/polynomial_regression.py index 5bafea96f41e..19f7dc994017 100644 --- a/machine_learning/polynomial_regression.py +++ b/machine_learning/polynomial_regression.py @@ -11,7 +11,7 @@ β = (XᵀX)⁻¹Xᵀy = X⁺y -where X is the design matrix, y is the response vector, and X⁺ denotes the Moore–Penrose +where X is the design matrix, y is the response vector, and X⁺ denotes the Moore-Penrose pseudoinverse of X. 
In the case of polynomial regression, the design matrix is |1 x₁ x₁² ⋯ x₁ᵐ| @@ -106,7 +106,7 @@ def fit(self, x_train: np.ndarray, y_train: np.ndarray) -> None: β = (XᵀX)⁻¹Xᵀy = X⁺y - where X⁺ denotes the Moore–Penrose pseudoinverse of the design matrix X. This + where X⁺ denotes the Moore-Penrose pseudoinverse of the design matrix X. This function computes X⁺ using singular value decomposition (SVD). References: diff --git a/maths/chudnovsky_algorithm.py b/maths/chudnovsky_algorithm.py index aaee7462822e..d122bf0756f7 100644 --- a/maths/chudnovsky_algorithm.py +++ b/maths/chudnovsky_algorithm.py @@ -5,7 +5,7 @@ def pi(precision: int) -> str: """ The Chudnovsky algorithm is a fast method for calculating the digits of PI, - based on Ramanujan’s PI formulae. + based on Ramanujan's PI formulae. https://en.wikipedia.org/wiki/Chudnovsky_algorithm diff --git a/maths/entropy.py b/maths/entropy.py index 39ec67bea038..b816f1d193f7 100644 --- a/maths/entropy.py +++ b/maths/entropy.py @@ -21,10 +21,10 @@ def calculate_prob(text: str) -> None: :return: Prints 1) Entropy of information based on 1 alphabet 2) Entropy of information based on couples of 2 alphabet - 3) print Entropy of H(X n∣Xn−1) + 3) print Entropy of H(X n|Xn-1) Text from random books. Also, random quotes. - >>> text = ("Behind Winston’s back the voice " + >>> text = ("Behind Winston's back the voice " ... "from the telescreen was still " ... "babbling and the overfulfilment") >>> calculate_prob(text) diff --git a/maths/lucas_lehmer_primality_test.py b/maths/lucas_lehmer_primality_test.py index 292387414dee..af5c81133044 100644 --- a/maths/lucas_lehmer_primality_test.py +++ b/maths/lucas_lehmer_primality_test.py @@ -1,12 +1,12 @@ """ -In mathematics, the Lucas–Lehmer test (LLT) is a primality test for Mersenne +In mathematics, the Lucas-Lehmer test (LLT) is a primality test for Mersenne numbers. 
https://en.wikipedia.org/wiki/Lucas%E2%80%93Lehmer_primality_test A Mersenne number is a number that is one less than a power of two. That is M_p = 2^p - 1 https://en.wikipedia.org/wiki/Mersenne_prime -The Lucas–Lehmer test is the primality test used by the +The Lucas-Lehmer test is the primality test used by the Great Internet Mersenne Prime Search (GIMPS) to locate large primes. """ diff --git a/maths/modular_division.py b/maths/modular_division.py index 260d5683705d..2f8f4479b27d 100644 --- a/maths/modular_division.py +++ b/maths/modular_division.py @@ -9,7 +9,7 @@ def modular_division(a: int, b: int, n: int) -> int: GCD ( Greatest Common Divisor ) or HCF ( Highest Common Factor ) Given three integers a, b, and n, such that gcd(a,n)=1 and n>1, the algorithm should - return an integer x such that 0≤x≤n−1, and b/a=x(modn) (that is, b=ax(modn)). + return an integer x such that 0≤x≤n-1, and b/a=x(modn) (that is, b=ax(modn)). Theorem: a has a multiplicative inverse modulo n iff gcd(a,n) = 1 diff --git a/maths/numerical_analysis/bisection_2.py b/maths/numerical_analysis/bisection_2.py index 45f26d8d88e4..68ba6577ce29 100644 --- a/maths/numerical_analysis/bisection_2.py +++ b/maths/numerical_analysis/bisection_2.py @@ -1,5 +1,5 @@ """ -Given a function on floating number f(x) and two floating numbers ‘a’ and ‘b’ such that +Given a function on floating number f(x) and two floating numbers `a` and `b` such that f(a) * f(b) < 0 and f(x) is continuous in [a, b]. Here f(x) represents algebraic or transcendental equation. Find root of function in interval [a, b] (Or find a value of x such that f(x) is 0) diff --git a/maths/numerical_analysis/nevilles_method.py b/maths/numerical_analysis/nevilles_method.py index 256b61f5f218..25c93ac6c531 100644 --- a/maths/numerical_analysis/nevilles_method.py +++ b/maths/numerical_analysis/nevilles_method.py @@ -1,7 +1,7 @@ """ Python program to show how to interpolate and evaluate a polynomial using Neville's method. 
-Neville’s method evaluates a polynomial that passes through a +Neville's method evaluates a polynomial that passes through a given set of x and y points for a particular x value (x0) using the Newton polynomial form. Reference: diff --git a/maths/simultaneous_linear_equation_solver.py b/maths/simultaneous_linear_equation_solver.py index 1287b2002d00..9685a33e82fe 100644 --- a/maths/simultaneous_linear_equation_solver.py +++ b/maths/simultaneous_linear_equation_solver.py @@ -2,10 +2,10 @@ https://en.wikipedia.org/wiki/Augmented_matrix This algorithm solves simultaneous linear equations of the form -λa + λb + λc + λd + ... = γ as [λ, λ, λ, λ, ..., γ] -Where λ & γ are individual coefficients, the no. of equations = no. of coefficients - 1 +λa + λb + λc + λd + ... = y as [λ, λ, λ, λ, ..., y] +Where λ & y are individual coefficients, the no. of equations = no. of coefficients - 1 -Note in order to work there must exist 1 equation where all instances of λ and γ != 0 +Note in order to work there must exist 1 equation where all instances of λ and y != 0 """ diff --git a/matrix/largest_square_area_in_matrix.py b/matrix/largest_square_area_in_matrix.py index a93369c56bbd..16263fb798f1 100644 --- a/matrix/largest_square_area_in_matrix.py +++ b/matrix/largest_square_area_in_matrix.py @@ -31,7 +31,7 @@ Approach: We initialize another matrix (dp) with the same dimensions -as the original one initialized with all 0’s. +as the original one initialized with all 0's. dp_array(i,j) represents the side length of the maximum square whose bottom right corner is the cell with index (i,j) in the original matrix. @@ -39,7 +39,7 @@ Starting from index (0,0), for every 1 found in the original matrix, we update the value of the current element as -dp_array(i,j)=dp_array(dp(i−1,j),dp_array(i−1,j−1),dp_array(i,j−1)) + 1. +dp_array(i,j)=dp_array(dp(i-1,j),dp_array(i-1,j-1),dp_array(i,j-1)) + 1. 
""" diff --git a/matrix/spiral_print.py b/matrix/spiral_print.py index c16dde69cb56..88bde1db594d 100644 --- a/matrix/spiral_print.py +++ b/matrix/spiral_print.py @@ -89,7 +89,7 @@ def spiral_traversal(matrix: list[list]) -> list[int]: Algorithm: Step 1. first pop the 0 index list. (which is [1,2,3,4] and concatenate the output of [step 2]) - Step 2. Now perform matrix’s Transpose operation (Change rows to column + Step 2. Now perform matrix's Transpose operation (Change rows to column and vice versa) and reverse the resultant matrix. Step 3. Pass the output of [2nd step], to same recursive function till base case hits. diff --git a/neural_network/back_propagation_neural_network.py b/neural_network/back_propagation_neural_network.py index 6131a13e945e..182f759c5fc7 100644 --- a/neural_network/back_propagation_neural_network.py +++ b/neural_network/back_propagation_neural_network.py @@ -2,10 +2,10 @@ """ -A Framework of Back Propagation Neural Network(BP) model +A Framework of Back Propagation Neural Network (BP) model Easy to use: - * add many layers as you want !!! + * add many layers as you want ! ! ! * clearly see how the loss decreasing Easy to expand: * more activation functions diff --git a/other/davis_putnam_logemann_loveland.py b/other/davis_putnam_logemann_loveland.py index 3a76f3dfef08..0f3100b1bc2e 100644 --- a/other/davis_putnam_logemann_loveland.py +++ b/other/davis_putnam_logemann_loveland.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 """ -Davis–Putnam–Logemann–Loveland (DPLL) algorithm is a complete, backtracking-based +Davis-Putnam-Logemann-Loveland (DPLL) algorithm is a complete, backtracking-based search algorithm for deciding the satisfiability of propositional logic formulae in conjunctive normal form, i.e, for solving the Conjunctive Normal Form SATisfiability (CNF-SAT) problem. 
diff --git a/other/fischer_yates_shuffle.py b/other/fischer_yates_shuffle.py index 37e11479a4c9..5e90b10edd89 100644 --- a/other/fischer_yates_shuffle.py +++ b/other/fischer_yates_shuffle.py @@ -1,6 +1,6 @@ #!/usr/bin/python """ -The Fisher–Yates shuffle is an algorithm for generating a random permutation of a +The Fisher-Yates shuffle is an algorithm for generating a random permutation of a finite sequence. For more details visit wikipedia/Fischer-Yates-Shuffle. diff --git a/physics/archimedes_principle_of_buoyant_force.py b/physics/archimedes_principle_of_buoyant_force.py index 71043e0e1111..38f1a0a83832 100644 --- a/physics/archimedes_principle_of_buoyant_force.py +++ b/physics/archimedes_principle_of_buoyant_force.py @@ -3,7 +3,7 @@ fluid. This principle was discovered by the Greek mathematician Archimedes. Equation for calculating buoyant force: -Fb = ρ * V * g +Fb = p * V * g https://en.wikipedia.org/wiki/Archimedes%27_principle """ diff --git a/physics/center_of_mass.py b/physics/center_of_mass.py index 59c3b807f401..7a20e71be801 100644 --- a/physics/center_of_mass.py +++ b/physics/center_of_mass.py @@ -16,8 +16,8 @@ is the particle equivalent of a given object for the application of Newton's laws of motion. -In the case of a system of particles P_i, i = 1, ..., n , each with mass m_i that are -located in space with coordinates r_i, i = 1, ..., n , the coordinates R of the center +In the case of a system of particles P_i, i = 1, ..., n , each with mass m_i that are +located in space with coordinates r_i, i = 1, ..., n , the coordinates R of the center of mass corresponds to: R = (Σ(mi * ri) / Σ(mi)) @@ -36,8 +36,8 @@ def center_of_mass(particles: list[Particle]) -> Coord3D: Input Parameters ---------------- particles: list(Particle): - A list of particles where each particle is a tuple with it´s (x, y, z) position and - it´s mass. + A list of particles where each particle is a tuple with it's (x, y, z) position and + it's mass. 
Returns ------- diff --git a/physics/centripetal_force.py b/physics/centripetal_force.py index 04069d256468..a4c624582475 100644 --- a/physics/centripetal_force.py +++ b/physics/centripetal_force.py @@ -6,7 +6,7 @@ The unit of centripetal force is newton. The centripetal force is always directed perpendicular to the -direction of the object’s displacement. Using Newton’s second +direction of the object's displacement. Using Newton's second law of motion, it is found that the centripetal force of an object moving in a circular path always acts towards the centre of the circle. The Centripetal Force Formula is given as the product of mass (in kg) diff --git a/physics/lorentz_transformation_four_vector.py b/physics/lorentz_transformation_four_vector.py index f4fda4dff8cd..3b0fd83d45df 100644 --- a/physics/lorentz_transformation_four_vector.py +++ b/physics/lorentz_transformation_four_vector.py @@ -12,13 +12,13 @@ with respect to X, then the Lorentz transformation from X to X' is X' = BX, where - | γ -γβ 0 0| -B = |-γβ γ 0 0| + | y -γβ 0 0| +B = |-γβ y 0 0| | 0 0 1 0| | 0 0 0 1| is the matrix describing the Lorentz boost between X and X', -γ = 1 / √(1 - v²/c²) is the Lorentz factor, and β = v/c is the velocity as +y = 1 / √(1 - v²/c²) is the Lorentz factor, and β = v/c is the velocity as a fraction of c. 
Reference: https://en.wikipedia.org/wiki/Lorentz_transformation @@ -63,7 +63,7 @@ def beta(velocity: float) -> float: def gamma(velocity: float) -> float: """ - Calculate the Lorentz factor γ = 1 / √(1 - v²/c²) for a given velocity + Calculate the Lorentz factor y = 1 / √(1 - v²/c²) for a given velocity >>> gamma(4) 1.0000000000000002 >>> gamma(1e5) @@ -90,12 +90,12 @@ def transformation_matrix(velocity: float) -> np.ndarray: """ Calculate the Lorentz transformation matrix for movement in the x direction: - | γ -γβ 0 0| - |-γβ γ 0 0| + | y -γβ 0 0| + |-γβ y 0 0| | 0 0 1 0| | 0 0 0 1| - where γ is the Lorentz factor and β is the velocity as a fraction of c + where y is the Lorentz factor and β is the velocity as a fraction of c >>> transformation_matrix(29979245) array([[ 1.00503781, -0.10050378, 0. , 0. ], [-0.10050378, 1.00503781, 0. , 0. ], diff --git a/physics/reynolds_number.py b/physics/reynolds_number.py index dffe690f8822..c24a9e002855 100644 --- a/physics/reynolds_number.py +++ b/physics/reynolds_number.py @@ -8,10 +8,10 @@ viscous forces. R = Inertial Forces / Viscous Forces -R = (ρ * V * D)/μ +R = (p * V * D)/μ where : -ρ = Density of fluid (in Kg/m^3) +p = Density of fluid (in Kg/m^3) D = Diameter of pipe through which fluid flows (in m) V = Velocity of flow of the fluid (in m/s) μ = Viscosity of the fluid (in Ns/m^2) diff --git a/physics/terminal_velocity.py b/physics/terminal_velocity.py index cec54162e2b4..16714bd02671 100644 --- a/physics/terminal_velocity.py +++ b/physics/terminal_velocity.py @@ -8,13 +8,13 @@ object. The acceleration of the object is zero as the net force acting on the object is zero. 
-Vt = ((2 * m * g)/(ρ * A * Cd))^0.5 +Vt = ((2 * m * g)/(p * A * Cd))^0.5 where : Vt = Terminal velocity (in m/s) m = Mass of the falling object (in Kg) g = Acceleration due to gravity (value taken : imported from scipy) -ρ = Density of the fluid through which the object is falling (in Kg/m^3) +p = Density of the fluid through which the object is falling (in Kg/m^3) A = Projected area of the object (in m^2) Cd = Drag coefficient (dimensionless) diff --git a/project_euler/problem_004/sol1.py b/project_euler/problem_004/sol1.py index f237afdd942d..f80a3253e741 100644 --- a/project_euler/problem_004/sol1.py +++ b/project_euler/problem_004/sol1.py @@ -4,7 +4,7 @@ Largest palindrome product A palindromic number reads the same both ways. The largest palindrome made -from the product of two 2-digit numbers is 9009 = 91 × 99. +from the product of two 2-digit numbers is 9009 = 91 x 99. Find the largest palindrome made from the product of two 3-digit numbers. diff --git a/project_euler/problem_004/sol2.py b/project_euler/problem_004/sol2.py index abc880966d58..1fa75e7d0c83 100644 --- a/project_euler/problem_004/sol2.py +++ b/project_euler/problem_004/sol2.py @@ -4,7 +4,7 @@ Largest palindrome product A palindromic number reads the same both ways. The largest palindrome made -from the product of two 2-digit numbers is 9009 = 91 × 99. +from the product of two 2-digit numbers is 9009 = 91 x 99. Find the largest palindrome made from the product of two 3-digit numbers. diff --git a/project_euler/problem_008/sol1.py b/project_euler/problem_008/sol1.py index 69dd1b4736c1..adbac8d5ad1f 100644 --- a/project_euler/problem_008/sol1.py +++ b/project_euler/problem_008/sol1.py @@ -4,7 +4,7 @@ Largest product in a series The four adjacent digits in the 1000-digit number that have the greatest -product are 9 × 9 × 8 × 9 = 5832. +product are 9 x 9 x 8 x 9 = 5832. 
73167176531330624919225119674426574742355349194934 96983520312774506326239578318016984801869478851843 diff --git a/project_euler/problem_008/sol2.py b/project_euler/problem_008/sol2.py index f83cb1db30b6..e48231e4023b 100644 --- a/project_euler/problem_008/sol2.py +++ b/project_euler/problem_008/sol2.py @@ -4,7 +4,7 @@ Largest product in a series The four adjacent digits in the 1000-digit number that have the greatest -product are 9 × 9 × 8 × 9 = 5832. +product are 9 x 9 x 8 x 9 = 5832. 73167176531330624919225119674426574742355349194934 96983520312774506326239578318016984801869478851843 diff --git a/project_euler/problem_008/sol3.py b/project_euler/problem_008/sol3.py index bf3bcb05b7e9..0d319b9684dd 100644 --- a/project_euler/problem_008/sol3.py +++ b/project_euler/problem_008/sol3.py @@ -4,7 +4,7 @@ Largest product in a series The four adjacent digits in the 1000-digit number that have the greatest -product are 9 × 9 × 8 × 9 = 5832. +product are 9 x 9 x 8 x 9 = 5832. 73167176531330624919225119674426574742355349194934 96983520312774506326239578318016984801869478851843 diff --git a/project_euler/problem_015/sol1.py b/project_euler/problem_015/sol1.py index fd9014a406f6..3c9dae1aed77 100644 --- a/project_euler/problem_015/sol1.py +++ b/project_euler/problem_015/sol1.py @@ -1,9 +1,9 @@ """ Problem 15: https://projecteuler.net/problem=15 -Starting in the top left corner of a 2×2 grid, and only being able to move to +Starting in the top left corner of a 2x2 grid, and only being able to move to the right and down, there are exactly 6 routes to the bottom right corner. -How many such routes are there through a 20×20 grid? +How many such routes are there through a 20x20 grid? 
""" from math import factorial diff --git a/project_euler/problem_020/sol1.py b/project_euler/problem_020/sol1.py index b472024e54c0..1439bdca38e6 100644 --- a/project_euler/problem_020/sol1.py +++ b/project_euler/problem_020/sol1.py @@ -1,9 +1,9 @@ """ Problem 20: https://projecteuler.net/problem=20 -n! means n × (n − 1) × ... × 3 × 2 × 1 +n! means n x (n - 1) x ... x 3 x 2 x 1 -For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800, +For example, 10! = 10 x 9 x ... x 3 x 2 x 1 = 3628800, and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27. Find the sum of the digits in the number 100! diff --git a/project_euler/problem_020/sol2.py b/project_euler/problem_020/sol2.py index a1d56ade7708..61684cd5ef6d 100644 --- a/project_euler/problem_020/sol2.py +++ b/project_euler/problem_020/sol2.py @@ -1,9 +1,9 @@ """ Problem 20: https://projecteuler.net/problem=20 -n! means n × (n − 1) × ... × 3 × 2 × 1 +n! means n x (n - 1) x ... x 3 x 2 x 1 -For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800, +For example, 10! = 10 x 9 x ... x 3 x 2 x 1 = 3628800, and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27. Find the sum of the digits in the number 100! diff --git a/project_euler/problem_020/sol3.py b/project_euler/problem_020/sol3.py index 1886e05463f4..8984def9c34e 100644 --- a/project_euler/problem_020/sol3.py +++ b/project_euler/problem_020/sol3.py @@ -1,9 +1,9 @@ """ Problem 20: https://projecteuler.net/problem=20 -n! means n × (n − 1) × ... × 3 × 2 × 1 +n! means n x (n - 1) x ... x 3 x 2 x 1 -For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800, +For example, 10! = 10 x 9 x ... x 3 x 2 x 1 = 3628800, and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27. Find the sum of the digits in the number 100! 
diff --git a/project_euler/problem_020/sol4.py b/project_euler/problem_020/sol4.py index b32ce309dfa6..511ac81e176b 100644 --- a/project_euler/problem_020/sol4.py +++ b/project_euler/problem_020/sol4.py @@ -1,9 +1,9 @@ """ Problem 20: https://projecteuler.net/problem=20 -n! means n × (n − 1) × ... × 3 × 2 × 1 +n! means n x (n - 1) x ... x 3 x 2 x 1 -For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800, +For example, 10! = 10 x 9 x ... x 3 x 2 x 1 = 3628800, and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27. Find the sum of the digits in the number 100! diff --git a/project_euler/problem_022/sol1.py b/project_euler/problem_022/sol1.py index b6386186e7df..c4af5dfa81df 100644 --- a/project_euler/problem_022/sol1.py +++ b/project_euler/problem_022/sol1.py @@ -10,7 +10,7 @@ For example, when the list is sorted into alphabetical order, COLIN, which is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So, COLIN would -obtain a score of 938 × 53 = 49714. +obtain a score of 938 x 53 = 49714. What is the total of all the name scores in the file? """ diff --git a/project_euler/problem_022/sol2.py b/project_euler/problem_022/sol2.py index f7092ea1cd12..9c22b6bba0cc 100644 --- a/project_euler/problem_022/sol2.py +++ b/project_euler/problem_022/sol2.py @@ -10,7 +10,7 @@ For example, when the list is sorted into alphabetical order, COLIN, which is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So, COLIN would -obtain a score of 938 × 53 = 49714. +obtain a score of 938 x 53 = 49714. What is the total of all the name scores in the file? """ diff --git a/project_euler/problem_025/sol1.py b/project_euler/problem_025/sol1.py index 803464b5d786..b3bbb56d20be 100644 --- a/project_euler/problem_025/sol1.py +++ b/project_euler/problem_025/sol1.py @@ -1,7 +1,7 @@ """ The Fibonacci sequence is defined by the recurrence relation: - Fn = Fn−1 + Fn−2, where F1 = 1 and F2 = 1. + Fn = Fn-1 + Fn-2, where F1 = 1 and F2 = 1. 
Hence the first 12 terms will be: diff --git a/project_euler/problem_025/sol2.py b/project_euler/problem_025/sol2.py index 9e950b355f7a..a0f056023bc9 100644 --- a/project_euler/problem_025/sol2.py +++ b/project_euler/problem_025/sol2.py @@ -1,7 +1,7 @@ """ The Fibonacci sequence is defined by the recurrence relation: - Fn = Fn−1 + Fn−2, where F1 = 1 and F2 = 1. + Fn = Fn-1 + Fn-2, where F1 = 1 and F2 = 1. Hence the first 12 terms will be: diff --git a/project_euler/problem_025/sol3.py b/project_euler/problem_025/sol3.py index 0b9f3a0c84ef..e33b159ac65c 100644 --- a/project_euler/problem_025/sol3.py +++ b/project_euler/problem_025/sol3.py @@ -1,7 +1,7 @@ """ The Fibonacci sequence is defined by the recurrence relation: - Fn = Fn−1 + Fn−2, where F1 = 1 and F2 = 1. + Fn = Fn-1 + Fn-2, where F1 = 1 and F2 = 1. Hence the first 12 terms will be: diff --git a/project_euler/problem_027/sol1.py b/project_euler/problem_027/sol1.py index c93e2b4fa251..48755ec19763 100644 --- a/project_euler/problem_027/sol1.py +++ b/project_euler/problem_027/sol1.py @@ -9,12 +9,12 @@ It turns out that the formula will produce 40 primes for the consecutive values n = 0 to 39. However, when n = 40, 402 + 40 + 41 = 40(40 + 1) + 41 is divisible by 41, and certainly when n = 41, 412 + 41 + 41 is clearly divisible by 41. -The incredible formula n2 − 79n + 1601 was discovered, which produces 80 primes -for the consecutive values n = 0 to 79. The product of the coefficients, −79 and -1601, is −126479. +The incredible formula n2 - 79n + 1601 was discovered, which produces 80 primes +for the consecutive values n = 0 to 79. The product of the coefficients, -79 and +1601, is -126479. Considering quadratics of the form: n² + an + b, where |a| < 1000 and |b| < 1000 -where |n| is the modulus/absolute value of ne.g. |11| = 11 and |−4| = 4 +where |n| is the modulus/absolute value of ne.g. 
|11| = 11 and |-4| = 4 Find the product of the coefficients, a and b, for the quadratic expression that produces the maximum number of primes for consecutive values of n, starting with n = 0. diff --git a/project_euler/problem_031/sol1.py b/project_euler/problem_031/sol1.py index ba40cf383175..4c9c533eecb7 100644 --- a/project_euler/problem_031/sol1.py +++ b/project_euler/problem_031/sol1.py @@ -2,14 +2,14 @@ Coin sums Problem 31: https://projecteuler.net/problem=31 -In England the currency is made up of pound, £, and pence, p, and there are +In England the currency is made up of pound, f, and pence, p, and there are eight coins in general circulation: -1p, 2p, 5p, 10p, 20p, 50p, £1 (100p) and £2 (200p). -It is possible to make £2 in the following way: +1p, 2p, 5p, 10p, 20p, 50p, f1 (100p) and f2 (200p). +It is possible to make f2 in the following way: -1×£1 + 1×50p + 2×20p + 1×5p + 1×2p + 3×1p -How many different ways can £2 be made using any number of coins? +1xf1 + 1x50p + 2x20p + 1x5p + 1x2p + 3x1p +How many different ways can f2 be made using any number of coins? """ diff --git a/project_euler/problem_031/sol2.py b/project_euler/problem_031/sol2.py index f9e4dc384bff..574f8d4107a1 100644 --- a/project_euler/problem_031/sol2.py +++ b/project_euler/problem_031/sol2.py @@ -3,17 +3,17 @@ Coin sums -In England the currency is made up of pound, £, and pence, p, and there are +In England the currency is made up of pound, f, and pence, p, and there are eight coins in general circulation: -1p, 2p, 5p, 10p, 20p, 50p, £1 (100p) and £2 (200p). -It is possible to make £2 in the following way: +1p, 2p, 5p, 10p, 20p, 50p, f1 (100p) and f2 (200p). +It is possible to make f2 in the following way: -1×£1 + 1×50p + 2×20p + 1×5p + 1×2p + 3×1p -How many different ways can £2 be made using any number of coins? +1xf1 + 1x50p + 2x20p + 1x5p + 1x2p + 3x1p +How many different ways can f2 be made using any number of coins? 
Hint: - > There are 100 pence in a pound (£1 = 100p) + > There are 100 pence in a pound (f1 = 100p) > There are coins(in pence) are available: 1, 2, 5, 10, 20, 50, 100 and 200. > how many different ways you can combine these values to create 200 pence. diff --git a/project_euler/problem_032/sol32.py b/project_euler/problem_032/sol32.py index a402b5584061..c0ca2ce10791 100644 --- a/project_euler/problem_032/sol32.py +++ b/project_euler/problem_032/sol32.py @@ -3,7 +3,7 @@ digits 1 to n exactly once; for example, the 5-digit number, 15234, is 1 through 5 pandigital. -The product 7254 is unusual, as the identity, 39 × 186 = 7254, containing +The product 7254 is unusual, as the identity, 39 x 186 = 7254, containing multiplicand, multiplier, and product is 1 through 9 pandigital. Find the sum of all products whose multiplicand/multiplier/product identity can diff --git a/project_euler/problem_038/sol1.py b/project_euler/problem_038/sol1.py index 5bef273ea2a9..382892723b7d 100644 --- a/project_euler/problem_038/sol1.py +++ b/project_euler/problem_038/sol1.py @@ -3,9 +3,9 @@ Take the number 192 and multiply it by each of 1, 2, and 3: -192 × 1 = 192 -192 × 2 = 384 -192 × 3 = 576 +192 x 1 = 192 +192 x 2 = 384 +192 x 3 = 576 By concatenating each product we get the 1 to 9 pandigital, 192384576. We will call 192384576 the concatenated product of 192 and (1,2,3) diff --git a/project_euler/problem_040/sol1.py b/project_euler/problem_040/sol1.py index 69be377723a5..721bd063c28a 100644 --- a/project_euler/problem_040/sol1.py +++ b/project_euler/problem_040/sol1.py @@ -11,7 +11,7 @@ If dn represents the nth digit of the fractional part, find the value of the following expression. 
-d1 × d10 × d100 × d1000 × d10000 × d100000 × d1000000 +d1 x d10 x d100 x d1000 x d10000 x d100000 x d1000000 """ diff --git a/project_euler/problem_044/sol1.py b/project_euler/problem_044/sol1.py index 3b75b6a56a8e..2613563a4bf1 100644 --- a/project_euler/problem_044/sol1.py +++ b/project_euler/problem_044/sol1.py @@ -1,14 +1,14 @@ """ Problem 44: https://projecteuler.net/problem=44 -Pentagonal numbers are generated by the formula, Pn=n(3n−1)/2. The first ten +Pentagonal numbers are generated by the formula, Pn=n(3n-1)/2. The first ten pentagonal numbers are: 1, 5, 12, 22, 35, 51, 70, 92, 117, 145, ... It can be seen that P4 + P7 = 22 + 70 = 92 = P8. However, their difference, -70 − 22 = 48, is not pentagonal. +70 - 22 = 48, is not pentagonal. Find the pair of pentagonal numbers, Pj and Pk, for which their sum and difference -are pentagonal and D = |Pk − Pj| is minimised; what is the value of D? +are pentagonal and D = |Pk - Pj| is minimised; what is the value of D? """ diff --git a/project_euler/problem_045/sol1.py b/project_euler/problem_045/sol1.py index d921b2802c2d..8d016de6e542 100644 --- a/project_euler/problem_045/sol1.py +++ b/project_euler/problem_045/sol1.py @@ -3,8 +3,8 @@ Triangle, pentagonal, and hexagonal numbers are generated by the following formulae: Triangle T(n) = (n * (n + 1)) / 2 1, 3, 6, 10, 15, ... -Pentagonal P(n) = (n * (3 * n − 1)) / 2 1, 5, 12, 22, 35, ... -Hexagonal H(n) = n * (2 * n − 1) 1, 6, 15, 28, 45, ... +Pentagonal P(n) = (n * (3 * n - 1)) / 2 1, 5, 12, 22, 35, ... +Hexagonal H(n) = n * (2 * n - 1) 1, 6, 15, 28, 45, ... It can be verified that T(285) = P(165) = H(143) = 40755. Find the next triangle number that is also pentagonal and hexagonal. 
diff --git a/project_euler/problem_046/sol1.py b/project_euler/problem_046/sol1.py index 07dd9bbf84c8..f27f658e63e5 100644 --- a/project_euler/problem_046/sol1.py +++ b/project_euler/problem_046/sol1.py @@ -4,12 +4,12 @@ It was proposed by Christian Goldbach that every odd composite number can be written as the sum of a prime and twice a square. -9 = 7 + 2 × 12 -15 = 7 + 2 × 22 -21 = 3 + 2 × 32 -25 = 7 + 2 × 32 -27 = 19 + 2 × 22 -33 = 31 + 2 × 12 +9 = 7 + 2 x 12 +15 = 7 + 2 x 22 +21 = 3 + 2 x 32 +25 = 7 + 2 x 32 +27 = 19 + 2 x 22 +33 = 31 + 2 x 12 It turns out that the conjecture was false. diff --git a/project_euler/problem_047/sol1.py b/project_euler/problem_047/sol1.py index 1287e0d9e107..c9c44a9832dd 100644 --- a/project_euler/problem_047/sol1.py +++ b/project_euler/problem_047/sol1.py @@ -5,14 +5,14 @@ The first two consecutive numbers to have two distinct prime factors are: -14 = 2 × 7 -15 = 3 × 5 +14 = 2 x 7 +15 = 3 x 5 The first three consecutive numbers to have three distinct prime factors are: -644 = 2² × 7 × 23 -645 = 3 × 5 × 43 -646 = 2 × 17 × 19. +644 = 2² x 7 x 23 +645 = 3 x 5 x 43 +646 = 2 x 17 x 19. Find the first four consecutive integers to have four distinct prime factors each. What is the first of these numbers? diff --git a/project_euler/problem_053/sol1.py b/project_euler/problem_053/sol1.py index a32b73c545d6..192cbf25e50c 100644 --- a/project_euler/problem_053/sol1.py +++ b/project_euler/problem_053/sol1.py @@ -10,7 +10,7 @@ In general, -nCr = n!/(r!(n−r)!),where r ≤ n, n! = n×(n−1)×...×3×2×1, and 0! = 1. +nCr = n!/(r!(n-r)!),where r ≤ n, n! = nx(n-1)x...x3x2x1, and 0! = 1. It is not until n = 23, that a value exceeds one-million: 23C10 = 1144066. 
How many, not necessarily distinct, values of nCr, for 1 ≤ n ≤ 100, are greater diff --git a/project_euler/problem_097/sol1.py b/project_euler/problem_097/sol1.py index 2807e893ded0..a349f3a1dbc9 100644 --- a/project_euler/problem_097/sol1.py +++ b/project_euler/problem_097/sol1.py @@ -1,7 +1,7 @@ """ The first known prime found to exceed one million digits was discovered in 1999, -and is a Mersenne prime of the form 2**6972593 − 1; it contains exactly 2,098,960 -digits. Subsequently other Mersenne primes, of the form 2**p − 1, have been found +and is a Mersenne prime of the form 2**6972593 - 1; it contains exactly 2,098,960 +digits. Subsequently other Mersenne primes, of the form 2**p - 1, have been found which contain more digits. However, in 2004 there was found a massive non-Mersenne prime which contains 2,357,207 digits: (28433 * (2 ** 7830457 + 1)). diff --git a/project_euler/problem_104/sol1.py b/project_euler/problem_104/sol1.py index d84dbcfc9c65..a0267faa6a38 100644 --- a/project_euler/problem_104/sol1.py +++ b/project_euler/problem_104/sol1.py @@ -3,7 +3,7 @@ The Fibonacci sequence is defined by the recurrence relation: -Fn = Fn−1 + Fn−2, where F1 = 1 and F2 = 1. +Fn = Fn-1 + Fn-2, where F1 = 1 and F2 = 1. It turns out that F541, which contains 113 digits, is the first Fibonacci number for which the last nine digits are 1-9 pandigital (contain all the digits 1 to 9, but not necessarily in order). And F2749, which contains 575 digits, is the first diff --git a/project_euler/problem_120/sol1.py b/project_euler/problem_120/sol1.py index 0e6821214560..2f403972502f 100644 --- a/project_euler/problem_120/sol1.py +++ b/project_euler/problem_120/sol1.py @@ -3,7 +3,7 @@ Description: -Let r be the remainder when (a−1)^n + (a+1)^n is divided by a^2. +Let r be the remainder when (a-1)^n + (a+1)^n is divided by a^2. For example, if a = 7 and n = 3, then r = 42: 6^3 + 8^3 = 728 ≡ 42 mod 49. And as n varies, so too will r, but for a = 7 it turns out that r_max = 42. 
For 3 ≤ a ≤ 1000, find ∑ r_max. diff --git a/project_euler/problem_123/sol1.py b/project_euler/problem_123/sol1.py index 7239e13a51e9..3dd31a2e8505 100644 --- a/project_euler/problem_123/sol1.py +++ b/project_euler/problem_123/sol1.py @@ -4,7 +4,7 @@ Name: Prime square remainders Let pn be the nth prime: 2, 3, 5, 7, 11, ..., and -let r be the remainder when (pn−1)^n + (pn+1)^n is divided by pn^2. +let r be the remainder when (pn-1)^n + (pn+1)^n is divided by pn^2. For example, when n = 3, p3 = 5, and 43 + 63 = 280 ≡ 5 mod 25. The least value of n for which the remainder first exceeds 10^9 is 7037. diff --git a/project_euler/problem_135/sol1.py b/project_euler/problem_135/sol1.py index ac91fa4e2b9d..d57ace489191 100644 --- a/project_euler/problem_135/sol1.py +++ b/project_euler/problem_135/sol1.py @@ -3,9 +3,9 @@ Given the positive integers, x, y, and z, are consecutive terms of an arithmetic progression, the least value of the positive integer, n, for which the equation, -x2 − y2 − z2 = n, has exactly two solutions is n = 27: +x2 - y2 - z2 = n, has exactly two solutions is n = 27: -342 − 272 − 202 = 122 − 92 − 62 = 27 +342 - 272 - 202 = 122 - 92 - 62 = 27 It turns out that n = 1155 is the least value which has exactly ten solutions. diff --git a/project_euler/problem_144/sol1.py b/project_euler/problem_144/sol1.py index bc16bf985f41..9070455de79f 100644 --- a/project_euler/problem_144/sol1.py +++ b/project_euler/problem_144/sol1.py @@ -6,7 +6,7 @@ The specific white cell we will be considering is an ellipse with the equation 4x^2 + y^2 = 100 -The section corresponding to −0.01 ≤ x ≤ +0.01 at the top is missing, allowing the +The section corresponding to -0.01 ≤ x ≤ +0.01 at the top is missing, allowing the light to enter and exit through the hole.  
The light beam in this problem starts at the point (0.0,10.1) just outside the white @@ -20,7 +20,7 @@ the laser beam and the wall of the white cell; the blue line shows the line tangent to the ellipse at the point of incidence of the first bounce. -The slope m of the tangent line at any point (x,y) of the given ellipse is: m = −4x/y +The slope m of the tangent line at any point (x,y) of the given ellipse is: m = -4x/y The normal line is perpendicular to this tangent line at the point of incidence. diff --git a/project_euler/problem_174/sol1.py b/project_euler/problem_174/sol1.py index 33c1b158adbb..9a75e8638880 100644 --- a/project_euler/problem_174/sol1.py +++ b/project_euler/problem_174/sol1.py @@ -14,7 +14,7 @@ Let N(n) be the number of t ≤ 1000000 such that t is type L(n); for example, N(15) = 832. -What is ∑ N(n) for 1 ≤ n ≤ 10? +What is sum N(n) for 1 ≤ n ≤ 10? """ from collections import defaultdict diff --git a/pyproject.toml b/pyproject.toml index 0185f4d7b987..ff22fba81c8a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,6 +10,7 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "PLW2901", # PLW2901: Redefined loop variable -- FIX ME "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception "PT018", # Assertion should be broken down into multiple parts + "RUF001", # String contains ambiguous {}. Did you mean {}? "RUF002", # Docstring contains ambiguous {}. Did you mean {}? "RUF003", # Comment contains ambiguous {}. Did you mean {}? 
"S101", # Use of `assert` detected -- DO NOT FIX diff --git a/strings/jaro_winkler.py b/strings/jaro_winkler.py index c18f0d85d9f4..cae2068fabc1 100644 --- a/strings/jaro_winkler.py +++ b/strings/jaro_winkler.py @@ -3,7 +3,7 @@ def jaro_winkler(str1: str, str2: str) -> float: """ - Jaro–Winkler distance is a string metric measuring an edit distance between two + Jaro-Winkler distance is a string metric measuring an edit distance between two sequences. Output value is between 0.0 and 1.0. diff --git a/strings/manacher.py b/strings/manacher.py index fc8b01cd9c1c..af1b10cf81fb 100644 --- a/strings/manacher.py +++ b/strings/manacher.py @@ -5,7 +5,7 @@ def palindromic_string(input_string: str) -> str: >>> palindromic_string('ababa') 'ababa' - Manacher’s algorithm which finds Longest palindromic Substring in linear time. + Manacher's algorithm which finds Longest palindromic Substring in linear time. 1. first this convert input_string("xyx") into new_string("x|y|x") where odd positions are actual input characters. 
diff --git a/strings/prefix_function.py b/strings/prefix_function.py index 65bbe9100735..04987deef469 100644 --- a/strings/prefix_function.py +++ b/strings/prefix_function.py @@ -1,7 +1,7 @@ """ https://cp-algorithms.com/string/prefix-function.html -Prefix function Knuth–Morris–Pratt algorithm +Prefix function Knuth-Morris-Pratt algorithm Different algorithm than Knuth-Morris-Pratt pattern finding From d016fda51c08a604738e556a7ccb19e0f9c81dcb Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 22 Apr 2024 22:56:14 +0300 Subject: [PATCH 065/260] Enable ruff RUF003 rule (#11376) * Enable ruff RUF003 rule * Update pyproject.toml --------- Co-authored-by: Christian Clauss --- dynamic_programming/fast_fibonacci.py | 2 +- graphs/ant_colony_optimization_algorithms.py | 4 ++-- machine_learning/polynomial_regression.py | 2 +- pyproject.toml | 3 --- strings/credit_card_validator.py | 2 +- 5 files changed, 5 insertions(+), 8 deletions(-) diff --git a/dynamic_programming/fast_fibonacci.py b/dynamic_programming/fast_fibonacci.py index 9f956ca2f979..d04a5ac8249b 100644 --- a/dynamic_programming/fast_fibonacci.py +++ b/dynamic_programming/fast_fibonacci.py @@ -26,7 +26,7 @@ def _fib(n: int) -> tuple[int, int]: if n == 0: # (F(0), F(1)) return (0, 1) - # F(2n) = F(n)[2F(n+1) − F(n)] + # F(2n) = F(n)[2F(n+1) - F(n)] # F(2n+1) = F(n+1)^2+F(n)^2 a, b = _fib(n // 2) c = a * (b * 2 - a) diff --git a/graphs/ant_colony_optimization_algorithms.py b/graphs/ant_colony_optimization_algorithms.py index 652ad6144297..13637da44874 100644 --- a/graphs/ant_colony_optimization_algorithms.py +++ b/graphs/ant_colony_optimization_algorithms.py @@ -33,7 +33,7 @@ def main( pheromone_evaporation: float, alpha: float, beta: float, - q: float, # Pheromone system parameters Q,which is a constant + q: float, # Pheromone system parameters Q, which is a constant ) -> tuple[list[int], float]: """ Ant colony algorithm main function @@ -117,7 +117,7 @@ def pheromone_update( cities: dict[int, list[int]], 
pheromone_evaporation: float, ants_route: list[list[int]], - q: float, # Pheromone system parameters Q,which is a constant + q: float, # Pheromone system parameters Q, which is a constant best_path: list[int], best_distance: float, ) -> tuple[list[list[float]], list[int], float]: diff --git a/machine_learning/polynomial_regression.py b/machine_learning/polynomial_regression.py index 19f7dc994017..212f40bea197 100644 --- a/machine_learning/polynomial_regression.py +++ b/machine_learning/polynomial_regression.py @@ -146,7 +146,7 @@ def fit(self, x_train: np.ndarray, y_train: np.ndarray) -> None: "Design matrix is not full rank, can't compute coefficients" ) - # np.linalg.pinv() computes the Moore–Penrose pseudoinverse using SVD + # np.linalg.pinv() computes the Moore-Penrose pseudoinverse using SVD self.params = np.linalg.pinv(X) @ y_train def predict(self, data: np.ndarray) -> np.ndarray: diff --git a/pyproject.toml b/pyproject.toml index ff22fba81c8a..1134b773308e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,9 +10,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "PLW2901", # PLW2901: Redefined loop variable -- FIX ME "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception "PT018", # Assertion should be broken down into multiple parts - "RUF001", # String contains ambiguous {}. Did you mean {}? - "RUF002", # Docstring contains ambiguous {}. Did you mean {}? - "RUF003", # Comment contains ambiguous {}. Did you mean {}? 
"S101", # Use of `assert` detected -- DO NOT FIX "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME "SLF001", # Private member accessed: `_Iterator` -- FIX ME diff --git a/strings/credit_card_validator.py b/strings/credit_card_validator.py index 78bf45740a63..b8da1c745124 100644 --- a/strings/credit_card_validator.py +++ b/strings/credit_card_validator.py @@ -36,7 +36,7 @@ def luhn_validation(credit_card_number: str) -> bool: digit = int(cc_number[i]) digit *= 2 # If doubling of a number results in a two digit number - # i.e greater than 9(e.g., 6 × 2 = 12), + # i.e greater than 9(e.g., 6 x 2 = 12), # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6), # to get a single digit number. if digit > 9: From 3925b8155bebd84eababfba0f5a12e5129cfaa44 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Wed, 24 Apr 2024 07:32:25 +0300 Subject: [PATCH 066/260] Fix ARG005 per file ignore (#11383) --- machine_learning/linear_discriminant_analysis.py | 2 +- pyproject.toml | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/machine_learning/linear_discriminant_analysis.py b/machine_learning/linear_discriminant_analysis.py index 606e11f3698e..86f28aef671a 100644 --- a/machine_learning/linear_discriminant_analysis.py +++ b/machine_learning/linear_discriminant_analysis.py @@ -256,7 +256,7 @@ def valid_input( input_type: Callable[[object], num], # Usually float or int input_msg: str, err_msg: str, - condition: Callable[[num], bool] = lambda x: True, + condition: Callable[[num], bool] = lambda _: True, default: str | None = None, ) -> num: """ diff --git a/pyproject.toml b/pyproject.toml index 1134b773308e..37ebeeb9ce37 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -86,7 +86,6 @@ max-complexity = 17 # default: 10 "graphs/minimum_spanning_tree_prims.py" = ["SIM114"] "hashes/enigma_machine.py" = ["BLE001"] "machine_learning/decision_tree.py" = ["SIM114"] 
-"machine_learning/linear_discriminant_analysis.py" = ["ARG005"] "machine_learning/sequential_minimum_optimization.py" = ["SIM115"] "matrix/sherman_morrison.py" = ["SIM103", "SIM114"] "other/l*u_cache.py" = ["RUF012"] From 2d6be5fbb0be2b738d2c246138db9ccda9b6a853 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 30 Apr 2024 07:40:26 +0300 Subject: [PATCH 067/260] Enable ruff UP031 rule (#11388) --- data_structures/arrays/sudoku_solver.py | 4 ++-- neural_network/input_data.py | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/data_structures/arrays/sudoku_solver.py b/data_structures/arrays/sudoku_solver.py index 5c1cff06f9d4..a8157a520c97 100644 --- a/data_structures/arrays/sudoku_solver.py +++ b/data_structures/arrays/sudoku_solver.py @@ -150,7 +150,7 @@ def time_solve(grid): display(grid_values(grid)) if values: display(values) - print("(%.5f seconds)\n" % t) + print(f"({t:.5f} seconds)\n") return (t, solved(values)) times, results = zip(*[time_solve(grid) for grid in grids]) @@ -217,4 +217,4 @@ def shuffled(seq): start = time.monotonic() solve(puzzle) t = time.monotonic() - start - print("Solved: %.5f sec" % t) + print(f"Solved: {t:.5f} sec") diff --git a/neural_network/input_data.py b/neural_network/input_data.py index d189e3f9e0d9..f90287fe3f5b 100644 --- a/neural_network/input_data.py +++ b/neural_network/input_data.py @@ -156,7 +156,8 @@ def __init__( self._rng = np.random.default_rng(seed1 if seed is None else seed2) dtype = dtypes.as_dtype(dtype).base_dtype if dtype not in (dtypes.uint8, dtypes.float32): - raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype) + msg = f"Invalid image dtype {dtype!r}, expected uint8 or float32" + raise TypeError(msg) if fake_data: self._num_examples = 10000 self.one_hot = one_hot From a7e0b141d8eac30e8f9c4f01c3050e6cdb90f7d4 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 30 Apr 2024 06:58:03 +0200 Subject: 
[PATCH 068/260] [pre-commit.ci] pre-commit autoupdate (#11387) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/MarcoGorelli/auto-walrus: 0.3.3 → 0.3.4](https://github.com/MarcoGorelli/auto-walrus/compare/0.3.3...0.3.4) - [github.com/astral-sh/ruff-pre-commit: v0.4.1 → v0.4.2](https://github.com/astral-sh/ruff-pre-commit/compare/v0.4.1...v0.4.2) - [github.com/pre-commit/mirrors-mypy: v1.9.0 → v1.10.0](https://github.com/pre-commit/mirrors-mypy/compare/v1.9.0...v1.10.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index eedf6d939748..744efc55f41b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -11,12 +11,12 @@ repos: - id: requirements-txt-fixer - repo: https://github.com/MarcoGorelli/auto-walrus - rev: 0.3.3 + rev: 0.3.4 hooks: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.4.1 + rev: v0.4.2 hooks: - id: ruff - id: ruff-format @@ -47,7 +47,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.9.0 + rev: v1.10.0 hooks: - id: mypy args: From c026b1952f92836c58e63017f4c75e76c43448a1 Mon Sep 17 00:00:00 2001 From: Margaret <62753112+meg-1@users.noreply.github.com> Date: Wed, 1 May 2024 13:42:54 +0300 Subject: [PATCH 069/260] adding a matrix equalization algorithm (#11360) * adding a matrix equalization algorithm * Adding url for more details * Implementing suggestions --- matrix/matrix_equalization.py | 55 +++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 matrix/matrix_equalization.py diff --git a/matrix/matrix_equalization.py b/matrix/matrix_equalization.py new file mode 100644 index 000000000000..e7e76505cf63 --- /dev/null +++ b/matrix/matrix_equalization.py @@ -0,0 +1,55 @@ 
+from sys import maxsize + + +def array_equalization(vector: list[int], step_size: int) -> int: + """ + This algorithm equalizes all elements of the input vector + to a common value, by making the minimal number of + "updates" under the constraint of a step size (step_size). + + >>> array_equalization([1, 1, 6, 2, 4, 6, 5, 1, 7, 2, 2, 1, 7, 2, 2], 4) + 4 + >>> array_equalization([22, 81, 88, 71, 22, 81, 632, 81, 81, 22, 92], 2) + 5 + >>> array_equalization([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 5) + 0 + >>> array_equalization([22, 22, 22, 33, 33, 33], 2) + 2 + >>> array_equalization([1, 2, 3], 0) + Traceback (most recent call last): + ValueError: Step size must be positive and non-zero. + >>> array_equalization([1, 2, 3], -1) + Traceback (most recent call last): + ValueError: Step size must be positive and non-zero. + >>> array_equalization([1, 2, 3], 0.5) + Traceback (most recent call last): + ValueError: Step size must be an integer. + >>> array_equalization([1, 2, 3], maxsize) + 1 + """ + if step_size <= 0: + raise ValueError("Step size must be positive and non-zero.") + if not isinstance(step_size, int): + raise ValueError("Step size must be an integer.") + + unique_elements = set(vector) + min_updates = maxsize + + for element in unique_elements: + elem_index = 0 + updates = 0 + while elem_index < len(vector): + if vector[elem_index] != element: + updates += 1 + elem_index += step_size + else: + elem_index += 1 + min_updates = min(min_updates, updates) + + return min_updates + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 5131e3145dcec9e232c8e8a807ad387f4f9a3d38 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Wed, 1 May 2024 22:27:59 +0300 Subject: [PATCH 070/260] Fix some ARG002 per file ignores (#11382) * Fix some ARG002 per file ignores * Fix * updating DIRECTORY.md * Fix review issue * Fix review issue --------- Co-authored-by: MaximSmolskiy --- DIRECTORY.md | 1 + audio_filters/show_response.py | 3 ++- 
data_structures/hashing/hash_table.py | 3 +++ data_structures/hashing/quadratic_probing.py | 2 +- pyproject.toml | 3 --- 5 files changed, 7 insertions(+), 5 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index f6d6cb463faa..4a053a3f1b7f 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -773,6 +773,7 @@ * [Inverse Of Matrix](matrix/inverse_of_matrix.py) * [Largest Square Area In Matrix](matrix/largest_square_area_in_matrix.py) * [Matrix Class](matrix/matrix_class.py) + * [Matrix Equalization](matrix/matrix_equalization.py) * [Matrix Multiplication Recursion](matrix/matrix_multiplication_recursion.py) * [Matrix Operation](matrix/matrix_operation.py) * [Max Area Of Island](matrix/max_area_of_island.py) diff --git a/audio_filters/show_response.py b/audio_filters/show_response.py index 097b8152b4e6..f9c9537c047c 100644 --- a/audio_filters/show_response.py +++ b/audio_filters/show_response.py @@ -1,5 +1,6 @@ from __future__ import annotations +from abc import abstractmethod from math import pi from typing import Protocol @@ -8,6 +9,7 @@ class FilterType(Protocol): + @abstractmethod def process(self, sample: float) -> float: """ Calculate y[n] @@ -15,7 +17,6 @@ def process(self, sample: float) -> float: >>> issubclass(FilterType, Protocol) True """ - return 0.0 def get_bounds( diff --git a/data_structures/hashing/hash_table.py b/data_structures/hashing/hash_table.py index 7fe57068f6a3..40fcad9a3dab 100644 --- a/data_structures/hashing/hash_table.py +++ b/data_structures/hashing/hash_table.py @@ -1,4 +1,6 @@ #!/usr/bin/env python3 +from abc import abstractmethod + from .number_theory.prime_numbers import next_prime @@ -173,6 +175,7 @@ def _set_value(self, key, data): self.values[key] = data self._keys[key] = data + @abstractmethod def _collision_resolution(self, key, data=None): """ This method is a type of open addressing which is used for handling collision. 
diff --git a/data_structures/hashing/quadratic_probing.py b/data_structures/hashing/quadratic_probing.py index 2f3401ec8918..56d4926eee9b 100644 --- a/data_structures/hashing/quadratic_probing.py +++ b/data_structures/hashing/quadratic_probing.py @@ -11,7 +11,7 @@ class QuadraticProbing(HashTable): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - def _collision_resolution(self, key, data=None): + def _collision_resolution(self, key, data=None): # noqa: ARG002 """ Quadratic probing is an open addressing scheme used for resolving collisions in hash table. diff --git a/pyproject.toml b/pyproject.toml index 37ebeeb9ce37..4c512ca896b4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -76,11 +76,8 @@ max-complexity = 17 # default: 10 [tool.ruff.lint.per-file-ignores] "arithmetic_analysis/newton_raphson.py" = ["PGH001"] -"audio_filters/show_response.py" = ["ARG002"] "data_structures/binary_tree/binary_search_tree_recursive.py" = ["BLE001"] "data_structures/binary_tree/treap.py" = ["SIM114"] -"data_structures/hashing/hash_table.py" = ["ARG002"] -"data_structures/hashing/quadratic_probing.py" = ["ARG002"] "data_structures/hashing/tests/test_hash_map.py" = ["BLE001"] "data_structures/heap/max_heap.py" = ["SIM114"] "graphs/minimum_spanning_tree_prims.py" = ["SIM114"] From ea53051576a9c5e7398ca2ae6a0823ca54ac3947 Mon Sep 17 00:00:00 2001 From: Xuehai Pan Date: Fri, 3 May 2024 00:43:59 +0800 Subject: [PATCH 071/260] Use `spawn` start method in multiprocessing programs (#11391) * Use `spawn` start method in multiprocessing programs * Set `spawn` start method in doctest * Use `with` statement for locks * Pass multiprocessing context explicitly --- sorts/odd_even_transposition_parallel.py | 79 ++++++++++++++++-------- 1 file changed, 53 insertions(+), 26 deletions(-) diff --git a/sorts/odd_even_transposition_parallel.py b/sorts/odd_even_transposition_parallel.py index 9d2bcdbd7576..5d4e09b211c0 100644 --- a/sorts/odd_even_transposition_parallel.py +++ 
b/sorts/odd_even_transposition_parallel.py @@ -11,11 +11,11 @@ synchronization could be used. """ -from multiprocessing import Lock, Pipe, Process +import multiprocessing as mp # lock used to ensure that two processes do not access a pipe at the same time # NOTE This breaks testing on build runner. May work better locally -# process_lock = Lock() +# process_lock = mp.Lock() """ The function run by the processes that sorts the list @@ -29,8 +29,17 @@ """ -def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe): - process_lock = Lock() +def oe_process( + position, + value, + l_send, + r_send, + lr_cv, + rr_cv, + result_pipe, + multiprocessing_context, +): + process_lock = multiprocessing_context.Lock() # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to @@ -38,27 +47,23 @@ def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe): for i in range(10): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor - process_lock.acquire() - r_send[1].send(value) - process_lock.release() + with process_lock: + r_send[1].send(value) # receive your right neighbor's value - process_lock.acquire() - temp = rr_cv[0].recv() - process_lock.release() + with process_lock: + temp = rr_cv[0].recv() # take the lower value since you are on the left value = min(value, temp) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor - process_lock.acquire() - l_send[1].send(value) - process_lock.release() + with process_lock: + l_send[1].send(value) # receive your left neighbor's value - process_lock.acquire() - temp = lr_cv[0].recv() - process_lock.release() + with process_lock: + temp = lr_cv[0].recv() # take the higher value since you are on the right value = max(value, temp) @@ -94,39 +99,60 @@ def odd_even_transposition(arr): >>> odd_even_transposition(unsorted_list) == sorted(unsorted_list + [1]) 
False """ + # spawn method is considered safer than fork + multiprocessing_context = mp.get_context("spawn") + process_array_ = [] result_pipe = [] # initialize the list of pipes where the values will be retrieved for _ in arr: - result_pipe.append(Pipe()) + result_pipe.append(multiprocessing_context.Pipe()) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop - temp_rs = Pipe() - temp_rr = Pipe() + temp_rs = multiprocessing_context.Pipe() + temp_rr = multiprocessing_context.Pipe() process_array_.append( - Process( + multiprocessing_context.Process( target=oe_process, - args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]), + args=( + 0, + arr[0], + None, + temp_rs, + None, + temp_rr, + result_pipe[0], + multiprocessing_context, + ), ) ) temp_lr = temp_rs temp_ls = temp_rr for i in range(1, len(arr) - 1): - temp_rs = Pipe() - temp_rr = Pipe() + temp_rs = multiprocessing_context.Pipe() + temp_rr = multiprocessing_context.Pipe() process_array_.append( - Process( + multiprocessing_context.Process( target=oe_process, - args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]), + args=( + i, + arr[i], + temp_ls, + temp_rs, + temp_lr, + temp_rr, + result_pipe[i], + multiprocessing_context, + ), ) ) temp_lr = temp_rs temp_ls = temp_rr process_array_.append( - Process( + multiprocessing_context.Process( target=oe_process, args=( len(arr) - 1, @@ -136,6 +162,7 @@ def odd_even_transposition(arr): temp_lr, None, result_pipe[len(arr) - 1], + multiprocessing_context, ), ) ) From 1868c0b6375188a9034478a2711e40c343d00c2e Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 6 May 2024 21:38:58 +0200 Subject: [PATCH 072/260] [pre-commit.ci] pre-commit autoupdate (#11394) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.4.2 → 
v0.4.3](https://github.com/astral-sh/ruff-pre-commit/compare/v0.4.2...v0.4.3) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 744efc55f41b..210b7494036e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.4.2 + rev: v0.4.3 hooks: - id: ruff - id: ruff-format From c599f6c9107a1b09c08ddce17053d7b5d0895a83 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Fri, 10 May 2024 22:59:53 +0300 Subject: [PATCH 073/260] Fix some SIM114 per file ignores (#11395) * updating DIRECTORY.md * Fix some SIM114 per file ignores * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix review issue --------- Co-authored-by: MaximSmolskiy Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- data_structures/binary_tree/treap.py | 4 +--- data_structures/heap/max_heap.py | 2 +- graphs/minimum_spanning_tree_prims.py | 2 +- machine_learning/decision_tree.py | 2 +- matrix/sherman_morrison.py | 2 +- pyproject.toml | 6 +----- 6 files changed, 6 insertions(+), 12 deletions(-) diff --git a/data_structures/binary_tree/treap.py b/data_structures/binary_tree/treap.py index e7ddf931b83a..3114c6fa1c26 100644 --- a/data_structures/binary_tree/treap.py +++ b/data_structures/binary_tree/treap.py @@ -39,9 +39,7 @@ def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]: Left tree contains all values less than split value. 
Right tree contains all values greater or equal, than split value """ - if root is None: # None tree is split into 2 Nones - return None, None - elif root.value is None: + if root is None or root.value is None: # None tree is split into 2 Nones return None, None elif value < root.value: """ diff --git a/data_structures/heap/max_heap.py b/data_structures/heap/max_heap.py index 5a9f9cf88433..589f2595a8da 100644 --- a/data_structures/heap/max_heap.py +++ b/data_structures/heap/max_heap.py @@ -38,7 +38,7 @@ def insert(self, value: int) -> None: def __swap_down(self, i: int) -> None: """Swap the element down""" while self.__size >= 2 * i: - if 2 * i + 1 > self.__size: + if 2 * i + 1 > self.__size: # noqa: SIM114 bigger_child = 2 * i elif self.__heap[2 * i] > self.__heap[2 * i + 1]: bigger_child = 2 * i diff --git a/graphs/minimum_spanning_tree_prims.py b/graphs/minimum_spanning_tree_prims.py index 90c9f4c91e86..d0b45d7ef139 100644 --- a/graphs/minimum_spanning_tree_prims.py +++ b/graphs/minimum_spanning_tree_prims.py @@ -16,7 +16,7 @@ def top_to_bottom(self, heap, start, size, positions): if start > size // 2 - 1: return else: - if 2 * start + 2 >= size: + if 2 * start + 2 >= size: # noqa: SIM114 smallest_child = 2 * start + 1 elif heap[2 * start + 1] < heap[2 * start + 2]: smallest_child = 2 * start + 1 diff --git a/machine_learning/decision_tree.py b/machine_learning/decision_tree.py index e48905eeac6a..d0bd6ab0b555 100644 --- a/machine_learning/decision_tree.py +++ b/machine_learning/decision_tree.py @@ -105,7 +105,7 @@ def train(self, x, y): the predictor """ for i in range(len(x)): - if len(x[:i]) < self.min_leaf_size: + if len(x[:i]) < self.min_leaf_size: # noqa: SIM114 continue elif len(x[i:]) < self.min_leaf_size: continue diff --git a/matrix/sherman_morrison.py b/matrix/sherman_morrison.py index 7f10ae706e85..e2a09c1d0070 100644 --- a/matrix/sherman_morrison.py +++ b/matrix/sherman_morrison.py @@ -65,7 +65,7 @@ def validate_indices(self, loc: tuple[int, int]) 
-> bool: >>> a.validate_indices((0, 0)) True """ - if not (isinstance(loc, (list, tuple)) and len(loc) == 2): + if not (isinstance(loc, (list, tuple)) and len(loc) == 2): # noqa: SIM114 return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False diff --git a/pyproject.toml b/pyproject.toml index 4c512ca896b4..c07bc9c48e51 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -77,14 +77,10 @@ max-complexity = 17 # default: 10 [tool.ruff.lint.per-file-ignores] "arithmetic_analysis/newton_raphson.py" = ["PGH001"] "data_structures/binary_tree/binary_search_tree_recursive.py" = ["BLE001"] -"data_structures/binary_tree/treap.py" = ["SIM114"] "data_structures/hashing/tests/test_hash_map.py" = ["BLE001"] -"data_structures/heap/max_heap.py" = ["SIM114"] -"graphs/minimum_spanning_tree_prims.py" = ["SIM114"] "hashes/enigma_machine.py" = ["BLE001"] -"machine_learning/decision_tree.py" = ["SIM114"] "machine_learning/sequential_minimum_optimization.py" = ["SIM115"] -"matrix/sherman_morrison.py" = ["SIM103", "SIM114"] +"matrix/sherman_morrison.py" = ["SIM103"] "other/l*u_cache.py" = ["RUF012"] "physics/newtons_second_law_of_motion.py" = ["BLE001"] "project_euler/problem_099/sol1.py" = ["SIM115"] From 1f368da06d361e3d1415a2ec7d8857068b746586 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 14 May 2024 13:38:55 +0200 Subject: [PATCH 074/260] [pre-commit.ci] pre-commit autoupdate (#11402) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.4.3 → v0.4.4](https://github.com/astral-sh/ruff-pre-commit/compare/v0.4.3...v0.4.4) - [github.com/tox-dev/pyproject-fmt: 1.8.0 → 2.0.4](https://github.com/tox-dev/pyproject-fmt/compare/1.8.0...2.0.4) - [github.com/abravalheri/validate-pyproject: v0.16 → 
v0.17](https://github.com/abravalheri/validate-pyproject/compare/v0.16...v0.17) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +- pyproject.toml | 184 +++++++++++++++++++++++----------------- 2 files changed, 107 insertions(+), 83 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 210b7494036e..521769096369 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.4.3 + rev: v0.4.4 hooks: - id: ruff - id: ruff-format @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "1.8.0" + rev: "2.0.4" hooks: - id: pyproject-fmt @@ -42,7 +42,7 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.16 + rev: v0.17 hooks: - id: validate-pyproject diff --git a/pyproject.toml b/pyproject.toml index c07bc9c48e51..89ed22bc6ab1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,61 +1,61 @@ [tool.ruff] -lint.ignore = [ # `ruff rule S101` for a description of that rule - "B904", # Within an `except` clause, raise exceptions with `raise ... 
from err` -- FIX ME - "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME - "EM101", # Exception must not use a string literal, assign to variable first - "EXE001", # Shebang is present but file is not executable -- DO NOT FIX - "G004", # Logging statement uses f-string - "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey - "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX - "PLW2901", # PLW2901: Redefined loop variable -- FIX ME - "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception - "PT018", # Assertion should be broken down into multiple parts - "S101", # Use of `assert` detected -- DO NOT FIX - "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME - "SLF001", # Private member accessed: `_Iterator` -- FIX ME - "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX -] -lint.select = [ # https://beta.ruff.rs/docs/rules - "A", # flake8-builtins - "ARG", # flake8-unused-arguments - "ASYNC", # flake8-async - "B", # flake8-bugbear - "BLE", # flake8-blind-except - "C4", # flake8-comprehensions - "C90", # McCabe cyclomatic complexity - "DJ", # flake8-django - "DTZ", # flake8-datetimez - "E", # pycodestyle - "EM", # flake8-errmsg - "EXE", # flake8-executable - "F", # Pyflakes - "FA", # flake8-future-annotations - "FLY", # flynt - "G", # flake8-logging-format - "I", # isort - "ICN", # flake8-import-conventions - "INP", # flake8-no-pep420 - "INT", # flake8-gettext - "ISC", # flake8-implicit-str-concat - "N", # pep8-naming - "NPY", # NumPy-specific rules - "PD", # pandas-vet - "PGH", # pygrep-hooks - "PIE", # flake8-pie - "PL", # Pylint - "PT", # flake8-pytest-style - "PYI", # flake8-pyi - "RSE", # flake8-raise - "RUF", # Ruff-specific rules - "S", # flake8-bandit - "SIM", # flake8-simplify - "SLF", # flake8-self - "T10", # flake8-debugger - "TD", # flake8-todos - "TID", # flake8-tidy-imports 
- "UP", # pyupgrade - "W", # pycodestyle - "YTT", # flake8-2020 +lint.ignore = [ # `ruff rule S101` for a description of that rule + "B904", # Within an `except` clause, raise exceptions with `raise ... from err` -- FIX ME + "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME + "EM101", # Exception must not use a string literal, assign to variable first + "EXE001", # Shebang is present but file is not executable -- DO NOT FIX + "G004", # Logging statement uses f-string + "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey + "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX + "PLW2901", # PLW2901: Redefined loop variable -- FIX ME + "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception + "PT018", # Assertion should be broken down into multiple parts + "S101", # Use of `assert` detected -- DO NOT FIX + "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME + "SLF001", # Private member accessed: `_Iterator` -- FIX ME + "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX +] +lint.select = [ # https://beta.ruff.rs/docs/rules + "A", # flake8-builtins + "ARG", # flake8-unused-arguments + "ASYNC", # flake8-async + "B", # flake8-bugbear + "BLE", # flake8-blind-except + "C4", # flake8-comprehensions + "C90", # McCabe cyclomatic complexity + "DJ", # flake8-django + "DTZ", # flake8-datetimez + "E", # pycodestyle + "EM", # flake8-errmsg + "EXE", # flake8-executable + "F", # Pyflakes + "FA", # flake8-future-annotations + "FLY", # flynt + "G", # flake8-logging-format + "I", # isort + "ICN", # flake8-import-conventions + "INP", # flake8-no-pep420 + "INT", # flake8-gettext + "ISC", # flake8-implicit-str-concat + "N", # pep8-naming + "NPY", # NumPy-specific rules + "PD", # pandas-vet + "PGH", # pygrep-hooks + "PIE", # flake8-pie + "PL", # Pylint + "PT", # flake8-pytest-style + "PYI", # flake8-pyi + 
"RSE", # flake8-raise + "RUF", # Ruff-specific rules + "S", # flake8-bandit + "SIM", # flake8-simplify + "SLF", # flake8-self + "T10", # flake8-debugger + "TD", # flake8-todos + "TID", # flake8-tidy-imports + "UP", # pyupgrade + "W", # pycodestyle + "YTT", # flake8-2020 # "ANN", # flake8-annotations # FIX ME? # "COM", # flake8-commas # "D", # pydocstyle -- FIX ME? @@ -71,27 +71,51 @@ lint.select = [ # https://beta.ruff.rs/docs/rules output-format = "full" target-version = "py312" -[tool.ruff.lint.mccabe] # DO NOT INCREASE THIS VALUE -max-complexity = 17 # default: 10 +[tool.ruff.lint.mccabe] # DO NOT INCREASE THIS VALUE +max-complexity = 17 # default: 10 [tool.ruff.lint.per-file-ignores] -"arithmetic_analysis/newton_raphson.py" = ["PGH001"] -"data_structures/binary_tree/binary_search_tree_recursive.py" = ["BLE001"] -"data_structures/hashing/tests/test_hash_map.py" = ["BLE001"] -"hashes/enigma_machine.py" = ["BLE001"] -"machine_learning/sequential_minimum_optimization.py" = ["SIM115"] -"matrix/sherman_morrison.py" = ["SIM103"] -"other/l*u_cache.py" = ["RUF012"] -"physics/newtons_second_law_of_motion.py" = ["BLE001"] -"project_euler/problem_099/sol1.py" = ["SIM115"] -"sorts/external_sort.py" = ["SIM115"] +"arithmetic_analysis/newton_raphson.py" = [ + "PGH001", +] +"data_structures/binary_tree/binary_search_tree_recursive.py" = [ + "BLE001", +] +"data_structures/hashing/tests/test_hash_map.py" = [ + "BLE001", +] +"hashes/enigma_machine.py" = [ + "BLE001", +] +"machine_learning/sequential_minimum_optimization.py" = [ + "SIM115", +] +"matrix/sherman_morrison.py" = [ + "SIM103", +] +"other/l*u_cache.py" = [ + "RUF012", +] +"physics/newtons_second_law_of_motion.py" = [ + "BLE001", +] +"project_euler/problem_099/sol1.py" = [ + "SIM115", +] +"sorts/external_sort.py" = [ + "SIM115", +] -[tool.ruff.lint.pylint] # DO NOT INCREASE THESE VALUES -allow-magic-value-types = ["float", "int", "str"] -max-args = 10 # default: 5 -max-branches = 20 # default: 12 -max-returns = 8 # 
default: 6 -max-statements = 88 # default: 50 +[tool.ruff.lint.pylint] # DO NOT INCREASE THESE VALUES +allow-magic-value-types = [ + "float", + "int", + "str", +] +max-args = 10 # default: 5 +max-branches = 20 # default: 12 +max-returns = 8 # default: 6 +max-statements = 88 # default: 50 [tool.codespell] ignore-words-list = "3rt,ans,bitap,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,toi,zar" @@ -99,17 +123,17 @@ skip = "./.*,*.json,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_n [tool.pytest.ini_options] markers = [ - "mat_ops: mark a test as utilizing matrix operations.", + "mat_ops: mark a test as utilizing matrix operations.", ] addopts = [ - "--durations=10", - "--doctest-modules", - "--showlocals", + "--durations=10", + "--doctest-modules", + "--showlocals", ] [tool.coverage.report] omit = [ ".env/*", - "project_euler/*" + "project_euler/*", ] sort = "Cover" From 0139143abb286027bd3954f3862aab4558642019 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 20 May 2024 22:44:57 +0200 Subject: [PATCH 075/260] [pre-commit.ci] pre-commit autoupdate (#11408) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/tox-dev/pyproject-fmt: 2.0.4 → 2.1.1](https://github.com/tox-dev/pyproject-fmt/compare/2.0.4...2.1.1) - [github.com/abravalheri/validate-pyproject: v0.17 → v0.18](https://github.com/abravalheri/validate-pyproject/compare/v0.17...v0.18) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 +-- pyproject.toml | 79 ++++++++++++++++++++--------------------- 2 files changed, 40 insertions(+), 43 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 
521769096369..b63457ca85e3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "2.0.4" + rev: "2.1.1" hooks: - id: pyproject-fmt @@ -42,7 +42,7 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.17 + rev: v0.18 hooks: - id: validate-pyproject diff --git a/pyproject.toml b/pyproject.toml index 89ed22bc6ab1..5b8ce4e72dfd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,21 +1,9 @@ [tool.ruff] -lint.ignore = [ # `ruff rule S101` for a description of that rule - "B904", # Within an `except` clause, raise exceptions with `raise ... from err` -- FIX ME - "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME - "EM101", # Exception must not use a string literal, assign to variable first - "EXE001", # Shebang is present but file is not executable -- DO NOT FIX - "G004", # Logging statement uses f-string - "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey - "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX - "PLW2901", # PLW2901: Redefined loop variable -- FIX ME - "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception - "PT018", # Assertion should be broken down into multiple parts - "S101", # Use of `assert` detected -- DO NOT FIX - "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME - "SLF001", # Private member accessed: `_Iterator` -- FIX ME - "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX -] -lint.select = [ # https://beta.ruff.rs/docs/rules +target-version = "py312" + +output-format = "full" +lint.select = [ + # https://beta.ruff.rs/docs/rules "A", # flake8-builtins "ARG", # flake8-unused-arguments "ASYNC", # flake8-async @@ -68,54 +56,63 @@ lint.select = [ # https://beta.ruff.rs/docs/rules # "TCH", # flake8-type-checking # 
"TRY", # tryceratops ] -output-format = "full" -target-version = "py312" - -[tool.ruff.lint.mccabe] # DO NOT INCREASE THIS VALUE -max-complexity = 17 # default: 10 - -[tool.ruff.lint.per-file-ignores] -"arithmetic_analysis/newton_raphson.py" = [ +lint.per-file-ignores."arithmetic_analysis/newton_raphson.py" = [ "PGH001", ] -"data_structures/binary_tree/binary_search_tree_recursive.py" = [ +lint.per-file-ignores."data_structures/binary_tree/binary_search_tree_recursive.py" = [ "BLE001", ] -"data_structures/hashing/tests/test_hash_map.py" = [ +lint.per-file-ignores."data_structures/hashing/tests/test_hash_map.py" = [ "BLE001", ] -"hashes/enigma_machine.py" = [ +lint.per-file-ignores."hashes/enigma_machine.py" = [ "BLE001", ] -"machine_learning/sequential_minimum_optimization.py" = [ +lint.per-file-ignores."machine_learning/sequential_minimum_optimization.py" = [ "SIM115", ] -"matrix/sherman_morrison.py" = [ +lint.per-file-ignores."matrix/sherman_morrison.py" = [ "SIM103", ] -"other/l*u_cache.py" = [ +lint.per-file-ignores."other/l*u_cache.py" = [ "RUF012", ] -"physics/newtons_second_law_of_motion.py" = [ +lint.per-file-ignores."physics/newtons_second_law_of_motion.py" = [ "BLE001", ] -"project_euler/problem_099/sol1.py" = [ +lint.per-file-ignores."project_euler/problem_099/sol1.py" = [ "SIM115", ] -"sorts/external_sort.py" = [ +lint.per-file-ignores."sorts/external_sort.py" = [ "SIM115", ] - -[tool.ruff.lint.pylint] # DO NOT INCREASE THESE VALUES -allow-magic-value-types = [ +lint.mccabe.max-complexity = 17 # default: 10 +lint.pylint.allow-magic-value-types = [ "float", "int", "str", ] -max-args = 10 # default: 5 -max-branches = 20 # default: 12 -max-returns = 8 # default: 6 -max-statements = 88 # default: 50 +lint.pylint.max-args = 10 # default: 5 +lint.pylint.max-branches = 20 # default: 12 +lint.pylint.max-returns = 8 # default: 6 +lint.pylint.max-statements = 88 # default: 50 +lint.ignore = [ + # `ruff rule S101` for a description of that rule + "B904", # Within 
an `except` clause, raise exceptions with `raise ... from err` -- FIX ME + "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME + "EM101", # Exception must not use a string literal, assign to variable first + "EXE001", # Shebang is present but file is not executable -- DO NOT FIX + "G004", # Logging statement uses f-string + "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey + "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX + "PLW2901", # PLW2901: Redefined loop variable -- FIX ME + "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception + "PT018", # Assertion should be broken down into multiple parts + "S101", # Use of `assert` detected -- DO NOT FIX + "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME + "SLF001", # Private member accessed: `_Iterator` -- FIX ME + "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX +] [tool.codespell] ignore-words-list = "3rt,ans,bitap,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,toi,zar" From 82aa909db7736d8022532bee4dc381072d8c5b1f Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 27 May 2024 21:56:48 -0400 Subject: [PATCH 076/260] [pre-commit.ci] pre-commit autoupdate (#11417) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.4.4 → v0.4.5](https://github.com/astral-sh/ruff-pre-commit/compare/v0.4.4...v0.4.5) - [github.com/codespell-project/codespell: v2.2.6 → v2.3.0](https://github.com/codespell-project/codespell/compare/v2.2.6...v2.3.0) - [github.com/tox-dev/pyproject-fmt: 2.1.1 → 2.1.3](https://github.com/tox-dev/pyproject-fmt/compare/2.1.1...2.1.3) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more 
information, see https://pre-commit.ci * iterable * at most --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 6 +++--- graphs/dijkstra_algorithm.py | 2 +- project_euler/problem_047/sol1.py | 2 +- pyproject.toml | 35 ++++++++++++++++--------------- 4 files changed, 23 insertions(+), 22 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b63457ca85e3..43bf547dec6e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,20 +16,20 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.4.4 + rev: v0.4.5 hooks: - id: ruff - id: ruff-format - repo: https://github.com/codespell-project/codespell - rev: v2.2.6 + rev: v2.3.0 hooks: - id: codespell additional_dependencies: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "2.1.1" + rev: "2.1.3" hooks: - id: pyproject-fmt diff --git a/graphs/dijkstra_algorithm.py b/graphs/dijkstra_algorithm.py index 2efa2cb634ff..51412b790bac 100644 --- a/graphs/dijkstra_algorithm.py +++ b/graphs/dijkstra_algorithm.py @@ -215,7 +215,7 @@ def decrease_key(self, tup, new_d): [(5, 'A'), (15, 'B')] """ idx = self.pos[tup[1]] - # assuming the new_d is atmost old_d + # assuming the new_d is at most old_d self.array[idx] = (new_d, tup[1]) while idx > 0 and self.array[self.par(idx)][0] > self.array[idx][0]: self.swap(idx, self.par(idx)) diff --git a/project_euler/problem_047/sol1.py b/project_euler/problem_047/sol1.py index c9c44a9832dd..4ecd4f4b44c1 100644 --- a/project_euler/problem_047/sol1.py +++ b/project_euler/problem_047/sol1.py @@ -58,7 +58,7 @@ def upf_len(num: int) -> int: def equality(iterable: list) -> bool: """ - Check equality of ALL elements in an interable. 
+ Check equality of ALL elements in an iterable >>> equality([1, 2, 3, 4]) False >>> equality([2, 2, 2, 2]) diff --git a/pyproject.toml b/pyproject.toml index 5b8ce4e72dfd..429f4fab9a52 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -56,6 +56,24 @@ lint.select = [ # "TCH", # flake8-type-checking # "TRY", # tryceratops ] +lint.ignore = [ + # `ruff rule S101` for a description of that rule + "B904", # Within an `except` clause, raise exceptions with `raise ... from err` -- FIX ME + "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME + "EM101", # Exception must not use a string literal, assign to variable first + "EXE001", # Shebang is present but file is not executable -- DO NOT FIX + "G004", # Logging statement uses f-string + "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey + "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX + "PLW2901", # PLW2901: Redefined loop variable -- FIX ME + "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception + "PT018", # Assertion should be broken down into multiple parts + "S101", # Use of `assert` detected -- DO NOT FIX + "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME + "SLF001", # Private member accessed: `_Iterator` -- FIX ME + "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX +] + lint.per-file-ignores."arithmetic_analysis/newton_raphson.py" = [ "PGH001", ] @@ -96,23 +114,6 @@ lint.pylint.max-args = 10 # default: 5 lint.pylint.max-branches = 20 # default: 12 lint.pylint.max-returns = 8 # default: 6 lint.pylint.max-statements = 88 # default: 50 -lint.ignore = [ - # `ruff rule S101` for a description of that rule - "B904", # Within an `except` clause, raise exceptions with `raise ... 
from err` -- FIX ME - "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME - "EM101", # Exception must not use a string literal, assign to variable first - "EXE001", # Shebang is present but file is not executable -- DO NOT FIX - "G004", # Logging statement uses f-string - "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey - "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX - "PLW2901", # PLW2901: Redefined loop variable -- FIX ME - "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception - "PT018", # Assertion should be broken down into multiple parts - "S101", # Use of `assert` detected -- DO NOT FIX - "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME - "SLF001", # Private member accessed: `_Iterator` -- FIX ME - "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX -] [tool.codespell] ignore-words-list = "3rt,ans,bitap,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,toi,zar" From b8afb214f8c8d185dc42dafb9676becf512ca7fa Mon Sep 17 00:00:00 2001 From: Marco-campione-github <80974790+Marco-campione-github@users.noreply.github.com> Date: Fri, 31 May 2024 10:11:09 +0200 Subject: [PATCH 077/260] Changed the N to self.N in show_data in segment_tree.py (#11276) --- data_structures/binary_tree/segment_tree.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data_structures/binary_tree/segment_tree.py b/data_structures/binary_tree/segment_tree.py index c7069b3f6069..084fcf84955d 100644 --- a/data_structures/binary_tree/segment_tree.py +++ b/data_structures/binary_tree/segment_tree.py @@ -98,7 +98,7 @@ def query_recursive(self, idx, left, right, a, b): def show_data(self): show_list = [] - for i in range(1, N + 1): + for i in range(1, self.N + 1): show_list += [self.query(i, i)] print(show_list) From 70bd06db4642a2323ff397b041d40bc95ed6a5bf Mon Sep 
17 00:00:00 2001 From: Pedram_Mohajer <48964282+pedram-mohajer@users.noreply.github.com> Date: Sat, 1 Jun 2024 05:09:03 -0400 Subject: [PATCH 078/260] add doctest/document to actual_power and document to power (#11187) * Update power.py * Update divide_and_conquer/power.py --------- Co-authored-by: Tianyi Zheng --- divide_and_conquer/power.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/divide_and_conquer/power.py b/divide_and_conquer/power.py index f2e023afd536..faf6a3476d40 100644 --- a/divide_and_conquer/power.py +++ b/divide_and_conquer/power.py @@ -2,6 +2,20 @@ def actual_power(a: int, b: int): """ Function using divide and conquer to calculate a^b. It only works for integer a,b. + + :param a: The base of the power operation, an integer. + :param b: The exponent of the power operation, a non-negative integer. + :return: The result of a^b. + + Examples: + >>> actual_power(3, 2) + 9 + >>> actual_power(5, 3) + 125 + >>> actual_power(2, 5) + 32 + >>> actual_power(7, 0) + 1 """ if b == 0: return 1 @@ -13,6 +27,10 @@ def actual_power(a: int, b: int): def power(a: int, b: int) -> float: """ + :param a: The base (integer). + :param b: The exponent (integer). + :return: The result of a^b, as a float for negative exponents. + >>> power(4,6) 4096 >>> power(2,3) From 723cf9c42839c47e9e6fb83362a7391177355505 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sat, 1 Jun 2024 02:17:07 -0700 Subject: [PATCH 079/260] Remove duplicate implementation of median of two arrays algorithm (#11420) * Remove duplicate implementation of median of two arrays algorithm Remove maths/median_of_two_arrays.py because the repo has two implementations of this algorithm, with data_structures/arrays/median_two_array.py being the other. Even though maths/median_of_two_arrays.py is the older implementation, the newer implementation is better documented, has better error handling, and is already located in a more appropriate directory. 
* updating DIRECTORY.md --------- Co-authored-by: tianyizheng02 --- DIRECTORY.md | 1 - maths/median_of_two_arrays.py | 33 --------------------------------- 2 files changed, 34 deletions(-) delete mode 100644 maths/median_of_two_arrays.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 4a053a3f1b7f..2094fc3a980e 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -661,7 +661,6 @@ * [Manhattan Distance](maths/manhattan_distance.py) * [Matrix Exponentiation](maths/matrix_exponentiation.py) * [Max Sum Sliding Window](maths/max_sum_sliding_window.py) - * [Median Of Two Arrays](maths/median_of_two_arrays.py) * [Minkowski Distance](maths/minkowski_distance.py) * [Mobius Function](maths/mobius_function.py) * [Modular Division](maths/modular_division.py) diff --git a/maths/median_of_two_arrays.py b/maths/median_of_two_arrays.py deleted file mode 100644 index 55aa587a9c4b..000000000000 --- a/maths/median_of_two_arrays.py +++ /dev/null @@ -1,33 +0,0 @@ -from __future__ import annotations - - -def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float: - """ - >>> median_of_two_arrays([1, 2], [3]) - 2 - >>> median_of_two_arrays([0, -1.1], [2.5, 1]) - 0.5 - >>> median_of_two_arrays([], [2.5, 1]) - 1.75 - >>> median_of_two_arrays([], [0]) - 0 - >>> median_of_two_arrays([], []) - Traceback (most recent call last): - ... 
- IndexError: list index out of range - """ - all_numbers = sorted(nums1 + nums2) - div, mod = divmod(len(all_numbers), 2) - if mod == 1: - return all_numbers[div] - else: - return (all_numbers[div] + all_numbers[div - 1]) / 2 - - -if __name__ == "__main__": - import doctest - - doctest.testmod() - array_1 = [float(x) for x in input("Enter the elements of first array: ").split()] - array_2 = [float(x) for x in input("Enter the elements of second array: ").split()] - print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}") From edee8e644b09a21a1f70d3a59d57feed51c74004 Mon Sep 17 00:00:00 2001 From: Vishal Kumar Gupta Date: Sun, 2 Jun 2024 02:41:40 +0100 Subject: [PATCH 080/260] use format to remove '0b' (#11307) * use format to remove '0b' * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: error message for float input --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- bit_manipulation/binary_and_operator.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bit_manipulation/binary_and_operator.py b/bit_manipulation/binary_and_operator.py index 36f6c668d9b3..f33b8b1c0ab4 100644 --- a/bit_manipulation/binary_and_operator.py +++ b/bit_manipulation/binary_and_operator.py @@ -26,7 +26,7 @@ def binary_and(a: int, b: int) -> str: >>> binary_and(0, 1.1) Traceback (most recent call last): ... - TypeError: 'float' object cannot be interpreted as an integer + ValueError: Unknown format code 'b' for object of type 'float' >>> binary_and("0", "1") Traceback (most recent call last): ... 
@@ -35,8 +35,8 @@ def binary_and(a: int, b: int) -> str: if a < 0 or b < 0: raise ValueError("the value of both inputs must be positive") - a_binary = str(bin(a))[2:] # remove the leading "0b" - b_binary = str(bin(b))[2:] # remove the leading "0b" + a_binary = format(a, "b") + b_binary = format(b, "b") max_len = max(len(a_binary), len(b_binary)) From 2f1704dae579295ea2f47584ef80b4b321a284d7 Mon Sep 17 00:00:00 2001 From: Mandeep Singh <135956602+MannCode@users.noreply.github.com> Date: Sun, 2 Jun 2024 18:27:35 -0700 Subject: [PATCH 081/260] issue #11150 Ensure explicit column selection and data type setting in data reading process. (#11302) * issue #11150 Ensure explicit column selection and data type setting in data reading process. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- machine_learning/sequential_minimum_optimization.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index 3abdd6ccbed8..2ebdeb764a80 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -463,7 +463,11 @@ def test_cancel_data(): with open(r"cancel_data.csv", "w") as f: f.write(content) - data = pd.read_csv(r"cancel_data.csv", header=None) + data = pd.read_csv( + "cancel_data.csv", + header=None, + dtype={0: str}, # Assuming the first column contains string data + ) # 1: pre-processing data del data[data.columns.tolist()[0]] From ffaa976f6c5a5de30e284ae2fc8122f40cd3fa6a Mon Sep 17 00:00:00 2001 From: Harsh buddhdev Date: Sun, 2 Jun 2024 23:00:26 -0400 Subject: [PATCH 082/260] Fixes #9943 (#10252) * added doctest for all_permutations.py * added doctest for all_subsequences.py * added doctest for all_subsequences.py * [pre-commit.ci] auto 
fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * doctest added * updated * Update backtracking/all_subsequences.py --------- Co-authored-by: Harsh Buddhdev Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- backtracking/all_permutations.py | 36 ++++++++++++++++++++++ backtracking/all_subsequences.py | 52 +++++++++++++++++++++++++++++++- 2 files changed, 87 insertions(+), 1 deletion(-) diff --git a/backtracking/all_permutations.py b/backtracking/all_permutations.py index c483cd62c99b..f376e6fa0945 100644 --- a/backtracking/all_permutations.py +++ b/backtracking/all_permutations.py @@ -23,6 +23,42 @@ def create_state_space_tree( Creates a state space tree to iterate through each branch using DFS. We know that each state has exactly len(sequence) - index children. It terminates when it reaches the end of the given sequence. + + :param sequence: The input sequence for which permutations are generated. + :param current_sequence: The current permutation being built. + :param index: The current index in the sequence. + :param index_used: list to track which elements are used in permutation. 
+ + Example 1: + >>> sequence = [1, 2, 3] + >>> current_sequence = [] + >>> index_used = [False, False, False] + >>> create_state_space_tree(sequence, current_sequence, 0, index_used) + [1, 2, 3] + [1, 3, 2] + [2, 1, 3] + [2, 3, 1] + [3, 1, 2] + [3, 2, 1] + + Example 2: + >>> sequence = ["A", "B", "C"] + >>> current_sequence = [] + >>> index_used = [False, False, False] + >>> create_state_space_tree(sequence, current_sequence, 0, index_used) + ['A', 'B', 'C'] + ['A', 'C', 'B'] + ['B', 'A', 'C'] + ['B', 'C', 'A'] + ['C', 'A', 'B'] + ['C', 'B', 'A'] + + Example 3: + >>> sequence = [1] + >>> current_sequence = [] + >>> index_used = [False] + >>> create_state_space_tree(sequence, current_sequence, 0, index_used) + [1] """ if index == len(sequence): diff --git a/backtracking/all_subsequences.py b/backtracking/all_subsequences.py index 7844a829d046..18696054eb7e 100644 --- a/backtracking/all_subsequences.py +++ b/backtracking/all_subsequences.py @@ -22,6 +22,56 @@ def create_state_space_tree( Creates a state space tree to iterate through each branch using DFS. We know that each state has exactly two children. It terminates when it reaches the end of the given sequence. + + :param sequence: The input sequence for which subsequences are generated. + :param current_subsequence: The current subsequence being built. + :param index: The current index in the sequence. 
+ + Example: + >>> sequence = [3, 2, 1] + >>> current_subsequence = [] + >>> create_state_space_tree(sequence, current_subsequence, 0) + [] + [1] + [2] + [2, 1] + [3] + [3, 1] + [3, 2] + [3, 2, 1] + + >>> sequence = ["A", "B"] + >>> current_subsequence = [] + >>> create_state_space_tree(sequence, current_subsequence, 0) + [] + ['B'] + ['A'] + ['A', 'B'] + + >>> sequence = [] + >>> current_subsequence = [] + >>> create_state_space_tree(sequence, current_subsequence, 0) + [] + + >>> sequence = [1, 2, 3, 4] + >>> current_subsequence = [] + >>> create_state_space_tree(sequence, current_subsequence, 0) + [] + [4] + [3] + [3, 4] + [2] + [2, 4] + [2, 3] + [2, 3, 4] + [1] + [1, 4] + [1, 3] + [1, 3, 4] + [1, 2] + [1, 2, 4] + [1, 2, 3] + [1, 2, 3, 4] """ if index == len(sequence): @@ -35,7 +85,7 @@ def create_state_space_tree( if __name__ == "__main__": - seq: list[Any] = [3, 1, 2, 4] + seq: list[Any] = [1, 2, 3] generate_all_subsequences(seq) seq.clear() From c919579869ae9f57d6878336af6de6bc9a001c61 Mon Sep 17 00:00:00 2001 From: AtomicVar Date: Mon, 3 Jun 2024 11:15:01 +0800 Subject: [PATCH 083/260] Add KL divergence loss algorithm (#11238) * Add KL divergence loss algorithm * Apply suggestions from code review --------- Co-authored-by: Tianyi Zheng --- machine_learning/loss_functions.py | 34 ++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/machine_learning/loss_functions.py b/machine_learning/loss_functions.py index 16e5a3278b73..150035661eb7 100644 --- a/machine_learning/loss_functions.py +++ b/machine_learning/loss_functions.py @@ -629,6 +629,40 @@ def smooth_l1_loss(y_true: np.ndarray, y_pred: np.ndarray, beta: float = 1.0) -> return np.mean(loss) +def kullback_leibler_divergence(y_true: np.ndarray, y_pred: np.ndarray) -> float: + """ + Calculate the Kullback-Leibler divergence (KL divergence) loss between true labels + and predicted probabilities. 
+ + KL divergence loss quantifies dissimilarity between true labels and predicted + probabilities. It's often used in training generative models. + + KL = Σ(y_true * ln(y_true / y_pred)) + + Reference: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence + + Parameters: + - y_true: True class probabilities + - y_pred: Predicted class probabilities + + >>> true_labels = np.array([0.2, 0.3, 0.5]) + >>> predicted_probs = np.array([0.3, 0.3, 0.4]) + >>> kullback_leibler_divergence(true_labels, predicted_probs) + 0.030478754035472025 + >>> true_labels = np.array([0.2, 0.3, 0.5]) + >>> predicted_probs = np.array([0.3, 0.3, 0.4, 0.5]) + >>> kullback_leibler_divergence(true_labels, predicted_probs) + Traceback (most recent call last): + ... + ValueError: Input arrays must have the same length. + """ + if len(y_true) != len(y_pred): + raise ValueError("Input arrays must have the same length.") + + kl_loss = y_true * np.log(y_true / y_pred) + return np.sum(kl_loss) + + if __name__ == "__main__": import doctest From 5827aac79a36f0d43e9bd9f1c9ca11da07b2d623 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 3 Jun 2024 18:21:27 -0300 Subject: [PATCH 084/260] [pre-commit.ci] pre-commit autoupdate (#11430) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.4.5 → v0.4.7](https://github.com/astral-sh/ruff-pre-commit/compare/v0.4.5...v0.4.7) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 43bf547dec6e..a04f4f8b2165 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.4.5 + rev: v0.4.7 hooks: - id: ruff - id: 
ruff-format From 41a1cdf38d9cb1a14c9149d2d815efa2259679ef Mon Sep 17 00:00:00 2001 From: Yuri Batista Ishizawa Date: Tue, 11 Jun 2024 06:45:00 -0300 Subject: [PATCH 085/260] Add rainfall intensity calculation function (#11432) * Add rainfall intensity calculation function * chore: improve fuction and coefficient documentation * Update physics/rainfall_intensity.py --------- Co-authored-by: Tianyi Zheng --- physics/rainfall_intensity.py | 143 ++++++++++++++++++++++++++++++++++ 1 file changed, 143 insertions(+) create mode 100644 physics/rainfall_intensity.py diff --git a/physics/rainfall_intensity.py b/physics/rainfall_intensity.py new file mode 100644 index 000000000000..cee8d50ddc2f --- /dev/null +++ b/physics/rainfall_intensity.py @@ -0,0 +1,143 @@ +""" +Rainfall Intensity +================== +This module contains functions to calculate the intensity of +a rainfall event for a given duration and return period. + +This function uses the Sherman intensity-duration-frequency curve. + +References +---------- +- Aparicio, F. (1997): Fundamentos de Hidrología de Superficie. + Balderas, México, Limusa. 303 p. +- https://en.wikipedia.org/wiki/Intensity-duration-frequency_curve +""" + + +def rainfall_intensity( + coefficient_k: float, + coefficient_a: float, + coefficient_b: float, + coefficient_c: float, + return_period: float, + duration: float, +) -> float: + """ + Calculate the intensity of a rainfall event for a given duration and return period. + It's based on the Sherman intensity-duration-frequency curve: + + I = k * T^a / (D + b)^c + + where: + I = Intensity of the rainfall event [mm/h] + k, a, b, c = Coefficients obtained through statistical distribution adjust + T = Return period in years + D = Rainfall event duration in minutes + + Parameters + ---------- + coefficient_k : float + Coefficient obtained through statistical distribution adjust. + coefficient_a : float + Coefficient obtained through statistical distribution adjust. 
+ coefficient_b : float + Coefficient obtained through statistical distribution adjust. + coefficient_c : float + Coefficient obtained through statistical distribution adjust. + return_period : float + Return period in years. + duration : float + Rainfall event duration in minutes. + + Returns + ------- + intensity : float + Intensity of the rainfall event in mm/h. + + Raises + ------ + ValueError + If any of the parameters are not positive. + + Examples + -------- + + >>> rainfall_intensity(1000, 0.2, 11.6, 0.81, 10, 60) + 49.83339231138578 + + >>> rainfall_intensity(1000, 0.2, 11.6, 0.81, 10, 30) + 77.36319588106228 + + >>> rainfall_intensity(1000, 0.2, 11.6, 0.81, 5, 60) + 43.382487747633625 + + >>> rainfall_intensity(0, 0.2, 11.6, 0.81, 10, 60) + Traceback (most recent call last): + ... + ValueError: All parameters must be positive. + + >>> rainfall_intensity(1000, -0.2, 11.6, 0.81, 10, 60) + Traceback (most recent call last): + ... + ValueError: All parameters must be positive. + + >>> rainfall_intensity(1000, 0.2, -11.6, 0.81, 10, 60) + Traceback (most recent call last): + ... + ValueError: All parameters must be positive. + + >>> rainfall_intensity(1000, 0.2, 11.6, -0.81, 10, 60) + Traceback (most recent call last): + ... + ValueError: All parameters must be positive. + + >>> rainfall_intensity(1000, 0, 11.6, 0.81, 10, 60) + Traceback (most recent call last): + ... + ValueError: All parameters must be positive. + + >>> rainfall_intensity(1000, 0.2, 0, 0.81, 10, 60) + Traceback (most recent call last): + ... + ValueError: All parameters must be positive. + + >>> rainfall_intensity(1000, 0.2, 11.6, 0, 10, 60) + Traceback (most recent call last): + ... + ValueError: All parameters must be positive. + + >>> rainfall_intensity(0, 0.2, 11.6, 0.81, 10, 60) + Traceback (most recent call last): + ... + ValueError: All parameters must be positive. + + >>> rainfall_intensity(1000, 0.2, 11.6, 0.81, 0, 60) + Traceback (most recent call last): + ... 
+ ValueError: All parameters must be positive. + + >>> rainfall_intensity(1000, 0.2, 11.6, 0.81, 10, 0) + Traceback (most recent call last): + ... + ValueError: All parameters must be positive. + + """ + if ( + coefficient_k <= 0 + or coefficient_a <= 0 + or coefficient_b <= 0 + or coefficient_c <= 0 + or return_period <= 0 + or duration <= 0 + ): + raise ValueError("All parameters must be positive.") + intensity = (coefficient_k * (return_period**coefficient_a)) / ( + (duration + coefficient_b) ** coefficient_c + ) + return intensity + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 446742387e83f94f3d54ce640cb07004180130ee Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Thu, 13 Jun 2024 14:47:29 -0700 Subject: [PATCH 086/260] Fix grammar and spelling mistakes in sequential_minimum_optimization.py (#11427) --- .../sequential_minimum_optimization.py | 135 +++++++++--------- 1 file changed, 66 insertions(+), 69 deletions(-) diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index 2ebdeb764a80..625fc28fe60c 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -1,11 +1,9 @@ """ - Implementation of sequential minimal optimization (SMO) for support vector machines - (SVM). +Sequential minimal optimization (SMO) for support vector machines (SVM) - Sequential minimal optimization (SMO) is an algorithm for solving the quadratic - programming (QP) problem that arises during the training of support vector - machines. - It was invented by John Platt in 1998. +Sequential minimal optimization (SMO) is an algorithm for solving the quadratic +programming (QP) problem that arises during the training of SVMs. It was invented by +John Platt in 1998. Input: 0: type: numpy.ndarray. 
@@ -124,8 +122,7 @@ def fit(self): b_old = self._b self._b = b - # 4: update error value,here we only calculate those non-bound samples' - # error + # 4: update error, here we only calculate the error for non-bound samples self._unbound = [i for i in self._all_samples if self._is_unbound(i)] for s in self.unbound: if s in (i1, i2): @@ -136,7 +133,7 @@ def fit(self): + (self._b - b_old) ) - # if i1 or i2 is non-bound,update there error value to zero + # if i1 or i2 is non-bound, update their error value to zero if self._is_unbound(i1): self._error[i1] = 0 if self._is_unbound(i2): @@ -161,7 +158,7 @@ def predict(self, test_samples, classify=True): results.append(result) return np.array(results) - # Check if alpha violate KKT condition + # Check if alpha violates the KKT condition def _check_obey_kkt(self, index): alphas = self.alphas tol = self._tol @@ -172,20 +169,19 @@ def _check_obey_kkt(self, index): # Get value calculated from kernel function def _k(self, i1, i2): - # for test samples,use Kernel function + # for test samples, use kernel function if isinstance(i2, np.ndarray): return self.Kernel(self.samples[i1], i2) - # for train samples,Kernel values have been saved in matrix + # for training samples, kernel values have been saved in matrix else: return self._K_matrix[i1, i2] - # Get sample's error + # Get error for sample def _e(self, index): """ Two cases: - 1:Sample[index] is non-bound,Fetch error from list: _error - 2:sample[index] is bound,Use predicted value deduct true value: g(xi) - yi - + 1: Sample[index] is non-bound, fetch error from list: _error + 2: sample[index] is bound, use predicted value minus true value: g(xi) - yi """ # get from error data if self._is_unbound(index): @@ -196,7 +192,7 @@ def _e(self, index): yi = self.tags[index] return gx - yi - # Calculate Kernel matrix of all possible i1,i2 ,saving time + # Calculate kernel matrix of all possible i1, i2, saving time def _calculate_k_matrix(self): k_matrix = np.zeros([self.length, 
self.length]) for i in self._all_samples: @@ -206,7 +202,7 @@ def _calculate_k_matrix(self): ) return k_matrix - # Predict test sample's tag + # Predict tag for test sample def _predict(self, sample): k = self._k predicted_value = ( @@ -222,30 +218,31 @@ def _predict(self, sample): # Choose alpha1 and alpha2 def _choose_alphas(self): - locis = yield from self._choose_a1() - if not locis: + loci = yield from self._choose_a1() + if not loci: return None - return locis + return loci def _choose_a1(self): """ - Choose first alpha ;steps: - 1:First loop over all sample - 2:Second loop over all non-bound samples till all non-bound samples does not - voilate kkt condition. - 3:Repeat this two process endlessly,till all samples does not voilate kkt - condition samples after first loop. + Choose first alpha + Steps: + 1: First loop over all samples + 2: Second loop over all non-bound samples until no non-bound samples violate + the KKT condition. + 3: Repeat these two processes until no samples violate the KKT condition + after the first loop. """ while True: all_not_obey = True # all sample - print("scanning all sample!") + print("Scanning all samples!") for i1 in [i for i in self._all_samples if self._check_obey_kkt(i)]: all_not_obey = False yield from self._choose_a2(i1) # non-bound sample - print("scanning non-bound sample!") + print("Scanning non-bound samples!") while True: not_obey = True for i1 in [ @@ -256,20 +253,21 @@ def _choose_a1(self): not_obey = False yield from self._choose_a2(i1) if not_obey: - print("all non-bound samples fit the KKT condition!") + print("All non-bound samples satisfy the KKT condition!") break if all_not_obey: - print("all samples fit the KKT condition! Optimization done!") + print("All samples satisfy the KKT condition!") break return False def _choose_a2(self, i1): """ - Choose the second alpha by using heuristic algorithm ;steps: - 1: Choose alpha2 which gets the maximum step size (|E1 - E2|). 
- 2: Start in a random point,loop over all non-bound samples till alpha1 and + Choose the second alpha using a heuristic algorithm + Steps: + 1: Choose alpha2 that maximizes the step size (|E1 - E2|). + 2: Start in a random point, loop over all non-bound samples till alpha1 and alpha2 are optimized. - 3: Start in a random point,loop over all samples till alpha1 and alpha2 are + 3: Start in a random point, loop over all samples till alpha1 and alpha2 are optimized. """ self._unbound = [i for i in self._all_samples if self._is_unbound(i)] @@ -306,7 +304,7 @@ def _get_new_alpha(self, i1, i2, a1, a2, e1, e2, y1, y2): if i1 == i2: return None, None - # calculate L and H which bound the new alpha2 + # calculate L and H which bound the new alpha2 s = y1 * y2 if s == -1: l, h = max(0.0, a2 - a1), min(self._c, self._c + a2 - a1) # noqa: E741 @@ -320,7 +318,7 @@ def _get_new_alpha(self, i1, i2, a1, a2, e1, e2, y1, y2): k22 = k(i2, i2) k12 = k(i1, i2) - # select the new alpha2 which could get the minimal objectives + # select the new alpha2 which could achieve the minimal objectives if (eta := k11 + k22 - 2.0 * k12) > 0.0: a2_new_unc = a2 + (y2 * (e1 - e2)) / eta # a2_new has a boundary @@ -335,7 +333,7 @@ def _get_new_alpha(self, i1, i2, a1, a2, e1, e2, y1, y2): l1 = a1 + s * (a2 - l) h1 = a1 + s * (a2 - h) - # way 1 + # Method 1 f1 = y1 * (e1 + b) - a1 * k(i1, i1) - s * a2 * k(i1, i2) f2 = y2 * (e2 + b) - a2 * k(i2, i2) - s * a1 * k(i1, i2) ol = ( @@ -353,9 +351,8 @@ def _get_new_alpha(self, i1, i2, a1, a2, e1, e2, y1, y2): + s * h * h1 * k(i1, i2) ) """ - # way 2 - Use objective function check which alpha2 new could get the minimal - objectives + Method 2: Use objective function to check which alpha2_new could achieve the + minimal objectives """ if ol < (oh - self._eps): a2_new = l @@ -375,7 +372,7 @@ def _get_new_alpha(self, i1, i2, a1, a2, e1, e2, y1, y2): return a1_new, a2_new - # Normalise data using min_max way + # Normalize data using min-max method def _norm(self, 
data): if self._init: self._min = np.min(data, axis=0) @@ -424,7 +421,7 @@ def _rbf(self, v1, v2): def _check(self): if self._kernel == self._rbf and self.gamma < 0: - raise ValueError("gamma value must greater than 0") + raise ValueError("gamma value must be non-negative") def _get_kernel(self, kernel_name): maps = {"linear": self._linear, "poly": self._polynomial, "rbf": self._rbf} @@ -444,27 +441,27 @@ def call_func(*args, **kwargs): start_time = time.time() func(*args, **kwargs) end_time = time.time() - print(f"smo algorithm cost {end_time - start_time} seconds") + print(f"SMO algorithm cost {end_time - start_time} seconds") return call_func @count_time -def test_cancel_data(): - print("Hello!\nStart test svm by smo algorithm!") +def test_cancer_data(): + print("Hello!\nStart test SVM using the SMO algorithm!") # 0: download dataset and load into pandas' dataframe - if not os.path.exists(r"cancel_data.csv"): + if not os.path.exists(r"cancer_data.csv"): request = urllib.request.Request( # noqa: S310 CANCER_DATASET_URL, headers={"User-Agent": "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)"}, ) response = urllib.request.urlopen(request) # noqa: S310 content = response.read().decode("utf-8") - with open(r"cancel_data.csv", "w") as f: + with open(r"cancer_data.csv", "w") as f: f.write(content) data = pd.read_csv( - "cancel_data.csv", + "cancer_data.csv", header=None, dtype={0: str}, # Assuming the first column contains string data ) @@ -479,14 +476,14 @@ def test_cancel_data(): train_data, test_data = samples[:328, :], samples[328:, :] test_tags, test_samples = test_data[:, 0], test_data[:, 1:] - # 3: choose kernel function,and set initial alphas to zero(optional) - mykernel = Kernel(kernel="rbf", degree=5, coef0=1, gamma=0.5) + # 3: choose kernel function, and set initial alphas to zero (optional) + my_kernel = Kernel(kernel="rbf", degree=5, coef0=1, gamma=0.5) al = np.zeros(train_data.shape[0]) # 4: calculating best alphas using SMO algorithm and predict 
test_data samples mysvm = SmoSVM( train=train_data, - kernel_func=mykernel, + kernel_func=my_kernel, alpha_list=al, cost=0.4, b=0.0, @@ -501,30 +498,30 @@ def test_cancel_data(): for i in range(test_tags.shape[0]): if test_tags[i] == predict[i]: score += 1 - print(f"\nall: {test_num}\nright: {score}\nfalse: {test_num - score}") + print(f"\nAll: {test_num}\nCorrect: {score}\nIncorrect: {test_num - score}") print(f"Rough Accuracy: {score / test_tags.shape[0]}") def test_demonstration(): # change stdout - print("\nStart plot,please wait!!!") + print("\nStarting plot, please wait!") sys.stdout = open(os.devnull, "w") ax1 = plt.subplot2grid((2, 2), (0, 0)) ax2 = plt.subplot2grid((2, 2), (0, 1)) ax3 = plt.subplot2grid((2, 2), (1, 0)) ax4 = plt.subplot2grid((2, 2), (1, 1)) - ax1.set_title("linear svm,cost:0.1") + ax1.set_title("Linear SVM, cost = 0.1") test_linear_kernel(ax1, cost=0.1) - ax2.set_title("linear svm,cost:500") + ax2.set_title("Linear SVM, cost = 500") test_linear_kernel(ax2, cost=500) - ax3.set_title("rbf kernel svm,cost:0.1") + ax3.set_title("RBF kernel SVM, cost = 0.1") test_rbf_kernel(ax3, cost=0.1) - ax4.set_title("rbf kernel svm,cost:500") + ax4.set_title("RBF kernel SVM, cost = 500") test_rbf_kernel(ax4, cost=500) sys.stdout = sys.__stdout__ - print("Plot done!!!") + print("Plot done!") def test_linear_kernel(ax, cost): @@ -535,10 +532,10 @@ def test_linear_kernel(ax, cost): scaler = StandardScaler() train_x_scaled = scaler.fit_transform(train_x, train_y) train_data = np.hstack((train_y.reshape(500, 1), train_x_scaled)) - mykernel = Kernel(kernel="linear", degree=5, coef0=1, gamma=0.5) + my_kernel = Kernel(kernel="linear", degree=5, coef0=1, gamma=0.5) mysvm = SmoSVM( train=train_data, - kernel_func=mykernel, + kernel_func=my_kernel, cost=cost, tolerance=0.001, auto_norm=False, @@ -555,10 +552,10 @@ def test_rbf_kernel(ax, cost): scaler = StandardScaler() train_x_scaled = scaler.fit_transform(train_x, train_y) train_data = 
np.hstack((train_y.reshape(500, 1), train_x_scaled)) - mykernel = Kernel(kernel="rbf", degree=5, coef0=1, gamma=0.5) + my_kernel = Kernel(kernel="rbf", degree=5, coef0=1, gamma=0.5) mysvm = SmoSVM( train=train_data, - kernel_func=mykernel, + kernel_func=my_kernel, cost=cost, tolerance=0.001, auto_norm=False, @@ -571,11 +568,11 @@ def plot_partition_boundary( model, train_data, ax, resolution=100, colors=("b", "k", "r") ): """ - We can not get the optimum w of our kernel svm model which is different from linear - svm. For this reason, we generate randomly distributed points with high desity and - prediced values of these points are calculated by using our trained model. Then we - could use this prediced values to draw contour map. - And this contour map can represent svm's partition boundary. + We cannot get the optimal w of our kernel SVM model, which is different from a + linear SVM. For this reason, we generate randomly distributed points with high + density, and predicted values of these points are calculated using our trained + model. Then we could use this predicted values to draw contour map, and this contour + map represents the SVM's partition boundary. 
""" train_data_x = train_data[:, 1] train_data_y = train_data[:, 2] @@ -620,6 +617,6 @@ def plot_partition_boundary( if __name__ == "__main__": - test_cancel_data() + test_cancer_data() test_demonstration() plt.show() From af6a45e982213ef52a2f747dec6b58d668bfce5b Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 17 Jun 2024 00:19:32 +0300 Subject: [PATCH 087/260] Remove some per file ignores (#11381) * Remove some per file ignores * updating DIRECTORY.md * updating DIRECTORY.md --------- Co-authored-by: MaximSmolskiy --- DIRECTORY.md | 1 + pyproject.toml | 6 ------ 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 2094fc3a980e..04551fad3685 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -863,6 +863,7 @@ * [Newtons Second Law Of Motion](physics/newtons_second_law_of_motion.py) * [Photoelectric Effect](physics/photoelectric_effect.py) * [Potential Energy](physics/potential_energy.py) + * [Rainfall Intensity](physics/rainfall_intensity.py) * [Reynolds Number](physics/reynolds_number.py) * [Rms Speed Of Molecule](physics/rms_speed_of_molecule.py) * [Shear Stress](physics/shear_stress.py) diff --git a/pyproject.toml b/pyproject.toml index 429f4fab9a52..bb8657183164 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -74,12 +74,6 @@ lint.ignore = [ "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX ] -lint.per-file-ignores."arithmetic_analysis/newton_raphson.py" = [ - "PGH001", -] -lint.per-file-ignores."data_structures/binary_tree/binary_search_tree_recursive.py" = [ - "BLE001", -] lint.per-file-ignores."data_structures/hashing/tests/test_hash_map.py" = [ "BLE001", ] From df94d460ac8d220f97851f358abc0102ae47d3db Mon Sep 17 00:00:00 2001 From: raj <64704676+ra230537@users.noreply.github.com> Date: Sun, 16 Jun 2024 19:17:55 -0300 Subject: [PATCH 088/260] Fix/fixes get top billionaries code (#11466) * fix: modify the depracated code and add new tests * fix: remove test from pr * fix: remove the useless 
utc import * fix: add explicit tz argument * fix: fixes ruff checking * Remove UP017 #noqa comments from code * Update get_top_billionaires.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update get_top_billionaires.py --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- web_programming/get_top_billionaires.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/web_programming/get_top_billionaires.py b/web_programming/get_top_billionaires.py index 24828b6d787c..99f6e0be948a 100644 --- a/web_programming/get_top_billionaires.py +++ b/web_programming/get_top_billionaires.py @@ -65,7 +65,7 @@ def get_forbes_real_time_billionaires() -> list[dict[str, int | str]]: "Country": person["countryOfCitizenship"], "Gender": person["gender"], "Worth ($)": f"{person['finalWorth'] / 1000:.1f} Billion", - "Age": years_old(person["birthDate"]), + "Age": str(years_old(person["birthDate"] / 1000)), } for person in response_json["personList"]["personsLists"] ] @@ -95,4 +95,7 @@ def display_billionaires(forbes_billionaires: list[dict[str, int | str]]) -> Non if __name__ == "__main__": + from doctest import testmod + + testmod() display_billionaires(get_forbes_real_time_billionaires()) From 31d1cd8402ba48aca26d9f1d2774f929610e7180 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 17 Jun 2024 08:31:32 -0400 Subject: [PATCH 089/260] [pre-commit.ci] pre-commit autoupdate (#11435) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.4.7 → v0.4.8](https://github.com/astral-sh/ruff-pre-commit/compare/v0.4.7...v0.4.8) * Update .pre-commit-config.yaml --------- Co-authored-by: pre-commit-ci[bot] 
<66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a04f4f8b2165..fc8545b5159b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.4.7 + rev: v0.4.9 hooks: - id: ruff - id: ruff-format From 1cfca52db73ee18b9e9e08febe9e7d42f96e43db Mon Sep 17 00:00:00 2001 From: Snoppy Date: Mon, 17 Jun 2024 21:27:07 +0800 Subject: [PATCH 090/260] chore: fix typos (#11467) * chore: fix typos Signed-off-by: snoppy * Apply suggestions from code review Co-authored-by: Tianyi Zheng --------- Signed-off-by: snoppy Co-authored-by: Christian Clauss Co-authored-by: Tianyi Zheng --- computer_vision/haralick_descriptors.py | 2 +- graphs/strongly_connected_components.py | 2 +- maths/points_are_collinear_3d.py | 10 +++++----- neural_network/convolution_neural_network.py | 8 ++++---- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/computer_vision/haralick_descriptors.py b/computer_vision/haralick_descriptors.py index 712bd49668f8..634f0495797b 100644 --- a/computer_vision/haralick_descriptors.py +++ b/computer_vision/haralick_descriptors.py @@ -141,7 +141,7 @@ def transform( center_x, center_y = (x // 2 for x in kernel.shape) - # Use padded image when applying convolotion + # Use padded image when applying convolution # to not go out of bounds of the original the image transformed = np.zeros(image.shape, dtype=np.uint8) padded = np.pad(image, 1, "constant", constant_values=constant) diff --git a/graphs/strongly_connected_components.py b/graphs/strongly_connected_components.py index 325e5c1f33a3..4d4cf88035b5 100644 --- a/graphs/strongly_connected_components.py +++ b/graphs/strongly_connected_components.py @@ -38,7 +38,7 @@ def find_components( reversed_graph: dict[int, 
list[int]], vert: int, visited: list[bool] ) -> list[int]: """ - Use depth first search to find strongliy connected + Use depth first search to find strongly connected vertices. Now graph is reversed >>> find_components({0: [1], 1: [2], 2: [0]}, 0, 5 * [False]) [0, 1, 2] diff --git a/maths/points_are_collinear_3d.py b/maths/points_are_collinear_3d.py index 3bc0b3b9ebe5..c7adddda9494 100644 --- a/maths/points_are_collinear_3d.py +++ b/maths/points_are_collinear_3d.py @@ -76,9 +76,9 @@ def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d: def is_zero_vector(vector: Vector3d, accuracy: int) -> bool: """ - Check if vector is equal to (0, 0, 0) of not. + Check if vector is equal to (0, 0, 0) or not. - Sine the algorithm is very accurate, we will never get a zero vector, + Since the algorithm is very accurate, we will never get a zero vector, so we need to round the vector axis, because we want a result that is either True or False. In other applications, we can return a float that represents the collinearity ratio. @@ -97,9 +97,9 @@ def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> boo """ Check if three points are collinear or not. - 1- Create tow vectors AB and AC. - 2- Get the cross vector of the tow vectors. - 3- Calcolate the length of the cross vector. + 1- Create two vectors AB and AC. + 2- Get the cross vector of the two vectors. + 3- Calculate the length of the cross vector. 4- If the length is zero then the points are collinear, else they are not. The use of the accuracy parameter is explained in is_zero_vector docstring. 
diff --git a/neural_network/convolution_neural_network.py b/neural_network/convolution_neural_network.py index 3c551924442d..d4ac360a98de 100644 --- a/neural_network/convolution_neural_network.py +++ b/neural_network/convolution_neural_network.py @@ -1,7 +1,7 @@ """ - - - - - -- - - - - - - - - - - - - - - - - - - - - - - Name - - CNN - Convolution Neural Network For Photo Recognizing -Goal - - Recognize Handing Writing Word Photo +Goal - - Recognize Handwriting Word Photo Detail: Total 5 layers neural network * Convolution layer * Pooling layer @@ -135,7 +135,7 @@ def convolute(self, data, convs, w_convs, thre_convs, conv_step): ) data_featuremap.append(featuremap) - # expanding the data slice to One dimenssion + # expanding the data slice to one dimension focus1_list = [] for each_focus in data_focus: focus1_list.extend(self.Expand_Mat(each_focus)) @@ -304,7 +304,7 @@ def draw_error(): plt.grid(True, alpha=0.5) plt.show() - print("------------------Training Complished---------------------") + print("------------------Training Complete---------------------") print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}")) if draw_e: draw_error() @@ -353,5 +353,5 @@ def convolution(self, data): if __name__ == "__main__": """ - I will put the example on other file + I will put the example in another file """ From 75b86671879cfbb83d241c3a3487b32c6dac9d91 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 25 Jun 2024 00:00:47 +0200 Subject: [PATCH 091/260] [pre-commit.ci] pre-commit autoupdate (#11472) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.4.9 → v0.4.10](https://github.com/astral-sh/ruff-pre-commit/compare/v0.4.9...v0.4.10) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/.pre-commit-config.yaml b/.pre-commit-config.yaml index fc8545b5159b..1eddff7ab0e6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.4.9 + rev: v0.4.10 hooks: - id: ruff - id: ruff-format From 6882a8b80806f2dc53d53a0ecc00c2c98bec3fba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Vitor?= <92267577+ShiryuReb@users.noreply.github.com> Date: Wed, 26 Jun 2024 03:06:57 -0300 Subject: [PATCH 092/260] Tests/add new test case weight_conversion (#11468) * add new test * add new test --- conversions/weight_conversion.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/conversions/weight_conversion.py b/conversions/weight_conversion.py index e8326e0b688f..0777aead9f02 100644 --- a/conversions/weight_conversion.py +++ b/conversions/weight_conversion.py @@ -297,6 +297,12 @@ def weight_conversion(from_type: str, to_type: str, value: float) -> float: 1.660540199e-23 >>> weight_conversion("atomic-mass-unit","atomic-mass-unit",2) 1.999999998903455 + >>> weight_conversion("slug", "kilogram", 1) + Traceback (most recent call last): + ... 
+ ValueError: Invalid 'from_type' or 'to_type' value: 'slug', 'kilogram' + Supported values are: kilogram, gram, milligram, metric-ton, long-ton, short-ton, \ +pound, stone, ounce, carrat, atomic-mass-unit """ if to_type not in KILOGRAM_CHART or from_type not in WEIGHT_TYPE_CHART: msg = ( From 716bdeb68b1e81aafe886e382319c6dab882dacc Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 2 Jul 2024 07:02:29 +0200 Subject: [PATCH 093/260] [pre-commit.ci] pre-commit autoupdate (#11473) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.4.10 → v0.5.0](https://github.com/astral-sh/ruff-pre-commit/compare/v0.4.10...v0.5.0) - [github.com/pre-commit/mirrors-mypy: v1.10.0 → v1.10.1](https://github.com/pre-commit/mirrors-mypy/compare/v1.10.0...v1.10.1) * Fix ruff issues * Fix ruff issues --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 5 ++- backtracking/knight_tour.py | 6 +-- data_structures/binary_tree/is_sorted.py | 6 +-- data_structures/binary_tree/red_black_tree.py | 37 +++++-------------- docs/source/__init__.py | 0 graphs/graph_adjacency_matrix.py | 8 ++-- graphs/multi_heuristic_astar.py | 4 +- graphs/tarjans_scc.py | 2 +- hashes/md5.py | 4 +- maths/radix2_fft.py | 1 - project_euler/problem_034/__init__.py | 1 - project_euler/problem_035/__init__.py | 1 - project_euler/problem_037/__init__.py | 1 - project_euler/problem_037/sol1.py | 9 ++--- project_euler/problem_039/__init__.py | 1 - project_euler/problem_041/__init__.py | 1 - project_euler/problem_043/__init__.py | 1 - project_euler/problem_044/__init__.py | 1 - project_euler/problem_045/__init__.py | 1 - project_euler/problem_046/__init__.py | 1 - project_euler/problem_055/__init__.py | 1 - 
project_euler/problem_058/__init__.py | 1 - project_euler/problem_063/__init__.py | 1 - project_euler/problem_072/sol1.py | 2 +- project_euler/problem_089/__init__.py | 1 - project_euler/problem_097/__init__.py | 1 - searches/binary_tree_traversal.py | 6 +-- sorts/external_sort.py | 5 +-- source/__init__.py | 0 .../can_string_be_rearranged_as_palindrome.py | 4 +- strings/is_valid_email_address.py | 4 +- strings/text_justification.py | 12 +++--- 32 files changed, 44 insertions(+), 85 deletions(-) create mode 100644 docs/source/__init__.py create mode 100644 source/__init__.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1eddff7ab0e6..a3f5a5e51855 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.4.10 + rev: v0.5.0 hooks: - id: ruff - id: ruff-format @@ -47,10 +47,11 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.10.0 + rev: v1.10.1 hooks: - id: mypy args: + - --explicit-package-bases - --ignore-missing-imports - --install-types # See mirrors-mypy README.md - --non-interactive diff --git a/backtracking/knight_tour.py b/backtracking/knight_tour.py index 5f7dee8d97bf..8906aaa1094c 100644 --- a/backtracking/knight_tour.py +++ b/backtracking/knight_tour.py @@ -24,10 +24,10 @@ def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]: ] permissible_positions = [] - for position in positions: - y_test, x_test = position + for inner_position in positions: + y_test, x_test = inner_position if 0 <= y_test < n and 0 <= x_test < n: - permissible_positions.append(position) + permissible_positions.append(inner_position) return permissible_positions diff --git a/data_structures/binary_tree/is_sorted.py b/data_structures/binary_tree/is_sorted.py index 509a426611e5..91fc8ca82633 100644 --- a/data_structures/binary_tree/is_sorted.py +++ 
b/data_structures/binary_tree/is_sorted.py @@ -80,9 +80,9 @@ def is_sorted(self) -> bool: """ if self.left and (self.data < self.left.data or not self.left.is_sorted): return False - if self.right and (self.data > self.right.data or not self.right.is_sorted): - return False - return True + return not ( + self.right and (self.data > self.right.data or not self.right.is_sorted) + ) if __name__ == "__main__": diff --git a/data_structures/binary_tree/red_black_tree.py b/data_structures/binary_tree/red_black_tree.py index a9ecf897c701..752db1e7026c 100644 --- a/data_structures/binary_tree/red_black_tree.py +++ b/data_structures/binary_tree/red_black_tree.py @@ -1,8 +1,3 @@ -""" -psf/black : true -ruff : passed -""" - from __future__ import annotations from collections.abc import Iterator @@ -321,9 +316,7 @@ def check_coloring(self) -> bool: return False if self.left and not self.left.check_coloring(): return False - if self.right and not self.right.check_coloring(): - return False - return True + return not (self.right and not self.right.check_coloring()) def black_height(self) -> int | None: """Returns the number of black nodes from this node to the @@ -561,9 +554,7 @@ def test_rotations() -> bool: right_rot.right.right = RedBlackTree(10, parent=right_rot.right) right_rot.right.right.left = RedBlackTree(5, parent=right_rot.right.right) right_rot.right.right.right = RedBlackTree(20, parent=right_rot.right.right) - if tree != right_rot: - return False - return True + return tree == right_rot def test_insertion_speed() -> bool: @@ -606,13 +597,11 @@ def test_insert_and_search() -> bool: tree.insert(12) tree.insert(10) tree.insert(11) - if 5 in tree or -6 in tree or -10 in tree or 13 in tree: + if any(i in tree for i in (5, -6, -10, 13)): # Found something not in there return False - if not (11 in tree and 12 in tree and -8 in tree and 0 in tree): - # Didn't find something in there - return False - return True + # Find all these things in there + return all(i in tree for i 
in (11, 12, -8, 0)) def test_insert_delete() -> bool: @@ -634,9 +623,7 @@ def test_insert_delete() -> bool: tree = tree.remove(9) if not tree.check_color_properties(): return False - if list(tree.inorder_traverse()) != [-8, 0, 4, 8, 10, 11, 12]: - return False - return True + return list(tree.inorder_traverse()) == [-8, 0, 4, 8, 10, 11, 12] def test_floor_ceil() -> bool: @@ -664,9 +651,7 @@ def test_min_max() -> bool: tree.insert(24) tree.insert(20) tree.insert(22) - if tree.get_max() != 22 or tree.get_min() != -16: - return False - return True + return not (tree.get_max() != 22 or tree.get_min() != -16) def test_tree_traversal() -> bool: @@ -682,9 +667,7 @@ def test_tree_traversal() -> bool: return False if list(tree.preorder_traverse()) != [0, -16, 16, 8, 22, 20, 24]: return False - if list(tree.postorder_traverse()) != [-16, 8, 20, 24, 22, 16, 0]: - return False - return True + return list(tree.postorder_traverse()) == [-16, 8, 20, 24, 22, 16, 0] def test_tree_chaining() -> bool: @@ -695,9 +678,7 @@ def test_tree_chaining() -> bool: return False if list(tree.preorder_traverse()) != [0, -16, 16, 8, 22, 20, 24]: return False - if list(tree.postorder_traverse()) != [-16, 8, 20, 24, 22, 16, 0]: - return False - return True + return list(tree.postorder_traverse()) == [-16, 8, 20, 24, 22, 16, 0] def print_results(msg: str, passes: bool) -> None: diff --git a/docs/source/__init__.py b/docs/source/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/graphs/graph_adjacency_matrix.py b/graphs/graph_adjacency_matrix.py index 059a6aa9ffb5..568c84166e4b 100644 --- a/graphs/graph_adjacency_matrix.py +++ b/graphs/graph_adjacency_matrix.py @@ -156,9 +156,11 @@ def remove_vertex(self, vertex: T) -> None: self.vertex_to_index.pop(vertex) # decrement indices for vertices shifted by the deleted vertex in the adj matrix - for vertex in self.vertex_to_index: - if self.vertex_to_index[vertex] >= start_index: - self.vertex_to_index[vertex] = 
self.vertex_to_index[vertex] - 1 + for inner_vertex in self.vertex_to_index: + if self.vertex_to_index[inner_vertex] >= start_index: + self.vertex_to_index[inner_vertex] = ( + self.vertex_to_index[inner_vertex] - 1 + ) def contains_vertex(self, vertex: T) -> bool: """ diff --git a/graphs/multi_heuristic_astar.py b/graphs/multi_heuristic_astar.py index 6af9a187a4e9..47509beb8efb 100644 --- a/graphs/multi_heuristic_astar.py +++ b/graphs/multi_heuristic_astar.py @@ -123,9 +123,7 @@ def do_something(back_pointer, goal, start): def valid(p: TPos): if p[0] < 0 or p[0] > n - 1: return False - if p[1] < 0 or p[1] > n - 1: - return False - return True + return not (p[1] < 0 or p[1] > n - 1) def expand_state( diff --git a/graphs/tarjans_scc.py b/graphs/tarjans_scc.py index a75dc4d2ca95..b4a3bd5c4c35 100644 --- a/graphs/tarjans_scc.py +++ b/graphs/tarjans_scc.py @@ -103,4 +103,4 @@ def create_graph(n: int, edges: list[tuple[int, int]]) -> list[list[int]]: edges = list(zip(source, target)) g = create_graph(n_vertices, edges) - assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g) + assert tarjan(g) == [[5], [6], [4], [3, 2, 1, 0]] diff --git a/hashes/md5.py b/hashes/md5.py index 2187006ec8a9..622a50d290e1 100644 --- a/hashes/md5.py +++ b/hashes/md5.py @@ -82,8 +82,8 @@ def reformat_hex(i: int) -> bytes: hex_rep = format(i, "08x")[-8:] little_endian_hex = b"" - for i in [3, 2, 1, 0]: - little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8") + for j in [3, 2, 1, 0]: + little_endian_hex += hex_rep[2 * j : 2 * j + 2].encode("utf-8") return little_endian_hex diff --git a/maths/radix2_fft.py b/maths/radix2_fft.py index 2c5cdc004d1d..d41dc82d5588 100644 --- a/maths/radix2_fft.py +++ b/maths/radix2_fft.py @@ -84,7 +84,6 @@ def __dft(self, which): # Corner case if len(dft) <= 1: return dft[0] - # next_ncol = self.c_max_length // 2 while next_ncol > 0: new_dft = [[] for i in range(next_ncol)] diff --git a/project_euler/problem_034/__init__.py b/project_euler/problem_034/__init__.py 
index 792d6005489e..e69de29bb2d1 100644 --- a/project_euler/problem_034/__init__.py +++ b/project_euler/problem_034/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_035/__init__.py b/project_euler/problem_035/__init__.py index 792d6005489e..e69de29bb2d1 100644 --- a/project_euler/problem_035/__init__.py +++ b/project_euler/problem_035/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_037/__init__.py b/project_euler/problem_037/__init__.py index 792d6005489e..e69de29bb2d1 100644 --- a/project_euler/problem_037/__init__.py +++ b/project_euler/problem_037/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_037/sol1.py b/project_euler/problem_037/sol1.py index 9c09065f4bd0..c66eb9fb1735 100644 --- a/project_euler/problem_037/sol1.py +++ b/project_euler/problem_037/sol1.py @@ -85,11 +85,10 @@ def validate(n: int) -> bool: >>> validate(3797) True """ - if len(str(n)) > 3 and ( - not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])) - ): - return False - return True + return not ( + len(str(n)) > 3 + and (not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3]))) + ) def compute_truncated_primes(count: int = 11) -> list[int]: diff --git a/project_euler/problem_039/__init__.py b/project_euler/problem_039/__init__.py index 792d6005489e..e69de29bb2d1 100644 --- a/project_euler/problem_039/__init__.py +++ b/project_euler/problem_039/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_041/__init__.py b/project_euler/problem_041/__init__.py index 792d6005489e..e69de29bb2d1 100644 --- a/project_euler/problem_041/__init__.py +++ b/project_euler/problem_041/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_043/__init__.py b/project_euler/problem_043/__init__.py index 792d6005489e..e69de29bb2d1 100644 --- a/project_euler/problem_043/__init__.py +++ b/project_euler/problem_043/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_044/__init__.py b/project_euler/problem_044/__init__.py index 
792d6005489e..e69de29bb2d1 100644 --- a/project_euler/problem_044/__init__.py +++ b/project_euler/problem_044/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_045/__init__.py b/project_euler/problem_045/__init__.py index 792d6005489e..e69de29bb2d1 100644 --- a/project_euler/problem_045/__init__.py +++ b/project_euler/problem_045/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_046/__init__.py b/project_euler/problem_046/__init__.py index 792d6005489e..e69de29bb2d1 100644 --- a/project_euler/problem_046/__init__.py +++ b/project_euler/problem_046/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_055/__init__.py b/project_euler/problem_055/__init__.py index 792d6005489e..e69de29bb2d1 100644 --- a/project_euler/problem_055/__init__.py +++ b/project_euler/problem_055/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_058/__init__.py b/project_euler/problem_058/__init__.py index 792d6005489e..e69de29bb2d1 100644 --- a/project_euler/problem_058/__init__.py +++ b/project_euler/problem_058/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_063/__init__.py b/project_euler/problem_063/__init__.py index 792d6005489e..e69de29bb2d1 100644 --- a/project_euler/problem_063/__init__.py +++ b/project_euler/problem_063/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_072/sol1.py b/project_euler/problem_072/sol1.py index 5a28be564556..f09db0673323 100644 --- a/project_euler/problem_072/sol1.py +++ b/project_euler/problem_072/sol1.py @@ -43,7 +43,7 @@ def solution(limit: int = 1_000_000) -> int: ind = np.arange(2 * i, limit + 1, i) # indexes for selection phi[ind] -= phi[ind] // i - return np.sum(phi[2 : limit + 1]) + return int(np.sum(phi[2 : limit + 1])) if __name__ == "__main__": diff --git a/project_euler/problem_089/__init__.py b/project_euler/problem_089/__init__.py index 792d6005489e..e69de29bb2d1 100644 --- a/project_euler/problem_089/__init__.py +++ b/project_euler/problem_089/__init__.py @@ 
-1 +0,0 @@ -# diff --git a/project_euler/problem_097/__init__.py b/project_euler/problem_097/__init__.py index 792d6005489e..e69de29bb2d1 100644 --- a/project_euler/problem_097/__init__.py +++ b/project_euler/problem_097/__init__.py @@ -1 +0,0 @@ -# diff --git a/searches/binary_tree_traversal.py b/searches/binary_tree_traversal.py index 4897ef17299c..47af57f7f94d 100644 --- a/searches/binary_tree_traversal.py +++ b/searches/binary_tree_traversal.py @@ -36,7 +36,7 @@ def build_tree() -> TreeNode: right_node = TreeNode(int(check)) node_found.right = right_node q.put(right_node) - raise + raise ValueError("Something went wrong") def pre_order(node: TreeNode) -> None: @@ -164,8 +164,8 @@ def level_order_actual(node: TreeNode) -> None: if node_dequeued.right: list_.append(node_dequeued.right) print() - for node in list_: - q.put(node) + for inner_node in list_: + q.put(inner_node) # iteration version diff --git a/sorts/external_sort.py b/sorts/external_sort.py index e6b0d47f79f5..3fa7cacc0592 100644 --- a/sorts/external_sort.py +++ b/sorts/external_sort.py @@ -77,10 +77,7 @@ def refresh(self): self.empty.add(i) self.files[i].close() - if len(self.empty) == self.num_buffers: - return False - - return True + return len(self.empty) != self.num_buffers def unshift(self, index): value = self.buffers[index] diff --git a/source/__init__.py b/source/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/strings/can_string_be_rearranged_as_palindrome.py b/strings/can_string_be_rearranged_as_palindrome.py index 21d653db1405..95cda8b72180 100644 --- a/strings/can_string_be_rearranged_as_palindrome.py +++ b/strings/can_string_be_rearranged_as_palindrome.py @@ -72,9 +72,7 @@ def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool: for character_count in character_freq_dict.values(): if character_count % 2: odd_char += 1 - if odd_char > 1: - return False - return True + return not odd_char > 1 def benchmark(input_str: str = "") -> None: diff 
--git a/strings/is_valid_email_address.py b/strings/is_valid_email_address.py index 205394f81297..c3bf7df7349d 100644 --- a/strings/is_valid_email_address.py +++ b/strings/is_valid_email_address.py @@ -101,9 +101,7 @@ def is_valid_email_address(email: str) -> bool: return False # (7.) Validate the placement of "." characters - if domain.startswith(".") or domain.endswith(".") or ".." in domain: - return False - return True + return not (domain.startswith(".") or domain.endswith(".") or ".." in domain) if __name__ == "__main__": diff --git a/strings/text_justification.py b/strings/text_justification.py index b0ef12231224..e025edcfe13f 100644 --- a/strings/text_justification.py +++ b/strings/text_justification.py @@ -67,19 +67,19 @@ def justify(line: list, width: int, max_width: int) -> str: answer = [] line: list[str] = [] width = 0 - for word in words: - if width + len(word) + len(line) <= max_width: + for inner_word in words: + if width + len(inner_word) + len(line) <= max_width: # keep adding words until we can fill out max_width # width = sum of length of all words (without overall_spaces_count) - # len(word) = length of current word + # len(inner_word) = length of current inner_word # len(line) = number of overall_spaces_count to insert between words - line.append(word) - width += len(word) + line.append(inner_word) + width += len(inner_word) else: # justify the line and add it to result answer.append(justify(line, width, max_width)) # reset new line and new width - line, width = [word], len(word) + line, width = [inner_word], len(inner_word) remaining_spaces = max_width - width - len(line) answer.append(" ".join(line) + (remaining_spaces + 1) * " ") return answer From c1dc8e97f7992c132c671da2da60da9d926d0fca Mon Sep 17 00:00:00 2001 From: Saurabh Mahapatra <98408932+its-100rabh@users.noreply.github.com> Date: Thu, 4 Jul 2024 23:46:24 +0530 Subject: [PATCH 094/260] Create count_vowels.py (#11474) * Create count_vowels.py * [pre-commit.ci] auto fixes from 
pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- strings/count_vowels.py | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 strings/count_vowels.py diff --git a/strings/count_vowels.py b/strings/count_vowels.py new file mode 100644 index 000000000000..8a52b331c81b --- /dev/null +++ b/strings/count_vowels.py @@ -0,0 +1,34 @@ +def count_vowels(s: str) -> int: + """ + Count the number of vowels in a given string. + + :param s: Input string to count vowels in. + :return: Number of vowels in the input string. + + Examples: + >>> count_vowels("hello world") + 3 + >>> count_vowels("HELLO WORLD") + 3 + >>> count_vowels("123 hello world") + 3 + >>> count_vowels("") + 0 + >>> count_vowels("a quick brown fox") + 5 + >>> count_vowels("the quick BROWN fox") + 5 + >>> count_vowels("PYTHON") + 1 + """ + if not isinstance(s, str): + raise ValueError("Input must be a string") + + vowels = "aeiouAEIOU" + return sum(1 for char in s if char in vowels) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 9190888f89c55d927881c7b08f6df361ab1b0af4 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 8 Jul 2024 22:55:30 +0200 Subject: [PATCH 095/260] [pre-commit.ci] pre-commit autoupdate (#11481) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.5.0 → v0.5.1](https://github.com/astral-sh/ruff-pre-commit/compare/v0.5.0...v0.5.1) - [github.com/tox-dev/pyproject-fmt: 2.1.3 → 2.1.4](https://github.com/tox-dev/pyproject-fmt/compare/2.1.3...2.1.4) * updating DIRECTORY.md * grid = np.char.chararray((n, n)) --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> 
Co-authored-by: pre-commit-ci[bot] Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 1 + graphs/multi_heuristic_astar.py | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a3f5a5e51855..7fd689adca3b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.5.0 + rev: v0.5.1 hooks: - id: ruff - id: ruff-format @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "2.1.3" + rev: "2.1.4" hooks: - id: pyproject-fmt diff --git a/DIRECTORY.md b/DIRECTORY.md index 04551fad3685..54bb8f148c32 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1260,6 +1260,7 @@ * [Can String Be Rearranged As Palindrome](strings/can_string_be_rearranged_as_palindrome.py) * [Capitalize](strings/capitalize.py) * [Check Anagrams](strings/check_anagrams.py) + * [Count Vowels](strings/count_vowels.py) * [Credit Card Validator](strings/credit_card_validator.py) * [Damerau Levenshtein Distance](strings/damerau_levenshtein_distance.py) * [Detecting English Programmatically](strings/detecting_english_programmatically.py) diff --git a/graphs/multi_heuristic_astar.py b/graphs/multi_heuristic_astar.py index 47509beb8efb..38b07e1ca675 100644 --- a/graphs/multi_heuristic_astar.py +++ b/graphs/multi_heuristic_astar.py @@ -79,7 +79,7 @@ def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]): def do_something(back_pointer, goal, start): - grid = np.chararray((n, n)) + grid = np.char.chararray((n, n)) for i in range(n): for j in range(n): grid[i][j] = "*" From 2d8f22ab615085d36c53346283528f33b18a3b6d Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 15 Jul 2024 21:52:48 +0200 Subject: [PATCH 096/260] [pre-commit.ci] pre-commit autoupdate (#11489) MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.5.1 → v0.5.2](https://github.com/astral-sh/ruff-pre-commit/compare/v0.5.1...v0.5.2) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7fd689adca3b..c72b55fdec44 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.5.1 + rev: v0.5.2 hooks: - id: ruff - id: ruff-format From d9ded0727a7a209bfcbf9bd81c5c75183cfd026f Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 23 Jul 2024 10:40:10 +0200 Subject: [PATCH 097/260] [pre-commit.ci] pre-commit autoupdate (#11495) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.5.2 → v0.5.4](https://github.com/astral-sh/ruff-pre-commit/compare/v0.5.2...v0.5.4) - [github.com/pre-commit/mirrors-mypy: v1.10.1 → v1.11.0](https://github.com/pre-commit/mirrors-mypy/compare/v1.10.1...v1.11.0) * ruff rule PLR1714 Consider merging multiple comparisons * ruff rule RUF005 Consider `[*self.urls, "", "#"]` instead of concatenation * Update emails_from_url.py * Update emails_from_url.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 4 ++-- web_programming/emails_from_url.py | 7 +------ 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c72b55fdec44..e9f57a7b746a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ 
repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.5.2 + rev: v0.5.4 hooks: - id: ruff - id: ruff-format @@ -47,7 +47,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.10.1 + rev: v1.11.0 hooks: - id: mypy args: diff --git a/web_programming/emails_from_url.py b/web_programming/emails_from_url.py index 43fd78dcf5a4..d41dc4893608 100644 --- a/web_programming/emails_from_url.py +++ b/web_programming/emails_from_url.py @@ -31,12 +31,7 @@ def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None # Check the list of defined attributes. for name, value in attrs: # If href is defined, not empty nor # print it and not already in urls. - if ( - name == "href" - and value != "#" - and value != "" - and value not in self.urls - ): + if name == "href" and value not in (*self.urls, "", "#"): url = parse.urljoin(self.domain, value) self.urls.append(url) From 146800307c5d2a4393d57b7c97c63b89a21abba1 Mon Sep 17 00:00:00 2001 From: Ihor Pryyma <83470037+Ihor-Pryyma@users.noreply.github.com> Date: Thu, 25 Jul 2024 18:56:31 +0300 Subject: [PATCH 098/260] Add doctests to interpolation_search.py (#11492) * Add doctests to interpolation_search.py * update docs * update tests * update tests 2 * clean code --- searches/interpolation_search.py | 139 ++++++++++++++++--------------- 1 file changed, 70 insertions(+), 69 deletions(-) diff --git a/searches/interpolation_search.py b/searches/interpolation_search.py index 0591788aa40b..cb3e0011d0da 100644 --- a/searches/interpolation_search.py +++ b/searches/interpolation_search.py @@ -3,13 +3,41 @@ """ -def interpolation_search(sorted_collection, item): - """Pure implementation of interpolation search algorithm in Python - Be careful collection must be ascending sorted, otherwise result will be - unpredictable - :param sorted_collection: some ascending sorted collection with comparable items - :param item: item value to search - :return: 
index of found item or None if item is not found +def interpolation_search(sorted_collection: list[int], item: int) -> int | None: + """ + Searches for an item in a sorted collection by interpolation search algorithm. + + Args: + sorted_collection: sorted list of integers + item: item value to search + + Returns: + int: The index of the found item, or None if the item is not found. + Examples: + >>> interpolation_search([1, 2, 3, 4, 5], 2) + 1 + >>> interpolation_search([1, 2, 3, 4, 5], 4) + 3 + >>> interpolation_search([1, 2, 3, 4, 5], 6) is None + True + >>> interpolation_search([], 1) is None + True + >>> interpolation_search([100], 100) + 0 + >>> interpolation_search([1, 2, 3, 4, 5], 0) is None + True + >>> interpolation_search([1, 2, 3, 4, 5], 7) is None + True + >>> interpolation_search([1, 2, 3, 4, 5], 2) + 1 + >>> interpolation_search([1, 2, 3, 4, 5], 0) is None + True + >>> interpolation_search([1, 2, 3, 4, 5], 7) is None + True + >>> interpolation_search([1, 2, 3, 4, 5], 2) + 1 + >>> interpolation_search([5, 5, 5, 5, 5], 3) is None + True """ left = 0 right = len(sorted_collection) - 1 @@ -19,8 +47,7 @@ def interpolation_search(sorted_collection, item): if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left - else: - return None + return None point = left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] @@ -33,7 +60,7 @@ def interpolation_search(sorted_collection, item): current_item = sorted_collection[point] if current_item == item: return point - elif point < left: + if point < left: right = left left = point elif point > right: @@ -46,22 +73,42 @@ def interpolation_search(sorted_collection, item): return None -def interpolation_search_by_recursion(sorted_collection, item, left, right): +def interpolation_search_by_recursion( + sorted_collection: list[int], item: int, left: int = 0, right: int | None = None +) -> int | None: """Pure 
implementation of interpolation search algorithm in Python by recursion Be careful collection must be ascending sorted, otherwise result will be unpredictable First recursion should be started with left=0 and right=(len(sorted_collection)-1) - :param sorted_collection: some ascending sorted collection with comparable items - :param item: item value to search - :return: index of found item or None if item is not found - """ + Args: + sorted_collection: some sorted collection with comparable items + item: item value to search + left: left index in collection + right: right index in collection + + Returns: + index of item in collection or None if item is not present + + Examples: + >>> interpolation_search_by_recursion([0, 5, 7, 10, 15], 0) + 0 + >>> interpolation_search_by_recursion([0, 5, 7, 10, 15], 15) + 4 + >>> interpolation_search_by_recursion([0, 5, 7, 10, 15], 5) + 1 + >>> interpolation_search_by_recursion([0, 5, 7, 10, 15], 100) is None + True + >>> interpolation_search_by_recursion([5, 5, 5, 5, 5], 3) is None + True + """ + if right is None: + right = len(sorted_collection) - 1 # avoid divided by 0 during interpolation if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left - else: - return None + return None point = left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] @@ -73,64 +120,18 @@ def interpolation_search_by_recursion(sorted_collection, item, left, right): if sorted_collection[point] == item: return point - elif point < left: + if point < left: return interpolation_search_by_recursion(sorted_collection, item, point, left) - elif point > right: + if point > right: return interpolation_search_by_recursion(sorted_collection, item, right, left) - elif sorted_collection[point] > item: + if sorted_collection[point] > item: return interpolation_search_by_recursion( sorted_collection, item, left, point - 1 ) - else: - return 
interpolation_search_by_recursion( - sorted_collection, item, point + 1, right - ) - - -def __assert_sorted(collection): - """Check if collection is ascending sorted, if not - raises :py:class:`ValueError` - :param collection: collection - :return: True if collection is ascending sorted - :raise: :py:class:`ValueError` if collection is not ascending sorted - Examples: - >>> __assert_sorted([0, 1, 2, 4]) - True - >>> __assert_sorted([10, -1, 5]) - Traceback (most recent call last): - ... - ValueError: Collection must be ascending sorted - """ - if collection != sorted(collection): - raise ValueError("Collection must be ascending sorted") - return True + return interpolation_search_by_recursion(sorted_collection, item, point + 1, right) if __name__ == "__main__": - import sys + import doctest - """ - user_input = input('Enter numbers separated by comma:\n').strip() - collection = [int(item) for item in user_input.split(',')] - try: - __assert_sorted(collection) - except ValueError: - sys.exit('Sequence must be ascending sorted to apply interpolation search') - - target_input = input('Enter a single number to be found in the list:\n') - target = int(target_input) - """ - - debug = 0 - if debug == 1: - collection = [10, 30, 40, 45, 50, 66, 77, 93] - try: - __assert_sorted(collection) - except ValueError: - sys.exit("Sequence must be ascending sorted to apply interpolation search") - target = 67 - - result = interpolation_search(collection, target) - if result is not None: - print(f"{target} found at positions: {result}") - else: - print("Not found") + doctest.testmod() From 240d1b7cd47df86d86b26f4d658b26e3656a27d9 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 29 Jul 2024 21:41:09 +0200 Subject: [PATCH 099/260] [pre-commit.ci] pre-commit autoupdate (#11500) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: 
v0.5.4 → v0.5.5](https://github.com/astral-sh/ruff-pre-commit/compare/v0.5.4...v0.5.5) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e9f57a7b746a..09542dd7e255 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.5.4 + rev: v0.5.5 hooks: - id: ruff - id: ruff-format From dfe67954f7218703e3aadca1768a0ad4c97c73a1 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 6 Aug 2024 00:11:14 +0200 Subject: [PATCH 100/260] [pre-commit.ci] pre-commit autoupdate (#11507) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.5.5 → v0.5.6](https://github.com/astral-sh/ruff-pre-commit/compare/v0.5.5...v0.5.6) - [github.com/tox-dev/pyproject-fmt: 2.1.4 → 2.2.1](https://github.com/tox-dev/pyproject-fmt/compare/2.1.4...2.2.1) - [github.com/pre-commit/mirrors-mypy: v1.11.0 → v1.11.1](https://github.com/pre-commit/mirrors-mypy/compare/v1.11.0...v1.11.1) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 09542dd7e255..c112b6d86da0 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.5.5 + rev: v0.5.6 hooks: - id: ruff - id: ruff-format @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "2.1.4" + rev: "2.2.1" hooks: - id: pyproject-fmt @@ -47,7 +47,7 @@ repos: - id: 
validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.11.0 + rev: v1.11.1 hooks: - id: mypy args: From ed1900f1b37234f25486cfb3223988b3295a5549 Mon Sep 17 00:00:00 2001 From: CarlosZamG <54159355+CarlosZamG@users.noreply.github.com> Date: Tue, 6 Aug 2024 02:44:58 -0600 Subject: [PATCH 101/260] Fix typo in integration_by_simpson_approx.py (#11501) --- maths/numerical_analysis/integration_by_simpson_approx.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/maths/numerical_analysis/integration_by_simpson_approx.py b/maths/numerical_analysis/integration_by_simpson_approx.py index f77ae76135ee..934299997aac 100644 --- a/maths/numerical_analysis/integration_by_simpson_approx.py +++ b/maths/numerical_analysis/integration_by_simpson_approx.py @@ -4,7 +4,7 @@ Purpose : You have one function f(x) which takes float integer and returns float you have to integrate the function in limits a to b. -The approximation proposed by Thomas Simpsons in 1743 is one way to calculate +The approximation proposed by Thomas Simpson in 1743 is one way to calculate integration. 
( read article : https://cp-algorithms.com/num_methods/simpson-integration.html ) From 31c424fc8654877d3731bdcb50dcc1ce5d6860ab Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 12 Aug 2024 22:55:46 +0200 Subject: [PATCH 102/260] [pre-commit.ci] pre-commit autoupdate (#11515) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.5.6 → v0.5.7](https://github.com/astral-sh/ruff-pre-commit/compare/v0.5.6...v0.5.7) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c112b6d86da0..c797af6c5088 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.5.6 + rev: v0.5.7 hooks: - id: ruff - id: ruff-format From 48418280b1331d1efaa14dc48da62d313dfcee43 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Thu, 22 Aug 2024 09:42:40 -0700 Subject: [PATCH 103/260] Remove separate directory for `gaussian_elimination_pivoting.py` (#11445) * updating DIRECTORY.md * Remove separate directory for gaussian_elimination_pivoting.py Delete the directory linear_algebra/src/gaussian_elimination_pivoting/ and move its algorithm file, gaussian_elimination_pivoting.py, into the parent src/ directory. The gaussian_elimination_pivoting/ directory only exists because gaussian_elimination_pivoting.py reads an example numpy array from matrix.txt, but this input file and IO operation is entirely unnecessary because gaussian_elimination_pivoting.py already has the exact same array hard-coded into a variable. 
* updating DIRECTORY.md --------- Co-authored-by: tianyizheng02 --- DIRECTORY.md | 3 +- .../gaussian_elimination_pivoting.py | 33 ++++++++----------- .../gaussian_elimination_pivoting/__init__.py | 0 .../gaussian_elimination_pivoting/matrix.txt | 4 --- 4 files changed, 14 insertions(+), 26 deletions(-) rename linear_algebra/src/{gaussian_elimination_pivoting => }/gaussian_elimination_pivoting.py (83%) delete mode 100644 linear_algebra/src/gaussian_elimination_pivoting/__init__.py delete mode 100644 linear_algebra/src/gaussian_elimination_pivoting/matrix.txt diff --git a/DIRECTORY.md b/DIRECTORY.md index 54bb8f148c32..11de569a2c25 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -540,8 +540,7 @@ * [Lu Decomposition](linear_algebra/lu_decomposition.py) * Src * [Conjugate Gradient](linear_algebra/src/conjugate_gradient.py) - * Gaussian Elimination Pivoting - * [Gaussian Elimination Pivoting](linear_algebra/src/gaussian_elimination_pivoting/gaussian_elimination_pivoting.py) + * [Gaussian Elimination Pivoting](linear_algebra/src/gaussian_elimination_pivoting.py) * [Lib](linear_algebra/src/lib.py) * [Polynom For Points](linear_algebra/src/polynom_for_points.py) * [Power Iteration](linear_algebra/src/power_iteration.py) diff --git a/linear_algebra/src/gaussian_elimination_pivoting/gaussian_elimination_pivoting.py b/linear_algebra/src/gaussian_elimination_pivoting.py similarity index 83% rename from linear_algebra/src/gaussian_elimination_pivoting/gaussian_elimination_pivoting.py rename to linear_algebra/src/gaussian_elimination_pivoting.py index 2a86350e9fc6..ecaacce19a31 100644 --- a/linear_algebra/src/gaussian_elimination_pivoting/gaussian_elimination_pivoting.py +++ b/linear_algebra/src/gaussian_elimination_pivoting.py @@ -1,15 +1,5 @@ import numpy as np -matrix = np.array( - [ - [5.0, -5.0, -3.0, 4.0, -11.0], - [1.0, -4.0, 6.0, -4.0, -10.0], - [-2.0, -5.0, 4.0, -5.0, -12.0], - [-3.0, -3.0, 5.0, -5.0, 8.0], - ], - dtype=float, -) - def solve_linear_system(matrix: 
np.ndarray) -> np.ndarray: """ @@ -87,15 +77,18 @@ def solve_linear_system(matrix: np.ndarray) -> np.ndarray: if __name__ == "__main__": from doctest import testmod - from pathlib import Path testmod() - file_path = Path(__file__).parent / "matrix.txt" - try: - matrix = np.loadtxt(file_path) - except FileNotFoundError: - print(f"Error: {file_path} not found. Using default matrix instead.") - - # Example usage: - print(f"Matrix:\n{matrix}") - print(f"{solve_linear_system(matrix) = }") + + example_matrix = np.array( + [ + [5.0, -5.0, -3.0, 4.0, -11.0], + [1.0, -4.0, 6.0, -4.0, -10.0], + [-2.0, -5.0, 4.0, -5.0, -12.0], + [-3.0, -3.0, 5.0, -5.0, 8.0], + ], + dtype=float, + ) + + print(f"Matrix:\n{example_matrix}") + print(f"{solve_linear_system(example_matrix) = }") diff --git a/linear_algebra/src/gaussian_elimination_pivoting/__init__.py b/linear_algebra/src/gaussian_elimination_pivoting/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/linear_algebra/src/gaussian_elimination_pivoting/matrix.txt b/linear_algebra/src/gaussian_elimination_pivoting/matrix.txt deleted file mode 100644 index dd895ad856ee..000000000000 --- a/linear_algebra/src/gaussian_elimination_pivoting/matrix.txt +++ /dev/null @@ -1,4 +0,0 @@ -5.0 -5.0 -3.0 4.0 -11.0 -1.0 -4.0 6.0 -4.0 -10.0 --2.0 -5.0 4.0 -5.0 -12.0 --3.0 -3.0 5.0 -5.0 8.0 \ No newline at end of file From e3fa014a5ab4887f93aae7bb193b152bb155323a Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sun, 25 Aug 2024 18:33:11 +0300 Subject: [PATCH 104/260] Fix ruff (#11527) * updating DIRECTORY.md * Fix ruff * Fix * Fix * Fix * Revert "Fix" This reverts commit 5bc3bf342208dd707da02dea7173c059317b6bc6. 
* find_max.py: noqa: PLR1730 --------- Co-authored-by: MaximSmolskiy Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 2 +- .../binary_tree/number_of_possible_binary_trees.py | 3 +-- divide_and_conquer/closest_pair_of_points.py | 6 ++---- graphs/kahns_algorithm_long.py | 3 +-- maths/find_max.py | 2 +- maths/special_numbers/bell_numbers.py | 3 +-- matrix/tests/test_matrix_operation.py | 12 ++++++------ project_euler/problem_008/sol1.py | 3 +-- project_euler/problem_009/sol2.py | 3 +-- project_euler/problem_011/sol1.py | 3 +-- project_euler/problem_011/sol2.py | 12 ++++-------- scheduling/highest_response_ratio_next.py | 3 +-- scheduling/shortest_job_first.py | 3 +-- 13 files changed, 22 insertions(+), 36 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c797af6c5088..06f8ba00494a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.5.7 + rev: v0.6.2 hooks: - id: ruff - id: ruff-format diff --git a/data_structures/binary_tree/number_of_possible_binary_trees.py b/data_structures/binary_tree/number_of_possible_binary_trees.py index 1c3dff37e7d9..b39cbafd0a61 100644 --- a/data_structures/binary_tree/number_of_possible_binary_trees.py +++ b/data_structures/binary_tree/number_of_possible_binary_trees.py @@ -31,8 +31,7 @@ def binomial_coefficient(n: int, k: int) -> int: """ result = 1 # To kept the Calculated Value # Since C(n, k) = C(n, n-k) - if k > (n - k): - k = n - k + k = min(k, n - k) # Calculate C(n,k) for i in range(k): result *= n - i diff --git a/divide_and_conquer/closest_pair_of_points.py b/divide_and_conquer/closest_pair_of_points.py index cb7fa00d1c8f..534cbba9b718 100644 --- a/divide_and_conquer/closest_pair_of_points.py +++ b/divide_and_conquer/closest_pair_of_points.py @@ -54,8 +54,7 @@ def dis_between_closest_pair(points, points_counts, min_dis=float("inf")): for i in range(points_counts - 
1): for j in range(i + 1, points_counts): current_dis = euclidean_distance_sqr(points[i], points[j]) - if current_dis < min_dis: - min_dis = current_dis + min_dis = min(min_dis, current_dis) return min_dis @@ -76,8 +75,7 @@ def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")): for i in range(min(6, points_counts - 1), points_counts): for j in range(max(0, i - 6), i): current_dis = euclidean_distance_sqr(points[i], points[j]) - if current_dis < min_dis: - min_dis = current_dis + min_dis = min(min_dis, current_dis) return min_dis diff --git a/graphs/kahns_algorithm_long.py b/graphs/kahns_algorithm_long.py index 63cbeb909a8a..1f16b90c0745 100644 --- a/graphs/kahns_algorithm_long.py +++ b/graphs/kahns_algorithm_long.py @@ -17,8 +17,7 @@ def longest_distance(graph): for x in graph[vertex]: indegree[x] -= 1 - if long_dist[vertex] + 1 > long_dist[x]: - long_dist[x] = long_dist[vertex] + 1 + long_dist[x] = max(long_dist[x], long_dist[vertex] + 1) if indegree[x] == 0: queue.append(x) diff --git a/maths/find_max.py b/maths/find_max.py index 729a80ab421c..4765d300634e 100644 --- a/maths/find_max.py +++ b/maths/find_max.py @@ -20,7 +20,7 @@ def find_max_iterative(nums: list[int | float]) -> int | float: raise ValueError("find_max_iterative() arg is an empty sequence") max_num = nums[0] for x in nums: - if x > max_num: + if x > max_num: # noqa: PLR1730 max_num = x return max_num diff --git a/maths/special_numbers/bell_numbers.py b/maths/special_numbers/bell_numbers.py index 660ec6e6aa09..5d99334d7add 100644 --- a/maths/special_numbers/bell_numbers.py +++ b/maths/special_numbers/bell_numbers.py @@ -61,8 +61,7 @@ def _binomial_coefficient(total_elements: int, elements_to_choose: int) -> int: if elements_to_choose in {0, total_elements}: return 1 - if elements_to_choose > total_elements - elements_to_choose: - elements_to_choose = total_elements - elements_to_choose + elements_to_choose = min(elements_to_choose, total_elements - elements_to_choose) 
coefficient = 1 for i in range(elements_to_choose): diff --git a/matrix/tests/test_matrix_operation.py b/matrix/tests/test_matrix_operation.py index addc870ca205..21ed7e371fd8 100644 --- a/matrix/tests/test_matrix_operation.py +++ b/matrix/tests/test_matrix_operation.py @@ -31,7 +31,7 @@ logger.addHandler(stream_handler) -@pytest.mark.mat_ops() +@pytest.mark.mat_ops @pytest.mark.parametrize( ("mat1", "mat2"), [(mat_a, mat_b), (mat_c, mat_d), (mat_d, mat_e), (mat_f, mat_h)] ) @@ -51,7 +51,7 @@ def test_addition(mat1, mat2): matop.add(mat1, mat2) -@pytest.mark.mat_ops() +@pytest.mark.mat_ops @pytest.mark.parametrize( ("mat1", "mat2"), [(mat_a, mat_b), (mat_c, mat_d), (mat_d, mat_e), (mat_f, mat_h)] ) @@ -71,7 +71,7 @@ def test_subtraction(mat1, mat2): assert matop.subtract(mat1, mat2) -@pytest.mark.mat_ops() +@pytest.mark.mat_ops @pytest.mark.parametrize( ("mat1", "mat2"), [(mat_a, mat_b), (mat_c, mat_d), (mat_d, mat_e), (mat_f, mat_h)] ) @@ -93,21 +93,21 @@ def test_multiplication(mat1, mat2): assert matop.subtract(mat1, mat2) -@pytest.mark.mat_ops() +@pytest.mark.mat_ops def test_scalar_multiply(): act = (3.5 * np.array(mat_a)).tolist() theo = matop.scalar_multiply(mat_a, 3.5) assert theo == act -@pytest.mark.mat_ops() +@pytest.mark.mat_ops def test_identity(): act = (np.identity(5)).tolist() theo = matop.identity(5) assert theo == act -@pytest.mark.mat_ops() +@pytest.mark.mat_ops @pytest.mark.parametrize("mat", [mat_a, mat_b, mat_c, mat_d, mat_e, mat_f]) def test_transpose(mat): if (np.array(mat)).shape < (2, 2): diff --git a/project_euler/problem_008/sol1.py b/project_euler/problem_008/sol1.py index adbac8d5ad1f..a38b2045f996 100644 --- a/project_euler/problem_008/sol1.py +++ b/project_euler/problem_008/sol1.py @@ -75,8 +75,7 @@ def solution(n: str = N) -> int: product = 1 for j in range(13): product *= int(n[i + j]) - if product > largest_product: - largest_product = product + largest_product = max(largest_product, product) return largest_product diff --git 
a/project_euler/problem_009/sol2.py b/project_euler/problem_009/sol2.py index 722ad522ee45..443a529571cc 100644 --- a/project_euler/problem_009/sol2.py +++ b/project_euler/problem_009/sol2.py @@ -39,8 +39,7 @@ def solution(n: int = 1000) -> int: c = n - a - b if c * c == (a * a + b * b): candidate = a * b * c - if candidate >= product: - product = candidate + product = max(product, candidate) return product diff --git a/project_euler/problem_011/sol1.py b/project_euler/problem_011/sol1.py index ad45f0983a7c..3d3e864f927b 100644 --- a/project_euler/problem_011/sol1.py +++ b/project_euler/problem_011/sol1.py @@ -63,8 +63,7 @@ def largest_product(grid): max_product = max( vert_product, horz_product, lr_diag_product, rl_diag_product ) - if max_product > largest: - largest = max_product + largest = max(largest, max_product) return largest diff --git a/project_euler/problem_011/sol2.py b/project_euler/problem_011/sol2.py index 09bf315702c5..7637deafc3cb 100644 --- a/project_euler/problem_011/sol2.py +++ b/project_euler/problem_011/sol2.py @@ -45,15 +45,13 @@ def solution(): for i in range(20): for j in range(17): temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3] - if temp > maximum: - maximum = temp + maximum = max(maximum, temp) # down for i in range(17): for j in range(20): temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j] - if temp > maximum: - maximum = temp + maximum = max(maximum, temp) # diagonal 1 for i in range(17): @@ -64,8 +62,7 @@ def solution(): * grid[i + 2][j + 2] * grid[i + 3][j + 3] ) - if temp > maximum: - maximum = temp + maximum = max(maximum, temp) # diagonal 2 for i in range(17): @@ -76,8 +73,7 @@ def solution(): * grid[i + 2][j - 2] * grid[i + 3][j - 3] ) - if temp > maximum: - maximum = temp + maximum = max(maximum, temp) return maximum diff --git a/scheduling/highest_response_ratio_next.py b/scheduling/highest_response_ratio_next.py index b549835616bf..f858be2ee44a 100644 --- 
a/scheduling/highest_response_ratio_next.py +++ b/scheduling/highest_response_ratio_next.py @@ -46,8 +46,7 @@ def calculate_turn_around_time( i = 0 while finished_process[i] == 1: i += 1 - if current_time < arrival_time[i]: - current_time = arrival_time[i] + current_time = max(current_time, arrival_time[i]) response_ratio = 0 # Index showing the location of the process being performed diff --git a/scheduling/shortest_job_first.py b/scheduling/shortest_job_first.py index 6899ec87c591..91012ee3ac35 100644 --- a/scheduling/shortest_job_first.py +++ b/scheduling/shortest_job_first.py @@ -66,8 +66,7 @@ def calculate_waitingtime( finar = finish_time - arrival_time[short] waiting_time[short] = finar - burst_time[short] - if waiting_time[short] < 0: - waiting_time[short] = 0 + waiting_time[short] = max(waiting_time[short], 0) # Increment time increment_time += 1 From c8e131b86c35c8fa4ca14aa85edbd4a106575882 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 26 Aug 2024 21:49:42 +0200 Subject: [PATCH 105/260] [pre-commit.ci] pre-commit autoupdate (#11522) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/abravalheri/validate-pyproject: v0.18 → v0.19](https://github.com/abravalheri/validate-pyproject/compare/v0.18...v0.19) - [github.com/pre-commit/mirrors-mypy: v1.11.1 → v1.11.2](https://github.com/pre-commit/mirrors-mypy/compare/v1.11.1...v1.11.2) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 06f8ba00494a..2724dff230e7 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -42,12 +42,12 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.18 + rev: v0.19 hooks: - id: validate-pyproject - 
repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.11.1 + rev: v1.11.2 hooks: - id: mypy args: From bd8085cfc18784a21d792a44dcd683e11e802c6b Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 2 Sep 2024 21:41:55 +0200 Subject: [PATCH 106/260] [pre-commit.ci] pre-commit autoupdate (#11535) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.6.2 → v0.6.3](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.2...v0.6.3) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 2724dff230e7..e363197497ac 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.2 + rev: v0.6.3 hooks: - id: ruff - id: ruff-format From f16d38f26f13683cf3ea75caf0474dedde059b86 Mon Sep 17 00:00:00 2001 From: Ramy <126559907+Ramy-Badr-Ahmed@users.noreply.github.com> Date: Tue, 3 Sep 2024 14:39:09 +0200 Subject: [PATCH 107/260] kd tree data structure implementation (#11532) * Implemented KD-Tree Data Structure * Implemented KD-Tree Data Structure. updated DIRECTORY.md. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Create __init__.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Replaced legacy `np.random.rand` call with `np.random.Generator` in kd_tree/example_usage.py * Replaced legacy `np.random.rand` call with `np.random.Generator` in kd_tree/hypercube_points.py * added typehints and docstrings * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * docstring for search() * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added tests. Updated docstrings/typehints * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated tests and used | for type annotations * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * E501 for build_kdtree.py, hypercube_points.py, nearest_neighbour_search.py * I001 for example_usage.py and test_kdtree.py * I001 for example_usage.py and test_kdtree.py * Update data_structures/kd_tree/build_kdtree.py Co-authored-by: Christian Clauss * Update data_structures/kd_tree/example/hypercube_points.py Co-authored-by: Christian Clauss * Update data_structures/kd_tree/example/hypercube_points.py Co-authored-by: Christian Clauss * Added new test cases requested in Review. Refactored the test_build_kdtree() to include various checks. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Considered ruff errors * Considered ruff errors * Apply suggestions from code review * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update kd_node.py * imported annotations from __future__ * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 6 ++ data_structures/kd_tree/__init__.py | 0 data_structures/kd_tree/build_kdtree.py | 35 ++++++ data_structures/kd_tree/example/__init__.py | 0 .../kd_tree/example/example_usage.py | 38 +++++++ .../kd_tree/example/hypercube_points.py | 21 ++++ data_structures/kd_tree/kd_node.py | 30 ++++++ .../kd_tree/nearest_neighbour_search.py | 71 +++++++++++++ data_structures/kd_tree/tests/__init__.py | 0 data_structures/kd_tree/tests/test_kdtree.py | 100 ++++++++++++++++++ 10 files changed, 301 insertions(+) create mode 100644 data_structures/kd_tree/__init__.py create mode 100644 data_structures/kd_tree/build_kdtree.py create mode 100644 data_structures/kd_tree/example/__init__.py create mode 100644 data_structures/kd_tree/example/example_usage.py create mode 100644 data_structures/kd_tree/example/hypercube_points.py create mode 100644 data_structures/kd_tree/kd_node.py create mode 100644 data_structures/kd_tree/nearest_neighbour_search.py create mode 100644 data_structures/kd_tree/tests/__init__.py create mode 100644 data_structures/kd_tree/tests/test_kdtree.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 11de569a2c25..1ca537b991c8 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -285,6 +285,12 @@ * Trie * [Radix Tree](data_structures/trie/radix_tree.py) * [Trie](data_structures/trie/trie.py) + * KD Tree + * [KD Tree Node](data_structures/kd_tree/kd_node.py) + * [Build KD 
Tree](data_structures/kd_tree/build_kdtree.py) + * [Nearest Neighbour Search](data_structures/kd_tree/nearest_neighbour_search.py) + * [Hypercibe Points](data_structures/kd_tree/example/hypercube_points.py) + * [Example Usage](data_structures/kd_tree/example/example_usage.py) ## Digital Image Processing * [Change Brightness](digital_image_processing/change_brightness.py) diff --git a/data_structures/kd_tree/__init__.py b/data_structures/kd_tree/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/data_structures/kd_tree/build_kdtree.py b/data_structures/kd_tree/build_kdtree.py new file mode 100644 index 000000000000..c5b800a2c992 --- /dev/null +++ b/data_structures/kd_tree/build_kdtree.py @@ -0,0 +1,35 @@ +from data_structures.kd_tree.kd_node import KDNode + + +def build_kdtree(points: list[list[float]], depth: int = 0) -> KDNode | None: + """ + Builds a KD-Tree from a list of points. + + Args: + points: The list of points to build the KD-Tree from. + depth: The current depth in the tree + (used to determine axis for splitting). + + Returns: + The root node of the KD-Tree, + or None if no points are provided. 
+ """ + if not points: + return None + + k = len(points[0]) # Dimensionality of the points + axis = depth % k + + # Sort point list and choose median as pivot element + points.sort(key=lambda point: point[axis]) + median_idx = len(points) // 2 + + # Create node and construct subtrees + left_points = points[:median_idx] + right_points = points[median_idx + 1 :] + + return KDNode( + point=points[median_idx], + left=build_kdtree(left_points, depth + 1), + right=build_kdtree(right_points, depth + 1), + ) diff --git a/data_structures/kd_tree/example/__init__.py b/data_structures/kd_tree/example/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/data_structures/kd_tree/example/example_usage.py b/data_structures/kd_tree/example/example_usage.py new file mode 100644 index 000000000000..e270f0cdd245 --- /dev/null +++ b/data_structures/kd_tree/example/example_usage.py @@ -0,0 +1,38 @@ +import numpy as np + +from data_structures.kd_tree.build_kdtree import build_kdtree +from data_structures.kd_tree.example.hypercube_points import hypercube_points +from data_structures.kd_tree.nearest_neighbour_search import nearest_neighbour_search + + +def main() -> None: + """ + Demonstrates the use of KD-Tree by building it from random points + in a 10-dimensional hypercube and performing a nearest neighbor search. 
+ """ + num_points: int = 5000 + cube_size: float = 10.0 # Size of the hypercube (edge length) + num_dimensions: int = 10 + + # Generate random points within the hypercube + points: np.ndarray = hypercube_points(num_points, cube_size, num_dimensions) + hypercube_kdtree = build_kdtree(points.tolist()) + + # Generate a random query point within the same space + rng = np.random.default_rng() + query_point: list[float] = rng.random(num_dimensions).tolist() + + # Perform nearest neighbor search + nearest_point, nearest_dist, nodes_visited = nearest_neighbour_search( + hypercube_kdtree, query_point + ) + + # Print the results + print(f"Query point: {query_point}") + print(f"Nearest point: {nearest_point}") + print(f"Distance: {nearest_dist:.4f}") + print(f"Nodes visited: {nodes_visited}") + + +if __name__ == "__main__": + main() diff --git a/data_structures/kd_tree/example/hypercube_points.py b/data_structures/kd_tree/example/hypercube_points.py new file mode 100644 index 000000000000..2d8800ac9338 --- /dev/null +++ b/data_structures/kd_tree/example/hypercube_points.py @@ -0,0 +1,21 @@ +import numpy as np + + +def hypercube_points( + num_points: int, hypercube_size: float, num_dimensions: int +) -> np.ndarray: + """ + Generates random points uniformly distributed within an n-dimensional hypercube. + + Args: + num_points: Number of points to generate. + hypercube_size: Size of the hypercube. + num_dimensions: Number of dimensions of the hypercube. + + Returns: + An array of shape (num_points, num_dimensions) + with generated points. + """ + rng = np.random.default_rng() + shape = (num_points, num_dimensions) + return hypercube_size * rng.random(shape) diff --git a/data_structures/kd_tree/kd_node.py b/data_structures/kd_tree/kd_node.py new file mode 100644 index 000000000000..e1011027938d --- /dev/null +++ b/data_structures/kd_tree/kd_node.py @@ -0,0 +1,30 @@ +from __future__ import annotations + + +class KDNode: + """ + Represents a node in a KD-Tree. 
+ + Attributes: + point: The point stored in this node. + left: The left child node. + right: The right child node. + """ + + def __init__( + self, + point: list[float], + left: KDNode | None = None, + right: KDNode | None = None, + ) -> None: + """ + Initializes a KDNode with the given point and child nodes. + + Args: + point (list[float]): The point stored in this node. + left (Optional[KDNode]): The left child node. + right (Optional[KDNode]): The right child node. + """ + self.point = point + self.left = left + self.right = right diff --git a/data_structures/kd_tree/nearest_neighbour_search.py b/data_structures/kd_tree/nearest_neighbour_search.py new file mode 100644 index 000000000000..d9727736f21c --- /dev/null +++ b/data_structures/kd_tree/nearest_neighbour_search.py @@ -0,0 +1,71 @@ +from data_structures.kd_tree.kd_node import KDNode + + +def nearest_neighbour_search( + root: KDNode | None, query_point: list[float] +) -> tuple[list[float] | None, float, int]: + """ + Performs a nearest neighbor search in a KD-Tree for a given query point. + + Args: + root (KDNode | None): The root node of the KD-Tree. + query_point (list[float]): The point for which the nearest neighbor + is being searched. + + Returns: + tuple[list[float] | None, float, int]: + - The nearest point found in the KD-Tree to the query point, + or None if no point is found. + - The squared distance to the nearest point. + - The number of nodes visited during the search. + """ + nearest_point: list[float] | None = None + nearest_dist: float = float("inf") + nodes_visited: int = 0 + + def search(node: KDNode | None, depth: int = 0) -> None: + """ + Recursively searches for the nearest neighbor in the KD-Tree. + + Args: + node: The current node in the KD-Tree. + depth: The current depth in the KD-Tree. 
+ """ + nonlocal nearest_point, nearest_dist, nodes_visited + if node is None: + return + + nodes_visited += 1 + + # Calculate the current distance (squared distance) + current_point = node.point + current_dist = sum( + (query_coord - point_coord) ** 2 + for query_coord, point_coord in zip(query_point, current_point) + ) + + # Update nearest point if the current node is closer + if nearest_point is None or current_dist < nearest_dist: + nearest_point = current_point + nearest_dist = current_dist + + # Determine which subtree to search first (based on axis and query point) + k = len(query_point) # Dimensionality of points + axis = depth % k + + if query_point[axis] <= current_point[axis]: + nearer_subtree = node.left + further_subtree = node.right + else: + nearer_subtree = node.right + further_subtree = node.left + + # Search the nearer subtree first + search(nearer_subtree, depth + 1) + + # If the further subtree has a closer point + if (query_point[axis] - current_point[axis]) ** 2 < nearest_dist: + search(further_subtree, depth + 1) + + search(root, 0) + return nearest_point, nearest_dist, nodes_visited diff --git a/data_structures/kd_tree/tests/__init__.py b/data_structures/kd_tree/tests/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/data_structures/kd_tree/tests/test_kdtree.py b/data_structures/kd_tree/tests/test_kdtree.py new file mode 100644 index 000000000000..81f2cc990074 --- /dev/null +++ b/data_structures/kd_tree/tests/test_kdtree.py @@ -0,0 +1,100 @@ +import numpy as np +import pytest + +from data_structures.kd_tree.build_kdtree import build_kdtree +from data_structures.kd_tree.example.hypercube_points import hypercube_points +from data_structures.kd_tree.kd_node import KDNode +from data_structures.kd_tree.nearest_neighbour_search import nearest_neighbour_search + + +@pytest.mark.parametrize( + ("num_points", "cube_size", "num_dimensions", "depth", "expected_result"), + [ + (0, 10.0, 2, 0, None), # Empty points list + 
(10, 10.0, 2, 2, KDNode), # Depth = 2, 2D points + (10, 10.0, 3, -2, KDNode), # Depth = -2, 3D points + ], +) +def test_build_kdtree(num_points, cube_size, num_dimensions, depth, expected_result): + """ + Test that KD-Tree is built correctly. + + Cases: + - Empty points list. + - Positive depth value. + - Negative depth value. + """ + points = ( + hypercube_points(num_points, cube_size, num_dimensions).tolist() + if num_points > 0 + else [] + ) + + kdtree = build_kdtree(points, depth=depth) + + if expected_result is None: + # Empty points list case + assert kdtree is None, f"Expected None for empty points list, got {kdtree}" + else: + # Check if root node is not None + assert kdtree is not None, "Expected a KDNode, got None" + + # Check if root has correct dimensions + assert ( + len(kdtree.point) == num_dimensions + ), f"Expected point dimension {num_dimensions}, got {len(kdtree.point)}" + + # Check that the tree is balanced to some extent (simplistic check) + assert isinstance( + kdtree, KDNode + ), f"Expected KDNode instance, got {type(kdtree)}" + + +def test_nearest_neighbour_search(): + """ + Test the nearest neighbor search function. + """ + num_points = 10 + cube_size = 10.0 + num_dimensions = 2 + points = hypercube_points(num_points, cube_size, num_dimensions) + kdtree = build_kdtree(points.tolist()) + + rng = np.random.default_rng() + query_point = rng.random(num_dimensions).tolist() + + nearest_point, nearest_dist, nodes_visited = nearest_neighbour_search( + kdtree, query_point + ) + + # Check that nearest point is not None + assert nearest_point is not None + + # Check that distance is a non-negative number + assert nearest_dist >= 0 + + # Check that nodes visited is a non-negative integer + assert nodes_visited >= 0 + + +def test_edge_cases(): + """ + Test edge cases such as an empty KD-Tree. 
+ """ + empty_kdtree = build_kdtree([]) + query_point = [0.0] * 2 # Using a default 2D query point + + nearest_point, nearest_dist, nodes_visited = nearest_neighbour_search( + empty_kdtree, query_point + ) + + # With an empty KD-Tree, nearest_point should be None + assert nearest_point is None + assert nearest_dist == float("inf") + assert nodes_visited == 0 + + +if __name__ == "__main__": + import pytest + + pytest.main() From 729c1f923bb621ed246983a5d3309135c3b1fc8c Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 9 Sep 2024 22:15:17 +0200 Subject: [PATCH 108/260] [pre-commit.ci] pre-commit autoupdate (#11557) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.6.3 → v0.6.4](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.3...v0.6.4) - [github.com/tox-dev/pyproject-fmt: 2.2.1 → 2.2.3](https://github.com/tox-dev/pyproject-fmt/compare/2.2.1...2.2.3) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 15 +++++++++------ 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e363197497ac..ff76e87a3aa1 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.3 + rev: v0.6.4 hooks: - id: ruff - id: ruff-format @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "2.2.1" + rev: "2.2.3" hooks: - id: pyproject-fmt diff --git a/DIRECTORY.md b/DIRECTORY.md index 1ca537b991c8..e965d3b32ccf 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -243,6 +243,15 @@ * [Min 
Heap](data_structures/heap/min_heap.py) * [Randomized Heap](data_structures/heap/randomized_heap.py) * [Skew Heap](data_structures/heap/skew_heap.py) + * Kd Tree + * [Build Kdtree](data_structures/kd_tree/build_kdtree.py) + * Example + * [Example Usage](data_structures/kd_tree/example/example_usage.py) + * [Hypercube Points](data_structures/kd_tree/example/hypercube_points.py) + * [Kd Node](data_structures/kd_tree/kd_node.py) + * [Nearest Neighbour Search](data_structures/kd_tree/nearest_neighbour_search.py) + * Tests + * [Test Kdtree](data_structures/kd_tree/tests/test_kdtree.py) * Linked List * [Circular Linked List](data_structures/linked_list/circular_linked_list.py) * [Deque Doubly](data_structures/linked_list/deque_doubly.py) @@ -285,12 +294,6 @@ * Trie * [Radix Tree](data_structures/trie/radix_tree.py) * [Trie](data_structures/trie/trie.py) - * KD Tree - * [KD Tree Node](data_structures/kd_tree/kd_node.py) - * [Build KD Tree](data_structures/kd_tree/build_kdtree.py) - * [Nearest Neighbour Search](data_structures/kd_tree/nearest_neighbour_search.py) - * [Hypercibe Points](data_structures/kd_tree/example/hypercube_points.py) - * [Example Usage](data_structures/kd_tree/example/example_usage.py) ## Digital Image Processing * [Change Brightness](digital_image_processing/change_brightness.py) From 77bbe584216c0925e249e0baab77fef34561ecaa Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 17 Sep 2024 00:14:55 +0200 Subject: [PATCH 109/260] [pre-commit.ci] pre-commit autoupdate (#11568) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.6.4 → v0.6.5](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.4...v0.6.5) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ff76e87a3aa1..a4a45686537d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.4 + rev: v0.6.5 hooks: - id: ruff - id: ruff-format From 50cc00bb2da26fd234dabdfa7f93c96d6b7d72d5 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 23 Sep 2024 21:45:14 +0200 Subject: [PATCH 110/260] [pre-commit.ci] pre-commit autoupdate (#11579) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.6.5 → v0.6.7](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.5...v0.6.7) - [github.com/tox-dev/pyproject-fmt: 2.2.3 → 2.2.4](https://github.com/tox-dev/pyproject-fmt/compare/2.2.3...2.2.4) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a4a45686537d..7b219597f7b6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.5 + rev: v0.6.7 hooks: - id: ruff - id: ruff-format @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "2.2.3" + rev: "2.2.4" hooks: - id: pyproject-fmt From 9b5641d2d333d04eb474ecbcb15c40ccf18a3d7b Mon Sep 17 00:00:00 2001 From: apples53 Date: Tue, 24 Sep 2024 13:00:36 +0530 Subject: [PATCH 111/260] balance parenthesis (add closing bracket) (#11563) * balance parenthesis (add closing bracket) * Apply suggestions from code review --------- Co-authored-by: Tianyi Zheng --- fuzzy_logic/fuzzy_operations.py.DISABLED.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/fuzzy_logic/fuzzy_operations.py.DISABLED.txt b/fuzzy_logic/fuzzy_operations.py.DISABLED.txt index 0786ef8b0c67..67fd587f4baf 100644 --- a/fuzzy_logic/fuzzy_operations.py.DISABLED.txt +++ b/fuzzy_logic/fuzzy_operations.py.DISABLED.txt @@ -28,7 +28,7 @@ if __name__ == "__main__": union = fuzz.fuzzy_or(X, young, X, middle_aged)[1] # 2. Intersection = min(µA(x), µB(x)) intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1] - # 3. Complement (A) = (1- min(µA(x)) + # 3. Complement (A) = (1 - min(µA(x))) complement_a = fuzz.fuzzy_not(young) # 4. Difference (A/B) = min(µA(x),(1- µB(x))) difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1] From 976e385c1d9df92c075575125475b22c423205b9 Mon Sep 17 00:00:00 2001 From: Ramy Date: Sat, 28 Sep 2024 15:37:00 +0200 Subject: [PATCH 112/260] Implemented Suffix Tree Data Structure (#11554) * Implemented KD-Tree Data Structure * Implemented KD-Tree Data Structure. updated DIRECTORY.md. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Create __init__.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Replaced legacy `np.random.rand` call with `np.random.Generator` in kd_tree/example_usage.py * Replaced legacy `np.random.rand` call with `np.random.Generator` in kd_tree/hypercube_points.py * added typehints and docstrings * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * docstring for search() * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added tests. 
Updated docstrings/typehints * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated tests and used | for type annotations * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * E501 for build_kdtree.py, hypercube_points.py, nearest_neighbour_search.py * I001 for example_usage.py and test_kdtree.py * I001 for example_usage.py and test_kdtree.py * Update data_structures/kd_tree/build_kdtree.py Co-authored-by: Christian Clauss * Update data_structures/kd_tree/example/hypercube_points.py Co-authored-by: Christian Clauss * Update data_structures/kd_tree/example/hypercube_points.py Co-authored-by: Christian Clauss * Added new test cases requested in Review. Refactored the test_build_kdtree() to include various checks. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Considered ruff errors * Considered ruff errors * Apply suggestions from code review * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update kd_node.py * imported annotations from __future__ * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Implementation of the suffix tree data structure * Adding data to DIRECTORY.md * Minor file renaming * minor correction * renaming in DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Considering ruff part-1 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Considering ruff part-2 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Considering ruff part-3 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Considering ruff part-4 * [pre-commit.ci] auto fixes from pre-commit.com 
hooks for more information, see https://pre-commit.ci * Considering ruff part-5 * Implemented Suffix Tree Data Structure. Added some comments to my files in #11532, #11554. * updating DIRECTORY.md * Implemented Suffix Tree Data Structure. Added some comments to my files in #11532, #11554. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: Ramy-Badr-Ahmed --- DIRECTORY.md | 7 ++ data_structures/kd_tree/build_kdtree.py | 8 +++ .../kd_tree/example/example_usage.py | 8 +++ .../kd_tree/example/hypercube_points.py | 8 +++ data_structures/kd_tree/kd_node.py | 8 +++ .../kd_tree/nearest_neighbour_search.py | 8 +++ data_structures/kd_tree/tests/test_kdtree.py | 8 +++ data_structures/suffix_tree/__init__.py | 0 .../suffix_tree/example/__init__.py | 0 .../suffix_tree/example/example_usage.py | 37 +++++++++++ data_structures/suffix_tree/suffix_tree.py | 66 +++++++++++++++++++ .../suffix_tree/suffix_tree_node.py | 36 ++++++++++ data_structures/suffix_tree/tests/__init__.py | 0 .../suffix_tree/tests/test_suffix_tree.py | 59 +++++++++++++++++ 14 files changed, 253 insertions(+) create mode 100644 data_structures/suffix_tree/__init__.py create mode 100644 data_structures/suffix_tree/example/__init__.py create mode 100644 data_structures/suffix_tree/example/example_usage.py create mode 100644 data_structures/suffix_tree/suffix_tree.py create mode 100644 data_structures/suffix_tree/suffix_tree_node.py create mode 100644 data_structures/suffix_tree/tests/__init__.py create mode 100644 data_structures/suffix_tree/tests/test_suffix_tree.py diff --git a/DIRECTORY.md b/DIRECTORY.md index e965d3b32ccf..955001e2aa23 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -291,6 +291,13 @@ * [Stack With Doubly Linked List](data_structures/stacks/stack_with_doubly_linked_list.py) * [Stack With Singly Linked List](data_structures/stacks/stack_with_singly_linked_list.py) * [Stock Span 
Problem](data_structures/stacks/stock_span_problem.py) + * Suffix Tree + * Example + * [Example Usage](data_structures/suffix_tree/example/example_usage.py) + * [Suffix Tree](data_structures/suffix_tree/suffix_tree.py) + * [Suffix Tree Node](data_structures/suffix_tree/suffix_tree_node.py) + * Tests + * [Test Suffix Tree](data_structures/suffix_tree/tests/test_suffix_tree.py) * Trie * [Radix Tree](data_structures/trie/radix_tree.py) * [Trie](data_structures/trie/trie.py) diff --git a/data_structures/kd_tree/build_kdtree.py b/data_structures/kd_tree/build_kdtree.py index c5b800a2c992..074a5dac4d42 100644 --- a/data_structures/kd_tree/build_kdtree.py +++ b/data_structures/kd_tree/build_kdtree.py @@ -1,3 +1,11 @@ +# Created by: Ramy-Badr-Ahmed (https://github.com/Ramy-Badr-Ahmed) +# in Pull Request: #11532 +# https://github.com/TheAlgorithms/Python/pull/11532 +# +# Please mention me (@Ramy-Badr-Ahmed) in any issue or pull request +# addressing bugs/corrections to this file. +# Thank you! + from data_structures.kd_tree.kd_node import KDNode diff --git a/data_structures/kd_tree/example/example_usage.py b/data_structures/kd_tree/example/example_usage.py index e270f0cdd245..892c3b8c4a2a 100644 --- a/data_structures/kd_tree/example/example_usage.py +++ b/data_structures/kd_tree/example/example_usage.py @@ -1,3 +1,11 @@ +# Created by: Ramy-Badr-Ahmed (https://github.com/Ramy-Badr-Ahmed) +# in Pull Request: #11532 +# https://github.com/TheAlgorithms/Python/pull/11532 +# +# Please mention me (@Ramy-Badr-Ahmed) in any issue or pull request +# addressing bugs/corrections to this file. +# Thank you! 
+ import numpy as np from data_structures.kd_tree.build_kdtree import build_kdtree diff --git a/data_structures/kd_tree/example/hypercube_points.py b/data_structures/kd_tree/example/hypercube_points.py index 2d8800ac9338..66744856e6d5 100644 --- a/data_structures/kd_tree/example/hypercube_points.py +++ b/data_structures/kd_tree/example/hypercube_points.py @@ -1,3 +1,11 @@ +# Created by: Ramy-Badr-Ahmed (https://github.com/Ramy-Badr-Ahmed) +# in Pull Request: #11532 +# https://github.com/TheAlgorithms/Python/pull/11532 +# +# Please mention me (@Ramy-Badr-Ahmed) in any issue or pull request +# addressing bugs/corrections to this file. +# Thank you! + import numpy as np diff --git a/data_structures/kd_tree/kd_node.py b/data_structures/kd_tree/kd_node.py index e1011027938d..5a22ef609077 100644 --- a/data_structures/kd_tree/kd_node.py +++ b/data_structures/kd_tree/kd_node.py @@ -1,3 +1,11 @@ +# Created by: Ramy-Badr-Ahmed (https://github.com/Ramy-Badr-Ahmed) +# in Pull Request: #11532 +# https://github.com/TheAlgorithms/Python/pull/11532 +# +# Please mention me (@Ramy-Badr-Ahmed) in any issue or pull request +# addressing bugs/corrections to this file. +# Thank you! + from __future__ import annotations diff --git a/data_structures/kd_tree/nearest_neighbour_search.py b/data_structures/kd_tree/nearest_neighbour_search.py index d9727736f21c..8104944c08f0 100644 --- a/data_structures/kd_tree/nearest_neighbour_search.py +++ b/data_structures/kd_tree/nearest_neighbour_search.py @@ -1,3 +1,11 @@ +# Created by: Ramy-Badr-Ahmed (https://github.com/Ramy-Badr-Ahmed) +# in Pull Request: #11532 +# https://github.com/TheAlgorithms/Python/pull/11532 +# +# Please mention me (@Ramy-Badr-Ahmed) in any issue or pull request +# addressing bugs/corrections to this file. +# Thank you! 
+ from data_structures.kd_tree.kd_node import KDNode diff --git a/data_structures/kd_tree/tests/test_kdtree.py b/data_structures/kd_tree/tests/test_kdtree.py index 81f2cc990074..dce5e4f34ff4 100644 --- a/data_structures/kd_tree/tests/test_kdtree.py +++ b/data_structures/kd_tree/tests/test_kdtree.py @@ -1,3 +1,11 @@ +# Created by: Ramy-Badr-Ahmed (https://github.com/Ramy-Badr-Ahmed) +# in Pull Request: #11532 +# https://github.com/TheAlgorithms/Python/pull/11532 +# +# Please mention me (@Ramy-Badr-Ahmed) in any issue or pull request +# addressing bugs/corrections to this file. +# Thank you! + import numpy as np import pytest diff --git a/data_structures/suffix_tree/__init__.py b/data_structures/suffix_tree/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/data_structures/suffix_tree/example/__init__.py b/data_structures/suffix_tree/example/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/data_structures/suffix_tree/example/example_usage.py b/data_structures/suffix_tree/example/example_usage.py new file mode 100644 index 000000000000..724ac57e8bfb --- /dev/null +++ b/data_structures/suffix_tree/example/example_usage.py @@ -0,0 +1,37 @@ +# Created by: Ramy-Badr-Ahmed (https://github.com/Ramy-Badr-Ahmed) +# in Pull Request: #11554 +# https://github.com/TheAlgorithms/Python/pull/11554 +# +# Please mention me (@Ramy-Badr-Ahmed) in any issue or pull request +# addressing bugs/corrections to this file. +# Thank you! + +from data_structures.suffix_tree.suffix_tree import SuffixTree + + +def main() -> None: + """ + Demonstrate the usage of the SuffixTree class. + + - Initializes a SuffixTree with a predefined text. + - Defines a list of patterns to search for within the suffix tree. + - Searches for each pattern in the suffix tree. 
+ + Patterns tested: + - "ana" (found) --> True + - "ban" (found) --> True + - "na" (found) --> True + - "xyz" (not found) --> False + - "mon" (found) --> True + """ + text = "monkey banana" + suffix_tree = SuffixTree(text) + + patterns = ["ana", "ban", "na", "xyz", "mon"] + for pattern in patterns: + found = suffix_tree.search(pattern) + print(f"Pattern '{pattern}' found: {found}") + + +if __name__ == "__main__": + main() diff --git a/data_structures/suffix_tree/suffix_tree.py b/data_structures/suffix_tree/suffix_tree.py new file mode 100644 index 000000000000..ad54fb0ba009 --- /dev/null +++ b/data_structures/suffix_tree/suffix_tree.py @@ -0,0 +1,66 @@ +# Created by: Ramy-Badr-Ahmed (https://github.com/Ramy-Badr-Ahmed) +# in Pull Request: #11554 +# https://github.com/TheAlgorithms/Python/pull/11554 +# +# Please mention me (@Ramy-Badr-Ahmed) in any issue or pull request +# addressing bugs/corrections to this file. +# Thank you! + +from data_structures.suffix_tree.suffix_tree_node import SuffixTreeNode + + +class SuffixTree: + def __init__(self, text: str) -> None: + """ + Initializes the suffix tree with the given text. + + Args: + text (str): The text for which the suffix tree is to be built. + """ + self.text: str = text + self.root: SuffixTreeNode = SuffixTreeNode() + self.build_suffix_tree() + + def build_suffix_tree(self) -> None: + """ + Builds the suffix tree for the given text by adding all suffixes. + """ + text = self.text + n = len(text) + for i in range(n): + suffix = text[i:] + self._add_suffix(suffix, i) + + def _add_suffix(self, suffix: str, index: int) -> None: + """ + Adds a suffix to the suffix tree. + + Args: + suffix (str): The suffix to add. + index (int): The starting index of the suffix in the original text. 
+ """ + node = self.root + for char in suffix: + if char not in node.children: + node.children[char] = SuffixTreeNode() + node = node.children[char] + node.is_end_of_string = True + node.start = index + node.end = index + len(suffix) - 1 + + def search(self, pattern: str) -> bool: + """ + Searches for a pattern in the suffix tree. + + Args: + pattern (str): The pattern to search for. + + Returns: + bool: True if the pattern is found, False otherwise. + """ + node = self.root + for char in pattern: + if char not in node.children: + return False + node = node.children[char] + return True diff --git a/data_structures/suffix_tree/suffix_tree_node.py b/data_structures/suffix_tree/suffix_tree_node.py new file mode 100644 index 000000000000..e5b628645063 --- /dev/null +++ b/data_structures/suffix_tree/suffix_tree_node.py @@ -0,0 +1,36 @@ +# Created by: Ramy-Badr-Ahmed (https://github.com/Ramy-Badr-Ahmed) +# in Pull Request: #11554 +# https://github.com/TheAlgorithms/Python/pull/11554 +# +# Please mention me (@Ramy-Badr-Ahmed) in any issue or pull request +# addressing bugs/corrections to this file. +# Thank you! + +from __future__ import annotations + + +class SuffixTreeNode: + def __init__( + self, + children: dict[str, SuffixTreeNode] | None = None, + is_end_of_string: bool = False, + start: int | None = None, + end: int | None = None, + suffix_link: SuffixTreeNode | None = None, + ) -> None: + """ + Initializes a suffix tree node. + + Parameters: + children (dict[str, SuffixTreeNode] | None): The children of this node. + is_end_of_string (bool): Indicates if this node represents + the end of a string. + start (int | None): The start index of the suffix in the text. + end (int | None): The end index of the suffix in the text. + suffix_link (SuffixTreeNode | None): Link to another suffix tree node. 
+ """ + self.children = children or {} + self.is_end_of_string = is_end_of_string + self.start = start + self.end = end + self.suffix_link = suffix_link diff --git a/data_structures/suffix_tree/tests/__init__.py b/data_structures/suffix_tree/tests/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/data_structures/suffix_tree/tests/test_suffix_tree.py b/data_structures/suffix_tree/tests/test_suffix_tree.py new file mode 100644 index 000000000000..45c6790ac48a --- /dev/null +++ b/data_structures/suffix_tree/tests/test_suffix_tree.py @@ -0,0 +1,59 @@ +# Created by: Ramy-Badr-Ahmed (https://github.com/Ramy-Badr-Ahmed) +# in Pull Request: #11554 +# https://github.com/TheAlgorithms/Python/pull/11554 +# +# Please mention me (@Ramy-Badr-Ahmed) in any issue or pull request +# addressing bugs/corrections to this file. +# Thank you! + +import unittest + +from data_structures.suffix_tree.suffix_tree import SuffixTree + + +class TestSuffixTree(unittest.TestCase): + def setUp(self) -> None: + """Set up the initial conditions for each test.""" + self.text = "banana" + self.suffix_tree = SuffixTree(self.text) + + def test_search_existing_patterns(self) -> None: + """Test searching for patterns that exist in the suffix tree.""" + patterns = ["ana", "ban", "na"] + for pattern in patterns: + with self.subTest(pattern=pattern): + assert self.suffix_tree.search( + pattern + ), f"Pattern '{pattern}' should be found." + + def test_search_non_existing_patterns(self) -> None: + """Test searching for patterns that do not exist in the suffix tree.""" + patterns = ["xyz", "apple", "cat"] + for pattern in patterns: + with self.subTest(pattern=pattern): + assert not self.suffix_tree.search( + pattern + ), f"Pattern '{pattern}' should not be found." + + def test_search_empty_pattern(self) -> None: + """Test searching for an empty pattern.""" + assert self.suffix_tree.search(""), "An empty pattern should be found." 
+ + def test_search_full_text(self) -> None: + """Test searching for the full text.""" + assert self.suffix_tree.search( + self.text + ), "The full text should be found in the suffix tree." + + def test_search_substrings(self) -> None: + """Test searching for substrings of the full text.""" + substrings = ["ban", "ana", "a", "na"] + for substring in substrings: + with self.subTest(substring=substring): + assert self.suffix_tree.search( + substring + ), f"Substring '{substring}' should be found." + + +if __name__ == "__main__": + unittest.main() From a9ca110d6b6e4921119fdcca3b2a01e7f649f1ed Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Mon, 30 Sep 2024 12:49:31 +0200 Subject: [PATCH 113/260] Scripts for closing pull requests for Hacktoberfest (#11587) * Scripts for closing pull requests for Hacktoberfest * --limit=500 * Lose 2024 --- ...ose_pull_requests_with_awaiting_changes.sh | 22 +++++++++++++++++++ .../close_pull_requests_with_failing_tests.sh | 22 +++++++++++++++++++ ...requests_with_require_descriptive_names.sh | 21 ++++++++++++++++++ .../close_pull_requests_with_require_tests.sh | 22 +++++++++++++++++++ ...e_pull_requests_with_require_type_hints.sh | 21 ++++++++++++++++++ 5 files changed, 108 insertions(+) create mode 100755 scripts/close_pull_requests_with_awaiting_changes.sh create mode 100755 scripts/close_pull_requests_with_failing_tests.sh create mode 100755 scripts/close_pull_requests_with_require_descriptive_names.sh create mode 100755 scripts/close_pull_requests_with_require_tests.sh create mode 100755 scripts/close_pull_requests_with_require_type_hints.sh diff --git a/scripts/close_pull_requests_with_awaiting_changes.sh b/scripts/close_pull_requests_with_awaiting_changes.sh new file mode 100755 index 000000000000..55e19c980596 --- /dev/null +++ b/scripts/close_pull_requests_with_awaiting_changes.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +# List all open pull requests +prs=$(gh pr list --state open --json number,title,labels --limit 500) + +# Loop 
through each pull request +echo "$prs" | jq -c '.[]' | while read -r pr; do + pr_number=$(echo "$pr" | jq -r '.number') + pr_title=$(echo "$pr" | jq -r '.title') + pr_labels=$(echo "$pr" | jq -r '.labels') + + # Check if the "awaiting changes" label is present + awaiting_changes=$(echo "$pr_labels" | jq -r '.[] | select(.name == "awaiting changes")') + echo "Checking PR #$pr_number $pr_title ($awaiting_changes) ($pr_labels)" + + # If awaiting_changes, close the pull request + if [[ -n "$awaiting_changes" ]]; then + echo "Closing PR #$pr_number $pr_title due to awaiting_changes label" + gh pr close "$pr_number" --comment "Closing awaiting_changes PRs to prepare for Hacktoberfest" + sleep 2 + fi +done diff --git a/scripts/close_pull_requests_with_failing_tests.sh b/scripts/close_pull_requests_with_failing_tests.sh new file mode 100755 index 000000000000..3ec5960aed27 --- /dev/null +++ b/scripts/close_pull_requests_with_failing_tests.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +# List all open pull requests +prs=$(gh pr list --state open --json number,title,labels --limit 500) + +# Loop through each pull request +echo "$prs" | jq -c '.[]' | while read -r pr; do + pr_number=$(echo "$pr" | jq -r '.number') + pr_title=$(echo "$pr" | jq -r '.title') + pr_labels=$(echo "$pr" | jq -r '.labels') + + # Check if the "tests are failing" label is present + tests_are_failing=$(echo "$pr_labels" | jq -r '.[] | select(.name == "tests are failing")') + echo "Checking PR #$pr_number $pr_title ($tests_are_failing) ($pr_labels)" + + # If there are failing tests, close the pull request + if [[ -n "$tests_are_failing" ]]; then + echo "Closing PR #$pr_number $pr_title due to tests_are_failing label" + gh pr close "$pr_number" --comment "Closing tests_are_failing PRs to prepare for Hacktoberfest" + sleep 2 + fi +done diff --git a/scripts/close_pull_requests_with_require_descriptive_names.sh b/scripts/close_pull_requests_with_require_descriptive_names.sh new file mode 100755 index 
000000000000..0fc3cec1d247 --- /dev/null +++ b/scripts/close_pull_requests_with_require_descriptive_names.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# List all open pull requests +prs=$(gh pr list --state open --json number,title,labels --limit 500) + +# Loop through each pull request +echo "$prs" | jq -c '.[]' | while read -r pr; do + pr_number=$(echo "$pr" | jq -r '.number') + pr_title=$(echo "$pr" | jq -r '.title') + pr_labels=$(echo "$pr" | jq -r '.labels') + + # Check if the "require descriptive names" label is present + require_descriptive_names=$(echo "$pr_labels" | jq -r '.[] | select(.name == "require descriptive names")') + echo "Checking PR #$pr_number $pr_title ($require_descriptive_names) ($pr_labels)" + + # If there are require_descriptive_names, close the pull request + if [[ -n "$require_descriptive_names" ]]; then + echo "Closing PR #$pr_number $pr_title due to require_descriptive_names label" + gh pr close "$pr_number" --comment "Closing require_descriptive_names PRs to prepare for Hacktoberfest" + fi +done diff --git a/scripts/close_pull_requests_with_require_tests.sh b/scripts/close_pull_requests_with_require_tests.sh new file mode 100755 index 000000000000..89a54996b584 --- /dev/null +++ b/scripts/close_pull_requests_with_require_tests.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +# List all open pull requests +prs=$(gh pr list --state open --json number,title,labels --limit 500) + +# Loop through each pull request +echo "$prs" | jq -c '.[]' | while read -r pr; do + pr_number=$(echo "$pr" | jq -r '.number') + pr_title=$(echo "$pr" | jq -r '.title') + pr_labels=$(echo "$pr" | jq -r '.labels') + + # Check if the "require_tests" label is present + require_tests=$(echo "$pr_labels" | jq -r '.[] | select(.name == "require tests")') + echo "Checking PR #$pr_number $pr_title ($require_tests) ($pr_labels)" + + # If there require tests, close the pull request + if [[ -n "$require_tests" ]]; then + echo "Closing PR #$pr_number $pr_title due to require_tests label" + gh 
pr close "$pr_number" --comment "Closing require_tests PRs to prepare for Hacktoberfest" + # sleep 2 + fi +done diff --git a/scripts/close_pull_requests_with_require_type_hints.sh b/scripts/close_pull_requests_with_require_type_hints.sh new file mode 100755 index 000000000000..df5d88289cf0 --- /dev/null +++ b/scripts/close_pull_requests_with_require_type_hints.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# List all open pull requests +prs=$(gh pr list --state open --json number,title,labels --limit 500) + +# Loop through each pull request +echo "$prs" | jq -c '.[]' | while read -r pr; do + pr_number=$(echo "$pr" | jq -r '.number') + pr_title=$(echo "$pr" | jq -r '.title') + pr_labels=$(echo "$pr" | jq -r '.labels') + + # Check if the "require type hints" label is present + require_type_hints=$(echo "$pr_labels" | jq -r '.[] | select(.name == "require type hints")') + echo "Checking PR #$pr_number $pr_title ($require_type_hints) ($pr_labels)" + + # If require_type_hints, close the pull request + if [[ -n "$require_type_hints" ]]; then + echo "Closing PR #$pr_number $pr_title due to require_type_hints label" + gh pr close "$pr_number" --comment "Closing require_type_hints PRs to prepare for Hacktoberfest" + fi +done From a7bfa224554f277ed68be9e4ef3f6d1cd89008af Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 30 Sep 2024 22:16:17 +0200 Subject: [PATCH 114/260] [pre-commit.ci] pre-commit autoupdate (#11594) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.6.7 → v0.6.8](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.7...v0.6.8) - [github.com/abravalheri/validate-pyproject: v0.19 → v0.20.2](https://github.com/abravalheri/validate-pyproject/compare/v0.19...v0.20.2) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 
insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7b219597f7b6..8a8e5c1f6ad9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.7 + rev: v0.6.8 hooks: - id: ruff - id: ruff-format @@ -42,7 +42,7 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.19 + rev: v0.20.2 hooks: - id: validate-pyproject From 0177ae1cd596f4f3c0ee7490666d74504deb0298 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Mon, 30 Sep 2024 23:01:15 +0200 Subject: [PATCH 115/260] Upgrade to Python 3.13 (#11588) --- .github/workflows/build.yml | 6 ++- DIRECTORY.md | 1 - computer_vision/haralick_descriptors.py | 8 ++-- data_structures/heap/binomial_heap.py | 6 +-- electronics/circular_convolution.py | 6 +-- fractals/julia_sets.py | 18 ++++----- graphics/bezier_curve.py | 8 ++-- graphs/dijkstra_binary_grid.py | 2 +- linear_algebra/src/power_iteration.py | 2 +- linear_programming/simplex.py | 32 +++++++-------- machine_learning/decision_tree.py | 8 ++-- machine_learning/forecasting/run.py | 8 ++-- machine_learning/k_nearest_neighbours.py | 2 +- machine_learning/logistic_regression.py | 4 +- machine_learning/loss_functions.py | 40 +++++++++---------- machine_learning/mfcc.py | 13 +++--- .../multilayer_perceptron_classifier.py | 2 +- machine_learning/scoring_functions.py | 22 +++++----- machine_learning/similarity_search.py | 2 +- machine_learning/support_vector_machines.py | 6 +-- maths/euclidean_distance.py | 8 ++-- maths/euler_method.py | 2 +- maths/euler_modified.py | 4 +- maths/gaussian.py | 16 ++++---- maths/minkowski_distance.py | 2 +- maths/numerical_analysis/adams_bashforth.py | 8 ++-- maths/numerical_analysis/runge_kutta.py | 2 +- .../runge_kutta_fehlberg_45.py | 4 +- maths/numerical_analysis/runge_kutta_gills.py | 2 +- maths/softmax.py | 2 +- 
.../two_hidden_layers_neural_network.py | 6 +-- other/bankers_algorithm.py | 8 ++-- physics/in_static_equilibrium.py | 2 +- requirements.txt | 4 +- ..._tweets.py => get_user_tweets.py.DISABLED} | 0 35 files changed, 135 insertions(+), 131 deletions(-) rename web_programming/{get_user_tweets.py => get_user_tweets.py.DISABLED} (100%) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index a113b4608678..dad2b2fac086 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -12,7 +12,7 @@ jobs: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: - python-version: 3.12 + python-version: 3.13 allow-prereleases: true - uses: actions/cache@v4 with: @@ -26,6 +26,10 @@ jobs: # TODO: #8818 Re-enable quantum tests run: pytest --ignore=quantum/q_fourier_transform.py + --ignore=computer_vision/cnn_classification.py + --ignore=dynamic_programming/k_means_clustering_tensorflow.py + --ignore=machine_learning/lstm/lstm_prediction.py + --ignore=neural_network/input_data.py --ignore=project_euler/ --ignore=scripts/validate_solutions.py --cov-report=term-missing:skip-covered diff --git a/DIRECTORY.md b/DIRECTORY.md index 955001e2aa23..56ab8377f16b 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1343,7 +1343,6 @@ * [Get Ip Geolocation](web_programming/get_ip_geolocation.py) * [Get Top Billionaires](web_programming/get_top_billionaires.py) * [Get Top Hn Posts](web_programming/get_top_hn_posts.py) - * [Get User Tweets](web_programming/get_user_tweets.py) * [Giphy](web_programming/giphy.py) * [Instagram Crawler](web_programming/instagram_crawler.py) * [Instagram Pic](web_programming/instagram_pic.py) diff --git a/computer_vision/haralick_descriptors.py b/computer_vision/haralick_descriptors.py index 634f0495797b..54632160dcf2 100644 --- a/computer_vision/haralick_descriptors.py +++ b/computer_vision/haralick_descriptors.py @@ -19,7 +19,7 @@ def root_mean_square_error(original: np.ndarray, reference: np.ndarray) -> float >>> 
root_mean_square_error(np.array([1, 2, 3]), np.array([6, 4, 2])) 3.1622776601683795 """ - return np.sqrt(((original - reference) ** 2).mean()) + return float(np.sqrt(((original - reference) ** 2).mean())) def normalize_image( @@ -273,7 +273,7 @@ def haralick_descriptors(matrix: np.ndarray) -> list[float]: >>> morphological = opening_filter(binary) >>> mask_1 = binary_mask(gray, morphological)[0] >>> concurrency = matrix_concurrency(mask_1, (0, 1)) - >>> haralick_descriptors(concurrency) + >>> [float(f) for f in haralick_descriptors(concurrency)] [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] """ # Function np.indices could be used for bigger input types, @@ -335,7 +335,7 @@ def get_descriptors( return np.concatenate(descriptors, axis=None) -def euclidean(point_1: np.ndarray, point_2: np.ndarray) -> np.float32: +def euclidean(point_1: np.ndarray, point_2: np.ndarray) -> float: """ Simple method for calculating the euclidean distance between two points, with type np.ndarray. @@ -346,7 +346,7 @@ def euclidean(point_1: np.ndarray, point_2: np.ndarray) -> np.float32: >>> euclidean(a, b) 3.3166247903554 """ - return np.sqrt(np.sum(np.square(point_1 - point_2))) + return float(np.sqrt(np.sum(np.square(point_1 - point_2)))) def get_distances(descriptors: np.ndarray, base: int) -> list[tuple[int, float]]: diff --git a/data_structures/heap/binomial_heap.py b/data_structures/heap/binomial_heap.py index 099bd2871023..9cfdf0c12fe0 100644 --- a/data_structures/heap/binomial_heap.py +++ b/data_structures/heap/binomial_heap.py @@ -73,7 +73,7 @@ class BinomialHeap: 30 Deleting - delete() test - >>> [first_heap.delete_min() for _ in range(20)] + >>> [int(first_heap.delete_min()) for _ in range(20)] [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] Create a new Heap @@ -118,7 +118,7 @@ class BinomialHeap: values in merged heap; (merge is inplace) >>> results = [] >>> while not first_heap.is_empty(): - ... results.append(first_heap.delete_min()) + ... 
results.append(int(first_heap.delete_min())) >>> results [17, 20, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 34] """ @@ -354,7 +354,7 @@ def delete_min(self): # Merge heaps self.merge_heaps(new_heap) - return min_value + return int(min_value) def pre_order(self): """ diff --git a/electronics/circular_convolution.py b/electronics/circular_convolution.py index 768f2ad941bc..d06e76be759b 100644 --- a/electronics/circular_convolution.py +++ b/electronics/circular_convolution.py @@ -39,7 +39,7 @@ def circular_convolution(self) -> list[float]: Usage: >>> convolution = CircularConvolution() >>> convolution.circular_convolution() - [10, 10, 6, 14] + [10.0, 10.0, 6.0, 14.0] >>> convolution.first_signal = [0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6] >>> convolution.second_signal = [0.1, 0.3, 0.5, 0.7, 0.9, 1.1, 1.3, 1.5] @@ -54,7 +54,7 @@ def circular_convolution(self) -> list[float]: >>> convolution.first_signal = [1, -1, 2, 3, -1] >>> convolution.second_signal = [1, 2, 3] >>> convolution.circular_convolution() - [8, -2, 3, 4, 11] + [8.0, -2.0, 3.0, 4.0, 11.0] """ @@ -91,7 +91,7 @@ def circular_convolution(self) -> list[float]: final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal)) # rounding-off to two decimal places - return [round(i, 2) for i in final_signal] + return [float(round(i, 2)) for i in final_signal] if __name__ == "__main__": diff --git a/fractals/julia_sets.py b/fractals/julia_sets.py index 1eef4573ba19..bea599d44339 100644 --- a/fractals/julia_sets.py +++ b/fractals/julia_sets.py @@ -40,11 +40,11 @@ def eval_exponential(c_parameter: complex, z_values: np.ndarray) -> np.ndarray: """ Evaluate $e^z + c$. 
- >>> eval_exponential(0, 0) + >>> float(eval_exponential(0, 0)) 1.0 - >>> abs(eval_exponential(1, np.pi*1.j)) < 1e-15 + >>> bool(abs(eval_exponential(1, np.pi*1.j)) < 1e-15) True - >>> abs(eval_exponential(1.j, 0)-1-1.j) < 1e-15 + >>> bool(abs(eval_exponential(1.j, 0)-1-1.j) < 1e-15) True """ return np.exp(z_values) + c_parameter @@ -98,20 +98,20 @@ def iterate_function( >>> iterate_function(eval_quadratic_polynomial, 0, 3, np.array([0,1,2])).shape (3,) - >>> np.round(iterate_function(eval_quadratic_polynomial, + >>> complex(np.round(iterate_function(eval_quadratic_polynomial, ... 0, ... 3, - ... np.array([0,1,2]))[0]) + ... np.array([0,1,2]))[0])) 0j - >>> np.round(iterate_function(eval_quadratic_polynomial, + >>> complex(np.round(iterate_function(eval_quadratic_polynomial, ... 0, ... 3, - ... np.array([0,1,2]))[1]) + ... np.array([0,1,2]))[1])) (1+0j) - >>> np.round(iterate_function(eval_quadratic_polynomial, + >>> complex(np.round(iterate_function(eval_quadratic_polynomial, ... 0, ... 3, - ... np.array([0,1,2]))[2]) + ... np.array([0,1,2]))[2])) (256+0j) """ diff --git a/graphics/bezier_curve.py b/graphics/bezier_curve.py index 9d906f179c92..6c7dcd4f06e7 100644 --- a/graphics/bezier_curve.py +++ b/graphics/bezier_curve.py @@ -30,9 +30,9 @@ def basis_function(self, t: float) -> list[float]: returns the x, y values of basis function at time t >>> curve = BezierCurve([(1,1), (1,2)]) - >>> curve.basis_function(0) + >>> [float(x) for x in curve.basis_function(0)] [1.0, 0.0] - >>> curve.basis_function(1) + >>> [float(x) for x in curve.basis_function(1)] [0.0, 1.0] """ assert 0 <= t <= 1, "Time t must be between 0 and 1." @@ -55,9 +55,9 @@ def bezier_curve_function(self, t: float) -> tuple[float, float]: The last point in the curve is when t = 1. 
>>> curve = BezierCurve([(1,1), (1,2)]) - >>> curve.bezier_curve_function(0) + >>> tuple(float(x) for x in curve.bezier_curve_function(0)) (1.0, 1.0) - >>> curve.bezier_curve_function(1) + >>> tuple(float(x) for x in curve.bezier_curve_function(1)) (1.0, 2.0) """ diff --git a/graphs/dijkstra_binary_grid.py b/graphs/dijkstra_binary_grid.py index c23d8234328a..06293a87da2d 100644 --- a/graphs/dijkstra_binary_grid.py +++ b/graphs/dijkstra_binary_grid.py @@ -69,7 +69,7 @@ def dijkstra( x, y = predecessors[x, y] path.append(source) # add the source manually path.reverse() - return matrix[destination], path + return float(matrix[destination]), path for i in range(len(dx)): nx, ny = x + dx[i], y + dy[i] diff --git a/linear_algebra/src/power_iteration.py b/linear_algebra/src/power_iteration.py index 24fbd9a5e002..83c2ce48c3a0 100644 --- a/linear_algebra/src/power_iteration.py +++ b/linear_algebra/src/power_iteration.py @@ -78,7 +78,7 @@ def power_iteration( if is_complex: lambda_ = np.real(lambda_) - return lambda_, vector + return float(lambda_), vector def test_power_iteration() -> None: diff --git a/linear_programming/simplex.py b/linear_programming/simplex.py index dc171bacd3a2..a8affe1b72d2 100644 --- a/linear_programming/simplex.py +++ b/linear_programming/simplex.py @@ -107,8 +107,8 @@ def generate_col_titles(self) -> list[str]: def find_pivot(self) -> tuple[Any, Any]: """Finds the pivot row and column. - >>> Tableau(np.array([[-2,1,0,0,0], [3,1,1,0,6], [1,2,0,1,7.]]), - ... 2, 0).find_pivot() + >>> tuple(int(x) for x in Tableau(np.array([[-2,1,0,0,0], [3,1,1,0,6], + ... [1,2,0,1,7.]]), 2, 0).find_pivot()) (1, 0) """ objective = self.objectives[-1] @@ -215,8 +215,8 @@ def run_simplex(self) -> dict[Any, Any]: Max: x1 + x2 ST: x1 + 3x2 <= 4 3x1 + x2 <= 4 - >>> Tableau(np.array([[-1,-1,0,0,0],[1,3,1,0,4],[3,1,0,1,4.]]), - ... 2, 0).run_simplex() + >>> {key: float(value) for key, value in Tableau(np.array([[-1,-1,0,0,0], + ... 
[1,3,1,0,4],[3,1,0,1,4.]]), 2, 0).run_simplex().items()} {'P': 2.0, 'x1': 1.0, 'x2': 1.0} # Standard linear program with 3 variables: @@ -224,21 +224,21 @@ def run_simplex(self) -> dict[Any, Any]: ST: 2x1 + x2 + x3 ≤ 2 x1 + 2x2 + 3x3 ≤ 5 2x1 + 2x2 + x3 ≤ 6 - >>> Tableau(np.array([ + >>> {key: float(value) for key, value in Tableau(np.array([ ... [-3,-1,-3,0,0,0,0], ... [2,1,1,1,0,0,2], ... [1,2,3,0,1,0,5], ... [2,2,1,0,0,1,6.] - ... ]),3,0).run_simplex() # doctest: +ELLIPSIS + ... ]),3,0).run_simplex().items()} # doctest: +ELLIPSIS {'P': 5.4, 'x1': 0.199..., 'x3': 1.6} # Optimal tableau input: - >>> Tableau(np.array([ + >>> {key: float(value) for key, value in Tableau(np.array([ ... [0, 0, 0.25, 0.25, 2], ... [0, 1, 0.375, -0.125, 1], ... [1, 0, -0.125, 0.375, 1] - ... ]), 2, 0).run_simplex() + ... ]), 2, 0).run_simplex().items()} {'P': 2.0, 'x1': 1.0, 'x2': 1.0} # Non-standard: >= constraints @@ -246,25 +246,25 @@ def run_simplex(self) -> dict[Any, Any]: ST: x1 + x2 + x3 <= 40 2x1 + x2 - x3 >= 10 - x2 + x3 >= 10 - >>> Tableau(np.array([ + >>> {key: float(value) for key, value in Tableau(np.array([ ... [2, 0, 0, 0, -1, -1, 0, 0, 20], ... [-2, -3, -1, 0, 0, 0, 0, 0, 0], ... [1, 1, 1, 1, 0, 0, 0, 0, 40], ... [2, 1, -1, 0, -1, 0, 1, 0, 10], ... [0, -1, 1, 0, 0, -1, 0, 1, 10.] - ... ]), 3, 2).run_simplex() + ... ]), 3, 2).run_simplex().items()} {'P': 70.0, 'x1': 10.0, 'x2': 10.0, 'x3': 20.0} # Non standard: minimisation and equalities Min: x1 + x2 ST: 2x1 + x2 = 12 6x1 + 5x2 = 40 - >>> Tableau(np.array([ + >>> {key: float(value) for key, value in Tableau(np.array([ ... [8, 6, 0, 0, 52], ... [1, 1, 0, 0, 0], ... [2, 1, 1, 0, 12], ... [6, 5, 0, 1, 40.], - ... ]), 2, 2).run_simplex() + ... ]), 2, 2).run_simplex().items()} {'P': 7.0, 'x1': 5.0, 'x2': 2.0} @@ -275,7 +275,7 @@ def run_simplex(self) -> dict[Any, Any]: 2x1 + 4x2 <= 48 x1 + x2 >= 10 x1 >= 2 - >>> Tableau(np.array([ + >>> {key: float(value) for key, value in Tableau(np.array([ ... 
[2, 1, 0, 0, 0, -1, -1, 0, 0, 12.0], ... [-8, -6, 0, 0, 0, 0, 0, 0, 0, 0.0], ... [1, 3, 1, 0, 0, 0, 0, 0, 0, 33.0], @@ -283,7 +283,7 @@ def run_simplex(self) -> dict[Any, Any]: ... [2, 4, 0, 0, 1, 0, 0, 0, 0, 48.0], ... [1, 1, 0, 0, 0, -1, 0, 1, 0, 10.0], ... [1, 0, 0, 0, 0, 0, -1, 0, 1, 2.0] - ... ]), 2, 2).run_simplex() # doctest: +ELLIPSIS + ... ]), 2, 2).run_simplex().items()} # doctest: +ELLIPSIS {'P': 132.0, 'x1': 12.000... 'x2': 5.999...} """ # Stop simplex algorithm from cycling. @@ -307,11 +307,11 @@ def run_simplex(self) -> dict[Any, Any]: def interpret_tableau(self) -> dict[str, float]: """Given the final tableau, add the corresponding values of the basic decision variables to the `output_dict` - >>> Tableau(np.array([ + >>> {key: float(value) for key, value in Tableau(np.array([ ... [0,0,0.875,0.375,5], ... [0,1,0.375,-0.125,1], ... [1,0,-0.125,0.375,1] - ... ]),2, 0).interpret_tableau() + ... ]),2, 0).interpret_tableau().items()} {'P': 5.0, 'x1': 1.0, 'x2': 1.0} """ # P = RHS of final tableau diff --git a/machine_learning/decision_tree.py b/machine_learning/decision_tree.py index d0bd6ab0b555..72970431c3fc 100644 --- a/machine_learning/decision_tree.py +++ b/machine_learning/decision_tree.py @@ -26,15 +26,15 @@ def mean_squared_error(self, labels, prediction): >>> tester = DecisionTree() >>> test_labels = np.array([1,2,3,4,5,6,7,8,9,10]) >>> test_prediction = float(6) - >>> tester.mean_squared_error(test_labels, test_prediction) == ( + >>> bool(tester.mean_squared_error(test_labels, test_prediction) == ( ... TestDecisionTree.helper_mean_squared_error_test(test_labels, - ... test_prediction)) + ... test_prediction))) True >>> test_labels = np.array([1,2,3]) >>> test_prediction = float(2) - >>> tester.mean_squared_error(test_labels, test_prediction) == ( + >>> bool(tester.mean_squared_error(test_labels, test_prediction) == ( ... TestDecisionTree.helper_mean_squared_error_test(test_labels, - ... test_prediction)) + ... 
test_prediction))) True """ if labels.ndim != 1: diff --git a/machine_learning/forecasting/run.py b/machine_learning/forecasting/run.py index dbb86caf8568..9d81b03cd09e 100644 --- a/machine_learning/forecasting/run.py +++ b/machine_learning/forecasting/run.py @@ -28,7 +28,7 @@ def linear_regression_prediction( input : training data (date, total_user, total_event) in list of float output : list of total user prediction in float >>> n = linear_regression_prediction([2,3,4,5], [5,3,4,6], [3,1,2,4], [2,1], [2,2]) - >>> abs(n - 5.0) < 1e-6 # Checking precision because of floating point errors + >>> bool(abs(n - 5.0) < 1e-6) # Checking precision because of floating point errors True """ x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)]) @@ -56,7 +56,7 @@ def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> ) model_fit = model.fit(disp=False, maxiter=600, method="nm") result = model_fit.predict(1, len(test_match), exog=[test_match]) - return result[0] + return float(result[0]) def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float: @@ -75,7 +75,7 @@ def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> f regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1) regressor.fit(x_train, train_user) y_pred = regressor.predict(x_test) - return y_pred[0] + return float(y_pred[0]) def interquartile_range_checker(train_user: list) -> float: @@ -92,7 +92,7 @@ def interquartile_range_checker(train_user: list) -> float: q3 = np.percentile(train_user, 75) iqr = q3 - q1 low_lim = q1 - (iqr * 0.1) - return low_lim + return float(low_lim) def data_safety_checker(list_vote: list, actual_result: float) -> bool: diff --git a/machine_learning/k_nearest_neighbours.py b/machine_learning/k_nearest_neighbours.py index a43757c5c20e..fbc1b8bd227e 100644 --- a/machine_learning/k_nearest_neighbours.py +++ b/machine_learning/k_nearest_neighbours.py @@ -42,7 +42,7 @@ def _euclidean_distance(a: 
np.ndarray[float], b: np.ndarray[float]) -> float: >>> KNN._euclidean_distance(np.array([1, 2, 3]), np.array([1, 8, 11])) 10.0 """ - return np.linalg.norm(a - b) + return float(np.linalg.norm(a - b)) def classify(self, pred_point: np.ndarray[float], k: int = 5) -> str: """ diff --git a/machine_learning/logistic_regression.py b/machine_learning/logistic_regression.py index 090af5382185..496026631fbe 100644 --- a/machine_learning/logistic_regression.py +++ b/machine_learning/logistic_regression.py @@ -45,7 +45,7 @@ def sigmoid_function(z: float | np.ndarray) -> float | np.ndarray: @returns: returns value in the range 0 to 1 Examples: - >>> sigmoid_function(4) + >>> float(sigmoid_function(4)) 0.9820137900379085 >>> sigmoid_function(np.array([-3, 3])) array([0.04742587, 0.95257413]) @@ -100,7 +100,7 @@ def cost_function(h: np.ndarray, y: np.ndarray) -> float: References: - https://en.wikipedia.org/wiki/Logistic_regression """ - return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean() + return float((-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()) def log_likelihood(x, y, weights): diff --git a/machine_learning/loss_functions.py b/machine_learning/loss_functions.py index 150035661eb7..0bd9aa8b5401 100644 --- a/machine_learning/loss_functions.py +++ b/machine_learning/loss_functions.py @@ -22,7 +22,7 @@ def binary_cross_entropy( >>> true_labels = np.array([0, 1, 1, 0, 1]) >>> predicted_probs = np.array([0.2, 0.7, 0.9, 0.3, 0.8]) - >>> binary_cross_entropy(true_labels, predicted_probs) + >>> float(binary_cross_entropy(true_labels, predicted_probs)) 0.2529995012327421 >>> true_labels = np.array([0, 1, 1, 0, 1]) >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) @@ -68,7 +68,7 @@ def binary_focal_cross_entropy( >>> true_labels = np.array([0, 1, 1, 0, 1]) >>> predicted_probs = np.array([0.2, 0.7, 0.9, 0.3, 0.8]) - >>> binary_focal_cross_entropy(true_labels, predicted_probs) + >>> float(binary_focal_cross_entropy(true_labels, predicted_probs)) 0.008257977659239775 >>> 
true_labels = np.array([0, 1, 1, 0, 1]) >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) @@ -108,7 +108,7 @@ def categorical_cross_entropy( >>> true_labels = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1], [0.0, 0.1, 0.9]]) - >>> categorical_cross_entropy(true_labels, pred_probs) + >>> float(categorical_cross_entropy(true_labels, pred_probs)) 0.567395975254385 >>> true_labels = np.array([[1, 0], [0, 1]]) >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]]) @@ -179,13 +179,13 @@ def categorical_focal_cross_entropy( >>> true_labels = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1], [0.0, 0.1, 0.9]]) >>> alpha = np.array([0.6, 0.2, 0.7]) - >>> categorical_focal_cross_entropy(true_labels, pred_probs, alpha) + >>> float(categorical_focal_cross_entropy(true_labels, pred_probs, alpha)) 0.0025966118981496423 >>> true_labels = np.array([[0, 1, 0], [0, 0, 1]]) >>> pred_probs = np.array([[0.05, 0.95, 0], [0.1, 0.8, 0.1]]) >>> alpha = np.array([0.25, 0.25, 0.25]) - >>> categorical_focal_cross_entropy(true_labels, pred_probs, alpha) + >>> float(categorical_focal_cross_entropy(true_labels, pred_probs, alpha)) 0.23315276982014324 >>> true_labels = np.array([[1, 0], [0, 1]]) @@ -265,7 +265,7 @@ def hinge_loss(y_true: np.ndarray, y_pred: np.ndarray) -> float: >>> true_labels = np.array([-1, 1, 1, -1, 1]) >>> pred = np.array([-4, -0.3, 0.7, 5, 10]) - >>> hinge_loss(true_labels, pred) + >>> float(hinge_loss(true_labels, pred)) 1.52 >>> true_labels = np.array([-1, 1, 1, -1, 1, 1]) >>> pred = np.array([-4, -0.3, 0.7, 5, 10]) @@ -309,11 +309,11 @@ def huber_loss(y_true: np.ndarray, y_pred: np.ndarray, delta: float) -> float: >>> true_values = np.array([0.9, 10.0, 2.0, 1.0, 5.2]) >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) - >>> np.isclose(huber_loss(true_values, predicted_values, 1.0), 2.102) + >>> bool(np.isclose(huber_loss(true_values, 
predicted_values, 1.0), 2.102)) True >>> true_labels = np.array([11.0, 21.0, 3.32, 4.0, 5.0]) >>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0]) - >>> np.isclose(huber_loss(true_labels, predicted_probs, 1.0), 1.80164) + >>> bool(np.isclose(huber_loss(true_labels, predicted_probs, 1.0), 1.80164)) True >>> true_labels = np.array([11.0, 21.0, 3.32, 4.0]) >>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0]) @@ -347,7 +347,7 @@ def mean_squared_error(y_true: np.ndarray, y_pred: np.ndarray) -> float: >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) - >>> np.isclose(mean_squared_error(true_values, predicted_values), 0.028) + >>> bool(np.isclose(mean_squared_error(true_values, predicted_values), 0.028)) True >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) @@ -381,11 +381,11 @@ def mean_absolute_error(y_true: np.ndarray, y_pred: np.ndarray) -> float: >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) - >>> np.isclose(mean_absolute_error(true_values, predicted_values), 0.16) + >>> bool(np.isclose(mean_absolute_error(true_values, predicted_values), 0.16)) True >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) - >>> np.isclose(mean_absolute_error(true_values, predicted_values), 2.16) + >>> bool(np.isclose(mean_absolute_error(true_values, predicted_values), 2.16)) False >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) >>> predicted_probs = np.array([0.3, 0.8, 0.9, 5.2]) @@ -420,7 +420,7 @@ def mean_squared_logarithmic_error(y_true: np.ndarray, y_pred: np.ndarray) -> fl >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) - >>> mean_squared_logarithmic_error(true_values, predicted_values) + >>> float(mean_squared_logarithmic_error(true_values, 
predicted_values)) 0.0030860877925181344 >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) @@ -459,17 +459,17 @@ def mean_absolute_percentage_error( Examples: >>> y_true = np.array([10, 20, 30, 40]) >>> y_pred = np.array([12, 18, 33, 45]) - >>> mean_absolute_percentage_error(y_true, y_pred) + >>> float(mean_absolute_percentage_error(y_true, y_pred)) 0.13125 >>> y_true = np.array([1, 2, 3, 4]) >>> y_pred = np.array([2, 3, 4, 5]) - >>> mean_absolute_percentage_error(y_true, y_pred) + >>> float(mean_absolute_percentage_error(y_true, y_pred)) 0.5208333333333333 >>> y_true = np.array([34, 37, 44, 47, 48, 48, 46, 43, 32, 27, 26, 24]) >>> y_pred = np.array([37, 40, 46, 44, 46, 50, 45, 44, 34, 30, 22, 23]) - >>> mean_absolute_percentage_error(y_true, y_pred) + >>> float(mean_absolute_percentage_error(y_true, y_pred)) 0.064671076436071 """ if len(y_true) != len(y_pred): @@ -511,7 +511,7 @@ def perplexity_loss( ... [[0.03, 0.26, 0.21, 0.18, 0.30], ... [0.28, 0.10, 0.33, 0.15, 0.12]]] ... 
) - >>> perplexity_loss(y_true, y_pred) + >>> float(perplexity_loss(y_true, y_pred)) 5.0247347775367945 >>> y_true = np.array([[1, 4], [2, 3]]) >>> y_pred = np.array( @@ -600,17 +600,17 @@ def smooth_l1_loss(y_true: np.ndarray, y_pred: np.ndarray, beta: float = 1.0) -> >>> y_true = np.array([3, 5, 2, 7]) >>> y_pred = np.array([2.9, 4.8, 2.1, 7.2]) - >>> smooth_l1_loss(y_true, y_pred, 1.0) + >>> float(smooth_l1_loss(y_true, y_pred, 1.0)) 0.012500000000000022 >>> y_true = np.array([2, 4, 6]) >>> y_pred = np.array([1, 5, 7]) - >>> smooth_l1_loss(y_true, y_pred, 1.0) + >>> float(smooth_l1_loss(y_true, y_pred, 1.0)) 0.5 >>> y_true = np.array([1, 3, 5, 7]) >>> y_pred = np.array([1, 3, 5, 7]) - >>> smooth_l1_loss(y_true, y_pred, 1.0) + >>> float(smooth_l1_loss(y_true, y_pred, 1.0)) 0.0 >>> y_true = np.array([1, 3, 5]) @@ -647,7 +647,7 @@ def kullback_leibler_divergence(y_true: np.ndarray, y_pred: np.ndarray) -> float >>> true_labels = np.array([0.2, 0.3, 0.5]) >>> predicted_probs = np.array([0.3, 0.3, 0.4]) - >>> kullback_leibler_divergence(true_labels, predicted_probs) + >>> float(kullback_leibler_divergence(true_labels, predicted_probs)) 0.030478754035472025 >>> true_labels = np.array([0.2, 0.3, 0.5]) >>> predicted_probs = np.array([0.3, 0.3, 0.4, 0.5]) diff --git a/machine_learning/mfcc.py b/machine_learning/mfcc.py index a1e99ce4ad40..dcc3151d5a1a 100644 --- a/machine_learning/mfcc.py +++ b/machine_learning/mfcc.py @@ -162,9 +162,9 @@ def normalize(audio: np.ndarray) -> np.ndarray: Examples: >>> audio = np.array([1, 2, 3, 4, 5]) >>> normalized_audio = normalize(audio) - >>> np.max(normalized_audio) + >>> float(np.max(normalized_audio)) 1.0 - >>> np.min(normalized_audio) + >>> float(np.min(normalized_audio)) 0.2 """ # Divide the entire audio signal by the maximum absolute value @@ -229,7 +229,8 @@ def calculate_fft(audio_windowed: np.ndarray, ftt_size: int = 1024) -> np.ndarra Examples: >>> audio_windowed = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) >>> audio_fft = 
calculate_fft(audio_windowed, ftt_size=4) - >>> np.allclose(audio_fft[0], np.array([6.0+0.j, -1.5+0.8660254j, -1.5-0.8660254j])) + >>> bool(np.allclose(audio_fft[0], np.array([6.0+0.j, -1.5+0.8660254j, + ... -1.5-0.8660254j]))) True """ # Transpose the audio data to have time in rows and channels in columns @@ -281,7 +282,7 @@ def freq_to_mel(freq: float) -> float: The frequency in mel scale. Examples: - >>> round(freq_to_mel(1000), 2) + >>> float(round(freq_to_mel(1000), 2)) 999.99 """ # Use the formula to convert frequency to the mel scale @@ -321,7 +322,7 @@ def mel_spaced_filterbank( Mel-spaced filter bank. Examples: - >>> round(mel_spaced_filterbank(8000, 10, 1024)[0][1], 10) + >>> float(round(mel_spaced_filterbank(8000, 10, 1024)[0][1], 10)) 0.0004603981 """ freq_min = 0 @@ -438,7 +439,7 @@ def discrete_cosine_transform(dct_filter_num: int, filter_num: int) -> np.ndarra The DCT basis matrix. Examples: - >>> round(discrete_cosine_transform(3, 5)[0][0], 5) + >>> float(round(discrete_cosine_transform(3, 5)[0][0], 5)) 0.44721 """ basis = np.empty((dct_filter_num, filter_num)) diff --git a/machine_learning/multilayer_perceptron_classifier.py b/machine_learning/multilayer_perceptron_classifier.py index e99a4131e972..40f998c7dfa2 100644 --- a/machine_learning/multilayer_perceptron_classifier.py +++ b/machine_learning/multilayer_perceptron_classifier.py @@ -17,7 +17,7 @@ def wrapper(y): """ - >>> wrapper(Y) + >>> [int(x) for x in wrapper(Y)] [0, 0, 1] """ return list(y) diff --git a/machine_learning/scoring_functions.py b/machine_learning/scoring_functions.py index 08b969a95c3b..f6b685f4f98a 100644 --- a/machine_learning/scoring_functions.py +++ b/machine_learning/scoring_functions.py @@ -20,11 +20,11 @@ def mae(predict, actual): """ Examples(rounded for precision): >>> actual = [1,2,3];predict = [1,4,3] - >>> np.around(mae(predict,actual),decimals = 2) + >>> float(np.around(mae(predict,actual),decimals = 2)) 0.67 >>> actual = [1,1,1];predict = [1,1,1] - >>> 
mae(predict,actual) + >>> float(mae(predict,actual)) 0.0 """ predict = np.array(predict) @@ -41,11 +41,11 @@ def mse(predict, actual): """ Examples(rounded for precision): >>> actual = [1,2,3];predict = [1,4,3] - >>> np.around(mse(predict,actual),decimals = 2) + >>> float(np.around(mse(predict,actual),decimals = 2)) 1.33 >>> actual = [1,1,1];predict = [1,1,1] - >>> mse(predict,actual) + >>> float(mse(predict,actual)) 0.0 """ predict = np.array(predict) @@ -63,11 +63,11 @@ def rmse(predict, actual): """ Examples(rounded for precision): >>> actual = [1,2,3];predict = [1,4,3] - >>> np.around(rmse(predict,actual),decimals = 2) + >>> float(np.around(rmse(predict,actual),decimals = 2)) 1.15 >>> actual = [1,1,1];predict = [1,1,1] - >>> rmse(predict,actual) + >>> float(rmse(predict,actual)) 0.0 """ predict = np.array(predict) @@ -84,12 +84,10 @@ def rmse(predict, actual): def rmsle(predict, actual): """ Examples(rounded for precision): - >>> actual = [10,10,30];predict = [10,2,30] - >>> np.around(rmsle(predict,actual),decimals = 2) + >>> float(np.around(rmsle(predict=[10, 2, 30], actual=[10, 10, 30]), decimals=2)) 0.75 - >>> actual = [1,1,1];predict = [1,1,1] - >>> rmsle(predict,actual) + >>> float(rmsle(predict=[1, 1, 1], actual=[1, 1, 1])) 0.0 """ predict = np.array(predict) @@ -117,12 +115,12 @@ def mbd(predict, actual): Here the model overpredicts >>> actual = [1,2,3];predict = [2,3,4] - >>> np.around(mbd(predict,actual),decimals = 2) + >>> float(np.around(mbd(predict,actual),decimals = 2)) 50.0 Here the model underpredicts >>> actual = [1,2,3];predict = [0,1,1] - >>> np.around(mbd(predict,actual),decimals = 2) + >>> float(np.around(mbd(predict,actual),decimals = 2)) -66.67 """ predict = np.array(predict) diff --git a/machine_learning/similarity_search.py b/machine_learning/similarity_search.py index 0bc3b17d7e5a..c8a573796882 100644 --- a/machine_learning/similarity_search.py +++ b/machine_learning/similarity_search.py @@ -153,7 +153,7 @@ def 
cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float: >>> cosine_similarity(np.array([1, 2]), np.array([6, 32])) 0.9615239476408232 """ - return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b)) + return float(np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))) if __name__ == "__main__": diff --git a/machine_learning/support_vector_machines.py b/machine_learning/support_vector_machines.py index 24046115ebc4..d17c9044a3e9 100644 --- a/machine_learning/support_vector_machines.py +++ b/machine_learning/support_vector_machines.py @@ -14,11 +14,11 @@ def norm_squared(vector: ndarray) -> float: Returns: float: squared second norm of vector - >>> norm_squared([1, 2]) + >>> int(norm_squared([1, 2])) 5 - >>> norm_squared(np.asarray([1, 2])) + >>> int(norm_squared(np.asarray([1, 2]))) 5 - >>> norm_squared([0, 0]) + >>> int(norm_squared([0, 0])) 0 """ return np.dot(vector, vector) diff --git a/maths/euclidean_distance.py b/maths/euclidean_distance.py index 9b29b37b0ce6..aa7f3efc7684 100644 --- a/maths/euclidean_distance.py +++ b/maths/euclidean_distance.py @@ -13,13 +13,13 @@ def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut: """ Calculate the distance between the two endpoints of two vectors. A vector is defined as a list, tuple, or numpy 1D array. 
- >>> euclidean_distance((0, 0), (2, 2)) + >>> float(euclidean_distance((0, 0), (2, 2))) 2.8284271247461903 - >>> euclidean_distance(np.array([0, 0, 0]), np.array([2, 2, 2])) + >>> float(euclidean_distance(np.array([0, 0, 0]), np.array([2, 2, 2]))) 3.4641016151377544 - >>> euclidean_distance(np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])) + >>> float(euclidean_distance(np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]))) 8.0 - >>> euclidean_distance([1, 2, 3, 4], [5, 6, 7, 8]) + >>> float(euclidean_distance([1, 2, 3, 4], [5, 6, 7, 8])) 8.0 """ return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2)) diff --git a/maths/euler_method.py b/maths/euler_method.py index 30f193e6daa5..c6adb07e2d3d 100644 --- a/maths/euler_method.py +++ b/maths/euler_method.py @@ -26,7 +26,7 @@ def explicit_euler( ... return y >>> y0 = 1 >>> y = explicit_euler(f, y0, 0.0, 0.01, 5) - >>> y[-1] + >>> float(y[-1]) 144.77277243257308 """ n = int(np.ceil((x_end - x0) / step_size)) diff --git a/maths/euler_modified.py b/maths/euler_modified.py index d02123e1e2fb..bb282e9f0ab9 100644 --- a/maths/euler_modified.py +++ b/maths/euler_modified.py @@ -24,13 +24,13 @@ def euler_modified( >>> def f1(x, y): ... return -2*x*(y**2) >>> y = euler_modified(f1, 1.0, 0.0, 0.2, 1.0) - >>> y[-1] + >>> float(y[-1]) 0.503338255442106 >>> import math >>> def f2(x, y): ... 
return -2*y + (x**3)*math.exp(-2*x) >>> y = euler_modified(f2, 1.0, 0.0, 0.1, 0.3) - >>> y[-1] + >>> float(y[-1]) 0.5525976431951775 """ n = int(np.ceil((x_end - x0) / step_size)) diff --git a/maths/gaussian.py b/maths/gaussian.py index 0e02010a9c67..b1e62ea77fe2 100644 --- a/maths/gaussian.py +++ b/maths/gaussian.py @@ -5,18 +5,18 @@ from numpy import exp, pi, sqrt -def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> int: +def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float: """ - >>> gaussian(1) + >>> float(gaussian(1)) 0.24197072451914337 - >>> gaussian(24) + >>> float(gaussian(24)) 3.342714441794458e-126 - >>> gaussian(1, 4, 2) + >>> float(gaussian(1, 4, 2)) 0.06475879783294587 - >>> gaussian(1, 5, 3) + >>> float(gaussian(1, 5, 3)) 0.05467002489199788 Supports NumPy Arrays @@ -29,7 +29,7 @@ def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> int: 5.05227108e-15, 1.02797736e-18, 7.69459863e-23, 2.11881925e-27, 2.14638374e-32, 7.99882776e-38, 1.09660656e-43]) - >>> gaussian(15) + >>> float(gaussian(15)) 5.530709549844416e-50 >>> gaussian([1,2, 'string']) @@ -47,10 +47,10 @@ def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> int: ... 
OverflowError: (34, 'Result too large') - >>> gaussian(10**-326) + >>> float(gaussian(10**-326)) 0.3989422804014327 - >>> gaussian(2523, mu=234234, sigma=3425) + >>> float(gaussian(2523, mu=234234, sigma=3425)) 0.0 """ return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2)) diff --git a/maths/minkowski_distance.py b/maths/minkowski_distance.py index 3237124e8d36..99f02e31e417 100644 --- a/maths/minkowski_distance.py +++ b/maths/minkowski_distance.py @@ -19,7 +19,7 @@ def minkowski_distance( >>> minkowski_distance([1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], 2) 8.0 >>> import numpy as np - >>> np.isclose(5.0, minkowski_distance([5.0], [0.0], 3)) + >>> bool(np.isclose(5.0, minkowski_distance([5.0], [0.0], 3))) True >>> minkowski_distance([1.0], [2.0], -1) Traceback (most recent call last): diff --git a/maths/numerical_analysis/adams_bashforth.py b/maths/numerical_analysis/adams_bashforth.py index fb406171098a..26244a58552f 100644 --- a/maths/numerical_analysis/adams_bashforth.py +++ b/maths/numerical_analysis/adams_bashforth.py @@ -102,7 +102,7 @@ def step_3(self) -> np.ndarray: >>> def f(x, y): ... return x + y >>> y = AdamsBashforth(f, [0, 0.2, 0.4], [0, 0, 0.04], 0.2, 1).step_3() - >>> y[3] + >>> float(y[3]) 0.15533333333333332 >>> AdamsBashforth(f, [0, 0.2], [0, 0], 0.2, 1).step_3() @@ -140,9 +140,9 @@ def step_4(self) -> np.ndarray: ... return x + y >>> y = AdamsBashforth( ... f, [0, 0.2, 0.4, 0.6], [0, 0, 0.04, 0.128], 0.2, 1).step_4() - >>> y[4] + >>> float(y[4]) 0.30699999999999994 - >>> y[5] + >>> float(y[5]) 0.5771083333333333 >>> AdamsBashforth(f, [0, 0.2, 0.4], [0, 0, 0.04], 0.2, 1).step_4() @@ -185,7 +185,7 @@ def step_5(self) -> np.ndarray: >>> y = AdamsBashforth( ... f, [0, 0.2, 0.4, 0.6, 0.8], [0, 0.02140, 0.02140, 0.22211, 0.42536], ... 
0.2, 1).step_5() - >>> y[-1] + >>> float(y[-1]) 0.05436839444444452 >>> AdamsBashforth(f, [0, 0.2, 0.4], [0, 0, 0.04], 0.2, 1).step_5() diff --git a/maths/numerical_analysis/runge_kutta.py b/maths/numerical_analysis/runge_kutta.py index 4cac017ee89e..3a25b0fb0173 100644 --- a/maths/numerical_analysis/runge_kutta.py +++ b/maths/numerical_analysis/runge_kutta.py @@ -19,7 +19,7 @@ def runge_kutta(f, y0, x0, h, x_end): ... return y >>> y0 = 1 >>> y = runge_kutta(f, y0, 0.0, 0.01, 5) - >>> y[-1] + >>> float(y[-1]) 148.41315904125113 """ n = int(np.ceil((x_end - x0) / h)) diff --git a/maths/numerical_analysis/runge_kutta_fehlberg_45.py b/maths/numerical_analysis/runge_kutta_fehlberg_45.py index 8181fe3015fc..0fbd60a35c1a 100644 --- a/maths/numerical_analysis/runge_kutta_fehlberg_45.py +++ b/maths/numerical_analysis/runge_kutta_fehlberg_45.py @@ -34,12 +34,12 @@ def runge_kutta_fehlberg_45( >>> def f(x, y): ... return 1 + y**2 >>> y = runge_kutta_fehlberg_45(f, 0, 0, 0.2, 1) - >>> y[1] + >>> float(y[1]) 0.2027100937470787 >>> def f(x,y): ... return x >>> y = runge_kutta_fehlberg_45(f, -1, 0, 0.2, 0) - >>> y[1] + >>> float(y[1]) -0.18000000000000002 >>> y = runge_kutta_fehlberg_45(5, 0, 0, 0.1, 1) Traceback (most recent call last): diff --git a/maths/numerical_analysis/runge_kutta_gills.py b/maths/numerical_analysis/runge_kutta_gills.py index 451cde4cb935..5d9672679813 100644 --- a/maths/numerical_analysis/runge_kutta_gills.py +++ b/maths/numerical_analysis/runge_kutta_gills.py @@ -34,7 +34,7 @@ def runge_kutta_gills( >>> def f(x, y): ... return (x-y)/2 >>> y = runge_kutta_gills(f, 0, 3, 0.2, 5) - >>> y[-1] + >>> float(y[-1]) 3.4104259225717537 >>> def f(x,y): diff --git a/maths/softmax.py b/maths/softmax.py index 04cf77525420..95c95e66f59e 100644 --- a/maths/softmax.py +++ b/maths/softmax.py @@ -28,7 +28,7 @@ def softmax(vector): The softmax vector adds up to one. 
We need to ceil to mitigate for precision - >>> np.ceil(np.sum(softmax([1,2,3,4]))) + >>> float(np.ceil(np.sum(softmax([1,2,3,4])))) 1.0 >>> vec = np.array([5,5]) diff --git a/neural_network/two_hidden_layers_neural_network.py b/neural_network/two_hidden_layers_neural_network.py index d488de590cc2..1b7c0beed3ba 100644 --- a/neural_network/two_hidden_layers_neural_network.py +++ b/neural_network/two_hidden_layers_neural_network.py @@ -64,7 +64,7 @@ def feedforward(self) -> np.ndarray: >>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val) >>> res = nn.feedforward() >>> array_sum = np.sum(res) - >>> np.isnan(array_sum) + >>> bool(np.isnan(array_sum)) False """ # Layer_between_input_and_first_hidden_layer is the layer connecting the @@ -105,7 +105,7 @@ def back_propagation(self) -> None: >>> res = nn.feedforward() >>> nn.back_propagation() >>> updated_weights = nn.second_hidden_layer_and_output_layer_weights - >>> (res == updated_weights).all() + >>> bool((res == updated_weights).all()) False """ @@ -171,7 +171,7 @@ def train(self, output: np.ndarray, iterations: int, give_loss: bool) -> None: >>> first_iteration_weights = nn.feedforward() >>> nn.back_propagation() >>> updated_weights = nn.second_hidden_layer_and_output_layer_weights - >>> (first_iteration_weights == updated_weights).all() + >>> bool((first_iteration_weights == updated_weights).all()) False """ for iteration in range(1, iterations + 1): diff --git a/other/bankers_algorithm.py b/other/bankers_algorithm.py index 858eb0b2c524..d4254f479a4f 100644 --- a/other/bankers_algorithm.py +++ b/other/bankers_algorithm.py @@ -87,9 +87,11 @@ def __need_index_manager(self) -> dict[int, list[int]]: This function builds an index control dictionary to track original ids/indices of processes when altered during execution of method "main" Return: {0: [a: int, b: int], 1: [c: int, d: int]} - >>> (BankersAlgorithm(test_claim_vector, test_allocated_res_table, - ... 
test_maximum_claim_table)._BankersAlgorithm__need_index_manager() - ... ) # doctest: +NORMALIZE_WHITESPACE + >>> index_control = BankersAlgorithm( + ... test_claim_vector, test_allocated_res_table, test_maximum_claim_table + ... )._BankersAlgorithm__need_index_manager() + >>> {key: [int(x) for x in value] for key, value + ... in index_control.items()} # doctest: +NORMALIZE_WHITESPACE {0: [1, 2, 0, 3], 1: [0, 1, 3, 1], 2: [1, 1, 0, 2], 3: [1, 3, 2, 0], 4: [2, 0, 0, 3]} """ diff --git a/physics/in_static_equilibrium.py b/physics/in_static_equilibrium.py index e3c2f9d07aed..fb5a9b5fff66 100644 --- a/physics/in_static_equilibrium.py +++ b/physics/in_static_equilibrium.py @@ -53,7 +53,7 @@ def in_static_equilibrium( # summation of moments is zero moments: NDArray[float64] = cross(location, forces) sum_moments: float = sum(moments) - return abs(sum_moments) < eps + return bool(abs(sum_moments) < eps) if __name__ == "__main__": diff --git a/requirements.txt b/requirements.txt index bb3d671393b9..afbf25ba6edc 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ beautifulsoup4 fake_useragent imageio -keras ; python_version < '3.12' +keras lxml matplotlib numpy @@ -17,7 +17,7 @@ rich scikit-learn statsmodels sympy -tensorflow +tensorflow ; python_version < '3.13' tweepy # yulewalker # uncomment once audio_filters/equal_loudness_filter.py is fixed typing_extensions diff --git a/web_programming/get_user_tweets.py b/web_programming/get_user_tweets.py.DISABLED similarity index 100% rename from web_programming/get_user_tweets.py rename to web_programming/get_user_tweets.py.DISABLED From 0abeeab39f4a612968a10b0541f630239b78f34f Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Tue, 1 Oct 2024 17:32:31 +0200 Subject: [PATCH 116/260] Drop six from our GitHub Actions (#11621) Drop https://six.readthedocs.io --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml 
index dad2b2fac086..f54cc982d1ec 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -20,7 +20,7 @@ jobs: key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }} - name: Install dependencies run: | - python -m pip install --upgrade pip setuptools six wheel + python -m pip install --upgrade pip setuptools wheel python -m pip install pytest-cov -r requirements.txt - name: Run tests # TODO: #8818 Re-enable quantum tests From 43a47e01eb2c2b681fa377b02150edba5cc76e32 Mon Sep 17 00:00:00 2001 From: Hardik Pawar <97388607+Hardvan@users.noreply.github.com> Date: Wed, 2 Oct 2024 08:18:17 +0530 Subject: [PATCH 117/260] Add word ladder algorithm in backtracking (#11590) * Add word ladder algorithm in backtracking * Improve comments and implement ruff checks * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Change BFS to Backtracking * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Incorporate PR Changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add type hints for backtrack function * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: Hardvan Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 1 + backtracking/word_ladder.py | 100 ++++++++++++++++++++++++++++++++++++ 2 files changed, 101 insertions(+) create mode 100644 backtracking/word_ladder.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 56ab8377f16b..cdbbac684fd2 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -22,6 +22,7 @@ * [Rat In Maze](backtracking/rat_in_maze.py) * [Sudoku](backtracking/sudoku.py) * [Sum Of Subsets](backtracking/sum_of_subsets.py) + * [Word Ladder](backtracking/word_ladder.py) * [Word Search](backtracking/word_search.py) ## Bit Manipulation 
diff --git a/backtracking/word_ladder.py b/backtracking/word_ladder.py new file mode 100644 index 000000000000..7d9fd00f6669 --- /dev/null +++ b/backtracking/word_ladder.py @@ -0,0 +1,100 @@ +""" +Word Ladder is a classic problem in computer science. +The problem is to transform a start word into an end word +by changing one letter at a time. +Each intermediate word must be a valid word from a given list of words. +The goal is to find a transformation sequence +from the start word to the end word. + +Wikipedia: https://en.wikipedia.org/wiki/Word_ladder +""" + +import string + + +def backtrack( + current_word: str, path: list[str], end_word: str, word_set: set[str] +) -> list[str]: + """ + Helper function to perform backtracking to find the transformation + from the current_word to the end_word. + + Parameters: + current_word (str): The current word in the transformation sequence. + path (list[str]): The list of transformations from begin_word to current_word. + end_word (str): The target word for transformation. + word_set (set[str]): The set of valid words for transformation. + + Returns: + list[str]: The list of transformations from begin_word to end_word. + Returns an empty list if there is no valid + transformation from current_word to end_word. 
+ + Example: + >>> backtrack("hit", ["hit"], "cog", {"hot", "dot", "dog", "lot", "log", "cog"}) + ['hit', 'hot', 'dot', 'lot', 'log', 'cog'] + + >>> backtrack("hit", ["hit"], "cog", {"hot", "dot", "dog", "lot", "log"}) + [] + + >>> backtrack("lead", ["lead"], "gold", {"load", "goad", "gold", "lead", "lord"}) + ['lead', 'lead', 'load', 'goad', 'gold'] + + >>> backtrack("game", ["game"], "code", {"came", "cage", "code", "cade", "gave"}) + ['game', 'came', 'cade', 'code'] + """ + + # Base case: If the current word is the end word, return the path + if current_word == end_word: + return path + + # Try all possible single-letter transformations + for i in range(len(current_word)): + for c in string.ascii_lowercase: # Try changing each letter + transformed_word = current_word[:i] + c + current_word[i + 1 :] + if transformed_word in word_set: + word_set.remove(transformed_word) + # Recur with the new word added to the path + result = backtrack( + transformed_word, [*path, transformed_word], end_word, word_set + ) + if result: # valid transformation found + return result + word_set.add(transformed_word) # backtrack + + return [] # No valid transformation found + + +def word_ladder(begin_word: str, end_word: str, word_set: set[str]) -> list[str]: + """ + Solve the Word Ladder problem using Backtracking and return + the list of transformations from begin_word to end_word. + + Parameters: + begin_word (str): The word from which the transformation starts. + end_word (str): The target word for transformation. + word_list (list[str]): The list of valid words for transformation. + + Returns: + list[str]: The list of transformations from begin_word to end_word. + Returns an empty list if there is no valid transformation. 
+ + Example: + >>> word_ladder("hit", "cog", ["hot", "dot", "dog", "lot", "log", "cog"]) + ['hit', 'hot', 'dot', 'lot', 'log', 'cog'] + + >>> word_ladder("hit", "cog", ["hot", "dot", "dog", "lot", "log"]) + [] + + >>> word_ladder("lead", "gold", ["load", "goad", "gold", "lead", "lord"]) + ['lead', 'lead', 'load', 'goad', 'gold'] + + >>> word_ladder("game", "code", ["came", "cage", "code", "cade", "gave"]) + ['game', 'came', 'cade', 'code'] + """ + + if end_word not in word_set: # no valid transformation possible + return [] + + # Perform backtracking starting from the begin_word + return backtrack(begin_word, [begin_word], end_word, word_set) From 00e9d862248a27281d4de24c8c7eb2d7b018531c Mon Sep 17 00:00:00 2001 From: Hardik Pawar <97388607+Hardvan@users.noreply.github.com> Date: Wed, 2 Oct 2024 08:24:12 +0530 Subject: [PATCH 118/260] Improve comments, add doctests in symmetric_tree.py (#11619) --- data_structures/binary_tree/symmetric_tree.py | 67 +++++++++++++++++-- 1 file changed, 62 insertions(+), 5 deletions(-) diff --git a/data_structures/binary_tree/symmetric_tree.py b/data_structures/binary_tree/symmetric_tree.py index 98a766cab988..2bfeac98b2c9 100644 --- a/data_structures/binary_tree/symmetric_tree.py +++ b/data_structures/binary_tree/symmetric_tree.py @@ -13,7 +13,21 @@ @dataclass class Node: """ - A Node has data variable and pointers to Nodes to its left and right. + A Node represents an element of a binary tree, which contains: + + Attributes: + data: The value stored in the node (int). + left: Pointer to the left child node (Node or None). + right: Pointer to the right child node (Node or None). + + Example: + >>> node = Node(1, Node(2), Node(3)) + >>> node.data + 1 + >>> node.left.data + 2 + >>> node.right.data + 3 """ data: int @@ -24,12 +38,25 @@ class Node: def make_symmetric_tree() -> Node: r""" Create a symmetric tree for testing. + The tree looks like this: 1 / \ 2 2 / \ / \ 3 4 4 3 + + Returns: + Node: Root node of a symmetric tree. 
+ + Example: + >>> tree = make_symmetric_tree() + >>> tree.data + 1 + >>> tree.left.data == tree.right.data + True + >>> tree.left.left.data == tree.right.right.data + True """ root = Node(1) root.left = Node(2) @@ -43,13 +70,26 @@ def make_symmetric_tree() -> Node: def make_asymmetric_tree() -> Node: r""" - Create a asymmetric tree for testing. + Create an asymmetric tree for testing. + The tree looks like this: 1 / \ 2 2 / \ / \ 3 4 3 4 + + Returns: + Node: Root node of an asymmetric tree. + + Example: + >>> tree = make_asymmetric_tree() + >>> tree.data + 1 + >>> tree.left.data == tree.right.data + True + >>> tree.left.left.data == tree.right.right.data + False """ root = Node(1) root.left = Node(2) @@ -63,7 +103,15 @@ def make_asymmetric_tree() -> Node: def is_symmetric_tree(tree: Node) -> bool: """ - Test cases for is_symmetric_tree function + Check if a binary tree is symmetric (i.e., a mirror of itself). + + Parameters: + tree: The root node of the binary tree. + + Returns: + bool: True if the tree is symmetric, False otherwise. + + Example: >>> is_symmetric_tree(make_symmetric_tree()) True >>> is_symmetric_tree(make_asymmetric_tree()) @@ -76,8 +124,17 @@ def is_symmetric_tree(tree: Node) -> bool: def is_mirror(left: Node | None, right: Node | None) -> bool: """ + Check if two subtrees are mirror images of each other. + + Parameters: + left: The root node of the left subtree. + right: The root node of the right subtree. + + Returns: + bool: True if the two subtrees are mirrors of each other, False otherwise. + + Example: >>> tree1 = make_symmetric_tree() - >>> tree1.right.right = Node(3) >>> is_mirror(tree1.left, tree1.right) True >>> tree2 = make_asymmetric_tree() @@ -91,7 +148,7 @@ def is_mirror(left: Node | None, right: Node | None) -> bool: # One side is empty while the other is not, which is not symmetric. return False if left.data == right.data: - # The values match, so check the subtree + # The values match, so check the subtrees recursively. 
return is_mirror(left.left, right.right) and is_mirror(left.right, right.left) return False From 918fa8bb8ae1f052921fffd188d229d4713c73c9 Mon Sep 17 00:00:00 2001 From: 1227haran <68032825+1227haran@users.noreply.github.com> Date: Wed, 2 Oct 2024 23:37:07 +0530 Subject: [PATCH 119/260] Optimized O(n) to O(1) (#11669) --- data_structures/linked_list/has_loop.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/data_structures/linked_list/has_loop.py b/data_structures/linked_list/has_loop.py index bc06ffe150e8..f49e01579adc 100644 --- a/data_structures/linked_list/has_loop.py +++ b/data_structures/linked_list/has_loop.py @@ -14,11 +14,11 @@ def __init__(self, data: Any) -> None: def __iter__(self): node = self - visited = [] + visited = set() while node: if node in visited: raise ContainsLoopError - visited.append(node) + visited.add(node) yield node.data node = node.next_node From f4b4ac159a17e0621e7f37141b165d58ca655b81 Mon Sep 17 00:00:00 2001 From: Ali Rashid <110668489+alirashidAR@users.noreply.github.com> Date: Thu, 3 Oct 2024 05:24:56 +0530 Subject: [PATCH 120/260] Adding Doctests to floyd_warshall.py (#11690) * Ruff test resolution * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- dynamic_programming/floyd_warshall.py | 47 +++++++++++++++++++++++++-- 1 file changed, 45 insertions(+), 2 deletions(-) diff --git a/dynamic_programming/floyd_warshall.py b/dynamic_programming/floyd_warshall.py index 2331f3e65483..b92c6667fb5c 100644 --- a/dynamic_programming/floyd_warshall.py +++ b/dynamic_programming/floyd_warshall.py @@ -12,19 +12,58 @@ def __init__(self, n=0): # a graph with Node 0,1,...,N-1 ] # dp[i][j] stores minimum distance from i to j def add_edge(self, u, v, w): + """ + Adds a directed edge from node u + to node v with weight w. 
+ + >>> g = Graph(3) + >>> g.add_edge(0, 1, 5) + >>> g.dp[0][1] + 5 + """ self.dp[u][v] = w def floyd_warshall(self): + """ + Computes the shortest paths between all pairs of + nodes using the Floyd-Warshall algorithm. + + >>> g = Graph(3) + >>> g.add_edge(0, 1, 1) + >>> g.add_edge(1, 2, 2) + >>> g.floyd_warshall() + >>> g.show_min(0, 2) + 3 + >>> g.show_min(2, 0) + inf + """ for k in range(self.n): for i in range(self.n): for j in range(self.n): self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j]) def show_min(self, u, v): + """ + Returns the minimum distance from node u to node v. + + >>> g = Graph(3) + >>> g.add_edge(0, 1, 3) + >>> g.add_edge(1, 2, 4) + >>> g.floyd_warshall() + >>> g.show_min(0, 2) + 7 + >>> g.show_min(1, 0) + inf + """ return self.dp[u][v] if __name__ == "__main__": + import doctest + + doctest.testmod() + + # Example usage graph = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 10) @@ -38,5 +77,9 @@ def show_min(self, u, v): graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() - graph.show_min(1, 4) - graph.show_min(0, 3) + print( + graph.show_min(1, 4) + ) # Should output the minimum distance from node 1 to node 4 + print( + graph.show_min(0, 3) + ) # Should output the minimum distance from node 0 to node 3 From 080e7903a06765808c12c0c9c0b242f485cb9ce7 Mon Sep 17 00:00:00 2001 From: Aswin P Kumar <118362715+AswinPKumar01@users.noreply.github.com> Date: Thu, 3 Oct 2024 05:33:48 +0530 Subject: [PATCH 121/260] Add Word Break algorithm (#11687) * Add Word Break algorithm * Add Word Break algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- backtracking/word_break.py | 71 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) create mode 100644 backtracking/word_break.py diff --git a/backtracking/word_break.py 
b/backtracking/word_break.py new file mode 100644 index 000000000000..1f2ab073f499 --- /dev/null +++ b/backtracking/word_break.py @@ -0,0 +1,71 @@ +""" +Word Break Problem is a well-known problem in computer science. +Given a string and a dictionary of words, the task is to determine if +the string can be segmented into a sequence of one or more dictionary words. + +Wikipedia: https://en.wikipedia.org/wiki/Word_break_problem +""" + + +def backtrack(input_string: str, word_dict: set[str], start: int) -> bool: + """ + Helper function that uses backtracking to determine if a valid + word segmentation is possible starting from index 'start'. + + Parameters: + input_string (str): The input string to be segmented. + word_dict (set[str]): A set of valid dictionary words. + start (int): The starting index of the substring to be checked. + + Returns: + bool: True if a valid segmentation is possible, otherwise False. + + Example: + >>> backtrack("leetcode", {"leet", "code"}, 0) + True + + >>> backtrack("applepenapple", {"apple", "pen"}, 0) + True + + >>> backtrack("catsandog", {"cats", "dog", "sand", "and", "cat"}, 0) + False + """ + + # Base case: if the starting index has reached the end of the string + if start == len(input_string): + return True + + # Try every possible substring from 'start' to 'end' + for end in range(start + 1, len(input_string) + 1): + if input_string[start:end] in word_dict and backtrack( + input_string, word_dict, end + ): + return True + + return False + + +def word_break(input_string: str, word_dict: set[str]) -> bool: + """ + Determines if the input string can be segmented into a sequence of + valid dictionary words using backtracking. + + Parameters: + input_string (str): The input string to segment. + word_dict (set[str]): The set of valid words. + + Returns: + bool: True if the string can be segmented into valid words, otherwise False. 
+ + Example: + >>> word_break("leetcode", {"leet", "code"}) + True + + >>> word_break("applepenapple", {"apple", "pen"}) + True + + >>> word_break("catsandog", {"cats", "dog", "sand", "and", "cat"}) + False + """ + + return backtrack(input_string, word_dict, 0) From 40f65e8150045dc82a7a58fe7cff6bfb353999f2 Mon Sep 17 00:00:00 2001 From: JeevaRamanathan <64531160+JeevaRamanathan@users.noreply.github.com> Date: Thu, 3 Oct 2024 05:48:01 +0530 Subject: [PATCH 122/260] Improve comments, docstrings in next_greatest_element.py (#11685) * Improve comments in next_greatest_element.py Signed-off-by: JeevaRamanathan * few changes Signed-off-by: JeevaRamanathan * updated descriptions of the functions parameters Signed-off-by: JeevaRamanathan --------- Signed-off-by: JeevaRamanathan --- .../stacks/next_greater_element.py | 60 ++++++++++++++----- 1 file changed, 46 insertions(+), 14 deletions(-) diff --git a/data_structures/stacks/next_greater_element.py b/data_structures/stacks/next_greater_element.py index 7d76d1f47dfa..216850b4b894 100644 --- a/data_structures/stacks/next_greater_element.py +++ b/data_structures/stacks/next_greater_element.py @@ -6,9 +6,20 @@ def next_greatest_element_slow(arr: list[float]) -> list[float]: """ - Get the Next Greatest Element (NGE) for all elements in a list. - Maximum element present after the current one which is also greater than the - current one. + Get the Next Greatest Element (NGE) for each element in the array + by checking all subsequent elements to find the next greater one. + + This is a brute-force implementation, and it has a time complexity + of O(n^2), where n is the size of the array. + + Args: + arr: List of numbers for which the NGE is calculated. + + Returns: + List containing the next greatest elements. If no + greater element is found, -1 is placed in the result. 
+ + Example: >>> next_greatest_element_slow(arr) == expect True """ @@ -28,9 +39,21 @@ def next_greatest_element_slow(arr: list[float]) -> list[float]: def next_greatest_element_fast(arr: list[float]) -> list[float]: """ - Like next_greatest_element_slow() but changes the loops to use - enumerate() instead of range(len()) for the outer loop and - for in a slice of arr for the inner loop. + Find the Next Greatest Element (NGE) for each element in the array + using a more readable approach. This implementation utilizes + enumerate() for the outer loop and slicing for the inner loop. + + While this improves readability over next_greatest_element_slow(), + it still has a time complexity of O(n^2). + + Args: + arr: List of numbers for which the NGE is calculated. + + Returns: + List containing the next greatest elements. If no + greater element is found, -1 is placed in the result. + + Example: >>> next_greatest_element_fast(arr) == expect True """ @@ -47,14 +70,23 @@ def next_greatest_element_fast(arr: list[float]) -> list[float]: def next_greatest_element(arr: list[float]) -> list[float]: """ - Get the Next Greatest Element (NGE) for all elements in a list. - Maximum element present after the current one which is also greater than the - current one. - - A naive way to solve this is to take two loops and check for the next bigger - number but that will make the time complexity as O(n^2). The better way to solve - this would be to use a stack to keep track of maximum number giving a linear time - solution. + Efficient solution to find the Next Greatest Element (NGE) for all elements + using a stack. The time complexity is reduced to O(n), making it suitable + for larger arrays. + + The stack keeps track of elements for which the next greater element hasn't + been found yet. By iterating through the array in reverse (from the last + element to the first), the stack is used to efficiently determine the next + greatest element for each element. 
+ + Args: + arr: List of numbers for which the NGE is calculated. + + Returns: + List containing the next greatest elements. If no + greater element is found, -1 is placed in the result. + + Example: >>> next_greatest_element(arr) == expect True """ From e20b503b24fc271321a23584772ad8f0db17daf2 Mon Sep 17 00:00:00 2001 From: Hardik Pawar <97388607+Hardvan@users.noreply.github.com> Date: Fri, 4 Oct 2024 14:36:08 +0530 Subject: [PATCH 123/260] Improve comments, add doctests for kahns_algorithm_topo.py (#11668) * Improve comments, add doctests for kahns_algorithm_topo.py * Improve function docstring * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Rename variables, remove print --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- graphs/kahns_algorithm_topo.py | 67 +++++++++++++++++++++++----------- 1 file changed, 46 insertions(+), 21 deletions(-) diff --git a/graphs/kahns_algorithm_topo.py b/graphs/kahns_algorithm_topo.py index b1260bd5bd9b..c956cf9f48fd 100644 --- a/graphs/kahns_algorithm_topo.py +++ b/graphs/kahns_algorithm_topo.py @@ -1,36 +1,61 @@ -def topological_sort(graph): +def topological_sort(graph: dict[int, list[int]]) -> list[int] | None: """ - Kahn's Algorithm is used to find Topological ordering of Directed Acyclic Graph - using BFS + Perform topological sorting of a Directed Acyclic Graph (DAG) + using Kahn's Algorithm via Breadth-First Search (BFS). + + Topological sorting is a linear ordering of vertices in a graph such that for + every directed edge u → v, vertex u comes before vertex v in the ordering. + + Parameters: + graph: Adjacency list representing the directed graph where keys are + vertices, and values are lists of adjacent vertices. + + Returns: + The topologically sorted order of vertices if the graph is a DAG. + Returns None if the graph contains a cycle. 
+ + Example: + >>> graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []} + >>> topological_sort(graph) + [0, 1, 2, 3, 4, 5] + + >>> graph_with_cycle = {0: [1], 1: [2], 2: [0]} + >>> topological_sort(graph_with_cycle) """ + indegree = [0] * len(graph) queue = [] - topo = [] - cnt = 0 + topo_order = [] + processed_vertices_count = 0 + # Calculate the indegree of each vertex for values in graph.values(): for i in values: indegree[i] += 1 + # Add all vertices with 0 indegree to the queue for i in range(len(indegree)): if indegree[i] == 0: queue.append(i) + # Perform BFS while queue: vertex = queue.pop(0) - cnt += 1 - topo.append(vertex) - for x in graph[vertex]: - indegree[x] -= 1 - if indegree[x] == 0: - queue.append(x) - - if cnt != len(graph): - print("Cycle exists") - else: - print(topo) - - -# Adjacency List of Graph -graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []} -topological_sort(graph) + processed_vertices_count += 1 + topo_order.append(vertex) + + # Traverse neighbors + for neighbor in graph[vertex]: + indegree[neighbor] -= 1 + if indegree[neighbor] == 0: + queue.append(neighbor) + + if processed_vertices_count != len(graph): + return None # no topological ordering exists due to cycle + return topo_order # valid topological ordering + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 917ad62105dc829e45c0732d9ac2aae7ef358627 Mon Sep 17 00:00:00 2001 From: Sai Aswin Madhavan Date: Fri, 4 Oct 2024 14:58:50 +0530 Subject: [PATCH 124/260] Removed incorrect type hints (#11711) --- strings/min_cost_string_conversion.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/strings/min_cost_string_conversion.py b/strings/min_cost_string_conversion.py index d147a9d7954c..40d54f0e8420 100644 --- a/strings/min_cost_string_conversion.py +++ b/strings/min_cost_string_conversion.py @@ -21,7 +21,6 @@ def compute_transform_tables( destination_seq = list(destination_string) len_source_seq = len(source_seq) 
len_destination_seq = len(destination_seq) - costs = [ [0 for _ in range(len_destination_seq + 1)] for _ in range(len_source_seq + 1) ] @@ -31,28 +30,28 @@ def compute_transform_tables( for i in range(1, len_source_seq + 1): costs[i][0] = i * delete_cost - ops[i][0] = f"D{source_seq[i - 1]:c}" + ops[i][0] = f"D{source_seq[i - 1]}" for i in range(1, len_destination_seq + 1): costs[0][i] = i * insert_cost - ops[0][i] = f"I{destination_seq[i - 1]:c}" + ops[0][i] = f"I{destination_seq[i - 1]}" for i in range(1, len_source_seq + 1): for j in range(1, len_destination_seq + 1): if source_seq[i - 1] == destination_seq[j - 1]: costs[i][j] = costs[i - 1][j - 1] + copy_cost - ops[i][j] = f"C{source_seq[i - 1]:c}" + ops[i][j] = f"C{source_seq[i - 1]}" else: costs[i][j] = costs[i - 1][j - 1] + replace_cost - ops[i][j] = f"R{source_seq[i - 1]:c}" + str(destination_seq[j - 1]) + ops[i][j] = f"R{source_seq[i - 1]}" + str(destination_seq[j - 1]) if costs[i - 1][j] + delete_cost < costs[i][j]: costs[i][j] = costs[i - 1][j] + delete_cost - ops[i][j] = f"D{source_seq[i - 1]:c}" + ops[i][j] = f"D{source_seq[i - 1]}" if costs[i][j - 1] + insert_cost < costs[i][j]: costs[i][j] = costs[i][j - 1] + insert_cost - ops[i][j] = f"I{destination_seq[j - 1]:c}" + ops[i][j] = f"I{destination_seq[j - 1]}" return costs, ops From 59ff87dc55b704dc7d3683bb6fabc7c4dc0afade Mon Sep 17 00:00:00 2001 From: Lonercode <91500485+Lonercode@users.noreply.github.com> Date: Fri, 4 Oct 2024 10:36:14 +0100 Subject: [PATCH 125/260] Added doctests to min_cost_string_conversion.py and removed :c specifier (#11721) * Added doctests to min_cost_string_conversion.py and removed :c specifier * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * resolved line length issues based on ruff requirements * modified in compliance with ruff for line length * Update strings/min_cost_string_conversion.py --------- Co-authored-by: pre-commit-ci[bot] 
<66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- strings/min_cost_string_conversion.py | 35 +++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/strings/min_cost_string_conversion.py b/strings/min_cost_string_conversion.py index 40d54f0e8420..a5a3c4a4e3f8 100644 --- a/strings/min_cost_string_conversion.py +++ b/strings/min_cost_string_conversion.py @@ -17,6 +17,23 @@ def compute_transform_tables( delete_cost: int, insert_cost: int, ) -> tuple[list[list[int]], list[list[str]]]: + """ + Finds the most cost efficient sequence + for converting one string into another. + + >>> costs, operations = compute_transform_tables("cat", "cut", 1, 2, 3, 3) + >>> costs[0][:4] + [0, 3, 6, 9] + >>> costs[2][:4] + [6, 4, 3, 6] + >>> operations[0][:4] + ['0', 'Ic', 'Iu', 'It'] + >>> operations[3][:4] + ['Dt', 'Dt', 'Rtu', 'Ct'] + + >>> compute_transform_tables("", "", 1, 2, 3, 3) + ([[0]], [['0']]) + """ source_seq = list(source_string) destination_seq = list(destination_string) len_source_seq = len(source_seq) @@ -57,6 +74,24 @@ def compute_transform_tables( def assemble_transformation(ops: list[list[str]], i: int, j: int) -> list[str]: + """ + Assembles the transformations based on the ops table. + + >>> ops = [['0', 'Ic', 'Iu', 'It'], + ... ['Dc', 'Cc', 'Iu', 'It'], + ... ['Da', 'Da', 'Rau', 'Rat'], + ... 
['Dt', 'Dt', 'Rtu', 'Ct']] + >>> x = len(ops) - 1 + >>> y = len(ops[0]) - 1 + >>> assemble_transformation(ops, x, y) + ['Cc', 'Rau', 'Ct'] + + >>> ops1 = [['0']] + >>> x1 = len(ops1) - 1 + >>> y1 = len(ops1[0]) - 1 + >>> assemble_transformation(ops1, x1, y1) + [] + """ if i == 0 and j == 0: return [] elif ops[i][j][0] in {"C", "R"}: From 9a572dec2b6011e7c2c0d82f50989b3a404ea426 Mon Sep 17 00:00:00 2001 From: ARNAV RAJ <126798788+Acuspeedster@users.noreply.github.com> Date: Fri, 4 Oct 2024 21:59:39 +0530 Subject: [PATCH 126/260] feat: Implemented Matrix Exponentiation Method (#11747) * feat: add Matrix Exponentiation method docs: updated the header documentation and added new documentation for the new function. * feat: added new function matrix exponetiation method * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * feat: This function uses the tail-recursive form of the Euclidean algorithm to calculate * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * reduced the number of characters per line in the comments * removed unwanted code * feat: Implemented a new function to swaap numbers without dummy variable * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * removed previos code * Done with the required changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Done with the required changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Done with the required changes * Done with the required changes * Done with the required changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update maths/fibonacci.py Co-authored-by: Tianyi Zheng * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Done 
with the required changes * Done with the required changes * Done with the required changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- maths/fibonacci.py | 88 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 88 insertions(+) diff --git a/maths/fibonacci.py b/maths/fibonacci.py index 927700b0418e..24b2d7ae449e 100644 --- a/maths/fibonacci.py +++ b/maths/fibonacci.py @@ -7,6 +7,8 @@ NOTE 2: the Binet's formula function is much more limited in the size of inputs that it can handle due to the size limitations of Python floats +NOTE 3: the matrix function is the fastest and most memory efficient for large n + See benchmark numbers in __main__ for performance comparisons/ https://en.wikipedia.org/wiki/Fibonacci_number for more information @@ -17,6 +19,9 @@ from math import sqrt from time import time +import numpy as np +from numpy import ndarray + def time_func(func, *args, **kwargs): """ @@ -230,6 +235,88 @@ def fib_binet(n: int) -> list[int]: return [round(phi**i / sqrt_5) for i in range(n + 1)] +def matrix_pow_np(m: ndarray, power: int) -> ndarray: + """ + Raises a matrix to the power of 'power' using binary exponentiation. + + Args: + m: Matrix as a numpy array. + power: The power to which the matrix is to be raised. + + Returns: + The matrix raised to the power. + + Raises: + ValueError: If power is negative. + + >>> m = np.array([[1, 1], [1, 0]], dtype=int) + >>> matrix_pow_np(m, 0) # Identity matrix when raised to the power of 0 + array([[1, 0], + [0, 1]]) + + >>> matrix_pow_np(m, 1) # Same matrix when raised to the power of 1 + array([[1, 1], + [1, 0]]) + + >>> matrix_pow_np(m, 5) + array([[8, 5], + [5, 3]]) + + >>> matrix_pow_np(m, -1) + Traceback (most recent call last): + ... 
+ ValueError: power is negative + """ + result = np.array([[1, 0], [0, 1]], dtype=int) # Identity Matrix + base = m + if power < 0: # Negative power is not allowed + raise ValueError("power is negative") + while power: + if power % 2 == 1: + result = np.dot(result, base) + base = np.dot(base, base) + power //= 2 + return result + + +def fib_matrix_np(n: int) -> int: + """ + Calculates the n-th Fibonacci number using matrix exponentiation. + https://www.nayuki.io/page/fast-fibonacci-algorithms#:~:text= + Summary:%20The%20two%20fast%20Fibonacci%20algorithms%20are%20matrix + + Args: + n: Fibonacci sequence index + + Returns: + The n-th Fibonacci number. + + Raises: + ValueError: If n is negative. + + >>> fib_matrix_np(0) + 0 + >>> fib_matrix_np(1) + 1 + >>> fib_matrix_np(5) + 5 + >>> fib_matrix_np(10) + 55 + >>> fib_matrix_np(-1) + Traceback (most recent call last): + ... + ValueError: n is negative + """ + if n < 0: + raise ValueError("n is negative") + if n == 0: + return 0 + + m = np.array([[1, 1], [1, 0]], dtype=int) + result = matrix_pow_np(m, n - 1) + return int(result[0, 0]) + + if __name__ == "__main__": from doctest import testmod @@ -242,3 +329,4 @@ def fib_binet(n: int) -> list[int]: time_func(fib_memoization, num) # 0.0100 ms time_func(fib_recursive_cached, num) # 0.0153 ms time_func(fib_recursive, num) # 257.0910 ms + time_func(fib_matrix_np, num) # 0.0000 ms From 5a8655d306d872085112d965067fcdc440286928 Mon Sep 17 00:00:00 2001 From: 1227haran <68032825+1227haran@users.noreply.github.com> Date: Sat, 5 Oct 2024 22:49:58 +0530 Subject: [PATCH 127/260] Added new algorithm to generate numbers in lexicographical order (#11674) * Added algorithm to generate numbers in lexicographical order * Removed the test cases * Updated camelcase to snakecase * Added doctest * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added descriptive name for n * Reduced the number of letters * Updated the return type * 
[pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated import statement * Updated return type to Iterator[int] * removed parentheses --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../stacks/lexicographical_numbers.py | 38 +++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 data_structures/stacks/lexicographical_numbers.py diff --git a/data_structures/stacks/lexicographical_numbers.py b/data_structures/stacks/lexicographical_numbers.py new file mode 100644 index 000000000000..6a174e7d9e95 --- /dev/null +++ b/data_structures/stacks/lexicographical_numbers.py @@ -0,0 +1,38 @@ +from collections.abc import Iterator + + +def lexical_order(max_number: int) -> Iterator[int]: + """ + Generate numbers in lexical order from 1 to max_number. + + >>> " ".join(map(str, lexical_order(13))) + '1 10 11 12 13 2 3 4 5 6 7 8 9' + >>> list(lexical_order(1)) + [1] + >>> " ".join(map(str, lexical_order(20))) + '1 10 11 12 13 14 15 16 17 18 19 2 20 3 4 5 6 7 8 9' + >>> " ".join(map(str, lexical_order(25))) + '1 10 11 12 13 14 15 16 17 18 19 2 20 21 22 23 24 25 3 4 5 6 7 8 9' + >>> list(lexical_order(12)) + [1, 10, 11, 12, 2, 3, 4, 5, 6, 7, 8, 9] + """ + + stack = [1] + + while stack: + num = stack.pop() + if num > max_number: + continue + + yield num + if (num % 10) != 9: + stack.append(num + 1) + + stack.append(num * 10) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + print(f"Numbers from 1 to 25 in lexical order: {list(lexical_order(26))}") From 50aca04c67315ef7de7ef03e51a018075d8d026b Mon Sep 17 00:00:00 2001 From: Jeel Rupapara Date: Sat, 5 Oct 2024 22:51:43 +0530 Subject: [PATCH 128/260] feat: increase test coverage of longest_common_subsequence to 75% (#11777) --- .../longest_common_subsequence.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/dynamic_programming/longest_common_subsequence.py 
b/dynamic_programming/longest_common_subsequence.py index 9a98b1736ed5..4a6c880aff61 100644 --- a/dynamic_programming/longest_common_subsequence.py +++ b/dynamic_programming/longest_common_subsequence.py @@ -28,6 +28,24 @@ def longest_common_subsequence(x: str, y: str): (2, 'ph') >>> longest_common_subsequence("computer", "food") (1, 'o') + >>> longest_common_subsequence("", "abc") # One string is empty + (0, '') + >>> longest_common_subsequence("abc", "") # Other string is empty + (0, '') + >>> longest_common_subsequence("", "") # Both strings are empty + (0, '') + >>> longest_common_subsequence("abc", "def") # No common subsequence + (0, '') + >>> longest_common_subsequence("abc", "abc") # Identical strings + (3, 'abc') + >>> longest_common_subsequence("a", "a") # Single character match + (1, 'a') + >>> longest_common_subsequence("a", "b") # Single character no match + (0, '') + >>> longest_common_subsequence("abcdef", "ace") # Interleaved subsequence + (3, 'ace') + >>> longest_common_subsequence("ABCD", "ACBD") # No repeated characters + (3, 'ABD') """ # find the length of strings From ad6395d3408b9d80a0bef4d180d1e7613a55d807 Mon Sep 17 00:00:00 2001 From: Andrey Ivanov <97749666+ivnvxd@users.noreply.github.com> Date: Sat, 5 Oct 2024 18:24:58 +0100 Subject: [PATCH 129/260] Update ruff usage example in CONTRIBUTING.md (#11772) * Update ruff usage example * Update CONTRIBUTING.md Co-authored-by: Tianyi Zheng --------- Co-authored-by: Tianyi Zheng --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 096582e45afa..b5113212929a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -96,7 +96,7 @@ We want your work to be readable by others; therefore, we encourage you to note ```bash python3 -m pip install ruff # only required the first time - ruff . + ruff check ``` - Original code submission require docstrings or comments to describe your work. 
From fcf82a1eda21dcf36254a8fcaadc913f6a94c8da Mon Sep 17 00:00:00 2001 From: Vineet Kumar <108144301+whyvineet@users.noreply.github.com> Date: Sat, 5 Oct 2024 23:04:48 +0530 Subject: [PATCH 130/260] =?UTF-8?q?Implemented=20Exponential=20Search=20wi?= =?UTF-8?q?th=20binary=20search=20for=20improved=20perfor=E2=80=A6=20(#116?= =?UTF-8?q?66)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Implemented Exponential Search with binary search for improved performance on large sorted arrays. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added type hints and doctests for binary_search and exponential_search functions. Improved code documentation and ensured testability. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update and rename Exponential_Search.py to exponential_search.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- searches/exponential_search.py | 113 +++++++++++++++++++++++++++++++++ 1 file changed, 113 insertions(+) create mode 100644 searches/exponential_search.py diff --git a/searches/exponential_search.py b/searches/exponential_search.py new file mode 100644 index 000000000000..ed09b14e101c --- /dev/null +++ b/searches/exponential_search.py @@ -0,0 +1,113 @@ +#!/usr/bin/env python3 + +""" +Pure Python implementation of exponential search algorithm + +For more information, see the Wikipedia page: +https://en.wikipedia.org/wiki/Exponential_search + +For doctests run the following command: +python3 -m doctest -v exponential_search.py + +For manual testing run: +python3 exponential_search.py +""" + +from __future__ import annotations + + +def binary_search_by_recursion( + sorted_collection: list[int], item: int, left: int = 0, right: int = -1 +) 
-> int: + """Pure implementation of binary search algorithm in Python using recursion + + Be careful: the collection must be ascending sorted otherwise, the result will be + unpredictable. + + :param sorted_collection: some ascending sorted collection with comparable items + :param item: item value to search + :param left: starting index for the search + :param right: ending index for the search + :return: index of the found item or -1 if the item is not found + + Examples: + >>> binary_search_by_recursion([0, 5, 7, 10, 15], 0, 0, 4) + 0 + >>> binary_search_by_recursion([0, 5, 7, 10, 15], 15, 0, 4) + 4 + >>> binary_search_by_recursion([0, 5, 7, 10, 15], 5, 0, 4) + 1 + >>> binary_search_by_recursion([0, 5, 7, 10, 15], 6, 0, 4) + -1 + """ + if right < 0: + right = len(sorted_collection) - 1 + if list(sorted_collection) != sorted(sorted_collection): + raise ValueError("sorted_collection must be sorted in ascending order") + if right < left: + return -1 + + midpoint = left + (right - left) // 2 + + if sorted_collection[midpoint] == item: + return midpoint + elif sorted_collection[midpoint] > item: + return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1) + else: + return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right) + + +def exponential_search(sorted_collection: list[int], item: int) -> int: + """ + Pure implementation of an exponential search algorithm in Python. + For more information, refer to: + https://en.wikipedia.org/wiki/Exponential_search + + Be careful: the collection must be ascending sorted, otherwise the result will be + unpredictable. + + :param sorted_collection: some ascending sorted collection with comparable items + :param item: item value to search + :return: index of the found item or -1 if the item is not found + + The time complexity of this algorithm is O(log i) where i is the index of the item. 
+ + Examples: + >>> exponential_search([0, 5, 7, 10, 15], 0) + 0 + >>> exponential_search([0, 5, 7, 10, 15], 15) + 4 + >>> exponential_search([0, 5, 7, 10, 15], 5) + 1 + >>> exponential_search([0, 5, 7, 10, 15], 6) + -1 + """ + if list(sorted_collection) != sorted(sorted_collection): + raise ValueError("sorted_collection must be sorted in ascending order") + + if sorted_collection[0] == item: + return 0 + + bound = 1 + while bound < len(sorted_collection) and sorted_collection[bound] < item: + bound *= 2 + + left = bound // 2 + right = min(bound, len(sorted_collection) - 1) + return binary_search_by_recursion(sorted_collection, item, left, right) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + # Manual testing + user_input = input("Enter numbers separated by commas: ").strip() + collection = sorted(int(item) for item in user_input.split(",")) + target = int(input("Enter a number to search for: ")) + result = exponential_search(sorted_collection=collection, item=target) + if result == -1: + print(f"{target} was not found in {collection}.") + else: + print(f"{target} was found at index {result} in {collection}.") From 3422ebc75bda6aba9b234eb217a79f25bec65f21 Mon Sep 17 00:00:00 2001 From: Jeel Rupapara Date: Mon, 7 Oct 2024 12:00:11 +0530 Subject: [PATCH 131/260] feat: add testcase of polynom_for_points (#11811) * feat: add testcase of polynom_for_points * fix: remove the print from the testcase of points_to_polynomial * fix: remove print statement from old test cases --- linear_algebra/src/polynom_for_points.py | 42 ++++++++++++++---------- 1 file changed, 24 insertions(+), 18 deletions(-) diff --git a/linear_algebra/src/polynom_for_points.py b/linear_algebra/src/polynom_for_points.py index a9a9a8117c18..452f3edd4aee 100644 --- a/linear_algebra/src/polynom_for_points.py +++ b/linear_algebra/src/polynom_for_points.py @@ -3,30 +3,36 @@ def points_to_polynomial(coordinates: list[list[int]]) -> str: coordinates is a two dimensional matrix: 
[[x, y], [x, y], ...] number of points you want to use - >>> print(points_to_polynomial([])) + >>> points_to_polynomial([]) Traceback (most recent call last): ... ValueError: The program cannot work out a fitting polynomial. - >>> print(points_to_polynomial([[]])) + >>> points_to_polynomial([[]]) + Traceback (most recent call last): + ... + ValueError: The program cannot work out a fitting polynomial. + >>> points_to_polynomial([[1, 0], [2, 0], [3, 0]]) + 'f(x)=x^2*0.0+x^1*-0.0+x^0*0.0' + >>> points_to_polynomial([[1, 1], [2, 1], [3, 1]]) + 'f(x)=x^2*0.0+x^1*-0.0+x^0*1.0' + >>> points_to_polynomial([[1, 3], [2, 3], [3, 3]]) + 'f(x)=x^2*0.0+x^1*-0.0+x^0*3.0' + >>> points_to_polynomial([[1, 1], [2, 2], [3, 3]]) + 'f(x)=x^2*0.0+x^1*1.0+x^0*0.0' + >>> points_to_polynomial([[1, 1], [2, 4], [3, 9]]) + 'f(x)=x^2*1.0+x^1*-0.0+x^0*0.0' + >>> points_to_polynomial([[1, 3], [2, 6], [3, 11]]) + 'f(x)=x^2*1.0+x^1*-0.0+x^0*2.0' + >>> points_to_polynomial([[1, -3], [2, -6], [3, -11]]) + 'f(x)=x^2*-1.0+x^1*-0.0+x^0*-2.0' + >>> points_to_polynomial([[1, 5], [2, 2], [3, 9]]) + 'f(x)=x^2*5.0+x^1*-18.0+x^0*18.0' + >>> points_to_polynomial([[1, 1], [1, 2], [1, 3]]) + 'x=1' + >>> points_to_polynomial([[1, 1], [2, 2], [2, 2]]) Traceback (most recent call last): ... ValueError: The program cannot work out a fitting polynomial. 
- >>> print(points_to_polynomial([[1, 0], [2, 0], [3, 0]])) - f(x)=x^2*0.0+x^1*-0.0+x^0*0.0 - >>> print(points_to_polynomial([[1, 1], [2, 1], [3, 1]])) - f(x)=x^2*0.0+x^1*-0.0+x^0*1.0 - >>> print(points_to_polynomial([[1, 3], [2, 3], [3, 3]])) - f(x)=x^2*0.0+x^1*-0.0+x^0*3.0 - >>> print(points_to_polynomial([[1, 1], [2, 2], [3, 3]])) - f(x)=x^2*0.0+x^1*1.0+x^0*0.0 - >>> print(points_to_polynomial([[1, 1], [2, 4], [3, 9]])) - f(x)=x^2*1.0+x^1*-0.0+x^0*0.0 - >>> print(points_to_polynomial([[1, 3], [2, 6], [3, 11]])) - f(x)=x^2*1.0+x^1*-0.0+x^0*2.0 - >>> print(points_to_polynomial([[1, -3], [2, -6], [3, -11]])) - f(x)=x^2*-1.0+x^1*-0.0+x^0*-2.0 - >>> print(points_to_polynomial([[1, 5], [2, 2], [3, 9]])) - f(x)=x^2*5.0+x^1*-18.0+x^0*18.0 """ if len(coordinates) == 0 or not all(len(pair) == 2 for pair in coordinates): raise ValueError("The program cannot work out a fitting polynomial.") From cfd6d095f122d1d3ef2f3c2cdcf84864aac56fa7 Mon Sep 17 00:00:00 2001 From: 1227haran <68032825+1227haran@users.noreply.github.com> Date: Mon, 7 Oct 2024 14:06:15 +0530 Subject: [PATCH 132/260] Added max_sum_bst.py (#11832) * Added new algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated filename * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated the code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated the code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated code * 
[pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated the code * Updated code * Updated code * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated the code * Updated code * Updated code * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated code * updated * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated code * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated code * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review * Update maximum_sum_bst.py * def max_sum_bst(root: TreeNode | None) -> int: * def solver(node: TreeNode | None) -> tuple[bool, int, int, int]: --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../binary_tree/maximum_sum_bst.py | 78 +++++++++++++++++++ 1 file changed, 78 insertions(+) create mode 100644 data_structures/binary_tree/maximum_sum_bst.py diff --git a/data_structures/binary_tree/maximum_sum_bst.py b/data_structures/binary_tree/maximum_sum_bst.py new file mode 100644 index 000000000000..7dadc7b95920 --- /dev/null +++ b/data_structures/binary_tree/maximum_sum_bst.py @@ -0,0 +1,78 @@ +from __future__ import annotations + +import sys +from dataclasses import dataclass + 
+INT_MIN = -sys.maxsize + 1 +INT_MAX = sys.maxsize - 1 + + +@dataclass +class TreeNode: + val: int = 0 + left: TreeNode | None = None + right: TreeNode | None = None + + +def max_sum_bst(root: TreeNode | None) -> int: + """ + The solution traverses a binary tree to find the maximum sum of + keys in any subtree that is a Binary Search Tree (BST). It uses + recursion to validate BST properties and calculates sums, returning + the highest sum found among all valid BST subtrees. + + >>> t1 = TreeNode(4) + >>> t1.left = TreeNode(3) + >>> t1.left.left = TreeNode(1) + >>> t1.left.right = TreeNode(2) + >>> print(max_sum_bst(t1)) + 2 + >>> t2 = TreeNode(-4) + >>> t2.left = TreeNode(-2) + >>> t2.right = TreeNode(-5) + >>> print(max_sum_bst(t2)) + 0 + >>> t3 = TreeNode(1) + >>> t3.left = TreeNode(4) + >>> t3.left.left = TreeNode(2) + >>> t3.left.right = TreeNode(4) + >>> t3.right = TreeNode(3) + >>> t3.right.left = TreeNode(2) + >>> t3.right.right = TreeNode(5) + >>> t3.right.right.left = TreeNode(4) + >>> t3.right.right.right = TreeNode(6) + >>> print(max_sum_bst(t3)) + 20 + """ + ans: int = 0 + + def solver(node: TreeNode | None) -> tuple[bool, int, int, int]: + """ + Returns the maximum sum by making recursive calls + >>> t1 = TreeNode(1) + >>> print(solver(t1)) + 1 + """ + nonlocal ans + + if not node: + return True, INT_MAX, INT_MIN, 0 # Valid BST, min, max, sum + + is_left_valid, min_left, max_left, sum_left = solver(node.left) + is_right_valid, min_right, max_right, sum_right = solver(node.right) + + if is_left_valid and is_right_valid and max_left < node.val < min_right: + total_sum = sum_left + sum_right + node.val + ans = max(ans, total_sum) + return True, min(min_left, node.val), max(max_right, node.val), total_sum + + return False, -1, -1, -1 # Not a valid BST + + solver(root) + return ans + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From dba8eecb47cea7f11ac383344524afbc0ca7cf5b Mon Sep 17 00:00:00 2001 From: Lonercode 
<91500485+Lonercode@users.noreply.github.com> Date: Mon, 7 Oct 2024 10:58:07 +0100 Subject: [PATCH 133/260] added gronsfeld cipher implementation (#11835) * added gronsfeld cipher implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * from string import ascii_uppercase * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gronsfeld_cipher.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- ciphers/gronsfeld_cipher.py | 45 +++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 ciphers/gronsfeld_cipher.py diff --git a/ciphers/gronsfeld_cipher.py b/ciphers/gronsfeld_cipher.py new file mode 100644 index 000000000000..8fbeab4307fc --- /dev/null +++ b/ciphers/gronsfeld_cipher.py @@ -0,0 +1,45 @@ +from string import ascii_uppercase + + +def gronsfeld(text: str, key: str) -> str: + """ + Encrypt plaintext with the Gronsfeld cipher + + >>> gronsfeld('hello', '412') + 'LFNPP' + >>> gronsfeld('hello', '123') + 'IGOMQ' + >>> gronsfeld('', '123') + '' + >>> gronsfeld('yes, ¥€$ - _!@#%?', '0') + 'YES, ¥€$ - _!@#%?' + >>> gronsfeld('yes, ¥€$ - _!@#%?', '01') + 'YFS, ¥€$ - _!@#%?' + >>> gronsfeld('yes, ¥€$ - _!@#%?', '012') + 'YFU, ¥€$ - _!@#%?' + >>> gronsfeld('yes, ¥€$ - _!@#%?', '') + Traceback (most recent call last): + ... 
+ ZeroDivisionError: integer modulo by zero + """ + ascii_len = len(ascii_uppercase) + key_len = len(key) + encrypted_text = "" + keys = [int(char) for char in key] + upper_case_text = text.upper() + + for i, char in enumerate(upper_case_text): + if char in ascii_uppercase: + new_position = (ascii_uppercase.index(char) + keys[i % key_len]) % ascii_len + shifted_letter = ascii_uppercase[new_position] + encrypted_text += shifted_letter + else: + encrypted_text += char + + return encrypted_text + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 2d671df073770f0122658f462c17b838ddbe4d2a Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 7 Oct 2024 22:49:29 +0200 Subject: [PATCH 134/260] [pre-commit.ci] pre-commit autoupdate (#11874) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/pre-commit/pre-commit-hooks: v4.6.0 → v5.0.0](https://github.com/pre-commit/pre-commit-hooks/compare/v4.6.0...v5.0.0) - [github.com/astral-sh/ruff-pre-commit: v0.6.8 → v0.6.9](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.8...v0.6.9) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8a8e5c1f6ad9..77541027afb3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.6.0 + rev: v5.0.0 hooks: - id: check-executables-have-shebangs - id: check-toml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.8 + rev: v0.6.9 hooks: - id: ruff - id: ruff-format diff 
--git a/DIRECTORY.md b/DIRECTORY.md index cdbbac684fd2..0a3be2a06533 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -22,6 +22,7 @@ * [Rat In Maze](backtracking/rat_in_maze.py) * [Sudoku](backtracking/sudoku.py) * [Sum Of Subsets](backtracking/sum_of_subsets.py) + * [Word Break](backtracking/word_break.py) * [Word Ladder](backtracking/word_ladder.py) * [Word Search](backtracking/word_search.py) @@ -99,6 +100,7 @@ * [Elgamal Key Generator](ciphers/elgamal_key_generator.py) * [Enigma Machine2](ciphers/enigma_machine2.py) * [Fractionated Morse Cipher](ciphers/fractionated_morse_cipher.py) + * [Gronsfeld Cipher](ciphers/gronsfeld_cipher.py) * [Hill Cipher](ciphers/hill_cipher.py) * [Mixed Keyword Cypher](ciphers/mixed_keyword_cypher.py) * [Mono Alphabetic Ciphers](ciphers/mono_alphabetic_ciphers.py) @@ -211,6 +213,7 @@ * [Lazy Segment Tree](data_structures/binary_tree/lazy_segment_tree.py) * [Lowest Common Ancestor](data_structures/binary_tree/lowest_common_ancestor.py) * [Maximum Fenwick Tree](data_structures/binary_tree/maximum_fenwick_tree.py) + * [Maximum Sum Bst](data_structures/binary_tree/maximum_sum_bst.py) * [Merge Two Binary Trees](data_structures/binary_tree/merge_two_binary_trees.py) * [Mirror Binary Tree](data_structures/binary_tree/mirror_binary_tree.py) * [Non Recursive Segment Tree](data_structures/binary_tree/non_recursive_segment_tree.py) @@ -284,6 +287,7 @@ * [Dijkstras Two Stack Algorithm](data_structures/stacks/dijkstras_two_stack_algorithm.py) * [Infix To Postfix Conversion](data_structures/stacks/infix_to_postfix_conversion.py) * [Infix To Prefix Conversion](data_structures/stacks/infix_to_prefix_conversion.py) + * [Lexicographical Numbers](data_structures/stacks/lexicographical_numbers.py) * [Next Greater Element](data_structures/stacks/next_greater_element.py) * [Postfix Evaluation](data_structures/stacks/postfix_evaluation.py) * [Prefix Evaluation](data_structures/stacks/prefix_evaluation.py) @@ -1201,6 +1205,7 @@ * [Binary Tree 
Traversal](searches/binary_tree_traversal.py) * [Double Linear Search](searches/double_linear_search.py) * [Double Linear Search Recursion](searches/double_linear_search_recursion.py) + * [Exponential Search](searches/exponential_search.py) * [Fibonacci Search](searches/fibonacci_search.py) * [Hill Climbing](searches/hill_climbing.py) * [Interpolation Search](searches/interpolation_search.py) From 260e3d8b350c64e927ecb1d62b953b8bf25490ea Mon Sep 17 00:00:00 2001 From: Jeel Rupapara Date: Tue, 8 Oct 2024 17:03:28 +0530 Subject: [PATCH 135/260] feat: add test cases in cipher's autokey (#11881) --- ciphers/autokey.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/ciphers/autokey.py b/ciphers/autokey.py index 8683e6d37001..05d8c066b139 100644 --- a/ciphers/autokey.py +++ b/ciphers/autokey.py @@ -24,6 +24,14 @@ def encrypt(plaintext: str, key: str) -> str: Traceback (most recent call last): ... ValueError: plaintext is empty + >>> encrypt("coffee is good as python", "") + Traceback (most recent call last): + ... + ValueError: key is empty + >>> encrypt(527.26, "TheAlgorithms") + Traceback (most recent call last): + ... + TypeError: plaintext must be a string """ if not isinstance(plaintext, str): raise TypeError("plaintext must be a string") @@ -80,6 +88,14 @@ def decrypt(ciphertext: str, key: str) -> str: Traceback (most recent call last): ... TypeError: ciphertext must be a string + >>> decrypt("", "TheAlgorithms") + Traceback (most recent call last): + ... + ValueError: ciphertext is empty + >>> decrypt("vvjfpk wj ohvp su ddylsv", 2) + Traceback (most recent call last): + ... 
+ TypeError: key must be a string """ if not isinstance(ciphertext, str): raise TypeError("ciphertext must be a string") From e9e7c964655015819e0120694465928df1abefb0 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Tue, 8 Oct 2024 19:09:28 +0200 Subject: [PATCH 136/260] Create GitHub Pages docs with Sphinx (#11888) --- .devcontainer/Dockerfile | 2 +- .devcontainer/devcontainer.json | 2 +- .github/CODEOWNERS | 2 - .github/workflows/build.yml | 3 +- .github/workflows/sphinx.yml | 50 +++++++++ CONTRIBUTING.md | 2 +- DIRECTORY.md | 3 + LICENSE.md | 2 +- docs/{source => }/__init__.py | 0 docs/conf.py | 3 + financial/{ABOUT.md => README.md} | 2 +- index.md | 10 ++ .../{local_weighted_learning.md => README.md} | 0 pyproject.toml | 106 +++++++++++++++++- requirements.txt | 1 + source/__init__.py | 0 16 files changed, 179 insertions(+), 9 deletions(-) create mode 100644 .github/workflows/sphinx.yml rename docs/{source => }/__init__.py (100%) create mode 100644 docs/conf.py rename financial/{ABOUT.md => README.md} (97%) create mode 100644 index.md rename machine_learning/local_weighted_learning/{local_weighted_learning.md => README.md} (100%) delete mode 100644 source/__init__.py diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 6aa0073bf95b..a0bd05f47ec8 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,5 +1,5 @@ # https://github.com/microsoft/vscode-dev-containers/blob/main/containers/python-3/README.md -ARG VARIANT=3.12-bookworm +ARG VARIANT=3.13-bookworm FROM mcr.microsoft.com/vscode/devcontainers/python:${VARIANT} COPY requirements.txt /tmp/pip-tmp/ RUN python3 -m pip install --upgrade pip \ diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index ae1d4fb7494d..e23263f5b9de 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -7,7 +7,7 @@ // Update 'VARIANT' to pick a Python version: 3, 3.11, 3.10, 3.9, 3.8 // Append -bullseye or -buster to pin to an OS 
version. // Use -bullseye variants on local on arm64/Apple Silicon. - "VARIANT": "3.12-bookworm", + "VARIANT": "3.13-bookworm", } }, diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index d2ac43c7df31..3cc25d1bae1c 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -9,8 +9,6 @@ /.* @cclauss -# /arithmetic_analysis/ - # /backtracking/ # /bit_manipulation/ diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f54cc982d1ec..b5703e2f1ab6 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -25,12 +25,13 @@ jobs: - name: Run tests # TODO: #8818 Re-enable quantum tests run: pytest - --ignore=quantum/q_fourier_transform.py --ignore=computer_vision/cnn_classification.py + --ignore=docs/conf.py --ignore=dynamic_programming/k_means_clustering_tensorflow.py --ignore=machine_learning/lstm/lstm_prediction.py --ignore=neural_network/input_data.py --ignore=project_euler/ + --ignore=quantum/q_fourier_transform.py --ignore=scripts/validate_solutions.py --cov-report=term-missing:skip-covered --cov=. . diff --git a/.github/workflows/sphinx.yml b/.github/workflows/sphinx.yml new file mode 100644 index 000000000000..9dfe344f9743 --- /dev/null +++ b/.github/workflows/sphinx.yml @@ -0,0 +1,50 @@ +name: sphinx + +on: + # Triggers the workflow on push or pull request events but only for the "master" branch + push: + branches: ["master"] + pull_request: + branches: ["master"] + # Or manually from the Actions tab + workflow_dispatch: + +# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages +permissions: + contents: read + pages: write + id-token: write + +# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. +# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete. 
+concurrency: + group: "pages" + cancel-in-progress: false + +jobs: + build_docs: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: 3.13 + allow-prereleases: true + - run: pip install --upgrade pip + - run: pip install myst-parser sphinx-autoapi sphinx-pyproject + - uses: actions/configure-pages@v5 + - run: sphinx-build -c docs . docs/_build/html + - uses: actions/upload-pages-artifact@v3 + with: + path: docs/_build/html + + deploy_docs: + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + if: github.event_name != 'pull_request' + needs: build_docs + runs-on: ubuntu-latest + steps: + - uses: actions/deploy-pages@v4 + id: deployment diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b5113212929a..3df39f95b784 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -77,7 +77,7 @@ pre-commit run --all-files --show-diff-on-failure We want your work to be readable by others; therefore, we encourage you to note the following: -- Please write in Python 3.12+. For instance: `print()` is a function in Python 3 so `print "Hello"` will *not* work but `print("Hello")` will. +- Please write in Python 3.13+. For instance: `print()` is a function in Python 3 so `print "Hello"` will *not* work but `print("Hello")` will. - Please focus hard on the naming of functions, classes, and variables. Help your reader by using __descriptive names__ that can help you to remove redundant comments. - Single letter variable names are *old school* so please avoid them unless their life only spans a few lines. - Expand acronyms because `gcd()` is hard to understand but `greatest_common_divisor()` is not. 
diff --git a/DIRECTORY.md b/DIRECTORY.md index 0a3be2a06533..f0a34a553946 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -351,6 +351,9 @@ * [Power](divide_and_conquer/power.py) * [Strassen Matrix Multiplication](divide_and_conquer/strassen_matrix_multiplication.py) +## Docs + * [Conf](docs/conf.py) + ## Dynamic Programming * [Abbreviation](dynamic_programming/abbreviation.py) * [All Construct](dynamic_programming/all_construct.py) diff --git a/LICENSE.md b/LICENSE.md index 2897d02e2a01..de631c3ef333 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -1,4 +1,4 @@ -MIT License +## MIT License Copyright (c) 2016-2022 TheAlgorithms and contributors diff --git a/docs/source/__init__.py b/docs/__init__.py similarity index 100% rename from docs/source/__init__.py rename to docs/__init__.py diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 000000000000..f2481f107267 --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,3 @@ +from sphinx_pyproject import SphinxConfig + +project = SphinxConfig("../pyproject.toml", globalns=globals()).name diff --git a/financial/ABOUT.md b/financial/README.md similarity index 97% rename from financial/ABOUT.md rename to financial/README.md index f6b0647f8201..e5d3a84c8381 100644 --- a/financial/ABOUT.md +++ b/financial/README.md @@ -1,4 +1,4 @@ -### Interest +# Interest * Compound Interest: "Compound interest is calculated by multiplying the initial principal amount by one plus the annual interest rate raised to the number of compound periods minus one." [Compound Interest](https://www.investopedia.com/) * Simple Interest: "Simple interest paid or received over a certain period is a fixed percentage of the principal amount that was borrowed or lent. 
" [Simple Interest](https://www.investopedia.com/) diff --git a/index.md b/index.md new file mode 100644 index 000000000000..134520cb94aa --- /dev/null +++ b/index.md @@ -0,0 +1,10 @@ +# TheAlgorithms/Python +```{toctree} +:maxdepth: 2 +:caption: index.md + + +CONTRIBUTING.md +README.md +LICENSE.md +``` diff --git a/machine_learning/local_weighted_learning/local_weighted_learning.md b/machine_learning/local_weighted_learning/README.md similarity index 100% rename from machine_learning/local_weighted_learning/local_weighted_learning.md rename to machine_learning/local_weighted_learning/README.md diff --git a/pyproject.toml b/pyproject.toml index bb8657183164..c57419e79db3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,22 @@ +[project] +name = "thealgorithms-python" +version = "0.0.1" +description = "TheAlgorithms in Python" +authors = [ { name = "TheAlgorithms Contributors" } ] +requires-python = ">=3.13" +classifiers = [ + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.13", + +] +optional-dependencies.docs = [ + "myst-parser", + "sphinx-autoapi", + "sphinx-pyproject", +] + [tool.ruff] -target-version = "py312" +target-version = "py313" output-format = "full" lint.select = [ @@ -113,6 +130,9 @@ lint.pylint.max-statements = 88 # default: 50 ignore-words-list = "3rt,ans,bitap,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,toi,zar" skip = "./.*,*.json,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_names.txt,pyproject.toml,strings/dictionary.txt,strings/words.txt" +[tool.pyproject-fmt] +max_supported_python = "3.13" + [tool.pytest.ini_options] markers = [ "mat_ops: mark a test as utilizing matrix operations.", @@ -129,3 +149,87 @@ omit = [ "project_euler/*", ] sort = "Cover" + +[tool.sphinx-pyproject] +copyright = "2014, TheAlgorithms" +autoapi_dirs = [ + "audio_filters", + "backtracking", + "bit_manipulation", + "blockchain", + "boolean_algebra", + "cellular_automata", + 
"ciphers", + "compression", + "computer_vision", + "conversions", + "data_structures", + "digital_image_processing", + "divide_and_conquer", + "dynamic_programming", + "electronics", + "file_transfer", + "financial", + "fractals", + "fuzzy_logic", + "genetic_algorithm", + "geodesy", + "geometry", + "graphics", + "graphs", + "greedy_methods", + "hashes", + "knapsack", + "linear_algebra", + "linear_programming", + "machine_learning", + "maths", + "matrix", + "networking_flow", + "neural_network", + "other", + "physics", + "project_euler", + "quantum", + "scheduling", + "searches", + "sorts", + "strings", + "web_programming", +] +autoapi_member_order = "groupwise" +# autoapi_python_use_implicit_namespaces = true +exclude_patterns = [ + ".*/*", + "docs/", +] +extensions = [ + "autoapi.extension", + "myst_parser", +] +html_static_path = [ "_static" ] +html_theme = "alabaster" +myst_enable_extensions = [ + "amsmath", + "attrs_inline", + "colon_fence", + "deflist", + "dollarmath", + "fieldlist", + "html_admonition", + "html_image", + # "linkify", + "replacements", + "smartquotes", + "strikethrough", + "substitution", + "tasklist", +] +myst_fence_as_directive = [ + "include", +] +templates_path = [ "_templates" ] +[tool.sphinx-pyproject.source_suffix] +".rst" = "restructuredtext" +# ".txt" = "markdown" +".md" = "markdown" diff --git a/requirements.txt b/requirements.txt index afbf25ba6edc..6754363332c4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,6 +15,7 @@ requests rich # scikit-fuzzy # uncomment once fuzzy_logic/fuzzy_operations.py is fixed scikit-learn +sphinx_pyproject statsmodels sympy tensorflow ; python_version < '3.13' diff --git a/source/__init__.py b/source/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 From 03a42510b01c574292ca9c6525cbf0572ff5a2a5 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 14 Oct 2024 22:42:24 +0200 Subject: [PATCH 137/260] 
[pre-commit.ci] pre-commit autoupdate (#12071) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/tox-dev/pyproject-fmt: 2.2.4 → 2.3.0](https://github.com/tox-dev/pyproject-fmt/compare/2.2.4...2.3.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 77541027afb3..e1d185fabc12 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "2.2.4" + rev: "2.3.0" hooks: - id: pyproject-fmt From 6e24935f8860965dd7f2f5a50fd05724e84e9e8d Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 11:22:34 +0200 Subject: [PATCH 138/260] [pre-commit.ci] pre-commit autoupdate (#12234) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.6.9 → v0.7.0](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.9...v0.7.0) - [github.com/tox-dev/pyproject-fmt: 2.3.0 → 2.4.3](https://github.com/tox-dev/pyproject-fmt/compare/2.3.0...2.4.3) - [github.com/abravalheri/validate-pyproject: v0.20.2 → v0.21](https://github.com/abravalheri/validate-pyproject/compare/v0.20.2...v0.21) - [github.com/pre-commit/mirrors-mypy: v1.11.2 → v1.12.1](https://github.com/pre-commit/mirrors-mypy/compare/v1.11.2...v1.12.1) * project_euler/problem_047/sol1.py: def solution(n: int = 4) -> int | None: * Update sol1.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 8 ++++---- project_euler/problem_047/sol1.py | 12 ++++++------ 2 files changed, 10 
insertions(+), 10 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e1d185fabc12..a849de0c4e16 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.9 + rev: v0.7.0 hooks: - id: ruff - id: ruff-format @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "2.3.0" + rev: "2.4.3" hooks: - id: pyproject-fmt @@ -42,12 +42,12 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.20.2 + rev: v0.21 hooks: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.11.2 + rev: v1.12.1 hooks: - id: mypy args: diff --git a/project_euler/problem_047/sol1.py b/project_euler/problem_047/sol1.py index 4ecd4f4b44c1..d174de27dcd0 100644 --- a/project_euler/problem_047/sol1.py +++ b/project_euler/problem_047/sol1.py @@ -24,7 +24,7 @@ def unique_prime_factors(n: int) -> set: """ Find unique prime factors of an integer. - Tests include sorting because only the set really matters, + Tests include sorting because only the set matters, not the order in which it is produced. >>> sorted(set(unique_prime_factors(14))) [2, 7] @@ -58,7 +58,7 @@ def upf_len(num: int) -> int: def equality(iterable: list) -> bool: """ - Check equality of ALL elements in an iterable + Check the equality of ALL elements in an iterable >>> equality([1, 2, 3, 4]) False >>> equality([2, 2, 2, 2]) @@ -69,7 +69,7 @@ def equality(iterable: list) -> bool: return len(set(iterable)) in (0, 1) -def run(n: int) -> list: +def run(n: int) -> list[int]: """ Runs core process to find problem solution. >>> run(3) @@ -77,7 +77,7 @@ def run(n: int) -> list: """ # Incrementor variable for our group list comprehension. - # This serves as the first number in each list of values + # This is the first number in each list of values # to test. 
base = 2 @@ -85,7 +85,7 @@ def run(n: int) -> list: # Increment each value of a generated range group = [base + i for i in range(n)] - # Run elements through out unique_prime_factors function + # Run elements through the unique_prime_factors function # Append our target number to the end. checker = [upf_len(x) for x in group] checker.append(n) @@ -98,7 +98,7 @@ def run(n: int) -> list: base += 1 -def solution(n: int = 4) -> int: +def solution(n: int = 4) -> int | None: """Return the first value of the first four consecutive integers to have four distinct prime factors each. >>> solution() From 52602ea5b6dd8179aa662c002891c6506f519435 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 28 Oct 2024 21:27:00 +0100 Subject: [PATCH 139/260] [pre-commit.ci] pre-commit autoupdate (#12313) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.7.0 → v0.7.1](https://github.com/astral-sh/ruff-pre-commit/compare/v0.7.0...v0.7.1) - [github.com/tox-dev/pyproject-fmt: 2.4.3 → v2.4.3](https://github.com/tox-dev/pyproject-fmt/compare/2.4.3...v2.4.3) - [github.com/abravalheri/validate-pyproject: v0.21 → v0.22](https://github.com/abravalheri/validate-pyproject/compare/v0.21...v0.22) - [github.com/pre-commit/mirrors-mypy: v1.12.1 → v1.13.0](https://github.com/pre-commit/mirrors-mypy/compare/v1.12.1...v1.13.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a849de0c4e16..0828b715106d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.7.0 + rev: v0.7.1 hooks: - id: ruff - id: ruff-format @@ -29,7 +29,7 @@ repos: - 
tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "2.4.3" + rev: "v2.4.3" hooks: - id: pyproject-fmt @@ -42,12 +42,12 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.21 + rev: v0.22 hooks: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.12.1 + rev: v1.13.0 hooks: - id: mypy args: From a19bede190ddb4fa3c1c9850b612a47fc69d6709 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Fri, 1 Nov 2024 13:40:09 +0100 Subject: [PATCH 140/260] Add scripts/find_git_conflicts.sh (#12343) --- scripts/find_git_conflicts.sh | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100755 scripts/find_git_conflicts.sh diff --git a/scripts/find_git_conflicts.sh b/scripts/find_git_conflicts.sh new file mode 100755 index 000000000000..8af33fa75279 --- /dev/null +++ b/scripts/find_git_conflicts.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +# Replace with your repository (format: owner/repo) +REPO="TheAlgorithms/Python" + +# Fetch open pull requests with conflicts into a variable +echo "Checking for pull requests with conflicts in $REPO..." + +prs=$(gh pr list --repo "$REPO" --state open --json number,title,mergeable --jq '.[] | select(.mergeable == "CONFLICTING") | {number, title}' --limit 500) + +# Process each conflicting PR +echo "$prs" | jq -c '.[]' | while read -r pr; do + PR_NUMBER=$(echo "$pr" | jq -r '.number') + PR_TITLE=$(echo "$pr" | jq -r '.title') + echo "PR #$PR_NUMBER - $PR_TITLE has conflicts." 
+done From 3e9ca92ca972bbe752d32b43c71a88789dce94c0 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 4 Nov 2024 21:09:03 +0100 Subject: [PATCH 141/260] [pre-commit.ci] pre-commit autoupdate (#12349) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.7.1 → v0.7.2](https://github.com/astral-sh/ruff-pre-commit/compare/v0.7.1...v0.7.2) - [github.com/tox-dev/pyproject-fmt: v2.4.3 → v2.5.0](https://github.com/tox-dev/pyproject-fmt/compare/v2.4.3...v2.5.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0828b715106d..f112ee553b51 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.7.1 + rev: v0.7.2 hooks: - id: ruff - id: ruff-format @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "v2.4.3" + rev: "v2.5.0" hooks: - id: pyproject-fmt From e3f3d668be4ada7aee82eea0bc75c50436c1ab3a Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 11 Nov 2024 21:05:50 +0100 Subject: [PATCH 142/260] [pre-commit.ci] pre-commit autoupdate (#12370) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.7.2 → v0.7.3](https://github.com/astral-sh/ruff-pre-commit/compare/v0.7.2...v0.7.3) - [github.com/abravalheri/validate-pyproject: v0.22 → v0.23](https://github.com/abravalheri/validate-pyproject/compare/v0.22...v0.23) * Update sudoku_solver.py --------- Co-authored-by: pre-commit-ci[bot] 
<66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 4 ++-- data_structures/arrays/sudoku_solver.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f112ee553b51..9d794473cc01 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.7.2 + rev: v0.7.3 hooks: - id: ruff - id: ruff-format @@ -42,7 +42,7 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.22 + rev: v0.23 hooks: - id: validate-pyproject diff --git a/data_structures/arrays/sudoku_solver.py b/data_structures/arrays/sudoku_solver.py index a8157a520c97..70bcdc748195 100644 --- a/data_structures/arrays/sudoku_solver.py +++ b/data_structures/arrays/sudoku_solver.py @@ -172,7 +172,7 @@ def unitsolved(unit): def from_file(filename, sep="\n"): "Parse a file into a list of strings, separated by sep." 
- return open(filename).read().strip().split(sep) # noqa: SIM115 + return open(filename).read().strip().split(sep) def random_puzzle(assignments=17): From e3bd7721c8241a6db77254bac44757dced1b96f8 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Fri, 15 Nov 2024 14:59:14 +0100 Subject: [PATCH 143/260] `validate_filenames.py` Shebang `python` for Windows (#12371) --- scripts/validate_filenames.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/validate_filenames.py b/scripts/validate_filenames.py index 0890024dd349..e76b4dbfe288 100755 --- a/scripts/validate_filenames.py +++ b/scripts/validate_filenames.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python3 +#!python import os try: From f3f32ae3ca818f64de2ed3267803882956681044 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 18 Nov 2024 22:07:12 +0100 Subject: [PATCH 144/260] [pre-commit.ci] pre-commit autoupdate (#12385) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.7.3 → v0.7.4](https://github.com/astral-sh/ruff-pre-commit/compare/v0.7.3...v0.7.4) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9d794473cc01..6ad19f1fdcb1 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.7.3 + rev: v0.7.4 hooks: - id: ruff - id: ruff-format From fc33c505935e9927cffb6142591891f721a7bcd9 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 25 Nov 2024 21:46:20 +0100 Subject: [PATCH 145/260] [pre-commit.ci] pre-commit autoupdate (#12398) MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.4.10 → v0.5.0](https://github.com/astral-sh/ruff-pre-commit/compare/v0.4.10...v0.5.0) - [github.com/pre-commit/mirrors-mypy: v1.10.0 → v1.10.1](https://github.com/pre-commit/mirrors-mypy/compare/v1.10.0...v1.10.1) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- cellular_automata/conways_game_of_life.py | 6 ++--- ciphers/playfair_cipher.py | 2 +- ciphers/simple_keyword_cypher.py | 2 +- ciphers/transposition_cipher.py | 6 ++--- compression/lempel_ziv.py | 4 ++-- data_structures/arrays/sudoku_solver.py | 2 +- .../binary_tree/binary_tree_traversals.py | 24 ++++++++----------- data_structures/linked_list/deque_doubly.py | 2 +- data_structures/queue/double_ended_queue.py | 2 +- docs/source/__init__.py | 0 electronics/electrical_impedance.py | 2 +- graphs/ant_colony_optimization_algorithms.py | 6 ++--- graphs/basic_graphs.py | 22 ++++++++--------- graphs/minimum_spanning_tree_boruvka.py | 8 +++---- hashes/md5.py | 2 +- machine_learning/frequent_pattern_growth.py | 4 ++-- maths/collatz_sequence.py | 2 +- maths/prime_numbers.py | 6 ++--- maths/volume.py | 2 +- neural_network/input_data.py | 10 ++++---- physics/basic_orbital_capture.py | 9 ++++--- physics/grahams_law.py | 2 +- project_euler/problem_025/sol2.py | 2 +- project_euler/problem_123/sol1.py | 2 +- pyproject.toml | 1 + source/__init__.py | 0 strings/frequency_finder.py | 2 +- strings/min_cost_string_conversion.py | 8 +++---- web_programming/fetch_jobs.py | 2 +- 30 files changed, 66 insertions(+), 78 deletions(-) create mode 100644 docs/source/__init__.py create mode 100644 source/__init__.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6ad19f1fdcb1..64d9a833cd21 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: 
https://github.com/astral-sh/ruff-pre-commit - rev: v0.7.4 + rev: v0.8.0 hooks: - id: ruff - id: ruff-format diff --git a/cellular_automata/conways_game_of_life.py b/cellular_automata/conways_game_of_life.py index 364a34c3aba6..485f0d47bd8b 100644 --- a/cellular_automata/conways_game_of_life.py +++ b/cellular_automata/conways_game_of_life.py @@ -58,10 +58,8 @@ def new_generation(cells: list[list[int]]) -> list[list[int]]: # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. alive = cells[i][j] == 1 - if ( - (alive and 2 <= neighbour_count <= 3) - or not alive - and neighbour_count == 3 + if (alive and 2 <= neighbour_count <= 3) or ( + not alive and neighbour_count == 3 ): next_generation_row.append(1) else: diff --git a/ciphers/playfair_cipher.py b/ciphers/playfair_cipher.py index 86b45bc4fb6a..d48f113f02e0 100644 --- a/ciphers/playfair_cipher.py +++ b/ciphers/playfair_cipher.py @@ -24,7 +24,7 @@ from collections.abc import Generator, Iterable -def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]: +def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...]]: it = iter(seq) while True: chunk = tuple(itertools.islice(it, size)) diff --git a/ciphers/simple_keyword_cypher.py b/ciphers/simple_keyword_cypher.py index 1635471aebd1..9dc624e7762c 100644 --- a/ciphers/simple_keyword_cypher.py +++ b/ciphers/simple_keyword_cypher.py @@ -10,7 +10,7 @@ def remove_duplicates(key: str) -> str: key_no_dups = "" for ch in key: - if ch == " " or ch not in key_no_dups and ch.isalpha(): + if ch == " " or (ch not in key_no_dups and ch.isalpha()): key_no_dups += ch return key_no_dups diff --git a/ciphers/transposition_cipher.py b/ciphers/transposition_cipher.py index f1f07ddc3f35..76178cb6a1bc 100644 --- a/ciphers/transposition_cipher.py +++ b/ciphers/transposition_cipher.py @@ -52,10 +52,8 @@ def decrypt_message(key: int, message: str) -> str: plain_text[col] += symbol col += 1 - if ( - (col == 
num_cols) - or (col == num_cols - 1) - and (row >= num_rows - num_shaded_boxes) + if (col == num_cols) or ( + (col == num_cols - 1) and (row >= num_rows - num_shaded_boxes) ): col = 0 row += 1 diff --git a/compression/lempel_ziv.py b/compression/lempel_ziv.py index 2751a0ebcdb6..648b029471bd 100644 --- a/compression/lempel_ziv.py +++ b/compression/lempel_ziv.py @@ -35,8 +35,8 @@ def add_key_to_lexicon( lexicon[curr_string + "0"] = last_match_id if math.log2(index).is_integer(): - for curr_key in lexicon: - lexicon[curr_key] = "0" + lexicon[curr_key] + for curr_key, value in lexicon.items(): + lexicon[curr_key] = f"0{value}" lexicon[curr_string + "1"] = bin(index)[2:] diff --git a/data_structures/arrays/sudoku_solver.py b/data_structures/arrays/sudoku_solver.py index 70bcdc748195..7e38e1465728 100644 --- a/data_structures/arrays/sudoku_solver.py +++ b/data_structures/arrays/sudoku_solver.py @@ -156,7 +156,7 @@ def time_solve(grid): times, results = zip(*[time_solve(grid) for grid in grids]) if (n := len(grids)) > 1: print( - "Solved %d of %d %s puzzles (avg %.2f secs (%d Hz), max %.2f secs)." + "Solved %d of %d %s puzzles (avg %.2f secs (%d Hz), max %.2f secs)." # noqa: UP031 % (sum(results), n, name, sum(times) / n, n / sum(times), max(times)) ) diff --git a/data_structures/binary_tree/binary_tree_traversals.py b/data_structures/binary_tree/binary_tree_traversals.py index 49c208335b2c..5ba149d0cbc6 100644 --- a/data_structures/binary_tree/binary_tree_traversals.py +++ b/data_structures/binary_tree/binary_tree_traversals.py @@ -30,7 +30,7 @@ def make_tree() -> Node | None: return tree -def preorder(root: Node | None) -> Generator[int, None, None]: +def preorder(root: Node | None) -> Generator[int]: """ Pre-order traversal visits root node, left subtree, right subtree. 
>>> list(preorder(make_tree())) @@ -43,7 +43,7 @@ def preorder(root: Node | None) -> Generator[int, None, None]: yield from preorder(root.right) -def postorder(root: Node | None) -> Generator[int, None, None]: +def postorder(root: Node | None) -> Generator[int]: """ Post-order traversal visits left subtree, right subtree, root node. >>> list(postorder(make_tree())) @@ -56,7 +56,7 @@ def postorder(root: Node | None) -> Generator[int, None, None]: yield root.data -def inorder(root: Node | None) -> Generator[int, None, None]: +def inorder(root: Node | None) -> Generator[int]: """ In-order traversal visits left subtree, root node, right subtree. >>> list(inorder(make_tree())) @@ -69,7 +69,7 @@ def inorder(root: Node | None) -> Generator[int, None, None]: yield from inorder(root.right) -def reverse_inorder(root: Node | None) -> Generator[int, None, None]: +def reverse_inorder(root: Node | None) -> Generator[int]: """ Reverse in-order traversal visits right subtree, root node, left subtree. >>> list(reverse_inorder(make_tree())) @@ -93,7 +93,7 @@ def height(root: Node | None) -> int: return (max(height(root.left), height(root.right)) + 1) if root else 0 -def level_order(root: Node | None) -> Generator[int, None, None]: +def level_order(root: Node | None) -> Generator[int]: """ Returns a list of nodes value from a whole binary tree in Level Order Traverse. Level Order traverse: Visit nodes of the tree level-by-level. @@ -116,9 +116,7 @@ def level_order(root: Node | None) -> Generator[int, None, None]: process_queue.append(node.right) -def get_nodes_from_left_to_right( - root: Node | None, level: int -) -> Generator[int, None, None]: +def get_nodes_from_left_to_right(root: Node | None, level: int) -> Generator[int]: """ Returns a list of nodes value from a particular level: Left to right direction of the binary tree. 
@@ -128,7 +126,7 @@ def get_nodes_from_left_to_right( [2, 3] """ - def populate_output(root: Node | None, level: int) -> Generator[int, None, None]: + def populate_output(root: Node | None, level: int) -> Generator[int]: if not root: return if level == 1: @@ -140,9 +138,7 @@ def populate_output(root: Node | None, level: int) -> Generator[int, None, None] yield from populate_output(root, level) -def get_nodes_from_right_to_left( - root: Node | None, level: int -) -> Generator[int, None, None]: +def get_nodes_from_right_to_left(root: Node | None, level: int) -> Generator[int]: """ Returns a list of nodes value from a particular level: Right to left direction of the binary tree. @@ -152,7 +148,7 @@ def get_nodes_from_right_to_left( [3, 2] """ - def populate_output(root: Node | None, level: int) -> Generator[int, None, None]: + def populate_output(root: Node | None, level: int) -> Generator[int]: if not root: return if level == 1: @@ -164,7 +160,7 @@ def populate_output(root: Node | None, level: int) -> Generator[int, None, None] yield from populate_output(root, level) -def zigzag(root: Node | None) -> Generator[int, None, None]: +def zigzag(root: Node | None) -> Generator[int]: """ ZigZag traverse: Returns a list of nodes value from left to right and right to left, alternatively. 
diff --git a/data_structures/linked_list/deque_doubly.py b/data_structures/linked_list/deque_doubly.py index 2b9d70c223c4..e554ead91c5a 100644 --- a/data_structures/linked_list/deque_doubly.py +++ b/data_structures/linked_list/deque_doubly.py @@ -12,7 +12,7 @@ class _DoublyLinkedBase: """A Private class (to be inherited)""" class _Node: - __slots__ = "_prev", "_data", "_next" + __slots__ = "_data", "_next", "_prev" def __init__(self, link_p, element, link_n): self._prev = link_p diff --git a/data_structures/queue/double_ended_queue.py b/data_structures/queue/double_ended_queue.py index 607d0bda3df4..c28d46c65168 100644 --- a/data_structures/queue/double_ended_queue.py +++ b/data_structures/queue/double_ended_queue.py @@ -33,7 +33,7 @@ class Deque: the number of nodes """ - __slots__ = ("_front", "_back", "_len") + __slots__ = ("_back", "_front", "_len") @dataclass class _Node: diff --git a/docs/source/__init__.py b/docs/source/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/electronics/electrical_impedance.py b/electronics/electrical_impedance.py index 44041ff790b6..4f4f1d308293 100644 --- a/electronics/electrical_impedance.py +++ b/electronics/electrical_impedance.py @@ -6,7 +6,7 @@ from __future__ import annotations -from math import pow, sqrt +from math import pow, sqrt # noqa: A004 def electrical_impedance( diff --git a/graphs/ant_colony_optimization_algorithms.py b/graphs/ant_colony_optimization_algorithms.py index 13637da44874..753f4c0962c8 100644 --- a/graphs/ant_colony_optimization_algorithms.py +++ b/graphs/ant_colony_optimization_algorithms.py @@ -194,10 +194,8 @@ def city_select( IndexError: list index out of range """ probabilities = [] - for city in unvisited_cities: - city_distance = distance( - unvisited_cities[city], next(iter(current_city.values())) - ) + for city, value in unvisited_cities.items(): + city_distance = distance(value, next(iter(current_city.values()))) probability = 
(pheromone[city][next(iter(current_city.keys()))] ** alpha) * ( (1 / city_distance) ** beta ) diff --git a/graphs/basic_graphs.py b/graphs/basic_graphs.py index 25c8045b3d2b..567fa65040ae 100644 --- a/graphs/basic_graphs.py +++ b/graphs/basic_graphs.py @@ -133,18 +133,18 @@ def dijk(g, s): if len(known) == len(g) - 1: break mini = 100000 - for i in dist: - if i not in known and dist[i] < mini: - mini = dist[i] - u = i + for key, value in dist: + if key not in known and value < mini: + mini = value + u = key known.add(u) for v in g[u]: if v[0] not in known and dist[u] + v[1] < dist.get(v[0], 100000): dist[v[0]] = dist[u] + v[1] path[v[0]] = u - for i in dist: - if i != s: - print(dist[i]) + for key, value in dist.items(): + if key != s: + print(value) """ @@ -255,10 +255,10 @@ def prim(g, s): if len(known) == len(g) - 1: break mini = 100000 - for i in dist: - if i not in known and dist[i] < mini: - mini = dist[i] - u = i + for key, value in dist.items(): + if key not in known and value < mini: + mini = value + u = key known.add(u) for v in g[u]: if v[0] not in known and v[1] < dist.get(v[0], 100000): diff --git a/graphs/minimum_spanning_tree_boruvka.py b/graphs/minimum_spanning_tree_boruvka.py index 3c6888037948..f234d65ab765 100644 --- a/graphs/minimum_spanning_tree_boruvka.py +++ b/graphs/minimum_spanning_tree_boruvka.py @@ -185,12 +185,12 @@ def boruvka_mst(graph): if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight: cheap_edge[set2] = [head, tail, weight] - for vertex in cheap_edge: - if cheap_edge[vertex] != -1: - head, tail, weight = cheap_edge[vertex] + for head_tail_weight in cheap_edge.values(): + if head_tail_weight != -1: + head, tail, weight = head_tail_weight if union_find.find(head) != union_find.find(tail): union_find.union(head, tail) - mst_edges.append(cheap_edge[vertex]) + mst_edges.append(head_tail_weight) num_components = num_components - 1 mst = Graph.build(edges=mst_edges) return mst diff --git a/hashes/md5.py b/hashes/md5.py index 
622a50d290e1..f9d802ff0308 100644 --- a/hashes/md5.py +++ b/hashes/md5.py @@ -131,7 +131,7 @@ def preprocess(message: bytes) -> bytes: return bit_string -def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]: +def get_block_words(bit_string: bytes) -> Generator[list[int]]: """ Splits bit string into blocks of 512 chars and yields each block as a list of 32-bit words diff --git a/machine_learning/frequent_pattern_growth.py b/machine_learning/frequent_pattern_growth.py index 947f8692f298..fae2df16efb1 100644 --- a/machine_learning/frequent_pattern_growth.py +++ b/machine_learning/frequent_pattern_growth.py @@ -107,8 +107,8 @@ def create_tree(data_set: list, min_sup: int = 1) -> tuple[TreeNode, dict]: if not (freq_item_set := set(header_table)): return TreeNode("Null Set", 1, None), {} - for k in header_table: - header_table[k] = [header_table[k], None] + for key, value in header_table.items(): + header_table[key] = [value, None] fp_tree = TreeNode("Null Set", 1, None) # Parent is None for the root node for tran_set in data_set: diff --git a/maths/collatz_sequence.py b/maths/collatz_sequence.py index b47017146a1e..b00dca8d70b7 100644 --- a/maths/collatz_sequence.py +++ b/maths/collatz_sequence.py @@ -17,7 +17,7 @@ from collections.abc import Generator -def collatz_sequence(n: int) -> Generator[int, None, None]: +def collatz_sequence(n: int) -> Generator[int]: """ Generate the Collatz sequence starting at n. >>> tuple(collatz_sequence(2.1)) diff --git a/maths/prime_numbers.py b/maths/prime_numbers.py index 38cc6670385d..5ad12baf3dc3 100644 --- a/maths/prime_numbers.py +++ b/maths/prime_numbers.py @@ -2,7 +2,7 @@ from collections.abc import Generator -def slow_primes(max_n: int) -> Generator[int, None, None]: +def slow_primes(max_n: int) -> Generator[int]: """ Return a list of all primes numbers up to max. 
>>> list(slow_primes(0)) @@ -29,7 +29,7 @@ def slow_primes(max_n: int) -> Generator[int, None, None]: yield i -def primes(max_n: int) -> Generator[int, None, None]: +def primes(max_n: int) -> Generator[int]: """ Return a list of all primes numbers up to max. >>> list(primes(0)) @@ -58,7 +58,7 @@ def primes(max_n: int) -> Generator[int, None, None]: yield i -def fast_primes(max_n: int) -> Generator[int, None, None]: +def fast_primes(max_n: int) -> Generator[int]: """ Return a list of all primes numbers up to max. >>> list(fast_primes(0)) diff --git a/maths/volume.py b/maths/volume.py index 33be9bdd131a..23fcf6be6ef1 100644 --- a/maths/volume.py +++ b/maths/volume.py @@ -6,7 +6,7 @@ from __future__ import annotations -from math import pi, pow +from math import pi, pow # noqa: A004 def vol_cube(side_length: float) -> float: diff --git a/neural_network/input_data.py b/neural_network/input_data.py index f90287fe3f5b..72debabb566a 100644 --- a/neural_network/input_data.py +++ b/neural_network/input_data.py @@ -61,9 +61,8 @@ def _extract_images(f): with gzip.GzipFile(fileobj=f) as bytestream: magic = _read32(bytestream) if magic != 2051: - raise ValueError( - "Invalid magic number %d in MNIST image file: %s" % (magic, f.name) - ) + msg = f"Invalid magic number {magic} in MNIST image file: {f.name}" + raise ValueError(msg) num_images = _read32(bytestream) rows = _read32(bytestream) cols = _read32(bytestream) @@ -102,9 +101,8 @@ def _extract_labels(f, one_hot=False, num_classes=10): with gzip.GzipFile(fileobj=f) as bytestream: magic = _read32(bytestream) if magic != 2049: - raise ValueError( - "Invalid magic number %d in MNIST label file: %s" % (magic, f.name) - ) + msg = f"Invalid magic number {magic} in MNIST label file: {f.name}" + raise ValueError(msg) num_items = _read32(bytestream) buf = bytestream.read(num_items) labels = np.frombuffer(buf, dtype=np.uint8) diff --git a/physics/basic_orbital_capture.py b/physics/basic_orbital_capture.py index 
a5434b5cb7cb..eb1fdd9d6420 100644 --- a/physics/basic_orbital_capture.py +++ b/physics/basic_orbital_capture.py @@ -1,7 +1,3 @@ -from math import pow, sqrt - -from scipy.constants import G, c, pi - """ These two functions will return the radii of impact for a target object of mass M and radius R as well as it's effective cross sectional area sigma. @@ -14,9 +10,12 @@ cross section for capture as sigma=π*R_capture**2. This algorithm does not account for an N-body problem. - """ +from math import pow, sqrt # noqa: A004 + +from scipy.constants import G, c, pi + def capture_radii( target_body_radius: float, target_body_mass: float, projectile_velocity: float diff --git a/physics/grahams_law.py b/physics/grahams_law.py index 6e5d75127e83..c56359280ea4 100644 --- a/physics/grahams_law.py +++ b/physics/grahams_law.py @@ -14,7 +14,7 @@ (Description adapted from https://en.wikipedia.org/wiki/Graham%27s_law) """ -from math import pow, sqrt +from math import pow, sqrt # noqa: A004 def validate(*values: float) -> bool: diff --git a/project_euler/problem_025/sol2.py b/project_euler/problem_025/sol2.py index a0f056023bc9..4094b6251d50 100644 --- a/project_euler/problem_025/sol2.py +++ b/project_euler/problem_025/sol2.py @@ -27,7 +27,7 @@ from collections.abc import Generator -def fibonacci_generator() -> Generator[int, None, None]: +def fibonacci_generator() -> Generator[int]: """ A generator that produces numbers in the Fibonacci sequence diff --git a/project_euler/problem_123/sol1.py b/project_euler/problem_123/sol1.py index 3dd31a2e8505..265348d2d4c8 100644 --- a/project_euler/problem_123/sol1.py +++ b/project_euler/problem_123/sol1.py @@ -43,7 +43,7 @@ from collections.abc import Generator -def sieve() -> Generator[int, None, None]: +def sieve() -> Generator[int]: """ Returns a prime number generator using sieve method. 
>>> type(sieve()) diff --git a/pyproject.toml b/pyproject.toml index c57419e79db3..c60ec246144e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -80,6 +80,7 @@ lint.ignore = [ "EM101", # Exception must not use a string literal, assign to variable first "EXE001", # Shebang is present but file is not executable -- DO NOT FIX "G004", # Logging statement uses f-string + "ISC001", # Conflicts with ruff format -- DO NOT FIX "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX "PLW2901", # PLW2901: Redefined loop variable -- FIX ME diff --git a/source/__init__.py b/source/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/strings/frequency_finder.py b/strings/frequency_finder.py index 8479c81ae464..e5afee891bd9 100644 --- a/strings/frequency_finder.py +++ b/strings/frequency_finder.py @@ -67,7 +67,7 @@ def get_frequency_order(message: str) -> str: freq_to_letter_str: dict[int, str] = {} - for freq in freq_to_letter: + for freq in freq_to_letter: # noqa: PLC0206 freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True) freq_to_letter_str[freq] = "".join(freq_to_letter[freq]) diff --git a/strings/min_cost_string_conversion.py b/strings/min_cost_string_conversion.py index a5a3c4a4e3f8..93791e2a7ed3 100644 --- a/strings/min_cost_string_conversion.py +++ b/strings/min_cost_string_conversion.py @@ -124,7 +124,7 @@ def assemble_transformation(ops: list[list[str]], i: int, j: int) -> list[str]: print("".join(string)) if op[0] == "C": - file.write("%-16s" % "Copy %c" % op[1]) + file.write("%-16s" % "Copy %c" % op[1]) # noqa: UP031 file.write("\t\t\t" + "".join(string)) file.write("\r\n") @@ -132,7 +132,7 @@ def assemble_transformation(ops: list[list[str]], i: int, j: int) -> list[str]: elif op[0] == "R": string[i] = op[2] - file.write("%-16s" % ("Replace %c" % op[1] + " with " + str(op[2]))) + file.write("%-16s" % ("Replace %c" % op[1] + " with " + 
str(op[2]))) # noqa: UP031 file.write("\t\t" + "".join(string)) file.write("\r\n") @@ -140,7 +140,7 @@ def assemble_transformation(ops: list[list[str]], i: int, j: int) -> list[str]: elif op[0] == "D": string.pop(i) - file.write("%-16s" % "Delete %c" % op[1]) + file.write("%-16s" % "Delete %c" % op[1]) # noqa: UP031 file.write("\t\t\t" + "".join(string)) file.write("\r\n") @@ -148,7 +148,7 @@ def assemble_transformation(ops: list[list[str]], i: int, j: int) -> list[str]: else: string.insert(i, op[1]) - file.write("%-16s" % "Insert %c" % op[1]) + file.write("%-16s" % "Insert %c" % op[1]) # noqa: UP031 file.write("\t\t\t" + "".join(string)) file.write("\r\n") diff --git a/web_programming/fetch_jobs.py b/web_programming/fetch_jobs.py index 0d89bf45de57..3753d25bbe5f 100644 --- a/web_programming/fetch_jobs.py +++ b/web_programming/fetch_jobs.py @@ -12,7 +12,7 @@ url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l=" -def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]: +def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str]]: soup = BeautifulSoup( requests.get(url + location, timeout=10).content, "html.parser" ) From c7921226326f35932bbc9d214e9742c2f3d310bf Mon Sep 17 00:00:00 2001 From: Anamaria Miranda Date: Mon, 2 Dec 2024 11:57:04 +0100 Subject: [PATCH 146/260] Added matrix based color game algorithm (#12400) * Added matrix based color game * updating DIRECTORY.md --------- Co-authored-by: Miranda13 --- DIRECTORY.md | 1 + matrix/matrix_based_game.py | 284 ++++++++++++++++++++++++++++++++++++ 2 files changed, 285 insertions(+) create mode 100644 matrix/matrix_based_game.py diff --git a/DIRECTORY.md b/DIRECTORY.md index f0a34a553946..d234d366df06 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -794,6 +794,7 @@ * [Cramers Rule 2X2](matrix/cramers_rule_2x2.py) * [Inverse Of Matrix](matrix/inverse_of_matrix.py) * [Largest Square Area In Matrix](matrix/largest_square_area_in_matrix.py) + * [Matrix Based 
Game](matrix/matrix_based_game.py) * [Matrix Class](matrix/matrix_class.py) * [Matrix Equalization](matrix/matrix_equalization.py) * [Matrix Multiplication Recursion](matrix/matrix_multiplication_recursion.py) diff --git a/matrix/matrix_based_game.py b/matrix/matrix_based_game.py new file mode 100644 index 000000000000..1ff0cbe93435 --- /dev/null +++ b/matrix/matrix_based_game.py @@ -0,0 +1,284 @@ +""" +Matrix-Based Game Script +========================= +This script implements a matrix-based game where players interact with a grid of +elements. The primary goals are to: +- Identify connected elements of the same type from a selected position. +- Remove those elements, adjust the matrix by simulating gravity, and reorganize empty + columns. +- Calculate and display the score based on the number of elements removed in each move. + +Functions: +----------- +1. `find_repeat`: Finds all connected elements of the same type. +2. `increment_score`: Calculates the score for a given move. +3. `move_x`: Simulates gravity in a column. +4. `move_y`: Reorganizes the matrix by shifting columns leftward when a column becomes + empty. +5. `play`: Executes a single move, updating the matrix and returning the score. + +Input Format: +-------------- +1. Matrix size (`lines`): Integer specifying the size of the matrix (N x N). +2. Matrix content (`matrix`): Rows of the matrix, each consisting of characters. +3. Number of moves (`movs`): Integer indicating the number of moves. +4. List of moves (`movements`): A comma-separated string of coordinates for each move. + +(0,0) position starts from first left column to last right, and below row to up row + + +Example Input: +--------------- +4 +RRBG +RBBG +YYGG +XYGG +2 +0 1,1 1 + +Example (0,0) = X + +Output: +-------- +The script outputs the total score after processing all moves. + +Usage: +------- +Run the script and provide the required inputs as prompted. 
+ +""" + + +def validate_matrix_size(size: int) -> None: + """ + >>> validate_matrix_size(-1) + Traceback (most recent call last): + ... + ValueError: Matrix size must be a positive integer. + """ + if not isinstance(size, int) or size <= 0: + raise ValueError("Matrix size must be a positive integer.") + + +def validate_matrix_content(matrix: list[str], size: int) -> None: + """ + Validates that the number of elements in the matrix matches the given size. + + >>> validate_matrix_content(['aaaa', 'aaaa', 'aaaa', 'aaaa'], 3) + Traceback (most recent call last): + ... + ValueError: The matrix dont match with size. + >>> validate_matrix_content(['aa%', 'aaa', 'aaa'], 3) + Traceback (most recent call last): + ... + ValueError: Matrix rows can only contain letters and numbers. + >>> validate_matrix_content(['aaa', 'aaa', 'aaaa'], 3) + Traceback (most recent call last): + ... + ValueError: Each row in the matrix must have exactly 3 characters. + """ + print(matrix) + if len(matrix) != size: + raise ValueError("The matrix dont match with size.") + for row in matrix: + if len(row) != size: + msg = f"Each row in the matrix must have exactly {size} characters." + raise ValueError(msg) + if not all(char.isalnum() for char in row): + raise ValueError("Matrix rows can only contain letters and numbers.") + + +def validate_moves(moves: list[tuple[int, int]], size: int) -> None: + """ + >>> validate_moves([(1, 2), (-1, 0)], 3) + Traceback (most recent call last): + ... + ValueError: Move is out of bounds for a matrix. + """ + for move in moves: + x, y = move + if not (0 <= x < size and 0 <= y < size): + raise ValueError("Move is out of bounds for a matrix.") + + +def parse_moves(input_str: str) -> list[tuple[int, int]]: + """ + >>> parse_moves("0 1, 1 1") + [(0, 1), (1, 1)] + >>> parse_moves("0 1, 1 1, 2") + Traceback (most recent call last): + ... + ValueError: Each move must have exactly two numbers. 
+ >>> parse_moves("0 1, 1 1, 2 4 5 6") + Traceback (most recent call last): + ... + ValueError: Each move must have exactly two numbers. + """ + moves = [] + for pair in input_str.split(","): + parts = pair.strip().split() + if len(parts) != 2: + raise ValueError("Each move must have exactly two numbers.") + x, y = map(int, parts) + moves.append((x, y)) + return moves + + +def find_repeat( + matrix_g: list[list[str]], row: int, column: int, size: int +) -> set[tuple[int, int]]: + """ + Finds all connected elements of the same type from a given position. + + >>> find_repeat([['A', 'B', 'A'], ['A', 'B', 'A'], ['A', 'A', 'A']], 0, 0, 3) + {(1, 2), (2, 1), (0, 0), (2, 0), (0, 2), (2, 2), (1, 0)} + >>> find_repeat([['-', '-', '-'], ['-', '-', '-'], ['-', '-', '-']], 1, 1, 3) + set() + """ + + column = size - 1 - column + visited = set() + repeated = set() + + if (color := matrix_g[column][row]) != "-": + + def dfs(row_n: int, column_n: int) -> None: + if row_n < 0 or row_n >= size or column_n < 0 or column_n >= size: + return + if (row_n, column_n) in visited: + return + visited.add((row_n, column_n)) + if matrix_g[row_n][column_n] == color: + repeated.add((row_n, column_n)) + dfs(row_n - 1, column_n) + dfs(row_n + 1, column_n) + dfs(row_n, column_n - 1) + dfs(row_n, column_n + 1) + + dfs(column, row) + + return repeated + + +def increment_score(count: int) -> int: + """ + Calculates the score for a move based on the number of elements removed. + + >>> increment_score(3) + 6 + >>> increment_score(0) + 0 + """ + return int(count * (count + 1) / 2) + + +def move_x(matrix_g: list[list[str]], column: int, size: int) -> list[list[str]]: + """ + Simulates gravity in a specific column. 
+ + >>> move_x([['-', 'A'], ['-', '-'], ['-', 'C']], 1, 2) + [['-', '-'], ['-', 'A'], ['-', 'C']] + """ + + new_list = [] + + for row in range(size): + if matrix_g[row][column] != "-": + new_list.append(matrix_g[row][column]) + else: + new_list.insert(0, matrix_g[row][column]) + for row in range(size): + matrix_g[row][column] = new_list[row] + return matrix_g + + +def move_y(matrix_g: list[list[str]], size: int) -> list[list[str]]: + """ + Shifts all columns leftward when an entire column becomes empty. + + >>> move_y([['-', 'A'], ['-', '-'], ['-', 'C']], 2) + [['A', '-'], ['-', '-'], ['-', 'C']] + """ + + empty_columns = [] + + for column in range(size - 1, -1, -1): + if all(matrix_g[row][column] == "-" for row in range(size)): + empty_columns.append(column) + + for column in empty_columns: + for col in range(column + 1, size): + for row in range(size): + matrix_g[row][col - 1] = matrix_g[row][col] + for row in range(size): + matrix_g[row][-1] = "-" + + return matrix_g + + +def play( + matrix_g: list[list[str]], pos_x: int, pos_y: int, size: int +) -> tuple[list[list[str]], int]: + """ + Processes a single move, updating the matrix and calculating the score. + + >>> play([['R', 'G'], ['R', 'G']], 0, 0, 2) + ([['G', '-'], ['G', '-']], 3) + """ + + same_colors = find_repeat(matrix_g, pos_x, pos_y, size) + + if len(same_colors) != 0: + for pos in same_colors: + matrix_g[pos[0]][pos[1]] = "-" + for column in range(size): + matrix_g = move_x(matrix_g, column, size) + + matrix_g = move_y(matrix_g, size) + + return (matrix_g, increment_score(len(same_colors))) + + +def process_game(size: int, matrix: list[str], moves: list[tuple[int, int]]) -> int: + """Processes the game logic for the given matrix and moves. + + Args: + size (int): Size of the game board. + matrix (List[str]): Initial game matrix. + moves (List[Tuple[int, int]]): List of moves as (x, y) coordinates. + + Returns: + int: The total score obtained. 
+ >>> process_game(3, ['aaa', 'bbb', 'ccc'], [(0, 0)]) + 6 + """ + + game_matrix = [list(row) for row in matrix] + total_score = 0 + + for move in moves: + pos_x, pos_y = move + game_matrix, score = play(game_matrix, pos_x, pos_y, size) + total_score += score + + return total_score + + +if __name__ == "__main__": + import doctest + + doctest.testmod(verbose=True) + try: + size = int(input("Enter the size of the matrix: ")) + validate_matrix_size(size) + print(f"Enter the {size} rows of the matrix:") + matrix = [input(f"Row {i+1}: ") for i in range(size)] + validate_matrix_content(matrix, size) + moves_input = input("Enter the moves (e.g., '0 0, 1 1'): ") + moves = parse_moves(moves_input) + validate_moves(moves, size) + score = process_game(size, matrix, moves) + print(f"Total score: {score}") + except ValueError as e: + print(f"{e}") From b22fab0ea46c7b625d8137d1fb07d082e20d6d7b Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 2 Dec 2024 22:35:21 +0100 Subject: [PATCH 147/260] [pre-commit.ci] pre-commit autoupdate (#12404) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.8.0 → v0.8.1](https://github.com/astral-sh/ruff-pre-commit/compare/v0.8.0...v0.8.1) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 64d9a833cd21..bef251749c19 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.8.0 + rev: v0.8.1 hooks: - id: ruff - id: ruff-format From 0bcdfbdb34e03e24e2f5da90a7236226b721981d Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Thu, 5 Dec 2024 05:34:48 +0100 Subject: [PATCH 148/260] Use Astral uv 
(#12402) * Use Astral uv * uvx vs uv run * uv sync --group=euler-validate,test * uv sync --group=euler-validate --group=test * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * --group=test --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/workflows/build.yml | 15 +- .github/workflows/project_euler.yml | 16 +- .github/workflows/ruff.yml | 4 +- .github/workflows/sphinx.yml | 6 +- pyproject.toml | 48 +- requirements.txt | 6 - uv.lock | 1246 +++++++++++++++++++++++++++ 7 files changed, 1301 insertions(+), 40 deletions(-) create mode 100644 uv.lock diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index b5703e2f1ab6..a6f308715cc2 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -10,21 +10,18 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + - uses: astral-sh/setup-uv@v4 + with: + enable-cache: true + cache-dependency-glob: uv.lock - uses: actions/setup-python@v5 with: python-version: 3.13 allow-prereleases: true - - uses: actions/cache@v4 - with: - path: ~/.cache/pip - key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }} - - name: Install dependencies - run: | - python -m pip install --upgrade pip setuptools wheel - python -m pip install pytest-cov -r requirements.txt + - run: uv sync --group=test - name: Run tests # TODO: #8818 Re-enable quantum tests - run: pytest + run: uv run pytest --ignore=computer_vision/cnn_classification.py --ignore=docs/conf.py --ignore=dynamic_programming/k_means_clustering_tensorflow.py diff --git a/.github/workflows/project_euler.yml b/.github/workflows/project_euler.yml index 59e1208a650d..84c55335451e 100644 --- a/.github/workflows/project_euler.yml +++ b/.github/workflows/project_euler.yml @@ -15,25 +15,21 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + - uses: astral-sh/setup-uv@v4 - uses: actions/setup-python@v5 with: 
python-version: 3.x - - name: Install pytest and pytest-cov - run: | - python -m pip install --upgrade pip - python -m pip install --upgrade numpy pytest pytest-cov - - run: pytest --doctest-modules --cov-report=term-missing:skip-covered --cov=project_euler/ project_euler/ + - run: uv sync --group=euler-validate --group=test + - run: uv run pytest --doctest-modules --cov-report=term-missing:skip-covered --cov=project_euler/ project_euler/ validate-solutions: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + - uses: astral-sh/setup-uv@v4 - uses: actions/setup-python@v5 with: python-version: 3.x - - name: Install pytest and requests - run: | - python -m pip install --upgrade pip - python -m pip install --upgrade numpy pytest requests - - run: pytest scripts/validate_solutions.py + - run: uv sync --group=euler-validate --group=test + - run: uv run pytest scripts/validate_solutions.py env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml index d354eba672ae..2c6f92fcf7bf 100644 --- a/.github/workflows/ruff.yml +++ b/.github/workflows/ruff.yml @@ -12,5 +12,5 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - run: pip install --user ruff - - run: ruff check --output-format=github . + - uses: astral-sh/setup-uv@v4 + - run: uvx ruff check --output-format=github . diff --git a/.github/workflows/sphinx.yml b/.github/workflows/sphinx.yml index 9dfe344f9743..e3e2ce81a95d 100644 --- a/.github/workflows/sphinx.yml +++ b/.github/workflows/sphinx.yml @@ -26,14 +26,14 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + - uses: astral-sh/setup-uv@v4 - uses: actions/setup-python@v5 with: python-version: 3.13 allow-prereleases: true - - run: pip install --upgrade pip - - run: pip install myst-parser sphinx-autoapi sphinx-pyproject + - run: uv sync --group=docs - uses: actions/configure-pages@v5 - - run: sphinx-build -c docs . 
docs/_build/html + - run: uv run sphinx-build -c docs . docs/_build/html - uses: actions/upload-pages-artifact@v3 with: path: docs/_build/html diff --git a/pyproject.toml b/pyproject.toml index c60ec246144e..7b7176705c44 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,12 +7,43 @@ requires-python = ">=3.13" classifiers = [ "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.13", +] +dependencies = [ + "beautifulsoup4>=4.12.3", + "fake-useragent>=1.5.1", + "imageio>=2.36.1", + "keras>=3.7", + "lxml>=5.3", + "matplotlib>=3.9.3", + "numpy>=2.1.3", + "opencv-python>=4.10.0.84", + "pandas>=2.2.3", + "pillow>=11", + "requests>=2.32.3", + "rich>=13.9.4", + "scikit-learn>=1.5.2", + "sphinx-pyproject>=0.3", + "statsmodels>=0.14.4", + "sympy>=1.13.3", + "tweepy>=4.14", + "typing-extensions>=4.12.2", + "xgboost>=2.1.3", +] +[dependency-groups] +test = [ + "pytest>=8.3.4", + "pytest-cov>=6", ] -optional-dependencies.docs = [ - "myst-parser", - "sphinx-autoapi", - "sphinx-pyproject", + +docs = [ + "myst-parser>=4", + "sphinx-autoapi>=3.4", + "sphinx-pyproject>=0.3", +] +euler-validate = [ + "numpy>=2.1.3", + "requests>=2.32.3", ] [tool.ruff] @@ -61,8 +92,8 @@ lint.select = [ "UP", # pyupgrade "W", # pycodestyle "YTT", # flake8-2020 - # "ANN", # flake8-annotations # FIX ME? - # "COM", # flake8-commas + # "ANN", # flake8-annotations -- FIX ME? + # "COM", # flake8-commas -- DO NOT FIX # "D", # pydocstyle -- FIX ME? 
# "ERA", # eradicate -- DO NOT FIX # "FBT", # flake8-boolean-trap # FIX ME @@ -129,10 +160,7 @@ lint.pylint.max-statements = 88 # default: 50 [tool.codespell] ignore-words-list = "3rt,ans,bitap,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,toi,zar" -skip = "./.*,*.json,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_names.txt,pyproject.toml,strings/dictionary.txt,strings/words.txt" - -[tool.pyproject-fmt] -max_supported_python = "3.13" +skip = "./.*,*.json,*.lock,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_names.txt,pyproject.toml,strings/dictionary.txt,strings/words.txt" [tool.pytest.ini_options] markers = [ diff --git a/requirements.txt b/requirements.txt index 6754363332c4..4cc83f44987d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,18 +8,12 @@ numpy opencv-python pandas pillow -# projectq # uncomment once quantum/quantum_random.py is fixed -qiskit ; python_version < '3.12' -qiskit-aer ; python_version < '3.12' requests rich -# scikit-fuzzy # uncomment once fuzzy_logic/fuzzy_operations.py is fixed scikit-learn sphinx_pyproject statsmodels sympy -tensorflow ; python_version < '3.13' tweepy -# yulewalker # uncomment once audio_filters/equal_loudness_filter.py is fixed typing_extensions xgboost diff --git a/uv.lock b/uv.lock new file mode 100644 index 000000000000..077288f041a1 --- /dev/null +++ b/uv.lock @@ -0,0 +1,1246 @@ +version = 1 +requires-python = ">=3.13" +resolution-markers = [ + "platform_system == 'Darwin'", + "platform_machine == 'aarch64' and platform_system == 'Linux'", + "(platform_machine != 'aarch64' and platform_system != 'Darwin') or (platform_system != 'Darwin' and platform_system != 'Linux')", +] + +[[package]] +name = "absl-py" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7a/8f/fc001b92ecc467cc32ab38398bd0bfb45df46e7523bf33c2ad22a505f06e/absl-py-2.1.0.tar.gz", hash = 
"sha256:7820790efbb316739cde8b4e19357243fc3608a152024288513dd968d7d959ff", size = 118055 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a2/ad/e0d3c824784ff121c03cc031f944bc7e139a8f1870ffd2845cc2dd76f6c4/absl_py-2.1.0-py3-none-any.whl", hash = "sha256:526a04eadab8b4ee719ce68f204172ead1027549089702d99b9059f129ff1308", size = 133706 }, +] + +[[package]] +name = "alabaster" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a6/f8/d9c74d0daf3f742840fd818d69cfae176fa332022fd44e3469487d5a9420/alabaster-1.0.0.tar.gz", hash = "sha256:c00dca57bca26fa62a6d7d0a9fcce65f3e026e9bfe33e9c538fd3fbb2144fd9e", size = 24210 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/b3/6b4067be973ae96ba0d615946e314c5ae35f9f993eca561b356540bb0c2b/alabaster-1.0.0-py3-none-any.whl", hash = "sha256:fc6786402dc3fcb2de3cabd5fe455a2db534b371124f1f21de8731783dec828b", size = 13929 }, +] + +[[package]] +name = "astroid" +version = "3.3.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/38/1e/326fb1d3d83a3bb77c9f9be29d31f2901e35acb94b0605c3f2e5085047f9/astroid-3.3.5.tar.gz", hash = "sha256:5cfc40ae9f68311075d27ef68a4841bdc5cc7f6cf86671b49f00607d30188e2d", size = 397229 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/41/30/624365383fa4a40329c0f0bbbc151abc4a64e30dfc110fc8f6e2afcd02bb/astroid-3.3.5-py3-none-any.whl", hash = "sha256:a9d1c946ada25098d790e079ba2a1b112157278f3fb7e718ae6a9252f5835dc8", size = 274586 }, +] + +[[package]] +name = "babel" +version = "2.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2a/74/f1bc80f23eeba13393b7222b11d95ca3af2c1e28edca18af487137eefed9/babel-2.16.0.tar.gz", hash = "sha256:d1f3554ca26605fe173f3de0c65f750f5a42f924499bf134de6423582298e316", 
size = 9348104 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ed/20/bc79bc575ba2e2a7f70e8a1155618bb1301eaa5132a8271373a6903f73f8/babel-2.16.0-py3-none-any.whl", hash = "sha256:368b5b98b37c06b7daf6696391c3240c938b37767d4584413e8438c5c435fa8b", size = 9587599 }, +] + +[[package]] +name = "beautifulsoup4" +version = "4.12.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "soupsieve" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b3/ca/824b1195773ce6166d388573fc106ce56d4a805bd7427b624e063596ec58/beautifulsoup4-4.12.3.tar.gz", hash = "sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051", size = 581181 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b1/fe/e8c672695b37eecc5cbf43e1d0638d88d66ba3a44c4d321c796f4e59167f/beautifulsoup4-4.12.3-py3-none-any.whl", hash = "sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed", size = 147925 }, +] + +[[package]] +name = "certifi" +version = "2024.8.30" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b0/ee/9b19140fe824b367c04c5e1b369942dd754c4c5462d5674002f75c4dedc1/certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9", size = 168507 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/12/90/3c9ff0512038035f59d279fddeb79f5f1eccd8859f06d6163c58798b9487/certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8", size = 167321 }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f2/4f/e1808dc01273379acc506d18f1504eb2d299bd4131743b9fc54d7be4df1e/charset_normalizer-3.4.0.tar.gz", hash = 
"sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e", size = 106620 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f3/89/68a4c86f1a0002810a27f12e9a7b22feb198c59b2f05231349fbce5c06f4/charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114", size = 194617 }, + { url = "https://files.pythonhosted.org/packages/4f/cd/8947fe425e2ab0aa57aceb7807af13a0e4162cd21eee42ef5b053447edf5/charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed", size = 125310 }, + { url = "https://files.pythonhosted.org/packages/5b/f0/b5263e8668a4ee9becc2b451ed909e9c27058337fda5b8c49588183c267a/charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250", size = 119126 }, + { url = "https://files.pythonhosted.org/packages/ff/6e/e445afe4f7fda27a533f3234b627b3e515a1b9429bc981c9a5e2aa5d97b6/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920", size = 139342 }, + { url = "https://files.pythonhosted.org/packages/a1/b2/4af9993b532d93270538ad4926c8e37dc29f2111c36f9c629840c57cd9b3/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64", size = 149383 }, + { url = "https://files.pythonhosted.org/packages/fb/6f/4e78c3b97686b871db9be6f31d64e9264e889f8c9d7ab33c771f847f79b7/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23", size = 142214 }, + { url = 
"https://files.pythonhosted.org/packages/2b/c9/1c8fe3ce05d30c87eff498592c89015b19fade13df42850aafae09e94f35/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc", size = 144104 }, + { url = "https://files.pythonhosted.org/packages/ee/68/efad5dcb306bf37db7db338338e7bb8ebd8cf38ee5bbd5ceaaaa46f257e6/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d", size = 146255 }, + { url = "https://files.pythonhosted.org/packages/0c/75/1ed813c3ffd200b1f3e71121c95da3f79e6d2a96120163443b3ad1057505/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88", size = 140251 }, + { url = "https://files.pythonhosted.org/packages/7d/0d/6f32255c1979653b448d3c709583557a4d24ff97ac4f3a5be156b2e6a210/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90", size = 148474 }, + { url = "https://files.pythonhosted.org/packages/ac/a0/c1b5298de4670d997101fef95b97ac440e8c8d8b4efa5a4d1ef44af82f0d/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b", size = 151849 }, + { url = "https://files.pythonhosted.org/packages/04/4f/b3961ba0c664989ba63e30595a3ed0875d6790ff26671e2aae2fdc28a399/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d", size = 149781 }, + { url = "https://files.pythonhosted.org/packages/d8/90/6af4cd042066a4adad58ae25648a12c09c879efa4849c705719ba1b23d8c/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash 
= "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482", size = 144970 }, + { url = "https://files.pythonhosted.org/packages/cc/67/e5e7e0cbfefc4ca79025238b43cdf8a2037854195b37d6417f3d0895c4c2/charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67", size = 94973 }, + { url = "https://files.pythonhosted.org/packages/65/97/fc9bbc54ee13d33dc54a7fcf17b26368b18505500fc01e228c27b5222d80/charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b", size = 102308 }, + { url = "https://files.pythonhosted.org/packages/bf/9b/08c0432272d77b04803958a4598a51e2a4b51c06640af8b8f0f908c18bf2/charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079", size = 49446 }, +] + +[[package]] +name = "codespell" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a0/a9/98353dfc7afcdf18cffd2dd3e959a25eaaf2728cf450caa59af89648a8e4/codespell-2.3.0.tar.gz", hash = "sha256:360c7d10f75e65f67bad720af7007e1060a5d395670ec11a7ed1fed9dd17471f", size = 329791 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0e/20/b6019add11e84f821184234cea0ad91442373489ef7ccfa3d73a71b908fa/codespell-2.3.0-py3-none-any.whl", hash = "sha256:a9c7cef2501c9cfede2110fd6d4e5e62296920efe9abfb84648df866e47f58d1", size = 329167 }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 }, +] + +[[package]] +name = "contourpy" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/25/c2/fc7193cc5383637ff390a712e88e4ded0452c9fbcf84abe3de5ea3df1866/contourpy-1.3.1.tar.gz", hash = "sha256:dfd97abd83335045a913e3bcc4a09c0ceadbe66580cf573fe961f4a825efa699", size = 13465753 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9a/e7/de62050dce687c5e96f946a93546910bc67e483fe05324439e329ff36105/contourpy-1.3.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a761d9ccfc5e2ecd1bf05534eda382aa14c3e4f9205ba5b1684ecfe400716ef2", size = 271548 }, + { url = "https://files.pythonhosted.org/packages/78/4d/c2a09ae014ae984c6bdd29c11e74d3121b25eaa117eca0bb76340efd7e1c/contourpy-1.3.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:523a8ee12edfa36f6d2a49407f705a6ef4c5098de4f498619787e272de93f2d5", size = 255576 }, + { url = "https://files.pythonhosted.org/packages/ab/8a/915380ee96a5638bda80cd061ccb8e666bfdccea38d5741cb69e6dbd61fc/contourpy-1.3.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece6df05e2c41bd46776fbc712e0996f7c94e0d0543af1656956d150c4ca7c81", size = 306635 }, + { url = "https://files.pythonhosted.org/packages/29/5c/c83ce09375428298acd4e6582aeb68b1e0d1447f877fa993d9bf6cd3b0a0/contourpy-1.3.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:573abb30e0e05bf31ed067d2f82500ecfdaec15627a59d63ea2d95714790f5c2", size = 345925 }, + { url = 
"https://files.pythonhosted.org/packages/29/63/5b52f4a15e80c66c8078a641a3bfacd6e07106835682454647aca1afc852/contourpy-1.3.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a9fa36448e6a3a1a9a2ba23c02012c43ed88905ec80163f2ffe2421c7192a5d7", size = 318000 }, + { url = "https://files.pythonhosted.org/packages/9a/e2/30ca086c692691129849198659bf0556d72a757fe2769eb9620a27169296/contourpy-1.3.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ea9924d28fc5586bf0b42d15f590b10c224117e74409dd7a0be3b62b74a501c", size = 322689 }, + { url = "https://files.pythonhosted.org/packages/6b/77/f37812ef700f1f185d348394debf33f22d531e714cf6a35d13d68a7003c7/contourpy-1.3.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5b75aa69cb4d6f137b36f7eb2ace9280cfb60c55dc5f61c731fdf6f037f958a3", size = 1268413 }, + { url = "https://files.pythonhosted.org/packages/3f/6d/ce84e79cdd128542ebeb268f84abb4b093af78e7f8ec504676673d2675bc/contourpy-1.3.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:041b640d4ec01922083645a94bb3b2e777e6b626788f4095cf21abbe266413c1", size = 1326530 }, + { url = "https://files.pythonhosted.org/packages/72/22/8282f4eae20c73c89bee7a82a19c4e27af9b57bb602ecaa00713d5bdb54d/contourpy-1.3.1-cp313-cp313-win32.whl", hash = "sha256:36987a15e8ace5f58d4d5da9dca82d498c2bbb28dff6e5d04fbfcc35a9cb3a82", size = 175315 }, + { url = "https://files.pythonhosted.org/packages/e3/d5/28bca491f65312b438fbf076589dcde7f6f966b196d900777f5811b9c4e2/contourpy-1.3.1-cp313-cp313-win_amd64.whl", hash = "sha256:a7895f46d47671fa7ceec40f31fae721da51ad34bdca0bee83e38870b1f47ffd", size = 220987 }, + { url = "https://files.pythonhosted.org/packages/2f/24/a4b285d6adaaf9746e4700932f579f1a7b6f9681109f694cfa233ae75c4e/contourpy-1.3.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:9ddeb796389dadcd884c7eb07bd14ef12408aaae358f0e2ae24114d797eede30", size = 285001 }, + { url = 
"https://files.pythonhosted.org/packages/48/1d/fb49a401b5ca4f06ccf467cd6c4f1fd65767e63c21322b29b04ec40b40b9/contourpy-1.3.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:19c1555a6801c2f084c7ddc1c6e11f02eb6a6016ca1318dd5452ba3f613a1751", size = 268553 }, + { url = "https://files.pythonhosted.org/packages/79/1e/4aef9470d13fd029087388fae750dccb49a50c012a6c8d1d634295caa644/contourpy-1.3.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:841ad858cff65c2c04bf93875e384ccb82b654574a6d7f30453a04f04af71342", size = 310386 }, + { url = "https://files.pythonhosted.org/packages/b0/34/910dc706ed70153b60392b5305c708c9810d425bde12499c9184a1100888/contourpy-1.3.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4318af1c925fb9a4fb190559ef3eec206845f63e80fb603d47f2d6d67683901c", size = 349806 }, + { url = "https://files.pythonhosted.org/packages/31/3c/faee6a40d66d7f2a87f7102236bf4780c57990dd7f98e5ff29881b1b1344/contourpy-1.3.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:14c102b0eab282427b662cb590f2e9340a9d91a1c297f48729431f2dcd16e14f", size = 321108 }, + { url = "https://files.pythonhosted.org/packages/17/69/390dc9b20dd4bb20585651d7316cc3054b7d4a7b4f8b710b2b698e08968d/contourpy-1.3.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05e806338bfeaa006acbdeba0ad681a10be63b26e1b17317bfac3c5d98f36cda", size = 327291 }, + { url = "https://files.pythonhosted.org/packages/ef/74/7030b67c4e941fe1e5424a3d988080e83568030ce0355f7c9fc556455b01/contourpy-1.3.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4d76d5993a34ef3df5181ba3c92fabb93f1eaa5729504fb03423fcd9f3177242", size = 1263752 }, + { url = "https://files.pythonhosted.org/packages/f0/ed/92d86f183a8615f13f6b9cbfc5d4298a509d6ce433432e21da838b4b63f4/contourpy-1.3.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = 
"sha256:89785bb2a1980c1bd87f0cb1517a71cde374776a5f150936b82580ae6ead44a1", size = 1318403 }, + { url = "https://files.pythonhosted.org/packages/b3/0e/c8e4950c77dcfc897c71d61e56690a0a9df39543d2164040301b5df8e67b/contourpy-1.3.1-cp313-cp313t-win32.whl", hash = "sha256:8eb96e79b9f3dcadbad2a3891672f81cdcab7f95b27f28f1c67d75f045b6b4f1", size = 185117 }, + { url = "https://files.pythonhosted.org/packages/c1/31/1ae946f11dfbd229222e6d6ad8e7bd1891d3d48bde5fbf7a0beb9491f8e3/contourpy-1.3.1-cp313-cp313t-win_amd64.whl", hash = "sha256:287ccc248c9e0d0566934e7d606201abd74761b5703d804ff3df8935f523d546", size = 236668 }, +] + +[[package]] +name = "coverage" +version = "7.6.8" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ab/75/aecfd0a3adbec6e45753976bc2a9fed62b42cea9a206d10fd29244a77953/coverage-7.6.8.tar.gz", hash = "sha256:8b2b8503edb06822c86d82fa64a4a5cb0760bb8f31f26e138ec743f422f37cfc", size = 801425 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9a/84/6f0ccf94a098ac3d6d6f236bd3905eeac049a9e0efcd9a63d4feca37ac4b/coverage-7.6.8-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0b0c69f4f724c64dfbfe79f5dfb503b42fe6127b8d479b2677f2b227478db2eb", size = 207313 }, + { url = "https://files.pythonhosted.org/packages/db/2b/e3b3a3a12ebec738c545897ac9f314620470fcbc368cdac88cf14974ba20/coverage-7.6.8-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c15b32a7aca8038ed7644f854bf17b663bc38e1671b5d6f43f9a2b2bd0c46f63", size = 207574 }, + { url = "https://files.pythonhosted.org/packages/db/c0/5bf95d42b6a8d21dfce5025ce187f15db57d6460a59b67a95fe8728162f1/coverage-7.6.8-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63068a11171e4276f6ece913bde059e77c713b48c3a848814a6537f35afb8365", size = 240090 }, + { url = 
"https://files.pythonhosted.org/packages/57/b8/d6fd17d1a8e2b0e1a4e8b9cb1f0f261afd422570735899759c0584236916/coverage-7.6.8-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f4548c5ead23ad13fb7a2c8ea541357474ec13c2b736feb02e19a3085fac002", size = 237237 }, + { url = "https://files.pythonhosted.org/packages/d4/e4/a91e9bb46809c8b63e68fc5db5c4d567d3423b6691d049a4f950e38fbe9d/coverage-7.6.8-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b4b4299dd0d2c67caaaf286d58aef5e75b125b95615dda4542561a5a566a1e3", size = 239225 }, + { url = "https://files.pythonhosted.org/packages/31/9c/9b99b0591ec4555b7292d271e005f27b465388ce166056c435b288db6a69/coverage-7.6.8-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c9ebfb2507751f7196995142f057d1324afdab56db1d9743aab7f50289abd022", size = 238888 }, + { url = "https://files.pythonhosted.org/packages/a6/85/285c2df9a04bc7c31f21fd9d4a24d19e040ec5e2ff06e572af1f6514c9e7/coverage-7.6.8-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:c1b4474beee02ede1eef86c25ad4600a424fe36cff01a6103cb4533c6bf0169e", size = 236974 }, + { url = "https://files.pythonhosted.org/packages/cb/a1/95ec8522206f76cdca033bf8bb61fff56429fb414835fc4d34651dfd29fc/coverage-7.6.8-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d9fd2547e6decdbf985d579cf3fc78e4c1d662b9b0ff7cc7862baaab71c9cc5b", size = 238815 }, + { url = "https://files.pythonhosted.org/packages/8d/ac/687e9ba5e6d0979e9dab5c02e01c4f24ac58260ef82d88d3b433b3f84f1e/coverage-7.6.8-cp313-cp313-win32.whl", hash = "sha256:8aae5aea53cbfe024919715eca696b1a3201886ce83790537d1c3668459c7146", size = 209957 }, + { url = "https://files.pythonhosted.org/packages/2f/a3/b61cc8e3fcf075293fb0f3dee405748453c5ba28ac02ceb4a87f52bdb105/coverage-7.6.8-cp313-cp313-win_amd64.whl", hash = 
"sha256:ae270e79f7e169ccfe23284ff5ea2d52a6f401dc01b337efb54b3783e2ce3f28", size = 210711 }, + { url = "https://files.pythonhosted.org/packages/ee/4b/891c8b9acf1b62c85e4a71dac142ab9284e8347409b7355de02e3f38306f/coverage-7.6.8-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:de38add67a0af869b0d79c525d3e4588ac1ffa92f39116dbe0ed9753f26eba7d", size = 208053 }, + { url = "https://files.pythonhosted.org/packages/18/a9/9e330409b291cc002723d339346452800e78df1ce50774ca439ade1d374f/coverage-7.6.8-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b07c25d52b1c16ce5de088046cd2432b30f9ad5e224ff17c8f496d9cb7d1d451", size = 208329 }, + { url = "https://files.pythonhosted.org/packages/9c/0d/33635fd429f6589c6e1cdfc7bf581aefe4c1792fbff06383f9d37f59db60/coverage-7.6.8-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62a66ff235e4c2e37ed3b6104d8b478d767ff73838d1222132a7a026aa548764", size = 251052 }, + { url = "https://files.pythonhosted.org/packages/23/32/8a08da0e46f3830bbb9a5b40614241b2e700f27a9c2889f53122486443ed/coverage-7.6.8-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09b9f848b28081e7b975a3626e9081574a7b9196cde26604540582da60235fdf", size = 246765 }, + { url = "https://files.pythonhosted.org/packages/56/3f/3b86303d2c14350fdb1c6c4dbf9bc76000af2382f42ca1d4d99c6317666e/coverage-7.6.8-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:093896e530c38c8e9c996901858ac63f3d4171268db2c9c8b373a228f459bbc5", size = 249125 }, + { url = "https://files.pythonhosted.org/packages/36/cb/c4f081b9023f9fd8646dbc4ef77be0df090263e8f66f4ea47681e0dc2cff/coverage-7.6.8-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9a7b8ac36fd688c8361cbc7bf1cb5866977ece6e0b17c34aa0df58bda4fa18a4", size = 248615 }, + { url = 
"https://files.pythonhosted.org/packages/32/ee/53bdbf67760928c44b57b2c28a8c0a4bf544f85a9ee129a63ba5c78fdee4/coverage-7.6.8-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:38c51297b35b3ed91670e1e4efb702b790002e3245a28c76e627478aa3c10d83", size = 246507 }, + { url = "https://files.pythonhosted.org/packages/57/49/5a57910bd0af6d8e802b4ca65292576d19b54b49f81577fd898505dee075/coverage-7.6.8-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:2e4e0f60cb4bd7396108823548e82fdab72d4d8a65e58e2c19bbbc2f1e2bfa4b", size = 247785 }, + { url = "https://files.pythonhosted.org/packages/bd/37/e450c9f6b297c79bb9858407396ed3e084dcc22990dd110ab01d5ceb9770/coverage-7.6.8-cp313-cp313t-win32.whl", hash = "sha256:6535d996f6537ecb298b4e287a855f37deaf64ff007162ec0afb9ab8ba3b8b71", size = 210605 }, + { url = "https://files.pythonhosted.org/packages/44/79/7d0c7dd237c6905018e2936cd1055fe1d42e7eba2ebab3c00f4aad2a27d7/coverage-7.6.8-cp313-cp313t-win_amd64.whl", hash = "sha256:c79c0685f142ca53256722a384540832420dff4ab15fec1863d7e5bc8691bdcc", size = 211777 }, +] + +[[package]] +name = "cycler" +version = "0.12.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a9/95/a3dbbb5028f35eafb79008e7522a75244477d2838f38cbb722248dabc2a8/cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c", size = 7615 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/05/c19819d5e3d95294a6f5947fb9b9629efb316b96de511b418c53d245aae6/cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30", size = 8321 }, +] + +[[package]] +name = "docutils" +version = "0.21.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ae/ed/aefcc8cd0ba62a0560c3c18c33925362d46c6075480bfa4df87b28e169a9/docutils-0.21.2.tar.gz", 
hash = "sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f", size = 2204444 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8f/d7/9322c609343d929e75e7e5e6255e614fcc67572cfd083959cdef3b7aad79/docutils-0.21.2-py3-none-any.whl", hash = "sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2", size = 587408 }, +] + +[[package]] +name = "dom-toml" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "domdf-python-tools" }, + { name = "tomli" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/70/34/f7690cf288eaa86b55c8f1b890d0834e6df44a026a88eca12274fcd624ab/dom_toml-2.0.0.tar.gz", hash = "sha256:3c07e8436538994974127b1ae037661d1a779ac915c44fd06b3ab5fe140ff589", size = 11133 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/22/99/b6fc87dff3138491d81676bdcbf1531080925ba41486ec1dafd86e33fdbc/dom_toml-2.0.0-py3-none-any.whl", hash = "sha256:0b6d02a72bcbc6be8175c61afc30623bbb6b74c4650f2a806fbc3fb7fe86935d", size = 13376 }, +] + +[[package]] +name = "domdf-python-tools" +version = "3.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "natsort" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6b/78/974e10c583ba9d2302e748c9585313a7f2c7ba00e4f600324f432e38fe68/domdf_python_tools-3.9.0.tar.gz", hash = "sha256:1f8a96971178333a55e083e35610d7688cd7620ad2b99790164e1fc1a3614c18", size = 103792 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/de/e9/7447a88b217650a74927d3444a89507986479a69b83741900eddd34167fe/domdf_python_tools-3.9.0-py3-none-any.whl", hash = "sha256:4e1ef365cbc24627d6d1e90cf7d46d8ab8df967e1237f4a26885f6986c78872e", size = 127106 }, +] + +[[package]] +name = "fake-useragent" +version = "1.5.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/24/a1/1f662631ab153975fa8dbf09296324ecbaf53370dce922054e8de6b57370/fake-useragent-1.5.1.tar.gz", hash = "sha256:6387269f5a2196b5ba7ed8935852f75486845a1c95c50e72460e6a8e762f5c49", size = 22631 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e4/99/60d8cf1b26938c2e0a57e232f7f15641dfcd6f8deda454d73e4145910ff6/fake_useragent-1.5.1-py3-none-any.whl", hash = "sha256:57415096557c8a4e23b62a375c21c55af5fd4ba30549227f562d2c4f5b60e3b3", size = 17190 }, +] + +[[package]] +name = "fonttools" +version = "4.55.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d7/4e/053fe1b5c0ce346c0a9d0557492c654362bafb14f026eae0d3ee98009152/fonttools-4.55.0.tar.gz", hash = "sha256:7636acc6ab733572d5e7eec922b254ead611f1cdad17be3f0be7418e8bfaca71", size = 3490431 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c3/87/a669ac26c6077e37ffb06abf29c5571789eefe518d06c52df392181ee694/fonttools-4.55.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8118dc571921dc9e4b288d9cb423ceaf886d195a2e5329cc427df82bba872cd9", size = 2752519 }, + { url = "https://files.pythonhosted.org/packages/0c/e9/4822ad238fe215133c7df20f1cdb1a58cfb634a31523e77ff0fb2033970a/fonttools-4.55.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:01124f2ca6c29fad4132d930da69158d3f49b2350e4a779e1efbe0e82bd63f6c", size = 2286819 }, + { url = "https://files.pythonhosted.org/packages/3e/a4/d7941c3897129e60fe68d20e4819fda4d0c4858d77badae0e80ca6440b36/fonttools-4.55.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81ffd58d2691f11f7c8438796e9f21c374828805d33e83ff4b76e4635633674c", size = 4770382 }, + { url = 
"https://files.pythonhosted.org/packages/31/cf/c51ea1348f9fba9c627439afad9dee0090040809ab431f4422b5bfdda34c/fonttools-4.55.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5435e5f1eb893c35c2bc2b9cd3c9596b0fcb0a59e7a14121562986dd4c47b8dd", size = 4858336 }, + { url = "https://files.pythonhosted.org/packages/73/be/36c1fe0e5c9a96b068ddd7e82001243bbe7fe12549c8d14e1bd025bf40c9/fonttools-4.55.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d12081729280c39d001edd0f4f06d696014c26e6e9a0a55488fabc37c28945e4", size = 4756072 }, + { url = "https://files.pythonhosted.org/packages/5c/18/6dd381c29f215a017f79aa9fea0722424a0046b47991c4390a78ff87ce0c/fonttools-4.55.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a7ad1f1b98ab6cb927ab924a38a8649f1ffd7525c75fe5b594f5dab17af70e18", size = 5008668 }, + { url = "https://files.pythonhosted.org/packages/b8/95/316f20092b389b927dba1d1dccd3f541853f96e707e210f1b9f4e7bacdd5/fonttools-4.55.0-cp313-cp313-win32.whl", hash = "sha256:abe62987c37630dca69a104266277216de1023cf570c1643bb3a19a9509e7a1b", size = 2155841 }, + { url = "https://files.pythonhosted.org/packages/35/ca/b4638aa3e446184892e2f9cc8ef44bb506f47fea04580df7fb84f5a4363d/fonttools-4.55.0-cp313-cp313-win_amd64.whl", hash = "sha256:2863555ba90b573e4201feaf87a7e71ca3b97c05aa4d63548a4b69ea16c9e998", size = 2200587 }, + { url = "https://files.pythonhosted.org/packages/b4/4a/786589606d4989cb34d8bc766cd687d955aaf3039c367fe7104bcf82dc98/fonttools-4.55.0-py3-none-any.whl", hash = "sha256:12db5888cd4dd3fcc9f0ee60c6edd3c7e1fd44b7dd0f31381ea03df68f8a153f", size = 1100249 }, +] + +[[package]] +name = "h5py" +version = "3.12.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/cc/0c/5c2b0a88158682aeafb10c1c2b735df5bc31f165bfe192f2ee9f2a23b5f1/h5py-3.12.1.tar.gz", hash = "sha256:326d70b53d31baa61f00b8aa5f95c2fcb9621a3ee8365d770c551a13dbbcbfdf", size = 411457 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/23/1c/ecdd0efab52c24f2a9bf2324289828b860e8dd1e3c5ada3cf0889e14fdc1/h5py-3.12.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:513171e90ed92236fc2ca363ce7a2fc6f2827375efcbb0cc7fbdd7fe11fecafc", size = 3346239 }, + { url = "https://files.pythonhosted.org/packages/93/cd/5b6f574bf3e318bbe305bc93ba45181676550eb44ba35e006d2e98004eaa/h5py-3.12.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:59400f88343b79655a242068a9c900001a34b63e3afb040bd7cdf717e440f653", size = 2843416 }, + { url = "https://files.pythonhosted.org/packages/8a/4f/b74332f313bfbe94ba03fff784219b9db385e6139708e55b11490149f90a/h5py-3.12.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3e465aee0ec353949f0f46bf6c6f9790a2006af896cee7c178a8c3e5090aa32", size = 5154390 }, + { url = "https://files.pythonhosted.org/packages/1a/57/93ea9e10a6457ea8d3b867207deb29a527e966a08a84c57ffd954e32152a/h5py-3.12.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba51c0c5e029bb5420a343586ff79d56e7455d496d18a30309616fdbeed1068f", size = 5378244 }, + { url = "https://files.pythonhosted.org/packages/50/51/0bbf3663062b2eeee78aa51da71e065f8a0a6e3cb950cc7020b4444999e6/h5py-3.12.1-cp313-cp313-win_amd64.whl", hash = "sha256:52ab036c6c97055b85b2a242cb540ff9590bacfda0c03dd0cf0661b311f522f8", size = 2979760 }, +] + +[[package]] +name = "idna" +version = "3.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = 
"sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 }, +] + +[[package]] +name = "imageio" +version = "2.36.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, + { name = "pillow" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/70/aa/2e7a49259339e691ff2b477ae0696b1784a09313c5872700bbbdd00a3030/imageio-2.36.1.tar.gz", hash = "sha256:e4e1d231f47f9a9e16100b0f7ce1a86e8856fb4d1c0fa2c4365a316f1746be62", size = 389522 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5c/f9/f78e7f5ac8077c481bf6b43b8bc736605363034b3d5eb3ce8eb79f53f5f1/imageio-2.36.1-py3-none-any.whl", hash = "sha256:20abd2cae58e55ca1af8a8dcf43293336a59adf0391f1917bf8518633cfc2cdf", size = 315435 }, +] + +[[package]] +name = "imagesize" +version = "1.4.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a7/84/62473fb57d61e31fef6e36d64a179c8781605429fd927b5dd608c997be31/imagesize-1.4.1.tar.gz", hash = "sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a", size = 1280026 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ff/62/85c4c919272577931d407be5ba5d71c20f0b616d31a0befe0ae45bb79abd/imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b", size = 8769 }, +] + +[[package]] +name = "iniconfig" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d7/4b/cbd8e699e64a6f16ca3a8220661b5f83792b3017d0f79807cb8708d33913/iniconfig-2.0.0.tar.gz", hash = 
"sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", size = 4646 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374", size = 5892 }, +] + +[[package]] +name = "jinja2" +version = "3.1.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ed/55/39036716d19cab0747a5020fc7e907f362fbf48c984b14e62127f7e68e5d/jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369", size = 240245 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/31/80/3a54838c3fb461f6fec263ebf3a3a41771bd05190238de3486aae8540c36/jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d", size = 133271 }, +] + +[[package]] +name = "joblib" +version = "1.4.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/64/33/60135848598c076ce4b231e1b1895170f45fbcaeaa2c9d5e38b04db70c35/joblib-1.4.2.tar.gz", hash = "sha256:2382c5816b2636fbd20a09e0f4e9dad4736765fdfb7dca582943b9c1366b3f0e", size = 2116621 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/91/29/df4b9b42f2be0b623cbd5e2140cafcaa2bef0759a00b7b70104dcfe2fb51/joblib-1.4.2-py3-none-any.whl", hash = "sha256:06d478d5674cbc267e7496a410ee875abd68e4340feff4490bcb7afb88060ae6", size = 301817 }, +] + +[[package]] +name = "keras" +version = "3.7.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "absl-py" }, + { name = "h5py" }, + { name = "ml-dtypes" }, + { name = "namex" }, + { name = "numpy" }, + { name = "optree" }, + { name = "packaging" }, + { name = "rich" 
}, +] +sdist = { url = "https://files.pythonhosted.org/packages/c9/c3/56fc6800c5eab94bd0f5e930751bd4c0fa1ee0aee272fad4a72723ffae87/keras-3.7.0.tar.gz", hash = "sha256:a4451a5591e75dfb414d0b84a3fd2fb9c0240cc87ebe7e397f547ce10b0e67b7", size = 924719 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8a/bf/9e3f10e55df30b0fb4bf6c2ee7d50bda2e070599b86f62ea3f9954af172b/keras-3.7.0-py3-none-any.whl", hash = "sha256:546a64f302e4779c129c06d9826fa586de752cdfd43d7dc4010c31b282587969", size = 1228365 }, +] + +[[package]] +name = "kiwisolver" +version = "1.4.7" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/85/4d/2255e1c76304cbd60b48cee302b66d1dde4468dc5b1160e4b7cb43778f2a/kiwisolver-1.4.7.tar.gz", hash = "sha256:9893ff81bd7107f7b685d3017cc6583daadb4fc26e4a888350df530e41980a60", size = 97286 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c4/06/7da99b04259b0f18b557a4effd1b9c901a747f7fdd84cf834ccf520cb0b2/kiwisolver-1.4.7-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:2e6039dcbe79a8e0f044f1c39db1986a1b8071051efba3ee4d74f5b365f5226e", size = 121913 }, + { url = "https://files.pythonhosted.org/packages/97/f5/b8a370d1aa593c17882af0a6f6755aaecd643640c0ed72dcfd2eafc388b9/kiwisolver-1.4.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a1ecf0ac1c518487d9d23b1cd7139a6a65bc460cd101ab01f1be82ecf09794b6", size = 65627 }, + { url = "https://files.pythonhosted.org/packages/2a/fc/6c0374f7503522539e2d4d1b497f5ebad3f8ed07ab51aed2af988dd0fb65/kiwisolver-1.4.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7ab9ccab2b5bd5702ab0803676a580fffa2aa178c2badc5557a84cc943fcf750", size = 63888 }, + { url = "https://files.pythonhosted.org/packages/bf/3e/0b7172793d0f41cae5c923492da89a2ffcd1adf764c16159ca047463ebd3/kiwisolver-1.4.7-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", 
hash = "sha256:f816dd2277f8d63d79f9c8473a79fe54047bc0467754962840782c575522224d", size = 1369145 }, + { url = "https://files.pythonhosted.org/packages/77/92/47d050d6f6aced2d634258123f2688fbfef8ded3c5baf2c79d94d91f1f58/kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf8bcc23ceb5a1b624572a1623b9f79d2c3b337c8c455405ef231933a10da379", size = 1461448 }, + { url = "https://files.pythonhosted.org/packages/9c/1b/8f80b18e20b3b294546a1adb41701e79ae21915f4175f311a90d042301cf/kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dea0bf229319828467d7fca8c7c189780aa9ff679c94539eed7532ebe33ed37c", size = 1578750 }, + { url = "https://files.pythonhosted.org/packages/a4/fe/fe8e72f3be0a844f257cadd72689c0848c6d5c51bc1d60429e2d14ad776e/kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c06a4c7cf15ec739ce0e5971b26c93638730090add60e183530d70848ebdd34", size = 1507175 }, + { url = "https://files.pythonhosted.org/packages/39/fa/cdc0b6105d90eadc3bee525fecc9179e2b41e1ce0293caaf49cb631a6aaf/kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:913983ad2deb14e66d83c28b632fd35ba2b825031f2fa4ca29675e665dfecbe1", size = 1463963 }, + { url = "https://files.pythonhosted.org/packages/6e/5c/0c03c4e542720c6177d4f408e56d1c8315899db72d46261a4e15b8b33a41/kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5337ec7809bcd0f424c6b705ecf97941c46279cf5ed92311782c7c9c2026f07f", size = 2248220 }, + { url = "https://files.pythonhosted.org/packages/3d/ee/55ef86d5a574f4e767df7da3a3a7ff4954c996e12d4fbe9c408170cd7dcc/kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4c26ed10c4f6fa6ddb329a5120ba3b6db349ca192ae211e882970bfc9d91420b", size = 2404463 }, + { url = 
"https://files.pythonhosted.org/packages/0f/6d/73ad36170b4bff4825dc588acf4f3e6319cb97cd1fb3eb04d9faa6b6f212/kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c619b101e6de2222c1fcb0531e1b17bbffbe54294bfba43ea0d411d428618c27", size = 2352842 }, + { url = "https://files.pythonhosted.org/packages/0b/16/fa531ff9199d3b6473bb4d0f47416cdb08d556c03b8bc1cccf04e756b56d/kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:073a36c8273647592ea332e816e75ef8da5c303236ec0167196793eb1e34657a", size = 2501635 }, + { url = "https://files.pythonhosted.org/packages/78/7e/aa9422e78419db0cbe75fb86d8e72b433818f2e62e2e394992d23d23a583/kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:3ce6b2b0231bda412463e152fc18335ba32faf4e8c23a754ad50ffa70e4091ee", size = 2314556 }, + { url = "https://files.pythonhosted.org/packages/a8/b2/15f7f556df0a6e5b3772a1e076a9d9f6c538ce5f05bd590eca8106508e06/kiwisolver-1.4.7-cp313-cp313-win32.whl", hash = "sha256:f4c9aee212bc89d4e13f58be11a56cc8036cabad119259d12ace14b34476fd07", size = 46364 }, + { url = "https://files.pythonhosted.org/packages/0b/db/32e897e43a330eee8e4770bfd2737a9584b23e33587a0812b8e20aac38f7/kiwisolver-1.4.7-cp313-cp313-win_amd64.whl", hash = "sha256:8a3ec5aa8e38fc4c8af308917ce12c536f1c88452ce554027e55b22cbbfbff76", size = 55887 }, + { url = "https://files.pythonhosted.org/packages/c8/a4/df2bdca5270ca85fd25253049eb6708d4127be2ed0e5c2650217450b59e9/kiwisolver-1.4.7-cp313-cp313-win_arm64.whl", hash = "sha256:76c8094ac20ec259471ac53e774623eb62e6e1f56cd8690c67ce6ce4fcb05650", size = 48530 }, +] + +[[package]] +name = "lxml" +version = "5.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e7/6b/20c3a4b24751377aaa6307eb230b66701024012c29dd374999cc92983269/lxml-5.3.0.tar.gz", hash = "sha256:4e109ca30d1edec1ac60cdbe341905dc3b8f55b16855e03a54aaf59e51ec8c6f", size = 
3679318 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/94/6a/42141e4d373903bfea6f8e94b2f554d05506dfda522ada5343c651410dc8/lxml-5.3.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8c72e9563347c7395910de6a3100a4840a75a6f60e05af5e58566868d5eb2d6a", size = 8156284 }, + { url = "https://files.pythonhosted.org/packages/91/5e/fa097f0f7d8b3d113fb7312c6308af702f2667f22644441715be961f2c7e/lxml-5.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e92ce66cd919d18d14b3856906a61d3f6b6a8500e0794142338da644260595cd", size = 4432407 }, + { url = "https://files.pythonhosted.org/packages/2d/a1/b901988aa6d4ff937f2e5cfc114e4ec561901ff00660c3e56713642728da/lxml-5.3.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d04f064bebdfef9240478f7a779e8c5dc32b8b7b0b2fc6a62e39b928d428e51", size = 5048331 }, + { url = "https://files.pythonhosted.org/packages/30/0f/b2a54f48e52de578b71bbe2a2f8160672a8a5e103df3a78da53907e8c7ed/lxml-5.3.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c2fb570d7823c2bbaf8b419ba6e5662137f8166e364a8b2b91051a1fb40ab8b", size = 4744835 }, + { url = "https://files.pythonhosted.org/packages/82/9d/b000c15538b60934589e83826ecbc437a1586488d7c13f8ee5ff1f79a9b8/lxml-5.3.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0c120f43553ec759f8de1fee2f4794452b0946773299d44c36bfe18e83caf002", size = 5316649 }, + { url = "https://files.pythonhosted.org/packages/e3/ee/ffbb9eaff5e541922611d2c56b175c45893d1c0b8b11e5a497708a6a3b3b/lxml-5.3.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:562e7494778a69086f0312ec9689f6b6ac1c6b65670ed7d0267e49f57ffa08c4", size = 4812046 }, + { url = 
"https://files.pythonhosted.org/packages/15/ff/7ff89d567485c7b943cdac316087f16b2399a8b997007ed352a1248397e5/lxml-5.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:423b121f7e6fa514ba0c7918e56955a1d4470ed35faa03e3d9f0e3baa4c7e492", size = 4918597 }, + { url = "https://files.pythonhosted.org/packages/c6/a3/535b6ed8c048412ff51268bdf4bf1cf052a37aa7e31d2e6518038a883b29/lxml-5.3.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:c00f323cc00576df6165cc9d21a4c21285fa6b9989c5c39830c3903dc4303ef3", size = 4738071 }, + { url = "https://files.pythonhosted.org/packages/7a/8f/cbbfa59cb4d4fd677fe183725a76d8c956495d7a3c7f111ab8f5e13d2e83/lxml-5.3.0-cp313-cp313-manylinux_2_28_ppc64le.whl", hash = "sha256:1fdc9fae8dd4c763e8a31e7630afef517eab9f5d5d31a278df087f307bf601f4", size = 5342213 }, + { url = "https://files.pythonhosted.org/packages/5c/fb/db4c10dd9958d4b52e34d1d1f7c1f434422aeaf6ae2bbaaff2264351d944/lxml-5.3.0-cp313-cp313-manylinux_2_28_s390x.whl", hash = "sha256:658f2aa69d31e09699705949b5fc4719cbecbd4a97f9656a232e7d6c7be1a367", size = 4893749 }, + { url = "https://files.pythonhosted.org/packages/f2/38/bb4581c143957c47740de18a3281a0cab7722390a77cc6e610e8ebf2d736/lxml-5.3.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:1473427aff3d66a3fa2199004c3e601e6c4500ab86696edffdbc84954c72d832", size = 4945901 }, + { url = "https://files.pythonhosted.org/packages/fc/d5/18b7de4960c731e98037bd48fa9f8e6e8f2558e6fbca4303d9b14d21ef3b/lxml-5.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a87de7dd873bf9a792bf1e58b1c3887b9264036629a5bf2d2e6579fe8e73edff", size = 4815447 }, + { url = "https://files.pythonhosted.org/packages/97/a8/cd51ceaad6eb849246559a8ef60ae55065a3df550fc5fcd27014361c1bab/lxml-5.3.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:0d7b36afa46c97875303a94e8f3ad932bf78bace9e18e603f2085b652422edcd", size = 5411186 }, + { url = 
"https://files.pythonhosted.org/packages/89/c3/1e3dabab519481ed7b1fdcba21dcfb8832f57000733ef0e71cf6d09a5e03/lxml-5.3.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:cf120cce539453ae086eacc0130a324e7026113510efa83ab42ef3fcfccac7fb", size = 5324481 }, + { url = "https://files.pythonhosted.org/packages/b6/17/71e9984cf0570cd202ac0a1c9ed5c1b8889b0fc8dc736f5ef0ffb181c284/lxml-5.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:df5c7333167b9674aa8ae1d4008fa4bc17a313cc490b2cca27838bbdcc6bb15b", size = 5011053 }, + { url = "https://files.pythonhosted.org/packages/69/68/9f7e6d3312a91e30829368c2b3217e750adef12a6f8eb10498249f4e8d72/lxml-5.3.0-cp313-cp313-win32.whl", hash = "sha256:c802e1c2ed9f0c06a65bc4ed0189d000ada8049312cfeab6ca635e39c9608957", size = 3485634 }, + { url = "https://files.pythonhosted.org/packages/7d/db/214290d58ad68c587bd5d6af3d34e56830438733d0d0856c0275fde43652/lxml-5.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:406246b96d552e0503e17a1006fd27edac678b3fcc9f1be71a2f94b4ff61528d", size = 3814417 }, +] + +[[package]] +name = "markdown-it-py" +version = "3.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mdurl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/38/71/3b932df36c1a044d397a1f92d1cf91ee0a503d91e470cbd670aa66b07ed0/markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb", size = 74596 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/42/d7/1ec15b46af6af88f19b8e5ffea08fa375d433c998b8a7639e76935c14f1f/markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1", size = 87528 }, +] + +[[package]] +name = "markupsafe" +version = "3.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/b2/97/5d42485e71dfc078108a86d6de8fa46db44a1a9295e89c5d6d4a06e23a62/markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0", size = 20537 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/83/0e/67eb10a7ecc77a0c2bbe2b0235765b98d164d81600746914bebada795e97/MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd", size = 14274 }, + { url = "https://files.pythonhosted.org/packages/2b/6d/9409f3684d3335375d04e5f05744dfe7e9f120062c9857df4ab490a1031a/MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430", size = 12352 }, + { url = "https://files.pythonhosted.org/packages/d2/f5/6eadfcd3885ea85fe2a7c128315cc1bb7241e1987443d78c8fe712d03091/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094", size = 24122 }, + { url = "https://files.pythonhosted.org/packages/0c/91/96cf928db8236f1bfab6ce15ad070dfdd02ed88261c2afafd4b43575e9e9/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396", size = 23085 }, + { url = "https://files.pythonhosted.org/packages/c2/cf/c9d56af24d56ea04daae7ac0940232d31d5a8354f2b457c6d856b2057d69/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79", size = 22978 }, + { url = "https://files.pythonhosted.org/packages/2a/9f/8619835cd6a711d6272d62abb78c033bda638fdc54c4e7f4272cf1c0962b/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a", size = 24208 }, + { url = "https://files.pythonhosted.org/packages/f9/bf/176950a1792b2cd2102b8ffeb5133e1ed984547b75db47c25a67d3359f77/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca", size = 23357 }, + { url = "https://files.pythonhosted.org/packages/ce/4f/9a02c1d335caabe5c4efb90e1b6e8ee944aa245c1aaaab8e8a618987d816/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c", size = 23344 }, + { url = "https://files.pythonhosted.org/packages/ee/55/c271b57db36f748f0e04a759ace9f8f759ccf22b4960c270c78a394f58be/MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1", size = 15101 }, + { url = "https://files.pythonhosted.org/packages/29/88/07df22d2dd4df40aba9f3e402e6dc1b8ee86297dddbad4872bd5e7b0094f/MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f", size = 15603 }, + { url = "https://files.pythonhosted.org/packages/62/6a/8b89d24db2d32d433dffcd6a8779159da109842434f1dd2f6e71f32f738c/MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c", size = 14510 }, + { url = "https://files.pythonhosted.org/packages/7a/06/a10f955f70a2e5a9bf78d11a161029d278eeacbd35ef806c3fd17b13060d/MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb", size = 12486 }, + { url = "https://files.pythonhosted.org/packages/34/cf/65d4a571869a1a9078198ca28f39fba5fbb910f952f9dbc5220afff9f5e6/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c", size = 25480 }, + { url = "https://files.pythonhosted.org/packages/0c/e3/90e9651924c430b885468b56b3d597cabf6d72be4b24a0acd1fa0e12af67/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d", size = 23914 }, + { url = "https://files.pythonhosted.org/packages/66/8c/6c7cf61f95d63bb866db39085150df1f2a5bd3335298f14a66b48e92659c/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe", size = 23796 }, + { url = "https://files.pythonhosted.org/packages/bb/35/cbe9238ec3f47ac9a7c8b3df7a808e7cb50fe149dc7039f5f454b3fba218/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5", size = 25473 }, + { url = "https://files.pythonhosted.org/packages/e6/32/7621a4382488aa283cc05e8984a9c219abad3bca087be9ec77e89939ded9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a", size = 24114 }, + { url = "https://files.pythonhosted.org/packages/0d/80/0985960e4b89922cb5a0bac0ed39c5b96cbc1a536a99f30e8c220a996ed9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9", size = 24098 }, + { url = "https://files.pythonhosted.org/packages/82/78/fedb03c7d5380df2427038ec8d973587e90561b2d90cd472ce9254cf348b/MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6", size = 15208 }, + { url = 
"https://files.pythonhosted.org/packages/4f/65/6079a46068dfceaeabb5dcad6d674f5f5c61a6fa5673746f42a9f4c233b3/MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", size = 15739 }, +] + +[[package]] +name = "matplotlib" +version = "3.9.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "contourpy" }, + { name = "cycler" }, + { name = "fonttools" }, + { name = "kiwisolver" }, + { name = "numpy" }, + { name = "packaging" }, + { name = "pillow" }, + { name = "pyparsing" }, + { name = "python-dateutil" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/75/9f/562ed484b11ac9f4bb4f9d2d7546954ec106a8c0f06cc755d6f63e519274/matplotlib-3.9.3.tar.gz", hash = "sha256:cd5dbbc8e25cad5f706845c4d100e2c8b34691b412b93717ce38d8ae803bcfa5", size = 36113438 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/60/04/949640040982822416c471d9ebe4e9e6c69ca9f9bb6ba82ed30808863c02/matplotlib-3.9.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:203d18df84f5288973b2d56de63d4678cc748250026ca9e1ad8f8a0fd8a75d83", size = 7883417 }, + { url = "https://files.pythonhosted.org/packages/9f/90/ebd37143cd3150b6c650ee1580024df3dd649d176e68d346f826b8d24e37/matplotlib-3.9.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b651b0d3642991259109dc0351fc33ad44c624801367bb8307be9bfc35e427ad", size = 7768720 }, + { url = "https://files.pythonhosted.org/packages/dc/84/6591e6b55d755d16dacdc113205067031867c1f5e3c08b32c01aad831420/matplotlib-3.9.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:66d7b171fecf96940ce069923a08ba3df33ef542de82c2ff4fe8caa8346fa95a", size = 8192723 }, + { url = "https://files.pythonhosted.org/packages/29/09/146a17d37e32313507f11ac984e65311f2d5805d731eb981d4f70eb928dc/matplotlib-3.9.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:6be0ba61f6ff2e6b68e4270fb63b6813c9e7dec3d15fc3a93f47480444fd72f0", size = 8305801 }, + { url = "https://files.pythonhosted.org/packages/85/cb/d2690572c08f19ca7c0f44b1fb4d11c121d63467a57b508cc3656ff80b43/matplotlib-3.9.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9d6b2e8856dec3a6db1ae51aec85c82223e834b228c1d3228aede87eee2b34f9", size = 9086564 }, + { url = "https://files.pythonhosted.org/packages/28/dd/0a5176027c1cb94fe75f69f76cb274180c8abf740df6fc0e6a1e4cbaec3f/matplotlib-3.9.3-cp313-cp313-win_amd64.whl", hash = "sha256:90a85a004fefed9e583597478420bf904bb1a065b0b0ee5b9d8d31b04b0f3f70", size = 7833257 }, + { url = "https://files.pythonhosted.org/packages/42/d4/e477d50a8e4b437c2afbb5c665cb8e5d79b06abe6fe3c6915d6f7f0c2ef2/matplotlib-3.9.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3119b2f16de7f7b9212ba76d8fe6a0e9f90b27a1e04683cd89833a991682f639", size = 7911906 }, + { url = "https://files.pythonhosted.org/packages/ae/a1/ba5ab89666c42ace8e31b4ff5a2c76a17e4d6f91aefce476b064c56ff61d/matplotlib-3.9.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:87ad73763d93add1b6c1f9fcd33af662fd62ed70e620c52fcb79f3ac427cf3a6", size = 7801336 }, + { url = "https://files.pythonhosted.org/packages/77/59/4dcdb3a6695af6c698a95aec13016a550ef2f85144d22f61f81d1e064148/matplotlib-3.9.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:026bdf3137ab6022c866efa4813b6bbeddc2ed4c9e7e02f0e323a7bca380dfa0", size = 8218178 }, + { url = "https://files.pythonhosted.org/packages/4f/27/7c72db0d0ee35d9237572565ffa3c0eb25fc46a3f47e0f16412a587bc9d8/matplotlib-3.9.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:760a5e89ebbb172989e8273024a1024b0f084510b9105261b3b00c15e9c9f006", size = 8327768 }, + { url = 
"https://files.pythonhosted.org/packages/de/ad/213eee624feadba7b77e881c9d2c04c1e036efe69d19031e3fa927fdb5dc/matplotlib-3.9.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:a42b9dc42de2cfe357efa27d9c50c7833fc5ab9b2eb7252ccd5d5f836a84e1e4", size = 9094075 }, + { url = "https://files.pythonhosted.org/packages/19/1b/cb8e99a5fe2e2b14e3b8234cb1649a675be63f74a5224a648ae4ab61f60c/matplotlib-3.9.3-cp313-cp313t-win_amd64.whl", hash = "sha256:e0fcb7da73fbf67b5f4bdaa57d85bb585a4e913d4a10f3e15b32baea56a67f0a", size = 7888937 }, +] + +[[package]] +name = "mdit-py-plugins" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown-it-py" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/19/03/a2ecab526543b152300717cf232bb4bb8605b6edb946c845016fa9c9c9fd/mdit_py_plugins-0.4.2.tar.gz", hash = "sha256:5f2cd1fdb606ddf152d37ec30e46101a60512bc0e5fa1a7002c36647b09e26b5", size = 43542 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a7/f7/7782a043553ee469c1ff49cfa1cdace2d6bf99a1f333cf38676b3ddf30da/mdit_py_plugins-0.4.2-py3-none-any.whl", hash = "sha256:0c673c3f889399a33b95e88d2f0d111b4447bdfea7f237dab2d488f459835636", size = 55316 }, +] + +[[package]] +name = "mdurl" +version = "0.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979 }, +] + +[[package]] +name = "ml-dtypes" +version = "0.5.0" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "numpy", marker = "python_full_version >= '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ab/79/717c5e22ad25d63ce3acdfe8ff8d64bdedec18914256c59b838218708b16/ml_dtypes-0.5.0.tar.gz", hash = "sha256:3e7d3a380fe73a63c884f06136f8baa7a5249cc8e9fdec677997dd78549f8128", size = 699367 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b3/4a/18f670a2703e771a6775fbc354208e597ff062a88efb0cecc220a282210b/ml_dtypes-0.5.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:d3b3db9990c3840986a0e70524e122cfa32b91139c3653df76121ba7776e015f", size = 753345 }, + { url = "https://files.pythonhosted.org/packages/ed/c6/358d85e274e22d53def0c85f3cbe0933475fa3cf6922e9dca66eb25cb22f/ml_dtypes-0.5.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e04fde367b2fe901b1d47234426fe8819909bd1dd862a5adb630f27789c20599", size = 4424962 }, + { url = "https://files.pythonhosted.org/packages/4c/b4/d766586e24e7a073333c8eb8bd9275f3c6fe0569b509ae7b1699d4f00c74/ml_dtypes-0.5.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:54415257f00eb44fbcc807454efac3356f75644f1cbfc2d4e5522a72ae1dacab", size = 4475201 }, + { url = "https://files.pythonhosted.org/packages/14/87/30323ad2e52f56262019a4493fe5f5e71067c5561ce7e2f9c75de520f5e8/ml_dtypes-0.5.0-cp313-cp313-win_amd64.whl", hash = "sha256:cb5cc7b25acabd384f75bbd78892d0c724943f3e2e1986254665a1aa10982e07", size = 213195 }, +] + +[[package]] +name = "mpmath" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e0/47/dd32fa426cc72114383ac549964eecb20ecfd886d1e5ccf5340b55b02f57/mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f", size = 508106 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/43/e3/7d92a15f894aa0c9c4b49b8ee9ac9850d6e63b03c9c32c0367a13ae62209/mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c", size = 536198 }, +] + +[[package]] +name = "myst-parser" +version = "4.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "docutils" }, + { name = "jinja2" }, + { name = "markdown-it-py" }, + { name = "mdit-py-plugins" }, + { name = "pyyaml" }, + { name = "sphinx" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/85/55/6d1741a1780e5e65038b74bce6689da15f620261c490c3511eb4c12bac4b/myst_parser-4.0.0.tar.gz", hash = "sha256:851c9dfb44e36e56d15d05e72f02b80da21a9e0d07cba96baf5e2d476bb91531", size = 93858 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ca/b4/b036f8fdb667587bb37df29dc6644681dd78b7a2a6321a34684b79412b28/myst_parser-4.0.0-py3-none-any.whl", hash = "sha256:b9317997552424448c6096c2558872fdb6f81d3ecb3a40ce84a7518798f3f28d", size = 84563 }, +] + +[[package]] +name = "namex" +version = "0.0.8" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9d/48/d275cdb6216c6bb4f9351675795a0b48974e138f16b1ffe0252c1f8faa28/namex-0.0.8.tar.gz", hash = "sha256:32a50f6c565c0bb10aa76298c959507abdc0e850efe085dc38f3440fcb3aa90b", size = 6623 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/73/59/7854fbfb59f8ae35483ce93493708be5942ebb6328cd85b3a609df629736/namex-0.0.8-py3-none-any.whl", hash = "sha256:7ddb6c2bb0e753a311b7590f84f6da659dd0c05e65cb89d519d54c0a250c0487", size = 5806 }, +] + +[[package]] +name = "natsort" +version = "8.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e2/a9/a0c57aee75f77794adaf35322f8b6404cbd0f89ad45c87197a937764b7d0/natsort-8.4.0.tar.gz", hash = 
"sha256:45312c4a0e5507593da193dedd04abb1469253b601ecaf63445ad80f0a1ea581", size = 76575 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/82/7a9d0550484a62c6da82858ee9419f3dd1ccc9aa1c26a1e43da3ecd20b0d/natsort-8.4.0-py3-none-any.whl", hash = "sha256:4732914fb471f56b5cce04d7bae6f164a592c7712e1c85f9ef585e197299521c", size = 38268 }, +] + +[[package]] +name = "numpy" +version = "2.1.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/25/ca/1166b75c21abd1da445b97bf1fa2f14f423c6cfb4fc7c4ef31dccf9f6a94/numpy-2.1.3.tar.gz", hash = "sha256:aa08e04e08aaf974d4458def539dece0d28146d866a39da5639596f4921fd761", size = 20166090 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4d/0b/620591441457e25f3404c8057eb924d04f161244cb8a3680d529419aa86e/numpy-2.1.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:96fe52fcdb9345b7cd82ecd34547fca4321f7656d500eca497eb7ea5a926692f", size = 20836263 }, + { url = "https://files.pythonhosted.org/packages/45/e1/210b2d8b31ce9119145433e6ea78046e30771de3fe353f313b2778142f34/numpy-2.1.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f653490b33e9c3a4c1c01d41bc2aef08f9475af51146e4a7710c450cf9761598", size = 13507771 }, + { url = "https://files.pythonhosted.org/packages/55/44/aa9ee3caee02fa5a45f2c3b95cafe59c44e4b278fbbf895a93e88b308555/numpy-2.1.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:dc258a761a16daa791081d026f0ed4399b582712e6fc887a95af09df10c5ca57", size = 5075805 }, + { url = "https://files.pythonhosted.org/packages/78/d6/61de6e7e31915ba4d87bbe1ae859e83e6582ea14c6add07c8f7eefd8488f/numpy-2.1.3-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:016d0f6f5e77b0f0d45d77387ffa4bb89816b57c835580c3ce8e099ef830befe", size = 6608380 }, + { url = 
"https://files.pythonhosted.org/packages/3e/46/48bdf9b7241e317e6cf94276fe11ba673c06d1fdf115d8b4ebf616affd1a/numpy-2.1.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c181ba05ce8299c7aa3125c27b9c2167bca4a4445b7ce73d5febc411ca692e43", size = 13602451 }, + { url = "https://files.pythonhosted.org/packages/70/50/73f9a5aa0810cdccda9c1d20be3cbe4a4d6ea6bfd6931464a44c95eef731/numpy-2.1.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5641516794ca9e5f8a4d17bb45446998c6554704d888f86df9b200e66bdcce56", size = 16039822 }, + { url = "https://files.pythonhosted.org/packages/ad/cd/098bc1d5a5bc5307cfc65ee9369d0ca658ed88fbd7307b0d49fab6ca5fa5/numpy-2.1.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ea4dedd6e394a9c180b33c2c872b92f7ce0f8e7ad93e9585312b0c5a04777a4a", size = 16411822 }, + { url = "https://files.pythonhosted.org/packages/83/a2/7d4467a2a6d984549053b37945620209e702cf96a8bc658bc04bba13c9e2/numpy-2.1.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b0df3635b9c8ef48bd3be5f862cf71b0a4716fa0e702155c45067c6b711ddcef", size = 14079598 }, + { url = "https://files.pythonhosted.org/packages/e9/6a/d64514dcecb2ee70bfdfad10c42b76cab657e7ee31944ff7a600f141d9e9/numpy-2.1.3-cp313-cp313-win32.whl", hash = "sha256:50ca6aba6e163363f132b5c101ba078b8cbd3fa92c7865fd7d4d62d9779ac29f", size = 6236021 }, + { url = "https://files.pythonhosted.org/packages/bb/f9/12297ed8d8301a401e7d8eb6b418d32547f1d700ed3c038d325a605421a4/numpy-2.1.3-cp313-cp313-win_amd64.whl", hash = "sha256:747641635d3d44bcb380d950679462fae44f54b131be347d5ec2bce47d3df9ed", size = 12560405 }, + { url = "https://files.pythonhosted.org/packages/a7/45/7f9244cd792e163b334e3a7f02dff1239d2890b6f37ebf9e82cbe17debc0/numpy-2.1.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:996bb9399059c5b82f76b53ff8bb686069c05acc94656bb259b1d63d04a9506f", size = 20859062 }, + { url = 
"https://files.pythonhosted.org/packages/b1/b4/a084218e7e92b506d634105b13e27a3a6645312b93e1c699cc9025adb0e1/numpy-2.1.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:45966d859916ad02b779706bb43b954281db43e185015df6eb3323120188f9e4", size = 13515839 }, + { url = "https://files.pythonhosted.org/packages/27/45/58ed3f88028dcf80e6ea580311dc3edefdd94248f5770deb980500ef85dd/numpy-2.1.3-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:baed7e8d7481bfe0874b566850cb0b85243e982388b7b23348c6db2ee2b2ae8e", size = 5116031 }, + { url = "https://files.pythonhosted.org/packages/37/a8/eb689432eb977d83229094b58b0f53249d2209742f7de529c49d61a124a0/numpy-2.1.3-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:a9f7f672a3388133335589cfca93ed468509cb7b93ba3105fce780d04a6576a0", size = 6629977 }, + { url = "https://files.pythonhosted.org/packages/42/a3/5355ad51ac73c23334c7caaed01adadfda49544f646fcbfbb4331deb267b/numpy-2.1.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7aac50327da5d208db2eec22eb11e491e3fe13d22653dce51b0f4109101b408", size = 13575951 }, + { url = "https://files.pythonhosted.org/packages/c4/70/ea9646d203104e647988cb7d7279f135257a6b7e3354ea6c56f8bafdb095/numpy-2.1.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4394bc0dbd074b7f9b52024832d16e019decebf86caf909d94f6b3f77a8ee3b6", size = 16022655 }, + { url = "https://files.pythonhosted.org/packages/14/ce/7fc0612903e91ff9d0b3f2eda4e18ef9904814afcae5b0f08edb7f637883/numpy-2.1.3-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:50d18c4358a0a8a53f12a8ba9d772ab2d460321e6a93d6064fc22443d189853f", size = 16399902 }, + { url = "https://files.pythonhosted.org/packages/ef/62/1d3204313357591c913c32132a28f09a26357e33ea3c4e2fe81269e0dca1/numpy-2.1.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:14e253bd43fc6b37af4921b10f6add6925878a42a0c5fe83daee390bca80bc17", size = 14067180 }, + { url = 
"https://files.pythonhosted.org/packages/24/d7/78a40ed1d80e23a774cb8a34ae8a9493ba1b4271dde96e56ccdbab1620ef/numpy-2.1.3-cp313-cp313t-win32.whl", hash = "sha256:08788d27a5fd867a663f6fc753fd7c3ad7e92747efc73c53bca2f19f8bc06f48", size = 6291907 }, + { url = "https://files.pythonhosted.org/packages/86/09/a5ab407bd7f5f5599e6a9261f964ace03a73e7c6928de906981c31c38082/numpy-2.1.3-cp313-cp313t-win_amd64.whl", hash = "sha256:2564fbdf2b99b3f815f2107c1bbc93e2de8ee655a69c261363a1172a79a257d4", size = 12644098 }, +] + +[[package]] +name = "nvidia-nccl-cu12" +version = "2.23.4" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/3a/0112397396dec37ffc8edd7836d48261b4d14ca60ec8ed7bc857cce1d916/nvidia_nccl_cu12-2.23.4-py3-none-manylinux2014_aarch64.whl", hash = "sha256:aa946c8327e22ced28e7cef508a334673abc42064ec85f02d005ba1785ea4cec", size = 198953892 }, + { url = "https://files.pythonhosted.org/packages/ed/1f/6482380ec8dcec4894e7503490fc536d846b0d59694acad9cf99f27d0e7d/nvidia_nccl_cu12-2.23.4-py3-none-manylinux2014_x86_64.whl", hash = "sha256:b097258d9aab2fa9f686e33c6fe40ae57b27df60cedbd15d139701bb5509e0c1", size = 198954603 }, +] + +[[package]] +name = "oauthlib" +version = "3.2.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6d/fa/fbf4001037904031639e6bfbfc02badfc7e12f137a8afa254df6c4c8a670/oauthlib-3.2.2.tar.gz", hash = "sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918", size = 177352 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/80/cab10959dc1faead58dc8384a781dfbf93cb4d33d50988f7a69f1b7c9bbe/oauthlib-3.2.2-py3-none-any.whl", hash = "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca", size = 151688 }, +] + +[[package]] +name = "opencv-python" +version = "4.10.0.84" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "numpy", marker = "python_full_version >= '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4a/e7/b70a2d9ab205110d715906fc8ec83fbb00404aeb3a37a0654fdb68eb0c8c/opencv-python-4.10.0.84.tar.gz", hash = "sha256:72d234e4582e9658ffea8e9cae5b63d488ad06994ef12d81dc303b17472f3526", size = 95103981 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/66/82/564168a349148298aca281e342551404ef5521f33fba17b388ead0a84dc5/opencv_python-4.10.0.84-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:fc182f8f4cda51b45f01c64e4cbedfc2f00aff799debebc305d8d0210c43f251", size = 54835524 }, + { url = "https://files.pythonhosted.org/packages/64/4a/016cda9ad7cf18c58ba074628a4eaae8aa55f3fd06a266398cef8831a5b9/opencv_python-4.10.0.84-cp37-abi3-macosx_12_0_x86_64.whl", hash = "sha256:71e575744f1d23f79741450254660442785f45a0797212852ee5199ef12eed98", size = 56475426 }, + { url = "https://files.pythonhosted.org/packages/81/e4/7a987ebecfe5ceaf32db413b67ff18eb3092c598408862fff4d7cc3fd19b/opencv_python-4.10.0.84-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09a332b50488e2dda866a6c5573ee192fe3583239fb26ff2f7f9ceb0bc119ea6", size = 41746971 }, + { url = "https://files.pythonhosted.org/packages/3f/a4/d2537f47fd7fcfba966bd806e3ec18e7ee1681056d4b0a9c8d983983e4d5/opencv_python-4.10.0.84-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ace140fc6d647fbe1c692bcb2abce768973491222c067c131d80957c595b71f", size = 62548253 }, + { url = "https://files.pythonhosted.org/packages/1e/39/bbf57e7b9dab623e8773f6ff36385456b7ae7fa9357a5e53db732c347eac/opencv_python-4.10.0.84-cp37-abi3-win32.whl", hash = "sha256:2db02bb7e50b703f0a2d50c50ced72e95c574e1e5a0bb35a8a86d0b35c98c236", size = 28737688 }, + { url = 
"https://files.pythonhosted.org/packages/ec/6c/fab8113424af5049f85717e8e527ca3773299a3c6b02506e66436e19874f/opencv_python-4.10.0.84-cp37-abi3-win_amd64.whl", hash = "sha256:32dbbd94c26f611dc5cc6979e6b7aa1f55a64d6b463cc1dcd3c95505a63e48fe", size = 38842521 }, +] + +[[package]] +name = "optree" +version = "0.13.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f7/f2/56afdaeaae36b076659be7db8e72be0924dd64ebd1c131675c77f7e704a6/optree-0.13.1.tar.gz", hash = "sha256:af67856aa8073d237fe67313d84f8aeafac32c1cef7239c628a2768d02679c43", size = 155738 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3f/53/f3727cad24f16a06666f328f1212476988cadac9b9e7919ddfb2c22eb662/optree-0.13.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:f788b2ad120deb73b4908a74473cd6de79cfb9f33bbe9dcb59cea2e2477d4e28", size = 608270 }, + { url = "https://files.pythonhosted.org/packages/64/f2/68beb9da2dd52baa50e7a589ed2bd8434fdd70cdba06754aa5910263da06/optree-0.13.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2909cb42add6bb1a5a2b0243bdd8c4b861bf072f3741e26239481907ac8ad4e6", size = 325703 }, + { url = "https://files.pythonhosted.org/packages/45/db/08921e56f3425bf649eb593eb28775263c935d029985d35572dc5690cc1a/optree-0.13.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbc5fa2ff5090389f3a906567446f01d692bd6fe5cfcc5ae2d5861f24e8e0e4d", size = 355813 }, + { url = "https://files.pythonhosted.org/packages/e5/e3/587e0d28dc2cee064902adfebca97db124e12b275dbe9c2b05a70a22345f/optree-0.13.1-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4711f5cac5a2a49c3d6c9f0eca7b77c22b452170bb33ea01c3214ebb17931db9", size = 402566 }, + { url = 
"https://files.pythonhosted.org/packages/8a/1d/0d5bbab8c99580b732b89ef2c5fcdd6ef410478295949fdf2984fa1bfc28/optree-0.13.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6c4ab1d391b89cb88eb3c63383d5eb0930bc21141de9d5acd277feed9e38eb65", size = 397005 }, + { url = "https://files.pythonhosted.org/packages/16/fa/fc2a8183e14f0d195d25824bf65095ff32b34bd469614a6c30d0a596a30f/optree-0.13.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b5e5f09c85ae558a6bdaea57e63168082e728e777391393e9e2792f0d15b7b59", size = 369400 }, + { url = "https://files.pythonhosted.org/packages/9f/42/8c08ce4ebb3d9a6e4415f1a97830c84879e2d1a43710a7c8a18b2c3e169d/optree-0.13.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c8ee1e988c634a451146b87d9ebdbf650a75dc1f52a9cffcd89fabb7289321c", size = 390179 }, + { url = "https://files.pythonhosted.org/packages/06/02/3a701d6307fdfefe4fcecbac644803e2a4314ab2406ff465e03129cc85f6/optree-0.13.1-cp313-cp313-win32.whl", hash = "sha256:5b6531cd4eb23fadbbf77faf834e1119da06d7af3154f55786b59953cd87bb8a", size = 264264 }, + { url = "https://files.pythonhosted.org/packages/ef/f9/8a1421181c5eb0c0f81d1423a900baeb3faba68a48747bbdffb7581239ac/optree-0.13.1-cp313-cp313-win_amd64.whl", hash = "sha256:27d81dc43b522ba47ba7d2e7d91dbb486940348b1bf85caeb0afc2815c0aa492", size = 293682 }, + { url = "https://files.pythonhosted.org/packages/80/34/d1b1849a6240385c4a3af5da9425b11912204d0b1cf142d802815319b73a/optree-0.13.1-cp313-cp313-win_arm64.whl", hash = "sha256:f39c7174a3f3cdc3f5fe6fb4b832f608c40ac174d7567ed6734b2ee952094631", size = 293670 }, + { url = "https://files.pythonhosted.org/packages/0d/d6/f81e6748bcc3f35a2f570a814014e3418b0ed425d7cbc2b42d88d12863d5/optree-0.13.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:3010ae24e994f6e00071098d34e98e78eb995b7454a2ef629a0bf7df17441b24", size = 702861 }, + { url = 
"https://files.pythonhosted.org/packages/08/7f/70a2d02110ccb245bc57bd9ad57668acfea0ff364c27d7dfe1735ede79ed/optree-0.13.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5b5626c38d4a18a144063db5c1dbb558431d83ca10682324f74665a12214801f", size = 370740 }, + { url = "https://files.pythonhosted.org/packages/63/37/4ddf05267467809236203e2007e9443519c4d55e0744ce7eea1aa74dffee/optree-0.13.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1935639dd498a42367633e3877797e1330e39d44d48bbca1a136bb4dbe4c1bc9", size = 374695 }, + { url = "https://files.pythonhosted.org/packages/19/f2/51a63a799f6dce31813d7e02a7547394aebcb39f407e62038ecbd999d490/optree-0.13.1-cp313-cp313t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01819c3df950696f32c91faf8d376ae6b695ffdba18f330f1cab6b8e314e4612", size = 418671 }, + { url = "https://files.pythonhosted.org/packages/f0/7c/a08191e0c9202f2be9c415057eea3cf3a5af18e9a6d81f4c7b0e6faf0a1f/optree-0.13.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:48c29d9c6c64c8dc48c8ee97f7c1d5cdb83e37320f0be0857c06ce4b97994aea", size = 414966 }, + { url = "https://files.pythonhosted.org/packages/8f/37/7bf815f4da7234e387863228b17246b42b8c02553882581a4013a64a88d0/optree-0.13.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:025d23400b8b579462a251420f0a9ae77d3d3593f84276f3465985731d79d722", size = 389219 }, + { url = "https://files.pythonhosted.org/packages/3d/84/bb521a66d3a84fe2f1500ef67d245c2cc1a26277fcaaf4bc70b22c06e99b/optree-0.13.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:55e82426bef151149cfa41d68ac957730fcd420996c0db8324fca81aa6a810ba", size = 405377 }, + { url = "https://files.pythonhosted.org/packages/06/99/3eb53829c4c0b6dc20115d957d2d8e945630ddf40c656dc4e39c5a6e51f2/optree-0.13.1-cp313-cp313t-win32.whl", hash = 
"sha256:e40f018f522fcfd244688d1b3a360518e636ba7f636385aae0566eae3e7d29bc", size = 292734 }, + { url = "https://files.pythonhosted.org/packages/2f/59/d7601959ad0b90d309794c0975a256304488b4c5671f24e3e12101ade7ef/optree-0.13.1-cp313-cp313t-win_amd64.whl", hash = "sha256:d580f1bf23bb352c4db6b3544f282f1ac08dcb0d9ab537d25e56220353438cf7", size = 331457 }, + { url = "https://files.pythonhosted.org/packages/8b/36/c01a5bc34660d46c6a3b1fe090bbdc8c76af7b5c1a6613cc671aa6df8349/optree-0.13.1-cp313-cp313t-win_arm64.whl", hash = "sha256:c4d13f55dbd509d27be3af54d53b4ca0751bc518244ced6d0567e518e51452a2", size = 331470 }, +] + +[[package]] +name = "packaging" +version = "24.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d0/63/68dbb6eb2de9cb10ee4c9c14a0148804425e13c4fb20d61cce69f53106da/packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f", size = 163950 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/ef/eb23f262cca3c0c4eb7ab1933c3b1f03d021f2c48f54763065b6f0e321be/packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759", size = 65451 }, +] + +[[package]] +name = "pandas" +version = "2.2.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy", marker = "python_full_version >= '3.13'" }, + { name = "python-dateutil" }, + { name = "pytz" }, + { name = "tzdata" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9c/d6/9f8431bacc2e19dca897724cd097b1bb224a6ad5433784a44b587c7c13af/pandas-2.2.3.tar.gz", hash = "sha256:4f18ba62b61d7e192368b84517265a99b4d7ee8912f8708660fb4a366cc82667", size = 4399213 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/64/22/3b8f4e0ed70644e85cfdcd57454686b9057c6c38d2f74fe4b8bc2527214a/pandas-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = 
"sha256:f00d1345d84d8c86a63e476bb4955e46458b304b9575dcf71102b5c705320015", size = 12477643 }, + { url = "https://files.pythonhosted.org/packages/e4/93/b3f5d1838500e22c8d793625da672f3eec046b1a99257666c94446969282/pandas-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3508d914817e153ad359d7e069d752cdd736a247c322d932eb89e6bc84217f28", size = 11281573 }, + { url = "https://files.pythonhosted.org/packages/f5/94/6c79b07f0e5aab1dcfa35a75f4817f5c4f677931d4234afcd75f0e6a66ca/pandas-2.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:22a9d949bfc9a502d320aa04e5d02feab689d61da4e7764b62c30b991c42c5f0", size = 15196085 }, + { url = "https://files.pythonhosted.org/packages/e8/31/aa8da88ca0eadbabd0a639788a6da13bb2ff6edbbb9f29aa786450a30a91/pandas-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3a255b2c19987fbbe62a9dfd6cff7ff2aa9ccab3fc75218fd4b7530f01efa24", size = 12711809 }, + { url = "https://files.pythonhosted.org/packages/ee/7c/c6dbdb0cb2a4344cacfb8de1c5808ca885b2e4dcfde8008266608f9372af/pandas-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:800250ecdadb6d9c78eae4990da62743b857b470883fa27f652db8bdde7f6659", size = 16356316 }, + { url = "https://files.pythonhosted.org/packages/57/b7/8b757e7d92023b832869fa8881a992696a0bfe2e26f72c9ae9f255988d42/pandas-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6374c452ff3ec675a8f46fd9ab25c4ad0ba590b71cf0656f8b6daa5202bca3fb", size = 14022055 }, + { url = "https://files.pythonhosted.org/packages/3b/bc/4b18e2b8c002572c5a441a64826252ce5da2aa738855747247a971988043/pandas-2.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:61c5ad4043f791b61dd4752191d9f07f0ae412515d59ba8f005832a532f8736d", size = 11481175 }, + { url = "https://files.pythonhosted.org/packages/76/a3/a5d88146815e972d40d19247b2c162e88213ef51c7c25993942c39dbf41d/pandas-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = 
"sha256:3b71f27954685ee685317063bf13c7709a7ba74fc996b84fc6821c59b0f06468", size = 12615650 }, + { url = "https://files.pythonhosted.org/packages/9c/8c/f0fd18f6140ddafc0c24122c8a964e48294acc579d47def376fef12bcb4a/pandas-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:38cf8125c40dae9d5acc10fa66af8ea6fdf760b2714ee482ca691fc66e6fcb18", size = 11290177 }, + { url = "https://files.pythonhosted.org/packages/ed/f9/e995754eab9c0f14c6777401f7eece0943840b7a9fc932221c19d1abee9f/pandas-2.2.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ba96630bc17c875161df3818780af30e43be9b166ce51c9a18c1feae342906c2", size = 14651526 }, + { url = "https://files.pythonhosted.org/packages/25/b0/98d6ae2e1abac4f35230aa756005e8654649d305df9a28b16b9ae4353bff/pandas-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1db71525a1538b30142094edb9adc10be3f3e176748cd7acc2240c2f2e5aa3a4", size = 11871013 }, + { url = "https://files.pythonhosted.org/packages/cc/57/0f72a10f9db6a4628744c8e8f0df4e6e21de01212c7c981d31e50ffc8328/pandas-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:15c0e1e02e93116177d29ff83e8b1619c93ddc9c49083f237d4312337a61165d", size = 15711620 }, + { url = "https://files.pythonhosted.org/packages/ab/5f/b38085618b950b79d2d9164a711c52b10aefc0ae6833b96f626b7021b2ed/pandas-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a", size = 13098436 }, +] + +[[package]] +name = "patsy" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d1/81/74f6a65b848ffd16c18f920620ce999fe45fe27f01ab3911260ce4ed85e4/patsy-1.0.1.tar.gz", hash = "sha256:e786a9391eec818c054e359b737bbce692f051aee4c661f4141cc88fb459c0c4", size = 396010 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/87/2b/b50d3d08ea0fc419c183a84210571eba005328efa62b6b98bc28e9ead32a/patsy-1.0.1-py2.py3-none-any.whl", hash = "sha256:751fb38f9e97e62312e921a1954b81e1bb2bcda4f5eeabaf94db251ee791509c", size = 232923 }, +] + +[[package]] +name = "pillow" +version = "11.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a5/26/0d95c04c868f6bdb0c447e3ee2de5564411845e36a858cfd63766bc7b563/pillow-11.0.0.tar.gz", hash = "sha256:72bacbaf24ac003fea9bff9837d1eedb6088758d41e100c1552930151f677739", size = 46737780 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/63/24/e2e15e392d00fcf4215907465d8ec2a2f23bcec1481a8ebe4ae760459995/pillow-11.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:bcd1fb5bb7b07f64c15618c89efcc2cfa3e95f0e3bcdbaf4642509de1942a699", size = 3147300 }, + { url = "https://files.pythonhosted.org/packages/43/72/92ad4afaa2afc233dc44184adff289c2e77e8cd916b3ddb72ac69495bda3/pillow-11.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0e038b0745997c7dcaae350d35859c9715c71e92ffb7e0f4a8e8a16732150f38", size = 2978742 }, + { url = "https://files.pythonhosted.org/packages/9e/da/c8d69c5bc85d72a8523fe862f05ababdc52c0a755cfe3d362656bb86552b/pillow-11.0.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ae08bd8ffc41aebf578c2af2f9d8749d91f448b3bfd41d7d9ff573d74f2a6b2", size = 4194349 }, + { url = "https://files.pythonhosted.org/packages/cd/e8/686d0caeed6b998351d57796496a70185376ed9c8ec7d99e1d19ad591fc6/pillow-11.0.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d69bfd8ec3219ae71bcde1f942b728903cad25fafe3100ba2258b973bd2bc1b2", size = 4298714 }, + { url = "https://files.pythonhosted.org/packages/ec/da/430015cec620d622f06854be67fd2f6721f52fc17fca8ac34b32e2d60739/pillow-11.0.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = 
"sha256:61b887f9ddba63ddf62fd02a3ba7add935d053b6dd7d58998c630e6dbade8527", size = 4208514 }, + { url = "https://files.pythonhosted.org/packages/44/ae/7e4f6662a9b1cb5f92b9cc9cab8321c381ffbee309210940e57432a4063a/pillow-11.0.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:c6a660307ca9d4867caa8d9ca2c2658ab685de83792d1876274991adec7b93fa", size = 4380055 }, + { url = "https://files.pythonhosted.org/packages/74/d5/1a807779ac8a0eeed57f2b92a3c32ea1b696e6140c15bd42eaf908a261cd/pillow-11.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:73e3a0200cdda995c7e43dd47436c1548f87a30bb27fb871f352a22ab8dcf45f", size = 4296751 }, + { url = "https://files.pythonhosted.org/packages/38/8c/5fa3385163ee7080bc13026d59656267daaaaf3c728c233d530e2c2757c8/pillow-11.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fba162b8872d30fea8c52b258a542c5dfd7b235fb5cb352240c8d63b414013eb", size = 4430378 }, + { url = "https://files.pythonhosted.org/packages/ca/1d/ad9c14811133977ff87035bf426875b93097fb50af747793f013979facdb/pillow-11.0.0-cp313-cp313-win32.whl", hash = "sha256:f1b82c27e89fffc6da125d5eb0ca6e68017faf5efc078128cfaa42cf5cb38798", size = 2249588 }, + { url = "https://files.pythonhosted.org/packages/fb/01/3755ba287dac715e6afdb333cb1f6d69740a7475220b4637b5ce3d78cec2/pillow-11.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:8ba470552b48e5835f1d23ecb936bb7f71d206f9dfeee64245f30c3270b994de", size = 2567509 }, + { url = "https://files.pythonhosted.org/packages/c0/98/2c7d727079b6be1aba82d195767d35fcc2d32204c7a5820f822df5330152/pillow-11.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:846e193e103b41e984ac921b335df59195356ce3f71dcfd155aa79c603873b84", size = 2254791 }, + { url = "https://files.pythonhosted.org/packages/eb/38/998b04cc6f474e78b563716b20eecf42a2fa16a84589d23c8898e64b0ffd/pillow-11.0.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4ad70c4214f67d7466bea6a08061eba35c01b1b89eaa098040a35272a8efb22b", 
size = 3150854 }, + { url = "https://files.pythonhosted.org/packages/13/8e/be23a96292113c6cb26b2aa3c8b3681ec62b44ed5c2bd0b258bd59503d3c/pillow-11.0.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:6ec0d5af64f2e3d64a165f490d96368bb5dea8b8f9ad04487f9ab60dc4bb6003", size = 2982369 }, + { url = "https://files.pythonhosted.org/packages/97/8a/3db4eaabb7a2ae8203cd3a332a005e4aba00067fc514aaaf3e9721be31f1/pillow-11.0.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c809a70e43c7977c4a42aefd62f0131823ebf7dd73556fa5d5950f5b354087e2", size = 4333703 }, + { url = "https://files.pythonhosted.org/packages/28/ac/629ffc84ff67b9228fe87a97272ab125bbd4dc462745f35f192d37b822f1/pillow-11.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:4b60c9520f7207aaf2e1d94de026682fc227806c6e1f55bba7606d1c94dd623a", size = 4412550 }, + { url = "https://files.pythonhosted.org/packages/d6/07/a505921d36bb2df6868806eaf56ef58699c16c388e378b0dcdb6e5b2fb36/pillow-11.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:1e2688958a840c822279fda0086fec1fdab2f95bf2b717b66871c4ad9859d7e8", size = 4461038 }, + { url = "https://files.pythonhosted.org/packages/d6/b9/fb620dd47fc7cc9678af8f8bd8c772034ca4977237049287e99dda360b66/pillow-11.0.0-cp313-cp313t-win32.whl", hash = "sha256:607bbe123c74e272e381a8d1957083a9463401f7bd01287f50521ecb05a313f8", size = 2253197 }, + { url = "https://files.pythonhosted.org/packages/df/86/25dde85c06c89d7fc5db17940f07aae0a56ac69aa9ccb5eb0f09798862a8/pillow-11.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:5c39ed17edea3bc69c743a8dd3e9853b7509625c2462532e62baa0732163a904", size = 2572169 }, + { url = "https://files.pythonhosted.org/packages/51/85/9c33f2517add612e17f3381aee7c4072779130c634921a756c97bc29fb49/pillow-11.0.0-cp313-cp313t-win_arm64.whl", hash = "sha256:75acbbeb05b86bc53cbe7b7e6fe00fbcf82ad7c684b3ad82e3d711da9ba287d3", size = 2256828 }, +] + +[[package]] +name = 
"pluggy" +version = "1.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/96/2d/02d4312c973c6050a18b314a5ad0b3210edb65a906f868e31c111dede4a6/pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", size = 67955 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", size = 20556 }, +] + +[[package]] +name = "pygments" +version = "2.18.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8e/62/8336eff65bcbc8e4cb5d05b55faf041285951b6e80f33e2bff2024788f31/pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199", size = 4891905 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f7/3f/01c8b82017c199075f8f788d0d906b9ffbbc5a47dc9918a945e13d5a2bda/pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a", size = 1205513 }, +] + +[[package]] +name = "pyparsing" +version = "3.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8c/d5/e5aeee5387091148a19e1145f63606619cb5f20b83fccb63efae6474e7b2/pyparsing-3.2.0.tar.gz", hash = "sha256:cbf74e27246d595d9a74b186b810f6fbb86726dbf3b9532efb343f6d7294fe9c", size = 920984 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/be/ec/2eb3cd785efd67806c46c13a17339708ddc346cbb684eade7a6e6f79536a/pyparsing-3.2.0-py3-none-any.whl", hash = "sha256:93d9577b88da0bbea8cc8334ee8b918ed014968fd2ec383e868fb8afb1ccef84", size = 106921 }, +] + +[[package]] +name = "pytest" +version = "8.3.4" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/05/35/30e0d83068951d90a01852cb1cef56e5d8a09d20c7f511634cc2f7e0372a/pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761", size = 1445919 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/92/76a1c94d3afee238333bc0a42b82935dd8f9cf8ce9e336ff87ee14d9e1cf/pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6", size = 343083 }, +] + +[[package]] +name = "pytest-cov" +version = "6.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "coverage" }, + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/be/45/9b538de8cef30e17c7b45ef42f538a94889ed6a16f2387a6c89e73220651/pytest-cov-6.0.0.tar.gz", hash = "sha256:fde0b595ca248bb8e2d76f020b465f3b107c9632e6a1d1705f17834c89dcadc0", size = 66945 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/36/3b/48e79f2cd6a61dbbd4807b4ed46cb564b4fd50a76166b1c4ea5c1d9e2371/pytest_cov-6.0.0-py3-none-any.whl", hash = "sha256:eee6f1b9e61008bd34975a4d5bab25801eb31898b032dd55addc93e96fcaaa35", size = 22949 }, +] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892 }, +] + +[[package]] +name = "pytz" +version = "2024.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3a/31/3c70bf7603cc2dca0f19bdc53b4537a797747a58875b552c8c413d963a3f/pytz-2024.2.tar.gz", hash = "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a", size = 319692 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/c3/005fcca25ce078d2cc29fd559379817424e94885510568bc1bc53d7d5846/pytz-2024.2-py2.py3-none-any.whl", hash = "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725", size = 508002 }, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309 }, + { url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679 }, + { url = 
"https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428 }, + { url = "https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361 }, + { url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523 }, + { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660 }, + { url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597 }, + { url = "https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527 }, + { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446 }, +] + +[[package]] 
+name = "requests" +version = "2.32.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/63/70/2bf7780ad2d390a8d301ad0b550f1581eadbd9a20f896afe06353c2a2913/requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", size = 131218 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6", size = 64928 }, +] + +[[package]] +name = "requests-oauthlib" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "oauthlib" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/95/52/531ef197b426646f26b53815a7d2a67cb7a331ef098bb276db26a68ac49f/requests-oauthlib-1.3.1.tar.gz", hash = "sha256:75beac4a47881eeb94d5ea5d6ad31ef88856affe2332b9aafb52c6452ccf0d7a", size = 52027 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6f/bb/5deac77a9af870143c684ab46a7934038a53eb4aa975bc0687ed6ca2c610/requests_oauthlib-1.3.1-py2.py3-none-any.whl", hash = "sha256:2577c501a2fb8d05a304c09d090d6e47c306fef15809d102b327cf8364bddab5", size = 23892 }, +] + +[[package]] +name = "rich" +version = "13.9.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown-it-py" }, + { name = "pygments" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ab/3a/0316b28d0761c6734d6bc14e770d85506c986c85ffb239e688eeaab2c2bc/rich-13.9.4.tar.gz", hash = "sha256:439594978a49a09530cff7ebc4b5c7103ef57baf48d5ea3184f21d9a2befa098", size = 223149 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/19/71/39c7c0d87f8d4e6c020a393182060eaefeeae6c01dab6a84ec346f2567df/rich-13.9.4-py3-none-any.whl", hash = "sha256:6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90", size = 242424 }, +] + +[[package]] +name = "ruff" +version = "0.8.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/95/d0/8ff5b189d125f4260f2255d143bf2fa413b69c2610c405ace7a0a8ec81ec/ruff-0.8.1.tar.gz", hash = "sha256:3583db9a6450364ed5ca3f3b4225958b24f78178908d5c4bc0f46251ccca898f", size = 3313222 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a2/d6/1a6314e568db88acdbb5121ed53e2c52cebf3720d3437a76f82f923bf171/ruff-0.8.1-py3-none-linux_armv6l.whl", hash = "sha256:fae0805bd514066f20309f6742f6ee7904a773eb9e6c17c45d6b1600ca65c9b5", size = 10532605 }, + { url = "https://files.pythonhosted.org/packages/89/a8/a957a8812e31facffb6a26a30be0b5b4af000a6e30c7d43a22a5232a3398/ruff-0.8.1-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:b8a4f7385c2285c30f34b200ca5511fcc865f17578383db154e098150ce0a087", size = 10278243 }, + { url = "https://files.pythonhosted.org/packages/a8/23/9db40fa19c453fabf94f7a35c61c58f20e8200b4734a20839515a19da790/ruff-0.8.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:cd054486da0c53e41e0086e1730eb77d1f698154f910e0cd9e0d64274979a209", size = 9917739 }, + { url = "https://files.pythonhosted.org/packages/e2/a0/6ee2d949835d5701d832fc5acd05c0bfdad5e89cfdd074a171411f5ccad5/ruff-0.8.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2029b8c22da147c50ae577e621a5bfbc5d1fed75d86af53643d7a7aee1d23871", size = 10779153 }, + { url = "https://files.pythonhosted.org/packages/7a/25/9c11dca9404ef1eb24833f780146236131a3c7941de394bc356912ef1041/ruff-0.8.1-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:2666520828dee7dfc7e47ee4ea0d928f40de72056d929a7c5292d95071d881d1", size = 10304387 }, + { url = "https://files.pythonhosted.org/packages/c8/b9/84c323780db1b06feae603a707d82dbbd85955c8c917738571c65d7d5aff/ruff-0.8.1-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:333c57013ef8c97a53892aa56042831c372e0bb1785ab7026187b7abd0135ad5", size = 11360351 }, + { url = "https://files.pythonhosted.org/packages/6b/e1/9d4bbb2ace7aad14ded20e4674a48cda5b902aed7a1b14e6b028067060c4/ruff-0.8.1-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:288326162804f34088ac007139488dcb43de590a5ccfec3166396530b58fb89d", size = 12022879 }, + { url = "https://files.pythonhosted.org/packages/75/28/752ff6120c0e7f9981bc4bc275d540c7f36db1379ba9db9142f69c88db21/ruff-0.8.1-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b12c39b9448632284561cbf4191aa1b005882acbc81900ffa9f9f471c8ff7e26", size = 11610354 }, + { url = "https://files.pythonhosted.org/packages/ba/8c/967b61c2cc8ebd1df877607fbe462bc1e1220b4a30ae3352648aec8c24bd/ruff-0.8.1-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:364e6674450cbac8e998f7b30639040c99d81dfb5bbc6dfad69bc7a8f916b3d1", size = 12813976 }, + { url = "https://files.pythonhosted.org/packages/7f/29/e059f945d6bd2d90213387b8c360187f2fefc989ddcee6bbf3c241329b92/ruff-0.8.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b22346f845fec132aa39cd29acb94451d030c10874408dbf776af3aaeb53284c", size = 11154564 }, + { url = "https://files.pythonhosted.org/packages/55/47/cbd05e5a62f3fb4c072bc65c1e8fd709924cad1c7ec60a1000d1e4ee8307/ruff-0.8.1-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:b2f2f7a7e7648a2bfe6ead4e0a16745db956da0e3a231ad443d2a66a105c04fa", size = 10760604 }, + { url = 
"https://files.pythonhosted.org/packages/bb/ee/4c3981c47147c72647a198a94202633130cfda0fc95cd863a553b6f65c6a/ruff-0.8.1-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:adf314fc458374c25c5c4a4a9270c3e8a6a807b1bec018cfa2813d6546215540", size = 10391071 }, + { url = "https://files.pythonhosted.org/packages/6b/e6/083eb61300214590b188616a8ac6ae1ef5730a0974240fb4bec9c17de78b/ruff-0.8.1-py3-none-musllinux_1_2_i686.whl", hash = "sha256:a885d68342a231b5ba4d30b8c6e1b1ee3a65cf37e3d29b3c74069cdf1ee1e3c9", size = 10896657 }, + { url = "https://files.pythonhosted.org/packages/77/bd/aacdb8285d10f1b943dbeb818968efca35459afc29f66ae3bd4596fbf954/ruff-0.8.1-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:d2c16e3508c8cc73e96aa5127d0df8913d2290098f776416a4b157657bee44c5", size = 11228362 }, + { url = "https://files.pythonhosted.org/packages/39/72/fcb7ad41947f38b4eaa702aca0a361af0e9c2bf671d7fd964480670c297e/ruff-0.8.1-py3-none-win32.whl", hash = "sha256:93335cd7c0eaedb44882d75a7acb7df4b77cd7cd0d2255c93b28791716e81790", size = 8803476 }, + { url = "https://files.pythonhosted.org/packages/e4/ea/cae9aeb0f4822c44651c8407baacdb2e5b4dcd7b31a84e1c5df33aa2cc20/ruff-0.8.1-py3-none-win_amd64.whl", hash = "sha256:2954cdbe8dfd8ab359d4a30cd971b589d335a44d444b6ca2cb3d1da21b75e4b6", size = 9614463 }, + { url = "https://files.pythonhosted.org/packages/eb/76/fbb4bd23dfb48fa7758d35b744413b650a9fd2ddd93bca77e30376864414/ruff-0.8.1-py3-none-win_arm64.whl", hash = "sha256:55873cc1a473e5ac129d15eccb3c008c096b94809d693fc7053f588b67822737", size = 8959621 }, +] + +[[package]] +name = "scikit-learn" +version = "1.5.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "joblib" }, + { name = "numpy" }, + { name = "scipy" }, + { name = "threadpoolctl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/37/59/44985a2bdc95c74e34fef3d10cb5d93ce13b0e2a7baefffe1b53853b502d/scikit_learn-1.5.2.tar.gz", 
hash = "sha256:b4237ed7b3fdd0a4882792e68ef2545d5baa50aca3bb45aa7df468138ad8f94d", size = 7001680 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a4/50/8891028437858cc510e13578fe7046574a60c2aaaa92b02d64aac5b1b412/scikit_learn-1.5.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9a702e2de732bbb20d3bad29ebd77fc05a6b427dc49964300340e4c9328b3f5", size = 12025584 }, + { url = "https://files.pythonhosted.org/packages/d2/79/17feef8a1c14149436083bec0e61d7befb4812e272d5b20f9d79ea3e9ab1/scikit_learn-1.5.2-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:b0768ad641981f5d3a198430a1d31c3e044ed2e8a6f22166b4d546a5116d7908", size = 10959795 }, + { url = "https://files.pythonhosted.org/packages/b1/c8/f08313f9e2e656bd0905930ae8bf99a573ea21c34666a813b749c338202f/scikit_learn-1.5.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:178ddd0a5cb0044464fc1bfc4cca5b1833bfc7bb022d70b05db8530da4bb3dd3", size = 12077302 }, + { url = "https://files.pythonhosted.org/packages/a7/48/fbfb4dc72bed0fe31fe045fb30e924909ad03f717c36694351612973b1a9/scikit_learn-1.5.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7284ade780084d94505632241bf78c44ab3b6f1e8ccab3d2af58e0e950f9c12", size = 13002811 }, + { url = "https://files.pythonhosted.org/packages/a5/e7/0c869f9e60d225a77af90d2aefa7a4a4c0e745b149325d1450f0f0ce5399/scikit_learn-1.5.2-cp313-cp313-win_amd64.whl", hash = "sha256:b7b0f9a0b1040830d38c39b91b3a44e1b643f4b36e36567b80b7c6bd2202a27f", size = 10951354 }, +] + +[[package]] +name = "scipy" +version = "1.14.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/62/11/4d44a1f274e002784e4dbdb81e0ea96d2de2d1045b2132d5af62cc31fd28/scipy-1.14.1.tar.gz", hash = "sha256:5a275584e726026a5699459aa72f828a610821006228e841b94275c4a7c08417", size = 58620554 } +wheels = [ + 
{ url = "https://files.pythonhosted.org/packages/50/ef/ac98346db016ff18a6ad7626a35808f37074d25796fd0234c2bb0ed1e054/scipy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1729560c906963fc8389f6aac023739ff3983e727b1a4d87696b7bf108316a79", size = 39091068 }, + { url = "https://files.pythonhosted.org/packages/b9/cc/70948fe9f393b911b4251e96b55bbdeaa8cca41f37c26fd1df0232933b9e/scipy-1.14.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:4079b90df244709e675cdc8b93bfd8a395d59af40b72e339c2287c91860deb8e", size = 29875417 }, + { url = "https://files.pythonhosted.org/packages/3b/2e/35f549b7d231c1c9f9639f9ef49b815d816bf54dd050da5da1c11517a218/scipy-1.14.1-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:e0cf28db0f24a38b2a0ca33a85a54852586e43cf6fd876365c86e0657cfe7d73", size = 23084508 }, + { url = "https://files.pythonhosted.org/packages/3f/d6/b028e3f3e59fae61fb8c0f450db732c43dd1d836223a589a8be9f6377203/scipy-1.14.1-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:0c2f95de3b04e26f5f3ad5bb05e74ba7f68b837133a4492414b3afd79dfe540e", size = 25503364 }, + { url = "https://files.pythonhosted.org/packages/a7/2f/6c142b352ac15967744d62b165537a965e95d557085db4beab2a11f7943b/scipy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b99722ea48b7ea25e8e015e8341ae74624f72e5f21fc2abd45f3a93266de4c5d", size = 35292639 }, + { url = "https://files.pythonhosted.org/packages/56/46/2449e6e51e0d7c3575f289f6acb7f828938eaab8874dbccfeb0cd2b71a27/scipy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5149e3fd2d686e42144a093b206aef01932a0059c2a33ddfa67f5f035bdfe13e", size = 40798288 }, + { url = "https://files.pythonhosted.org/packages/32/cd/9d86f7ed7f4497c9fd3e39f8918dd93d9f647ba80d7e34e4946c0c2d1a7c/scipy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e4f5a7c49323533f9103d4dacf4e4f07078f360743dec7f7596949149efeec06", size = 42524647 }, + { 
url = "https://files.pythonhosted.org/packages/f5/1b/6ee032251bf4cdb0cc50059374e86a9f076308c1512b61c4e003e241efb7/scipy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:baff393942b550823bfce952bb62270ee17504d02a1801d7fd0719534dfb9c84", size = 44469524 }, +] + +[[package]] +name = "six" +version = "1.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/71/39/171f1c67cd00715f190ba0b100d606d440a28c93c7714febeca8b79af85e/six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926", size = 34041 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d9/5a/e7c31adbe875f2abbb91bd84cf2dc52d792b5a01506781dbcf25c91daf11/six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254", size = 11053 }, +] + +[[package]] +name = "snowballstemmer" +version = "2.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/44/7b/af302bebf22c749c56c9c3e8ae13190b5b5db37a33d9068652e8f73b7089/snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1", size = 86699 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ed/dc/c02e01294f7265e63a7315fe086dd1df7dacb9f840a804da846b96d01b96/snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a", size = 93002 }, +] + +[[package]] +name = "soupsieve" +version = "2.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d7/ce/fbaeed4f9fb8b2daa961f90591662df6a86c1abf25c548329a86920aedfb/soupsieve-2.6.tar.gz", hash = "sha256:e2e68417777af359ec65daac1057404a3c8a5455bb8abc36f1a9866ab1a51abb", size = 101569 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/d1/c2/fe97d779f3ef3b15f05c94a2f1e3d21732574ed441687474db9d342a7315/soupsieve-2.6-py3-none-any.whl", hash = "sha256:e72c4ff06e4fb6e4b5a9f0f55fe6e81514581fca1515028625d0f299c602ccc9", size = 36186 }, +] + +[[package]] +name = "sphinx" +version = "8.1.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "alabaster" }, + { name = "babel" }, + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "docutils" }, + { name = "imagesize" }, + { name = "jinja2" }, + { name = "packaging" }, + { name = "pygments" }, + { name = "requests" }, + { name = "snowballstemmer" }, + { name = "sphinxcontrib-applehelp" }, + { name = "sphinxcontrib-devhelp" }, + { name = "sphinxcontrib-htmlhelp" }, + { name = "sphinxcontrib-jsmath" }, + { name = "sphinxcontrib-qthelp" }, + { name = "sphinxcontrib-serializinghtml" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/be0b61178fe2cdcb67e2a92fc9ebb488e3c51c4f74a36a7824c0adf23425/sphinx-8.1.3.tar.gz", hash = "sha256:43c1911eecb0d3e161ad78611bc905d1ad0e523e4ddc202a58a821773dc4c927", size = 8184611 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/26/60/1ddff83a56d33aaf6f10ec8ce84b4c007d9368b21008876fceda7e7381ef/sphinx-8.1.3-py3-none-any.whl", hash = "sha256:09719015511837b76bf6e03e42eb7595ac8c2e41eeb9c29c5b755c6b677992a2", size = 3487125 }, +] + +[[package]] +name = "sphinx-autoapi" +version = "3.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "astroid", marker = "python_full_version >= '3.13'" }, + { name = "jinja2" }, + { name = "pyyaml" }, + { name = "sphinx" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4a/eb/cc243583bb1d518ca3b10998c203d919a8ed90affd4831f2b61ad09043d2/sphinx_autoapi-3.4.0.tar.gz", hash = "sha256:e6d5371f9411bbb9fca358c00a9e57aef3ac94cbfc5df4bab285946462f69e0c", size = 29292 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/de/d6/f2acdc2567337fd5f5dc091a4e58d8a0fb14927b9779fc1e5ecee96d9824/sphinx_autoapi-3.4.0-py3-none-any.whl", hash = "sha256:4027fef2875a22c5f2a57107c71641d82f6166bf55beb407a47aaf3ef14e7b92", size = 34095 }, +] + +[[package]] +name = "sphinx-pyproject" +version = "0.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "dom-toml" }, + { name = "domdf-python-tools" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/39/97/aa8cec3da3e78f2c396b63332e2fe92fe43f7ff2ad19b3998735f28b0a7f/sphinx_pyproject-0.3.0.tar.gz", hash = "sha256:efc4ee9d96f579c4e4ed1ac273868c64565e88c8e37fe6ec2dc59fbcd57684ab", size = 7695 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/90/d5/89cb47c6399fd57ca451af15361499813c5d53e588cb6e00d89411ce724f/sphinx_pyproject-0.3.0-py3-none-any.whl", hash = "sha256:3aca968919f5ecd390f96874c3f64a43c9c7fcfdc2fd4191a781ad9228501b52", size = 23076 }, +] + +[[package]] +name = "sphinxcontrib-applehelp" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/6e/b837e84a1a704953c62ef8776d45c3e8d759876b4a84fe14eba2859106fe/sphinxcontrib_applehelp-2.0.0.tar.gz", hash = "sha256:2f29ef331735ce958efa4734873f084941970894c6090408b079c61b2e1c06d1", size = 20053 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5d/85/9ebeae2f76e9e77b952f4b274c27238156eae7979c5421fba91a28f4970d/sphinxcontrib_applehelp-2.0.0-py3-none-any.whl", hash = "sha256:4cd3f0ec4ac5dd9c17ec65e9ab272c9b867ea77425228e68ecf08d6b28ddbdb5", size = 119300 }, +] + +[[package]] +name = "sphinxcontrib-devhelp" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f6/d2/5beee64d3e4e747f316bae86b55943f51e82bb86ecd325883ef65741e7da/sphinxcontrib_devhelp-2.0.0.tar.gz", hash = 
"sha256:411f5d96d445d1d73bb5d52133377b4248ec79db5c793ce7dbe59e074b4dd1ad", size = 12967 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/35/7a/987e583882f985fe4d7323774889ec58049171828b58c2217e7f79cdf44e/sphinxcontrib_devhelp-2.0.0-py3-none-any.whl", hash = "sha256:aefb8b83854e4b0998877524d1029fd3e6879210422ee3780459e28a1f03a8a2", size = 82530 }, +] + +[[package]] +name = "sphinxcontrib-htmlhelp" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/93/983afd9aa001e5201eab16b5a444ed5b9b0a7a010541e0ddfbbfd0b2470c/sphinxcontrib_htmlhelp-2.1.0.tar.gz", hash = "sha256:c9e2916ace8aad64cc13a0d233ee22317f2b9025b9cf3295249fa985cc7082e9", size = 22617 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0a/7b/18a8c0bcec9182c05a0b3ec2a776bba4ead82750a55ff798e8d406dae604/sphinxcontrib_htmlhelp-2.1.0-py3-none-any.whl", hash = "sha256:166759820b47002d22914d64a075ce08f4c46818e17cfc9470a9786b759b19f8", size = 98705 }, +] + +[[package]] +name = "sphinxcontrib-jsmath" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/e8/9ed3830aeed71f17c026a07a5097edcf44b692850ef215b161b8ad875729/sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8", size = 5787 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c2/42/4c8646762ee83602e3fb3fbe774c2fac12f317deb0b5dbeeedd2d3ba4b77/sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178", size = 5071 }, +] + +[[package]] +name = "sphinxcontrib-qthelp" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/68/bc/9104308fc285eb3e0b31b67688235db556cd5b0ef31d96f30e45f2e51cae/sphinxcontrib_qthelp-2.0.0.tar.gz", hash = "sha256:4fe7d0ac8fc171045be623aba3e2a8f613f8682731f9153bb2e40ece16b9bbab", size = 17165 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/27/83/859ecdd180cacc13b1f7e857abf8582a64552ea7a061057a6c716e790fce/sphinxcontrib_qthelp-2.0.0-py3-none-any.whl", hash = "sha256:b18a828cdba941ccd6ee8445dbe72ffa3ef8cbe7505d8cd1fa0d42d3f2d5f3eb", size = 88743 }, +] + +[[package]] +name = "sphinxcontrib-serializinghtml" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3b/44/6716b257b0aa6bfd51a1b31665d1c205fb12cb5ad56de752dfa15657de2f/sphinxcontrib_serializinghtml-2.0.0.tar.gz", hash = "sha256:e9d912827f872c029017a53f0ef2180b327c3f7fd23c87229f7a8e8b70031d4d", size = 16080 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/52/a7/d2782e4e3f77c8450f727ba74a8f12756d5ba823d81b941f1b04da9d033a/sphinxcontrib_serializinghtml-2.0.0-py3-none-any.whl", hash = "sha256:6e2cb0eef194e10c27ec0023bfeb25badbbb5868244cf5bc5bdc04e4464bf331", size = 92072 }, +] + +[[package]] +name = "statsmodels" +version = "0.14.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, + { name = "packaging" }, + { name = "pandas" }, + { name = "patsy" }, + { name = "scipy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1f/3b/963a015dd8ea17e10c7b0e2f14d7c4daec903baf60a017e756b57953a4bf/statsmodels-0.14.4.tar.gz", hash = "sha256:5d69e0f39060dc72c067f9bb6e8033b6dccdb0bae101d76a7ef0bcc94e898b67", size = 20354802 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/31/f8/2662e6a101315ad336f75168fa9bac71f913ebcb92a6be84031d84a0f21f/statsmodels-0.14.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = 
"sha256:b5a24f5d2c22852d807d2b42daf3a61740820b28d8381daaf59dcb7055bf1a79", size = 10186886 }, + { url = "https://files.pythonhosted.org/packages/fa/c0/ee6e8ed35fc1ca9c7538c592f4974547bf72274bc98db1ae4a6e87481a83/statsmodels-0.14.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:df4f7864606fa843d7e7c0e6af288f034a2160dba14e6ccc09020a3cf67cb092", size = 9880066 }, + { url = "https://files.pythonhosted.org/packages/d1/97/3380ca6d8fd66cfb3d12941e472642f26e781a311c355a4e97aab2ed0216/statsmodels-0.14.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91341cbde9e8bea5fb419a76e09114e221567d03f34ca26e6d67ae2c27d8fe3c", size = 10283521 }, + { url = "https://files.pythonhosted.org/packages/fe/2a/55c5b5c5e5124a202ea3fe0bcdbdeceaf91b4ec6164b8434acb9dd97409c/statsmodels-0.14.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1322286a7bfdde2790bf72d29698a1b76c20b8423a55bdcd0d457969d0041f72", size = 10723228 }, + { url = "https://files.pythonhosted.org/packages/4f/76/67747e49dc758daae06f33aad8247b718cd7d224f091d2cd552681215bb2/statsmodels-0.14.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e31b95ac603415887c9f0d344cb523889cf779bc52d68e27e2d23c358958fec7", size = 10859503 }, + { url = "https://files.pythonhosted.org/packages/1d/eb/cb8b01f5edf8f135eb3d0553d159db113a35b2948d0e51eeb735e7ae09ea/statsmodels-0.14.4-cp313-cp313-win_amd64.whl", hash = "sha256:81030108d27aecc7995cac05aa280cf8c6025f6a6119894eef648997936c2dd0", size = 9817574 }, +] + +[[package]] +name = "sympy" +version = "1.13.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mpmath" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/11/8a/5a7fd6284fa8caac23a26c9ddf9c30485a48169344b4bd3b0f02fef1890f/sympy-1.13.3.tar.gz", hash = "sha256:b27fd2c6530e0ab39e275fc9b683895367e51d5da91baa8d3d64db2565fec4d9", size = 7533196 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/99/ff/c87e0622b1dadea79d2fb0b25ade9ed98954c9033722eb707053d310d4f3/sympy-1.13.3-py3-none-any.whl", hash = "sha256:54612cf55a62755ee71824ce692986f23c88ffa77207b30c1368eda4a7060f73", size = 6189483 }, +] + +[[package]] +name = "thealgorithms-python" +version = "0.0.1" +source = { virtual = "." } +dependencies = [ + { name = "beautifulsoup4" }, + { name = "fake-useragent" }, + { name = "imageio" }, + { name = "keras" }, + { name = "lxml" }, + { name = "matplotlib" }, + { name = "numpy" }, + { name = "opencv-python" }, + { name = "pandas" }, + { name = "pillow" }, + { name = "requests" }, + { name = "rich" }, + { name = "scikit-learn" }, + { name = "sphinx-pyproject" }, + { name = "statsmodels" }, + { name = "sympy" }, + { name = "tweepy" }, + { name = "typing-extensions" }, + { name = "xgboost" }, +] + +[package.dev-dependencies] +dev = [ + { name = "pytest" }, + { name = "pytest-cov" }, +] +docs = [ + { name = "myst-parser" }, + { name = "sphinx-autoapi" }, + { name = "sphinx-pyproject" }, +] +euler-validate = [ + { name = "numpy" }, + { name = "pytest" }, + { name = "pytest-cov" }, + { name = "requests" }, +] +lint = [ + { name = "codespell" }, + { name = "ruff" }, +] + +[package.metadata] +requires-dist = [ + { name = "beautifulsoup4", specifier = ">=4.12.3" }, + { name = "fake-useragent", specifier = ">=1.5.1" }, + { name = "imageio", specifier = ">=2.36.1" }, + { name = "keras", specifier = ">=3.7" }, + { name = "lxml", specifier = ">=5.3" }, + { name = "matplotlib", specifier = ">=3.9.3" }, + { name = "numpy", specifier = ">=2.1.3" }, + { name = "opencv-python", specifier = ">=4.10.0.84" }, + { name = "pandas", specifier = ">=2.2.3" }, + { name = "pillow", specifier = ">=11" }, + { name = "requests", specifier = ">=2.32.3" }, + { name = "rich", specifier = ">=13.9.4" }, + { name = "scikit-learn", specifier = ">=1.5.2" }, + { name = "sphinx-pyproject", specifier = ">=0.3" }, + { name = "statsmodels", specifier = 
">=0.14.4" }, + { name = "sympy", specifier = ">=1.13.3" }, + { name = "tweepy", specifier = ">=4.14" }, + { name = "typing-extensions", specifier = ">=4.12.2" }, + { name = "xgboost", specifier = ">=2.1.3" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "pytest", specifier = ">=8.3.4" }, + { name = "pytest-cov", specifier = ">=6" }, +] +docs = [ + { name = "myst-parser", specifier = ">=4.0.0" }, + { name = "sphinx-autoapi", specifier = ">=3.4.0" }, + { name = "sphinx-pyproject", specifier = ">=0.3.0" }, +] +euler-validate = [ + { name = "numpy", specifier = ">=2.1.3" }, + { name = "pytest", specifier = ">=8.3.4" }, + { name = "pytest-cov", specifier = ">=6.0.0" }, + { name = "requests", specifier = ">=2.32.3" }, +] +lint = [ + { name = "codespell", specifier = ">=2.3" }, + { name = "ruff", specifier = ">=0.8.1" }, +] + +[[package]] +name = "threadpoolctl" +version = "3.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bd/55/b5148dcbf72f5cde221f8bfe3b6a540da7aa1842f6b491ad979a6c8b84af/threadpoolctl-3.5.0.tar.gz", hash = "sha256:082433502dd922bf738de0d8bcc4fdcbf0979ff44c42bd40f5af8a282f6fa107", size = 41936 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4b/2c/ffbf7a134b9ab11a67b0cf0726453cedd9c5043a4fe7a35d1cefa9a1bcfb/threadpoolctl-3.5.0-py3-none-any.whl", hash = "sha256:56c1e26c150397e58c4926da8eeee87533b1e32bef131bd4bf6a2f45f3185467", size = 18414 }, +] + +[[package]] +name = "tomli" +version = "2.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/18/87/302344fed471e44a87289cf4967697d07e532f2421fdaf868a303cbae4ff/tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff", size = 17175 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/04/90/2ee5f2e0362cb8a0b6499dc44f4d7d48f8fff06d28ba46e6f1eaa61a1388/tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7", size = 132708 }, + { url = "https://files.pythonhosted.org/packages/c0/ec/46b4108816de6b385141f082ba99e315501ccd0a2ea23db4a100dd3990ea/tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c", size = 123582 }, + { url = "https://files.pythonhosted.org/packages/a0/bd/b470466d0137b37b68d24556c38a0cc819e8febe392d5b199dcd7f578365/tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13", size = 232543 }, + { url = "https://files.pythonhosted.org/packages/d9/e5/82e80ff3b751373f7cead2815bcbe2d51c895b3c990686741a8e56ec42ab/tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281", size = 241691 }, + { url = "https://files.pythonhosted.org/packages/05/7e/2a110bc2713557d6a1bfb06af23dd01e7dde52b6ee7dadc589868f9abfac/tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272", size = 251170 }, + { url = "https://files.pythonhosted.org/packages/64/7b/22d713946efe00e0adbcdfd6d1aa119ae03fd0b60ebed51ebb3fa9f5a2e5/tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140", size = 236530 }, + { url = "https://files.pythonhosted.org/packages/38/31/3a76f67da4b0cf37b742ca76beaf819dca0ebef26d78fc794a576e08accf/tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = 
"sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2", size = 258666 }, + { url = "https://files.pythonhosted.org/packages/07/10/5af1293da642aded87e8a988753945d0cf7e00a9452d3911dd3bb354c9e2/tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744", size = 243954 }, + { url = "https://files.pythonhosted.org/packages/5b/b9/1ed31d167be802da0fc95020d04cd27b7d7065cc6fbefdd2f9186f60d7bd/tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec", size = 98724 }, + { url = "https://files.pythonhosted.org/packages/c7/32/b0963458706accd9afcfeb867c0f9175a741bf7b19cd424230714d722198/tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69", size = 109383 }, + { url = "https://files.pythonhosted.org/packages/6e/c2/61d3e0f47e2b74ef40a68b9e6ad5984f6241a942f7cd3bbfbdbd03861ea9/tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc", size = 14257 }, +] + +[[package]] +name = "tweepy" +version = "4.14.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "oauthlib" }, + { name = "requests" }, + { name = "requests-oauthlib" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/75/1c/0db8c3cf9d31bf63853ff612d201060ae78e6db03468a70e063bef0eda62/tweepy-4.14.0.tar.gz", hash = "sha256:1f9f1707d6972de6cff6c5fd90dfe6a449cd2e0d70bd40043ffab01e07a06c8c", size = 88623 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4d/78/ba0065d5636bbf4a35b78c4f81b74e7858b609cdf69e629d6da5c91b9d92/tweepy-4.14.0-py3-none-any.whl", hash = "sha256:db6d3844ccc0c6d27f339f12ba8acc89912a961da513c1ae50fa2be502a56afb", size = 98520 }, +] + +[[package]] +name = "typing-extensions" +version = "4.12.2" +source = { registry = 
"https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/df/db/f35a00659bc03fec321ba8bce9420de607a1d37f8342eee1863174c69557/typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8", size = 85321 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", size = 37438 }, +] + +[[package]] +name = "tzdata" +version = "2024.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e1/34/943888654477a574a86a98e9896bae89c7aa15078ec29f490fef2f1e5384/tzdata-2024.2.tar.gz", hash = "sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc", size = 193282 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a6/ab/7e5f53c3b9d14972843a647d8d7a853969a58aecc7559cb3267302c94774/tzdata-2024.2-py2.py3-none-any.whl", hash = "sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd", size = 346586 }, +] + +[[package]] +name = "urllib3" +version = "2.2.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ed/63/22ba4ebfe7430b76388e7cd448d5478814d3032121827c12a2cc287e2260/urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9", size = 300677 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ce/d9/5f4c13cecde62396b0d3fe530a50ccea91e7dfc1ccf0e09c228841bb5ba8/urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac", size = 126338 }, +] + +[[package]] +name = "xgboost" +version = "2.1.3" +source = { registry = "https://pypi.org/simple" } +dependencies = 
[ + { name = "numpy" }, + { name = "nvidia-nccl-cu12", marker = "platform_machine != 'aarch64' and platform_system == 'Linux'" }, + { name = "scipy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/48/b0/131ffc4a15fd3acee9be3a7baa6b2fa6faa479799c51b880de9fc3ddf550/xgboost-2.1.3.tar.gz", hash = "sha256:7699ec4226156887d3afc665c63ab87469db9d46e361c702ba9fccd22535730c", size = 1090326 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cd/c6/773ebd84414879bd0566788868ae46a6574f6efaf81e694f01ea1fed3277/xgboost-2.1.3-py3-none-macosx_10_15_x86_64.macosx_11_0_x86_64.macosx_12_0_x86_64.whl", hash = "sha256:c9b0c92f13e3650e1e1cf92ff9ecef3efc6f5dc3d10ce17858df2081a89976ef", size = 2139909 }, + { url = "https://files.pythonhosted.org/packages/28/3c/ddf5d9eb742cdb7fbcd5c854bce07471bad01194ac37de91db64fbef0c58/xgboost-2.1.3-py3-none-macosx_12_0_arm64.whl", hash = "sha256:fcbf1912a852bd07a7007be350c8dc3a484c5e775b612f2b3cd082fc76240eb3", size = 1938631 }, + { url = "https://files.pythonhosted.org/packages/4a/3a/8cd69a216993fd9d54ceb079d1b357b7ef50678b3c2695d8a71962b8d0aa/xgboost-2.1.3-py3-none-manylinux2014_aarch64.whl", hash = "sha256:27af88df1162cee016c67f267a0a16c3db1c48f256e12f64c45c8f8edf9571cd", size = 4441261 }, + { url = "https://files.pythonhosted.org/packages/48/bc/05d7db90d421c5e3d681a12fd1eb087e37bf2e9bbe2b105422d6319ecc92/xgboost-2.1.3-py3-none-manylinux2014_x86_64.whl", hash = "sha256:32a43526208fe676527c698cb852e0e9515e6d7294143780e476d335290a131b", size = 4532380 }, + { url = "https://files.pythonhosted.org/packages/0f/c8/f679a816c06a4a6d23da3f4b448d5f0615b51de2886ad3e3e695d17121b3/xgboost-2.1.3-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:5d33090880f3d474f8cf5dda557c7bf8dbceefb62f2fd655c77efcabb9cac222", size = 4207000 }, + { url = 
"https://files.pythonhosted.org/packages/32/93/66826e2f50cefecbb0a44bd1e667316bf0a3c8e78cd1f0cdf52f5b2c5c6f/xgboost-2.1.3-py3-none-manylinux_2_28_x86_64.whl", hash = "sha256:8d85d38553855a1f8c40b8fbccca86af19202f91b244e2c7f77afbb2a6d9d785", size = 153894508 }, + { url = "https://files.pythonhosted.org/packages/70/58/2f94976df39470fb00eec2cb4f914dde44cd0df8d96483208bf7db4bc97e/xgboost-2.1.3-py3-none-win_amd64.whl", hash = "sha256:25c0ffcbd62aac5bc22c79e08b5b2edad1d5e37f16610ebefa5f06f3e2ea3d96", size = 124909665 }, +] From 98391e33ea2a87375a7f744eba3d57918237b4e7 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 12 Dec 2024 18:40:43 +0100 Subject: [PATCH 149/260] [pre-commit.ci] pre-commit autoupdate (#12428) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.8.1 → v0.8.2](https://github.com/astral-sh/ruff-pre-commit/compare/v0.8.1...v0.8.2) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index bef251749c19..884b10661a49 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.8.1 + rev: v0.8.2 hooks: - id: ruff - id: ruff-format From f8e595e048f1cbd763e0a1f8c0ffb4dff335b841 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 16 Dec 2024 20:53:36 +0100 Subject: [PATCH 150/260] [pre-commit.ci] pre-commit autoupdate (#12439) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.8.2 → 
v0.8.3](https://github.com/astral-sh/ruff-pre-commit/compare/v0.8.2...v0.8.3) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 884b10661a49..0c8108ac55be 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.8.2 + rev: v0.8.3 hooks: - id: ruff - id: ruff-format From 4abfce2791c081f65580bc1fefdf5a4d8ee7b5fc Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 23 Dec 2024 06:55:22 +0300 Subject: [PATCH 151/260] Fix sphinx/build_docs warnings for audio_filters (#12449) * updating DIRECTORY.md * Fix sphinx/build_docs warnings for audio_filters * Improve * Fix * Fix * Fix --------- Co-authored-by: MaximSmolskiy --- audio_filters/iir_filter.py | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/audio_filters/iir_filter.py b/audio_filters/iir_filter.py index f3c1ad43b001..fa3e6c54b33f 100644 --- a/audio_filters/iir_filter.py +++ b/audio_filters/iir_filter.py @@ -10,13 +10,17 @@ class IIRFilter: Implementation details: Based on the 2nd-order function from - https://en.wikipedia.org/wiki/Digital_biquad_filter, + https://en.wikipedia.org/wiki/Digital_biquad_filter, this generalized N-order function was made. Using the following transfer function - H(z)=\frac{b_{0}+b_{1}z^{-1}+b_{2}z^{-2}+...+b_{k}z^{-k}}{a_{0}+a_{1}z^{-1}+a_{2}z^{-2}+...+a_{k}z^{-k}} + .. math:: H(z)=\frac{b_{0}+b_{1}z^{-1}+b_{2}z^{-2}+...+b_{k}z^{-k}} + {a_{0}+a_{1}z^{-1}+a_{2}z^{-2}+...+a_{k}z^{-k}} + we can rewrite this to - y[n]={\frac{1}{a_{0}}}\left(\left(b_{0}x[n]+b_{1}x[n-1]+b_{2}x[n-2]+...+b_{k}x[n-k]\right)-\left(a_{1}y[n-1]+a_{2}y[n-2]+...+a_{k}y[n-k]\right)\right) + .. 
math:: y[n]={\frac{1}{a_{0}}} + \left(\left(b_{0}x[n]+b_{1}x[n-1]+b_{2}x[n-2]+...+b_{k}x[n-k]\right)- + \left(a_{1}y[n-1]+a_{2}y[n-2]+...+a_{k}y[n-k]\right)\right) """ def __init__(self, order: int) -> None: @@ -34,17 +38,19 @@ def __init__(self, order: int) -> None: def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None: """ - Set the coefficients for the IIR filter. These should both be of size order + 1. - a_0 may be left out, and it will use 1.0 as default value. + Set the coefficients for the IIR filter. + These should both be of size `order` + 1. + :math:`a_0` may be left out, and it will use 1.0 as default value. This method works well with scipy's filter design functions - >>> # Make a 2nd-order 1000Hz butterworth lowpass filter - >>> import scipy.signal - >>> b_coeffs, a_coeffs = scipy.signal.butter(2, 1000, - ... btype='lowpass', - ... fs=48000) - >>> filt = IIRFilter(2) - >>> filt.set_coefficients(a_coeffs, b_coeffs) + + >>> # Make a 2nd-order 1000Hz butterworth lowpass filter + >>> import scipy.signal + >>> b_coeffs, a_coeffs = scipy.signal.butter(2, 1000, + ... btype='lowpass', + ... 
fs=48000) + >>> filt = IIRFilter(2) + >>> filt.set_coefficients(a_coeffs, b_coeffs) """ if len(a_coeffs) < self.order: a_coeffs = [1.0, *a_coeffs] @@ -68,7 +74,7 @@ def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None def process(self, sample: float) -> float: """ - Calculate y[n] + Calculate :math:`y[n]` >>> filt = IIRFilter(2) >>> filt.process(0) From 47cd21a110d8e2fc038414bc7f3c7ca8e91d6653 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 23 Dec 2024 14:56:42 +0300 Subject: [PATCH 152/260] Fix sphinx/build_docs warnings for cellular_automata (#12454) * updating DIRECTORY.md * Fix sphinx/build_docs warnings for cellular_automata * Fix * Improve --------- Co-authored-by: MaximSmolskiy --- cellular_automata/wa_tor.py | 54 ++++++++++++++++++------------------- 1 file changed, 26 insertions(+), 28 deletions(-) diff --git a/cellular_automata/wa_tor.py b/cellular_automata/wa_tor.py index e423d1595bdb..29f7ea510bfe 100644 --- a/cellular_automata/wa_tor.py +++ b/cellular_automata/wa_tor.py @@ -1,9 +1,9 @@ """ Wa-Tor algorithm (1984) -@ https://en.wikipedia.org/wiki/Wa-Tor -@ https://beltoforion.de/en/wator/ -@ https://beltoforion.de/en/wator/images/wator_medium.webm +| @ https://en.wikipedia.org/wiki/Wa-Tor +| @ https://beltoforion.de/en/wator/ +| @ https://beltoforion.de/en/wator/images/wator_medium.webm This solution aims to completely remove any systematic approach to the Wa-Tor planet, and utilise fully random methods. @@ -97,8 +97,8 @@ class WaTor: :attr time_passed: A function that is called every time time passes (a chronon) in order to visually display - the new Wa-Tor planet. The time_passed function can block - using time.sleep to slow the algorithm progression. + the new Wa-Tor planet. The `time_passed` function can block + using ``time.sleep`` to slow the algorithm progression. 
>>> wt = WaTor(10, 15) >>> wt.width @@ -216,7 +216,7 @@ def get_surrounding_prey(self, entity: Entity) -> list[Entity]: """ Returns all the prey entities around (N, S, E, W) a predator entity. - Subtly different to the try_to_move_to_unoccupied square. + Subtly different to the `move_and_reproduce`. >>> wt = WaTor(WIDTH, HEIGHT) >>> wt.set_planet([ @@ -260,7 +260,7 @@ def move_and_reproduce( """ Attempts to move to an unoccupied neighbouring square in either of the four directions (North, South, East, West). - If the move was successful and the remaining_reproduction time is + If the move was successful and the `remaining_reproduction_time` is equal to 0, then a new prey or predator can also be created in the previous square. @@ -351,12 +351,12 @@ def perform_prey_actions( Performs the actions for a prey entity For prey the rules are: - 1. At each chronon, a prey moves randomly to one of the adjacent unoccupied - squares. If there are no free squares, no movement takes place. - 2. Once a prey has survived a certain number of chronons it may reproduce. - This is done as it moves to a neighbouring square, - leaving behind a new prey in its old position. - Its reproduction time is also reset to zero. + 1. At each chronon, a prey moves randomly to one of the adjacent unoccupied + squares. If there are no free squares, no movement takes place. + 2. Once a prey has survived a certain number of chronons it may reproduce. + This is done as it moves to a neighbouring square, + leaving behind a new prey in its old position. + Its reproduction time is also reset to zero. >>> wt = WaTor(WIDTH, HEIGHT) >>> reproducable_entity = Entity(True, coords=(0, 1)) @@ -382,15 +382,15 @@ def perform_predator_actions( :param occupied_by_prey_coords: Move to this location if there is prey there For predators the rules are: - 1. At each chronon, a predator moves randomly to an adjacent square occupied - by a prey. If there is none, the predator moves to a random adjacent - unoccupied square. 
If there are no free squares, no movement takes place. - 2. At each chronon, each predator is deprived of a unit of energy. - 3. Upon reaching zero energy, a predator dies. - 4. If a predator moves to a square occupied by a prey, - it eats the prey and earns a certain amount of energy. - 5. Once a predator has survived a certain number of chronons - it may reproduce in exactly the same way as the prey. + 1. At each chronon, a predator moves randomly to an adjacent square occupied + by a prey. If there is none, the predator moves to a random adjacent + unoccupied square. If there are no free squares, no movement takes place. + 2. At each chronon, each predator is deprived of a unit of energy. + 3. Upon reaching zero energy, a predator dies. + 4. If a predator moves to a square occupied by a prey, + it eats the prey and earns a certain amount of energy. + 5. Once a predator has survived a certain number of chronons + it may reproduce in exactly the same way as the prey. >>> wt = WaTor(WIDTH, HEIGHT) >>> wt.set_planet([[Entity(True, coords=(0, 0)), Entity(False, coords=(0, 1))]]) @@ -430,7 +430,7 @@ def perform_predator_actions( def run(self, *, iteration_count: int) -> None: """ - Emulate time passing by looping iteration_count times + Emulate time passing by looping `iteration_count` times >>> wt = WaTor(WIDTH, HEIGHT) >>> wt.run(iteration_count=PREDATOR_INITIAL_ENERGY_VALUE - 1) @@ -484,11 +484,9 @@ def visualise(wt: WaTor, iter_number: int, *, colour: bool = True) -> None: an ascii code in terminal to clear and re-print the Wa-Tor planet at intervals. - Uses ascii colour codes to colourfully display - the predators and prey. 
- - (0x60f197) Prey = # - (0xfffff) Predator = x + Uses ascii colour codes to colourfully display the predators and prey: + * (0x60f197) Prey = ``#`` + * (0xfffff) Predator = ``x`` >>> wt = WaTor(30, 30) >>> wt.set_planet([ From c5e603ae4234e5d516d700b01d47f78d42c18008 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 23 Dec 2024 15:43:16 +0300 Subject: [PATCH 153/260] Fix sphinx/build_docs warnings for geodesy (#12462) * updating DIRECTORY.md * Fix sphinx/build_docs warnings for geodesy/haversine_distance.py * Improve --------- Co-authored-by: MaximSmolskiy --- geodesy/haversine_distance.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/geodesy/haversine_distance.py b/geodesy/haversine_distance.py index 93e625770f9d..39cd250af965 100644 --- a/geodesy/haversine_distance.py +++ b/geodesy/haversine_distance.py @@ -21,10 +21,11 @@ def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> fl computation like Haversine can be handy for shorter range distances. 
Args: - lat1, lon1: latitude and longitude of coordinate 1 - lat2, lon2: latitude and longitude of coordinate 2 + * `lat1`, `lon1`: latitude and longitude of coordinate 1 + * `lat2`, `lon2`: latitude and longitude of coordinate 2 Returns: geographical distance between two points in metres + >>> from collections import namedtuple >>> point_2d = namedtuple("point_2d", "lat lon") >>> SAN_FRANCISCO = point_2d(37.774856, -122.424227) From b0cb13fea54854b3a60eced27026db9a9c5dc5ab Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 23 Dec 2024 16:11:58 +0300 Subject: [PATCH 154/260] Fix sphinx/build_docs warnings for greedy_methods (#12463) * updating DIRECTORY.md * Fix sphinx/build_docs warnings for greedy_methods * Improve --------- Co-authored-by: MaximSmolskiy --- greedy_methods/smallest_range.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/greedy_methods/smallest_range.py b/greedy_methods/smallest_range.py index e2b7f8d7e96a..9adb12bf9029 100644 --- a/greedy_methods/smallest_range.py +++ b/greedy_methods/smallest_range.py @@ -14,12 +14,13 @@ def smallest_range(nums: list[list[int]]) -> list[int]: Uses min heap for efficiency. The range includes at least one number from each list. Args: - nums: List of k sorted integer lists. + `nums`: List of k sorted integer lists. Returns: list: Smallest range as a two-element list. 
Examples: + >>> smallest_range([[4, 10, 15, 24, 26], [0, 9, 12, 20], [5, 18, 22, 30]]) [20, 24] >>> smallest_range([[1, 2, 3], [1, 2, 3], [1, 2, 3]]) From 04fbfd6eae38b9897c1b8ff6aee487dd2523665b Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 24 Dec 2024 03:14:11 +0300 Subject: [PATCH 155/260] Fix sphinx/build_docs warnings for maths/volume (#12464) * Fix sphinx/build_docs warnings for maths/volume * Fix * Fix * Fix * Fix * Fix * Fix * Fix --- maths/volume.py | 149 ++++++++++++++++++++++++++++++------------------ 1 file changed, 95 insertions(+), 54 deletions(-) diff --git a/maths/volume.py b/maths/volume.py index 23fcf6be6ef1..08bdf72b013b 100644 --- a/maths/volume.py +++ b/maths/volume.py @@ -1,5 +1,6 @@ """ Find the volume of various shapes. + * https://en.wikipedia.org/wiki/Volume * https://en.wikipedia.org/wiki/Spherical_cap """ @@ -12,6 +13,7 @@ def vol_cube(side_length: float) -> float: """ Calculate the Volume of a Cube. + >>> vol_cube(1) 1.0 >>> vol_cube(3) @@ -33,6 +35,7 @@ def vol_cube(side_length: float) -> float: def vol_spherical_cap(height: float, radius: float) -> float: """ Calculate the volume of the spherical cap. + >>> vol_spherical_cap(1, 2) 5.235987755982988 >>> vol_spherical_cap(1.6, 2.6) @@ -57,20 +60,29 @@ def vol_spherical_cap(height: float, radius: float) -> float: def vol_spheres_intersect( radius_1: float, radius_2: float, centers_distance: float ) -> float: - """ + r""" Calculate the volume of the intersection of two spheres. + The intersection is composed by two spherical caps and therefore its volume is the - sum of the volumes of the spherical caps. First, it calculates the heights (h1, h2) - of the spherical caps, then the two volumes and it returns the sum. + sum of the volumes of the spherical caps. + First, it calculates the heights :math:`(h_1, h_2)` of the spherical caps, + then the two volumes and it returns the sum. 
The height formulas are - h1 = (radius_1 - radius_2 + centers_distance) - * (radius_1 + radius_2 - centers_distance) - / (2 * centers_distance) - h2 = (radius_2 - radius_1 + centers_distance) - * (radius_2 + radius_1 - centers_distance) - / (2 * centers_distance) - if centers_distance is 0 then it returns the volume of the smallers sphere - :return vol_spherical_cap(h1, radius_2) + vol_spherical_cap(h2, radius_1) + + .. math:: + h_1 = \frac{(radius_1 - radius_2 + centers\_distance) + \cdot (radius_1 + radius_2 - centers\_distance)} + {2 \cdot centers\_distance} + + h_2 = \frac{(radius_2 - radius_1 + centers\_distance) + \cdot (radius_2 + radius_1 - centers\_distance)} + {2 \cdot centers\_distance} + + if `centers_distance` is 0 then it returns the volume of the smallers sphere + + :return: ``vol_spherical_cap`` (:math:`h_1`, :math:`radius_2`) + + ``vol_spherical_cap`` (:math:`h_2`, :math:`radius_1`) + >>> vol_spheres_intersect(2, 2, 1) 21.205750411731103 >>> vol_spheres_intersect(2.6, 2.6, 1.6) @@ -112,14 +124,18 @@ def vol_spheres_intersect( def vol_spheres_union( radius_1: float, radius_2: float, centers_distance: float ) -> float: - """ + r""" Calculate the volume of the union of two spheres that possibly intersect. - It is the sum of sphere A and sphere B minus their intersection. - First, it calculates the volumes (v1, v2) of the spheres, - then the volume of the intersection (i) and it returns the sum v1+v2-i. - If centers_distance is 0 then it returns the volume of the larger sphere - :return vol_sphere(radius_1) + vol_sphere(radius_2) - - vol_spheres_intersect(radius_1, radius_2, centers_distance) + + It is the sum of sphere :math:`A` and sphere :math:`B` minus their intersection. + First, it calculates the volumes :math:`(v_1, v_2)` of the spheres, + then the volume of the intersection :math:`i` and + it returns the sum :math:`v_1 + v_2 - i`. 
+ If `centers_distance` is 0 then it returns the volume of the larger sphere + + :return: ``vol_sphere`` (:math:`radius_1`) + ``vol_sphere`` (:math:`radius_2`) + - ``vol_spheres_intersect`` + (:math:`radius_1`, :math:`radius_2`, :math:`centers\_distance`) >>> vol_spheres_union(2, 2, 1) 45.814892864851146 @@ -157,7 +173,9 @@ def vol_spheres_union( def vol_cuboid(width: float, height: float, length: float) -> float: """ Calculate the Volume of a Cuboid. - :return multiple of width, length and height + + :return: multiple of `width`, `length` and `height` + >>> vol_cuboid(1, 1, 1) 1.0 >>> vol_cuboid(1, 2, 3) @@ -185,10 +203,12 @@ def vol_cuboid(width: float, height: float, length: float) -> float: def vol_cone(area_of_base: float, height: float) -> float: - """ - Calculate the Volume of a Cone. - Wikipedia reference: https://en.wikipedia.org/wiki/Cone - :return (1/3) * area_of_base * height + r""" + | Calculate the Volume of a Cone. + | Wikipedia reference: https://en.wikipedia.org/wiki/Cone + + :return: :math:`\frac{1}{3} \cdot area\_of\_base \cdot height` + >>> vol_cone(10, 3) 10.0 >>> vol_cone(1, 1) @@ -212,10 +232,12 @@ def vol_cone(area_of_base: float, height: float) -> float: def vol_right_circ_cone(radius: float, height: float) -> float: - """ - Calculate the Volume of a Right Circular Cone. - Wikipedia reference: https://en.wikipedia.org/wiki/Cone - :return (1/3) * pi * radius^2 * height + r""" + | Calculate the Volume of a Right Circular Cone. + | Wikipedia reference: https://en.wikipedia.org/wiki/Cone + + :return: :math:`\frac{1}{3} \cdot \pi \cdot radius^2 \cdot height` + >>> vol_right_circ_cone(2, 3) 12.566370614359172 >>> vol_right_circ_cone(0, 0) @@ -237,10 +259,12 @@ def vol_right_circ_cone(radius: float, height: float) -> float: def vol_prism(area_of_base: float, height: float) -> float: - """ - Calculate the Volume of a Prism. 
- Wikipedia reference: https://en.wikipedia.org/wiki/Prism_(geometry) - :return V = Bh + r""" + | Calculate the Volume of a Prism. + | Wikipedia reference: https://en.wikipedia.org/wiki/Prism_(geometry) + + :return: :math:`V = B \cdot h` + >>> vol_prism(10, 2) 20.0 >>> vol_prism(11, 1) @@ -264,10 +288,12 @@ def vol_prism(area_of_base: float, height: float) -> float: def vol_pyramid(area_of_base: float, height: float) -> float: - """ - Calculate the Volume of a Pyramid. - Wikipedia reference: https://en.wikipedia.org/wiki/Pyramid_(geometry) - :return (1/3) * Bh + r""" + | Calculate the Volume of a Pyramid. + | Wikipedia reference: https://en.wikipedia.org/wiki/Pyramid_(geometry) + + :return: :math:`\frac{1}{3} \cdot B \cdot h` + >>> vol_pyramid(10, 3) 10.0 >>> vol_pyramid(1.5, 3) @@ -291,10 +317,12 @@ def vol_pyramid(area_of_base: float, height: float) -> float: def vol_sphere(radius: float) -> float: - """ - Calculate the Volume of a Sphere. - Wikipedia reference: https://en.wikipedia.org/wiki/Sphere - :return (4/3) * pi * r^3 + r""" + | Calculate the Volume of a Sphere. 
+ | Wikipedia reference: https://en.wikipedia.org/wiki/Sphere + + :return: :math:`\frac{4}{3} \cdot \pi \cdot r^3` + >>> vol_sphere(5) 523.5987755982989 >>> vol_sphere(1) @@ -315,10 +343,13 @@ def vol_sphere(radius: float) -> float: def vol_hemisphere(radius: float) -> float: - """Calculate the volume of a hemisphere - Wikipedia reference: https://en.wikipedia.org/wiki/Hemisphere - Other references: https://www.cuemath.com/geometry/hemisphere - :return 2/3 * pi * radius^3 + r""" + | Calculate the volume of a hemisphere + | Wikipedia reference: https://en.wikipedia.org/wiki/Hemisphere + | Other references: https://www.cuemath.com/geometry/hemisphere + + :return: :math:`\frac{2}{3} \cdot \pi \cdot radius^3` + >>> vol_hemisphere(1) 2.0943951023931953 >>> vol_hemisphere(7) @@ -339,9 +370,12 @@ def vol_hemisphere(radius: float) -> float: def vol_circular_cylinder(radius: float, height: float) -> float: - """Calculate the Volume of a Circular Cylinder. - Wikipedia reference: https://en.wikipedia.org/wiki/Cylinder - :return pi * radius^2 * height + r""" + | Calculate the Volume of a Circular Cylinder. + | Wikipedia reference: https://en.wikipedia.org/wiki/Cylinder + + :return: :math:`\pi \cdot radius^2 \cdot height` + >>> vol_circular_cylinder(1, 1) 3.141592653589793 >>> vol_circular_cylinder(4, 3) @@ -368,7 +402,9 @@ def vol_circular_cylinder(radius: float, height: float) -> float: def vol_hollow_circular_cylinder( inner_radius: float, outer_radius: float, height: float ) -> float: - """Calculate the Volume of a Hollow Circular Cylinder. + """ + Calculate the Volume of a Hollow Circular Cylinder. + >>> vol_hollow_circular_cylinder(1, 2, 3) 28.274333882308138 >>> vol_hollow_circular_cylinder(1.6, 2.6, 3.6) @@ -405,8 +441,9 @@ def vol_hollow_circular_cylinder( def vol_conical_frustum(height: float, radius_1: float, radius_2: float) -> float: - """Calculate the Volume of a Conical Frustum. 
- Wikipedia reference: https://en.wikipedia.org/wiki/Frustum + """ + | Calculate the Volume of a Conical Frustum. + | Wikipedia reference: https://en.wikipedia.org/wiki/Frustum >>> vol_conical_frustum(45, 7, 28) 48490.482608158454 @@ -443,9 +480,12 @@ def vol_conical_frustum(height: float, radius_1: float, radius_2: float) -> floa def vol_torus(torus_radius: float, tube_radius: float) -> float: - """Calculate the Volume of a Torus. - Wikipedia reference: https://en.wikipedia.org/wiki/Torus - :return 2pi^2 * torus_radius * tube_radius^2 + r""" + | Calculate the Volume of a Torus. + | Wikipedia reference: https://en.wikipedia.org/wiki/Torus + + :return: :math:`2 \pi^2 \cdot torus\_radius \cdot tube\_radius^2` + >>> vol_torus(1, 1) 19.739208802178716 >>> vol_torus(4, 3) @@ -471,8 +511,9 @@ def vol_torus(torus_radius: float, tube_radius: float) -> float: def vol_icosahedron(tri_side: float) -> float: - """Calculate the Volume of an Icosahedron. - Wikipedia reference: https://en.wikipedia.org/wiki/Regular_icosahedron + """ + | Calculate the Volume of an Icosahedron. + | Wikipedia reference: https://en.wikipedia.org/wiki/Regular_icosahedron >>> from math import isclose >>> isclose(vol_icosahedron(2.5), 34.088984228514256) From e9721aad59743d01e82582017884db528bad3e21 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 24 Dec 2024 06:06:59 +0300 Subject: [PATCH 156/260] Fix sphinx/build_docs warnings for physics/horizontal_projectile_motion (#12467) --- physics/horizontal_projectile_motion.py | 68 +++++++++++++++---------- 1 file changed, 41 insertions(+), 27 deletions(-) diff --git a/physics/horizontal_projectile_motion.py b/physics/horizontal_projectile_motion.py index 80f85a1b7146..60f21c2b39c4 100644 --- a/physics/horizontal_projectile_motion.py +++ b/physics/horizontal_projectile_motion.py @@ -1,15 +1,18 @@ """ Horizontal Projectile Motion problem in physics. 
+ This algorithm solves a specific problem in which -the motion starts from the ground as can be seen below: - (v = 0) - * * - * * - * * - * * - * * - * * -GROUND GROUND +the motion starts from the ground as can be seen below:: + + (v = 0) + * * + * * + * * + * * + * * + * * + GROUND GROUND + For more info: https://en.wikipedia.org/wiki/Projectile_motion """ @@ -43,14 +46,17 @@ def check_args(init_velocity: float, angle: float) -> None: def horizontal_distance(init_velocity: float, angle: float) -> float: - """ + r""" Returns the horizontal distance that the object cover + Formula: - v_0^2 * sin(2 * alpha) - --------------------- - g - v_0 - initial velocity - alpha - angle + .. math:: + \frac{v_0^2 \cdot \sin(2 \alpha)}{g} + + v_0 - \text{initial velocity} + + \alpha - \text{angle} + >>> horizontal_distance(30, 45) 91.77 >>> horizontal_distance(100, 78) @@ -70,14 +76,17 @@ def horizontal_distance(init_velocity: float, angle: float) -> float: def max_height(init_velocity: float, angle: float) -> float: - """ + r""" Returns the maximum height that the object reach + Formula: - v_0^2 * sin^2(alpha) - -------------------- - 2g - v_0 - initial velocity - alpha - angle + .. math:: + \frac{v_0^2 \cdot \sin^2 (\alpha)}{2 g} + + v_0 - \text{initial velocity} + + \alpha - \text{angle} + >>> max_height(30, 45) 22.94 >>> max_height(100, 78) @@ -97,14 +106,17 @@ def max_height(init_velocity: float, angle: float) -> float: def total_time(init_velocity: float, angle: float) -> float: - """ + r""" Returns total time of the motion + Formula: - 2 * v_0 * sin(alpha) - -------------------- - g - v_0 - initial velocity - alpha - angle + .. 
math:: + \frac{2 v_0 \cdot \sin (\alpha)}{g} + + v_0 - \text{initial velocity} + + \alpha - \text{angle} + >>> total_time(30, 45) 4.33 >>> total_time(100, 78) @@ -125,6 +137,8 @@ def total_time(init_velocity: float, angle: float) -> float: def test_motion() -> None: """ + Test motion + >>> test_motion() """ v0, angle = 25, 20 From c36aaf0fbcbc0f1a6c82b689ee87e383104b9e96 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 24 Dec 2024 11:48:37 +0300 Subject: [PATCH 157/260] Fix sphinx/build_docs warnings for graphs/check_bipatrite (#12469) * Fix sphinx/build_docs warnings for graphs/check_bipatrite * Fix --- graphs/check_bipatrite.py | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/graphs/check_bipatrite.py b/graphs/check_bipatrite.py index 10b9cc965251..213f3f9480b5 100644 --- a/graphs/check_bipatrite.py +++ b/graphs/check_bipatrite.py @@ -6,16 +6,17 @@ def is_bipartite_dfs(graph: defaultdict[int, list[int]]) -> bool: Check if a graph is bipartite using depth-first search (DFS). Args: - graph: Adjacency list representing the graph. + `graph`: Adjacency list representing the graph. Returns: - True if bipartite, False otherwise. + ``True`` if bipartite, ``False`` otherwise. Checks if the graph can be divided into two sets of vertices, such that no two vertices within the same set are connected by an edge. Examples: - # FIXME: This test should pass. + + >>> # FIXME: This test should pass. >>> is_bipartite_dfs(defaultdict(list, {0: [1, 2], 1: [0, 3], 2: [0, 4]})) Traceback (most recent call last): ... @@ -37,7 +38,7 @@ def is_bipartite_dfs(graph: defaultdict[int, list[int]]) -> bool: ... KeyError: 0 - # FIXME: This test should fails with KeyError: 4. + >>> # FIXME: This test should fails with KeyError: 4. >>> is_bipartite_dfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 9: [0]}) False >>> is_bipartite_dfs({0: [-1, 3], 1: [0, -2]}) @@ -51,7 +52,8 @@ def is_bipartite_dfs(graph: defaultdict[int, list[int]]) -> bool: ... 
KeyError: 0 - # FIXME: This test should fails with TypeError: list indices must be integers or... + >>> # FIXME: This test should fails with + >>> # TypeError: list indices must be integers or... >>> is_bipartite_dfs({0: [1.0, 3.0], 1.0: [0, 2.0], 2.0: [1.0, 3.0], 3.0: [0, 2.0]}) True >>> is_bipartite_dfs({"a": [1, 3], "b": [0, 2], "c": [1, 3], "d": [0, 2]}) @@ -95,16 +97,17 @@ def is_bipartite_bfs(graph: defaultdict[int, list[int]]) -> bool: Check if a graph is bipartite using a breadth-first search (BFS). Args: - graph: Adjacency list representing the graph. + `graph`: Adjacency list representing the graph. Returns: - True if bipartite, False otherwise. + ``True`` if bipartite, ``False`` otherwise. Check if the graph can be divided into two sets of vertices, such that no two vertices within the same set are connected by an edge. Examples: - # FIXME: This test should pass. + + >>> # FIXME: This test should pass. >>> is_bipartite_bfs(defaultdict(list, {0: [1, 2], 1: [0, 3], 2: [0, 4]})) Traceback (most recent call last): ... @@ -126,7 +129,7 @@ def is_bipartite_bfs(graph: defaultdict[int, list[int]]) -> bool: ... KeyError: 0 - # FIXME: This test should fails with KeyError: 4. + >>> # FIXME: This test should fails with KeyError: 4. >>> is_bipartite_bfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 9: [0]}) False >>> is_bipartite_bfs({0: [-1, 3], 1: [0, -2]}) @@ -140,7 +143,8 @@ def is_bipartite_bfs(graph: defaultdict[int, list[int]]) -> bool: ... KeyError: 0 - # FIXME: This test should fails with TypeError: list indices must be integers or... + >>> # FIXME: This test should fails with + >>> # TypeError: list indices must be integers or... 
>>> is_bipartite_bfs({0: [1.0, 3.0], 1.0: [0, 2.0], 2.0: [1.0, 3.0], 3.0: [0, 2.0]}) True >>> is_bipartite_bfs({"a": [1, 3], "b": [0, 2], "c": [1, 3], "d": [0, 2]}) From ae28fa7fe362c8cb0238dbb6b237d42179e8beb3 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 24 Dec 2024 16:17:22 +0300 Subject: [PATCH 158/260] Fix sphinx/build_docs warnings for data_structures/binary_tree/mirror_binary_tree (#12470) --- .../binary_tree/mirror_binary_tree.py | 52 +++++++++++-------- 1 file changed, 29 insertions(+), 23 deletions(-) diff --git a/data_structures/binary_tree/mirror_binary_tree.py b/data_structures/binary_tree/mirror_binary_tree.py index 62e2f08dd4e0..f6611d66d676 100644 --- a/data_structures/binary_tree/mirror_binary_tree.py +++ b/data_structures/binary_tree/mirror_binary_tree.py @@ -56,6 +56,8 @@ def mirror(self) -> Node: def make_tree_seven() -> Node: r""" Return a binary tree with 7 nodes that looks like this: + :: + 1 / \ 2 3 @@ -81,13 +83,15 @@ def make_tree_seven() -> Node: def make_tree_nine() -> Node: r""" Return a binary tree with 9 nodes that looks like this: - 1 - / \ - 2 3 - / \ \ - 4 5 6 - / \ \ - 7 8 9 + :: + + 1 + / \ + 2 3 + / \ \ + 4 5 6 + / \ \ + 7 8 9 >>> tree_nine = make_tree_nine() >>> len(tree_nine) @@ -117,23 +121,25 @@ def main() -> None: >>> tuple(tree.mirror()) (6, 3, 1, 9, 5, 2, 8, 4, 7) - nine_tree: - 1 - / \ - 2 3 - / \ \ - 4 5 6 - / \ \ - 7 8 9 - - The mirrored tree looks like this: + nine_tree:: + + 1 + / \ + 2 3 + / \ \ + 4 5 6 + / \ \ + 7 8 9 + + The mirrored tree looks like this:: + 1 - / \ - 3 2 - / / \ - 6 5 4 - / / \ - 9 8 7 + / \ + 3 2 + / / \ + 6 5 4 + / / \ + 9 8 7 """ trees = {"zero": Node(0), "seven": make_tree_seven(), "nine": make_tree_nine()} for name, tree in trees.items(): From eb652cf3d48fbd3b51450e95640ce5aec63a066b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 28 Dec 2024 00:18:08 +0300 Subject: [PATCH 159/260] Bump astral-sh/setup-uv from 4 to 5 
(#12445) Bumps [astral-sh/setup-uv](https://github.com/astral-sh/setup-uv) from 4 to 5. - [Release notes](https://github.com/astral-sh/setup-uv/releases) - [Commits](https://github.com/astral-sh/setup-uv/compare/v4...v5) --- updated-dependencies: - dependency-name: astral-sh/setup-uv dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build.yml | 2 +- .github/workflows/project_euler.yml | 4 ++-- .github/workflows/ruff.yml | 2 +- .github/workflows/sphinx.yml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index a6f308715cc2..62829b2b45a5 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: astral-sh/setup-uv@v4 + - uses: astral-sh/setup-uv@v5 with: enable-cache: true cache-dependency-glob: uv.lock diff --git a/.github/workflows/project_euler.yml b/.github/workflows/project_euler.yml index 84c55335451e..8d51ad8850cf 100644 --- a/.github/workflows/project_euler.yml +++ b/.github/workflows/project_euler.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: astral-sh/setup-uv@v4 + - uses: astral-sh/setup-uv@v5 - uses: actions/setup-python@v5 with: python-version: 3.x @@ -25,7 +25,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: astral-sh/setup-uv@v4 + - uses: astral-sh/setup-uv@v5 - uses: actions/setup-python@v5 with: python-version: 3.x diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml index 2c6f92fcf7bf..cfe127b3521f 100644 --- a/.github/workflows/ruff.yml +++ b/.github/workflows/ruff.yml @@ -12,5 +12,5 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: astral-sh/setup-uv@v4 + - uses: 
astral-sh/setup-uv@v5 - run: uvx ruff check --output-format=github . diff --git a/.github/workflows/sphinx.yml b/.github/workflows/sphinx.yml index e3e2ce81a95d..d02435d98028 100644 --- a/.github/workflows/sphinx.yml +++ b/.github/workflows/sphinx.yml @@ -26,7 +26,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: astral-sh/setup-uv@v4 + - uses: astral-sh/setup-uv@v5 - uses: actions/setup-python@v5 with: python-version: 3.13 From 5bef6ac9296c20250db7d494bbbc9c8bf4bfccdc Mon Sep 17 00:00:00 2001 From: Scarfinos <158184182+Scarfinos@users.noreply.github.com> Date: Fri, 27 Dec 2024 23:22:36 +0100 Subject: [PATCH 160/260] Improve coverage special_numbers (#12414) * Improve coverage bell_numbers * improve more function * Update hamming_numbers.py --------- Co-authored-by: Maxim Smolskiy --- maths/special_numbers/bell_numbers.py | 4 ++++ maths/special_numbers/hamming_numbers.py | 6 +++++- maths/special_numbers/harshad_numbers.py | 8 ++++++++ 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/maths/special_numbers/bell_numbers.py b/maths/special_numbers/bell_numbers.py index 5d99334d7add..d573e7a3962d 100644 --- a/maths/special_numbers/bell_numbers.py +++ b/maths/special_numbers/bell_numbers.py @@ -21,6 +21,10 @@ def bell_numbers(max_set_length: int) -> list[int]: list: A list of Bell numbers for sets of lengths from 0 to max_set_length. Examples: + >>> bell_numbers(-2) + Traceback (most recent call last): + ... + ValueError: max_set_length must be non-negative >>> bell_numbers(0) [1] >>> bell_numbers(1) diff --git a/maths/special_numbers/hamming_numbers.py b/maths/special_numbers/hamming_numbers.py index 4575119c8a95..a473cc93883b 100644 --- a/maths/special_numbers/hamming_numbers.py +++ b/maths/special_numbers/hamming_numbers.py @@ -13,6 +13,10 @@ def hamming(n_element: int) -> list: :param n_element: The number of elements on the list :return: The nth element of the list + >>> hamming(-5) + Traceback (most recent call last): + ... 
+ ValueError: n_element should be a positive number >>> hamming(5) [1, 2, 3, 4, 5] >>> hamming(10) @@ -22,7 +26,7 @@ def hamming(n_element: int) -> list: """ n_element = int(n_element) if n_element < 1: - my_error = ValueError("a should be a positive number") + my_error = ValueError("n_element should be a positive number") raise my_error hamming_list = [1] diff --git a/maths/special_numbers/harshad_numbers.py b/maths/special_numbers/harshad_numbers.py index 61667adfa127..417120bd840e 100644 --- a/maths/special_numbers/harshad_numbers.py +++ b/maths/special_numbers/harshad_numbers.py @@ -11,6 +11,8 @@ def int_to_base(number: int, base: int) -> str: Where 'base' ranges from 2 to 36. Examples: + >>> int_to_base(0, 21) + '0' >>> int_to_base(23, 2) '10111' >>> int_to_base(58, 5) @@ -26,6 +28,10 @@ def int_to_base(number: int, base: int) -> str: Traceback (most recent call last): ... ValueError: 'base' must be between 2 and 36 inclusive + >>> int_to_base(-99, 16) + Traceback (most recent call last): + ... + ValueError: number must be a positive integer """ if base < 2 or base > 36: @@ -101,6 +107,8 @@ def harshad_numbers_in_base(limit: int, base: int) -> list[str]: Traceback (most recent call last): ... 
ValueError: 'base' must be between 2 and 36 inclusive + >>> harshad_numbers_in_base(-12, 6) + [] """ if base < 2 or base > 36: From 8bbe8caa256882ef2ebdbb3274e6f99f804716bd Mon Sep 17 00:00:00 2001 From: Scarfinos <158184182+Scarfinos@users.noreply.github.com> Date: Fri, 27 Dec 2024 23:40:35 +0100 Subject: [PATCH 161/260] Improve test coverage for matrix exponentiation (#12388) * #9943 : Adding coverage test for basic_graphs.py * #9943 : Adding coverage test for basic_graphs.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Solve problem of line too long * Improving coverage for matrix_exponentiation.py * fix more than one file * Update matrix_exponentiation.py * Update matrix_exponentiation.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- maths/matrix_exponentiation.py | 36 ++++++++++++++++++++++++++++++---- 1 file changed, 32 insertions(+), 4 deletions(-) diff --git a/maths/matrix_exponentiation.py b/maths/matrix_exponentiation.py index 7c37151c87ca..7cdac9d34674 100644 --- a/maths/matrix_exponentiation.py +++ b/maths/matrix_exponentiation.py @@ -39,6 +39,21 @@ def modular_exponentiation(a, b): def fibonacci_with_matrix_exponentiation(n, f1, f2): + """ + Returns the nth number of the Fibonacci sequence that + starts with f1 and f2 + Uses the matrix exponentiation + >>> fibonacci_with_matrix_exponentiation(1, 5, 6) + 5 + >>> fibonacci_with_matrix_exponentiation(2, 10, 11) + 11 + >>> fibonacci_with_matrix_exponentiation(13, 0, 1) + 144 + >>> fibonacci_with_matrix_exponentiation(10, 5, 9) + 411 + >>> fibonacci_with_matrix_exponentiation(9, 2, 3) + 89 + """ # Trivial Cases if n == 1: return f1 @@ -50,21 +65,34 @@ def fibonacci_with_matrix_exponentiation(n, f1, f2): def simple_fibonacci(n, f1, f2): + """ + Returns the nth number of the Fibonacci sequence that + starts with f1 and f2 + Uses the definition + >>> 
simple_fibonacci(1, 5, 6) + 5 + >>> simple_fibonacci(2, 10, 11) + 11 + >>> simple_fibonacci(13, 0, 1) + 144 + >>> simple_fibonacci(10, 5, 9) + 411 + >>> simple_fibonacci(9, 2, 3) + 89 + """ # Trivial Cases if n == 1: return f1 elif n == 2: return f2 - fn_1 = f1 - fn_2 = f2 n -= 2 while n > 0: - fn_1, fn_2 = fn_1 + fn_2, fn_1 + f2, f1 = f1 + f2, f2 n -= 1 - return fn_1 + return f2 def matrix_exponentiation_time(): From 76471819bd5b9df6fe5fde4c763396412ce45edc Mon Sep 17 00:00:00 2001 From: Scarfinos <158184182+Scarfinos@users.noreply.github.com> Date: Fri, 27 Dec 2024 23:52:40 +0100 Subject: [PATCH 162/260] Improve test coverage for armstrong numbers (#12327) --- maths/special_numbers/armstrong_numbers.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/maths/special_numbers/armstrong_numbers.py b/maths/special_numbers/armstrong_numbers.py index b2b4010a8f5b..a3cb69b814de 100644 --- a/maths/special_numbers/armstrong_numbers.py +++ b/maths/special_numbers/armstrong_numbers.py @@ -43,9 +43,9 @@ def armstrong_number(n: int) -> bool: def pluperfect_number(n: int) -> bool: """Return True if n is a pluperfect number or False if it is not - >>> all(armstrong_number(n) for n in PASSING) + >>> all(pluperfect_number(n) for n in PASSING) True - >>> any(armstrong_number(n) for n in FAILING) + >>> any(pluperfect_number(n) for n in FAILING) False """ if not isinstance(n, int) or n < 1: @@ -70,9 +70,9 @@ def pluperfect_number(n: int) -> bool: def narcissistic_number(n: int) -> bool: """Return True if n is a narcissistic number or False if it is not. 
- >>> all(armstrong_number(n) for n in PASSING) + >>> all(narcissistic_number(n) for n in PASSING) True - >>> any(armstrong_number(n) for n in FAILING) + >>> any(narcissistic_number(n) for n in FAILING) False """ if not isinstance(n, int) or n < 1: From 2ae9534fc68b1901d8056331aa2a4dbedc9d947e Mon Sep 17 00:00:00 2001 From: Anamaria Miranda Date: Sat, 28 Dec 2024 00:03:13 +0100 Subject: [PATCH 163/260] Added test to linear regression (#12353) --- machine_learning/linear_regression.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/machine_learning/linear_regression.py b/machine_learning/linear_regression.py index 839a5366d1cc..1d11e5a9cc2b 100644 --- a/machine_learning/linear_regression.py +++ b/machine_learning/linear_regression.py @@ -41,6 +41,14 @@ def run_steep_gradient_descent(data_x, data_y, len_data, alpha, theta): :param theta : Feature vector (weight's for our model) ;param return : Updated Feature's, using curr_features - alpha_ * gradient(w.r.t. feature) + >>> import numpy as np + >>> data_x = np.array([[1, 2], [3, 4]]) + >>> data_y = np.array([5, 6]) + >>> len_data = len(data_x) + >>> alpha = 0.01 + >>> theta = np.array([0.1, 0.2]) + >>> run_steep_gradient_descent(data_x, data_y, len_data, alpha, theta) + array([0.196, 0.343]) """ n = len_data @@ -58,6 +66,12 @@ def sum_of_square_error(data_x, data_y, len_data, theta): :param len_data : len of the dataset :param theta : contains the feature vector :return : sum of square error computed from given feature's + + Example: + >>> vc_x = np.array([[1.1], [2.1], [3.1]]) + >>> vc_y = np.array([1.2, 2.2, 3.2]) + >>> round(sum_of_square_error(vc_x, vc_y, 3, np.array([1])),3) + np.float64(0.005) """ prod = np.dot(theta, data_x.transpose()) prod -= data_y.transpose() @@ -93,6 +107,11 @@ def mean_absolute_error(predicted_y, original_y): :param predicted_y : contains the output of prediction (result vector) :param original_y : contains values of expected outcome :return : mean absolute error 
computed from given feature's + + >>> predicted_y = [3, -0.5, 2, 7] + >>> original_y = [2.5, 0.0, 2, 8] + >>> mean_absolute_error(predicted_y, original_y) + 0.5 """ total = sum(abs(y - predicted_y[i]) for i, y in enumerate(original_y)) return total / len(original_y) @@ -114,4 +133,7 @@ def main(): if __name__ == "__main__": + import doctest + + doctest.testmod() main() From 1652d05e9ee25d54eea5576976d537975dcad9bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julia=20Arag=C3=A3o?= <101305675+juliaaragao@users.noreply.github.com> Date: Sat, 28 Dec 2024 00:26:29 +0100 Subject: [PATCH 164/260] adding test to electronics/electric_power.py (#12387) * test electric_power * Update electric_power.py * Update electric_power.py * Update electric_power.py * Update electric_power.py --------- Co-authored-by: Julia Co-authored-by: Maxim Smolskiy --- electronics/electric_power.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/electronics/electric_power.py b/electronics/electric_power.py index 8b92e320ace3..8e3454e39c3f 100644 --- a/electronics/electric_power.py +++ b/electronics/electric_power.py @@ -23,20 +23,22 @@ def electric_power(voltage: float, current: float, power: float) -> tuple: >>> electric_power(voltage=2, current=4, power=2) Traceback (most recent call last): ... - ValueError: Only one argument must be 0 + ValueError: Exactly one argument must be 0 >>> electric_power(voltage=0, current=0, power=2) Traceback (most recent call last): ... - ValueError: Only one argument must be 0 + ValueError: Exactly one argument must be 0 >>> electric_power(voltage=0, current=2, power=-4) Traceback (most recent call last): ... 
ValueError: Power cannot be negative in any electrical/electronics system >>> electric_power(voltage=2.2, current=2.2, power=0) Result(name='power', value=4.84) + >>> electric_power(current=0, power=6, voltage=2) + Result(name='current', value=3.0) """ if (voltage, current, power).count(0) != 1: - raise ValueError("Only one argument must be 0") + raise ValueError("Exactly one argument must be 0") elif power < 0: raise ValueError( "Power cannot be negative in any electrical/electronics system" @@ -48,7 +50,7 @@ def electric_power(voltage: float, current: float, power: float) -> tuple: elif power == 0: return Result("power", float(round(abs(voltage * current), 2))) else: - raise ValueError("Exactly one argument must be 0") + raise AssertionError if __name__ == "__main__": From 929b7dc057cd56f90b260cd665fb67886bcadeea Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sat, 28 Dec 2024 11:43:25 +0300 Subject: [PATCH 165/260] Fix Gaussian elimination pivoting (#11393) * updating DIRECTORY.md * Fix Gaussian elimination pivoting * Fix review issues * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: MaximSmolskiy Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../src/gaussian_elimination_pivoting.py | 39 ++++++++----------- 1 file changed, 16 insertions(+), 23 deletions(-) diff --git a/linear_algebra/src/gaussian_elimination_pivoting.py b/linear_algebra/src/gaussian_elimination_pivoting.py index ecaacce19a31..efc1ddd64a2e 100644 --- a/linear_algebra/src/gaussian_elimination_pivoting.py +++ b/linear_algebra/src/gaussian_elimination_pivoting.py @@ -22,40 +22,33 @@ def solve_linear_system(matrix: np.ndarray) -> np.ndarray: >>> solution = solve_linear_system(np.column_stack((A, B))) >>> np.allclose(solution, np.array([2., 3., -1.])) True - >>> solve_linear_system(np.array([[0, 0], [0, 0]], dtype=float)) - array([nan, nan]) + >>> 
solve_linear_system(np.array([[0, 0, 0]], dtype=float)) + Traceback (most recent call last): + ... + ValueError: Matrix is not square + >>> solve_linear_system(np.array([[0, 0, 0], [0, 0, 0]], dtype=float)) + Traceback (most recent call last): + ... + ValueError: Matrix is singular """ ab = np.copy(matrix) num_of_rows = ab.shape[0] num_of_columns = ab.shape[1] - 1 x_lst: list[float] = [] - # Lead element search - for column_num in range(num_of_rows): - for i in range(column_num, num_of_columns): - if abs(ab[i][column_num]) > abs(ab[column_num][column_num]): - ab[[column_num, i]] = ab[[i, column_num]] - if ab[column_num, column_num] == 0.0: - raise ValueError("Matrix is not correct") - else: - pass - if column_num != 0: - for i in range(column_num, num_of_rows): - ab[i, :] -= ( - ab[i, column_num - 1] - / ab[column_num - 1, column_num - 1] - * ab[column_num - 1, :] - ) + if num_of_rows != num_of_columns: + raise ValueError("Matrix is not square") - # Upper triangular matrix for column_num in range(num_of_rows): + # Lead element search for i in range(column_num, num_of_columns): if abs(ab[i][column_num]) > abs(ab[column_num][column_num]): ab[[column_num, i]] = ab[[i, column_num]] - if ab[column_num, column_num] == 0.0: - raise ValueError("Matrix is not correct") - else: - pass + + # Upper triangular matrix + if abs(ab[column_num, column_num]) < 1e-8: + raise ValueError("Matrix is singular") + if column_num != 0: for i in range(column_num, num_of_rows): ab[i, :] -= ( From b5c8fbf2e8254b53056b741aacce3842736ba177 Mon Sep 17 00:00:00 2001 From: Joy Khandelwal <116290658+joy-programs@users.noreply.github.com> Date: Sat, 28 Dec 2024 14:21:28 +0530 Subject: [PATCH 166/260] Add additional doctests, fix grammatical errors for maths/perfect_number.py (#12477) * Add additional doctests for the perfect number algorithm and fix grammatical errors. 
Contributes to #9943 * Added newline at End of file * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- maths/perfect_number.py | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/maths/perfect_number.py b/maths/perfect_number.py index df6b6e3d91d8..52c816cc7895 100644 --- a/maths/perfect_number.py +++ b/maths/perfect_number.py @@ -46,17 +46,27 @@ def perfect(number: int) -> bool: False >>> perfect(-1) False + >>> perfect(33550336) # Large perfect number + True + >>> perfect(33550337) # Just above a large perfect number + False + >>> perfect(1) # Edge case: 1 is not a perfect number + False + >>> perfect("123") # String representation of a number + Traceback (most recent call last): + ... + ValueError: number must be an integer >>> perfect(12.34) Traceback (most recent call last): ... - ValueError: number must an integer + ValueError: number must be an integer >>> perfect("Hello") Traceback (most recent call last): ... 
- ValueError: number must an integer + ValueError: number must be an integer """ if not isinstance(number, int): - raise ValueError("number must an integer") + raise ValueError("number must be an integer") if number <= 0: return False return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number @@ -70,8 +80,7 @@ def perfect(number: int) -> bool: try: number = int(input("Enter a positive integer: ").strip()) except ValueError: - msg = "number must an integer" - print(msg) + msg = "number must be an integer" raise ValueError(msg) print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.") From d496d5611fe266b7123b4a1f3efb4bcb7c2f38f2 Mon Sep 17 00:00:00 2001 From: Shi Entong <144505619+setbit123@users.noreply.github.com> Date: Sat, 28 Dec 2024 17:22:07 +0800 Subject: [PATCH 167/260] Remove inaccessible URL in computer_vision/README.md (#12383) Remove inaccessible URL. --- computer_vision/README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/computer_vision/README.md b/computer_vision/README.md index 1657128fd25e..61462567b662 100644 --- a/computer_vision/README.md +++ b/computer_vision/README.md @@ -8,4 +8,3 @@ Image processing and computer vision are a little different from each other. Ima While computer vision comes from modelling image processing using the techniques of machine learning, computer vision applies machine learning to recognize patterns for interpretation of images (much like the process of visual reasoning of human vision). 
* -* From 1909f2272f11ebe7626d2dee78c11a91134e39e7 Mon Sep 17 00:00:00 2001 From: jperezr <122382210+MRJPEREZR@users.noreply.github.com> Date: Sat, 28 Dec 2024 11:03:24 +0100 Subject: [PATCH 168/260] adding doctests to maths/trapezoidal_rule.py (#12193) * adding doctests to trapezoidal_rule.py * adding algorithm delta-star transformation * updating DIRECTORY.md * delete file star_delta_transform.py * updating DIRECTORY.md * modified: ../DIRECTORY.md --------- Co-authored-by: MRJPEREZR --- maths/trapezoidal_rule.py | 48 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 45 insertions(+), 3 deletions(-) diff --git a/maths/trapezoidal_rule.py b/maths/trapezoidal_rule.py index 9a4ddc8af66b..0186629ee378 100644 --- a/maths/trapezoidal_rule.py +++ b/maths/trapezoidal_rule.py @@ -5,13 +5,25 @@ method 1: "extended trapezoidal rule" +int(f) = dx/2 * (f1 + 2f2 + ... + fn) """ def method_1(boundary, steps): - # "extended trapezoidal rule" - # int(f) = dx/2 * (f1 + 2f2 + ... + fn) + """ + Apply the extended trapezoidal rule to approximate the integral of function f(x) + over the interval defined by 'boundary' with the number of 'steps'. + + Args: + boundary (list of floats): A list containing the start and end values [a, b]. + steps (int): The number of steps or subintervals. + Returns: + float: Approximation of the integral of f(x) over [a, b]. + Examples: + >>> method_1([0, 1], 10) + 0.3349999999999999 + """ h = (boundary[1] - boundary[0]) / steps a = boundary[0] b = boundary[1] @@ -26,13 +38,40 @@ def method_1(boundary, steps): def make_points(a, b, h): + """ + Generates points between 'a' and 'b' with step size 'h', excluding the end points. 
+ Args: + a (float): Start value + b (float): End value + h (float): Step size + Examples: + >>> list(make_points(0, 10, 2.5)) + [2.5, 5.0, 7.5] + + >>> list(make_points(0, 10, 2)) + [2, 4, 6, 8] + + >>> list(make_points(1, 21, 5)) + [6, 11, 16] + + >>> list(make_points(1, 5, 2)) + [3] + + >>> list(make_points(1, 4, 3)) + [] + """ x = a + h - while x < (b - h): + while x <= (b - h): yield x x = x + h def f(x): # enter your function here + """ + Example: + >>> f(2) + 4 + """ y = (x - 0) * (x - 0) return y @@ -47,4 +86,7 @@ def main(): if __name__ == "__main__": + import doctest + + doctest.testmod() main() From 2b58ab040295fbbf2e463ba8cd77ad935d942968 Mon Sep 17 00:00:00 2001 From: Andrwaa <165920381+Andrwaa@users.noreply.github.com> Date: Sat, 28 Dec 2024 12:17:48 +0100 Subject: [PATCH 169/260] compare-method added to Vector class in lib.py (#12448) * compare-method added to Vector class in lib.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated lib.py with suggestions * Updated lib.py with suggestions * Updated lib.py with __eq__ method --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- linear_algebra/src/lib.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/linear_algebra/src/lib.py b/linear_algebra/src/lib.py index 5af6c62e3ad4..0d6a348475cd 100644 --- a/linear_algebra/src/lib.py +++ b/linear_algebra/src/lib.py @@ -46,7 +46,6 @@ class Vector: change_component(pos: int, value: float): changes specified component euclidean_length(): returns the euclidean length of the vector angle(other: Vector, deg: bool): returns the angle between two vectors - TODO: compare-operator """ def __init__(self, components: Collection[float] | None = None) -> None: @@ -96,6 +95,16 @@ def __sub__(self, other: Vector) -> Vector: else: # error case raise Exception("must have the same size") + def __eq__(self, other: object) -> bool: + """ + 
performs the comparison between two vectors + """ + if not isinstance(other, Vector): + return NotImplemented + if len(self) != len(other): + return False + return all(self.component(i) == other.component(i) for i in range(len(self))) + @overload def __mul__(self, other: float) -> Vector: ... From 2d68bb50e5f12532b5a0d616305c4f805d2b8ff9 Mon Sep 17 00:00:00 2001 From: KICH Yassine Date: Sun, 29 Dec 2024 12:56:36 +0100 Subject: [PATCH 170/260] Fix split function to handle trailing delimiters correctly (#12423) * Fix split function to handle trailing delimiters correctly * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update split.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- strings/split.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/strings/split.py b/strings/split.py index b62b86d2401f..ed194ec69c2f 100644 --- a/strings/split.py +++ b/strings/split.py @@ -14,6 +14,9 @@ def split(string: str, separator: str = " ") -> list: >>> split("12:43:39",separator = ":") ['12', '43', '39'] + + >>> split(";abbb;;c;", separator=';') + ['', 'abbb', '', 'c', ''] """ split_words = [] @@ -23,7 +26,7 @@ def split(string: str, separator: str = " ") -> list: if char == separator: split_words.append(string[last_index:index]) last_index = index + 1 - elif index + 1 == len(string): + if index + 1 == len(string): split_words.append(string[last_index : index + 1]) return split_words From 972a5c1e432e0a3fa9e990422318269219192a53 Mon Sep 17 00:00:00 2001 From: RajdeepBakolia2004 <144157867+RajdeepBakolia2004@users.noreply.github.com> Date: Sun, 29 Dec 2024 19:05:33 +0530 Subject: [PATCH 171/260] fixed the issue in strings/join.py (#12434) * fixed the issue in strings/join.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update join.py * Update join.py --------- 
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- strings/join.py | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/strings/join.py b/strings/join.py index 5c02f65a20ce..cdcc3a1377f4 100644 --- a/strings/join.py +++ b/strings/join.py @@ -24,6 +24,8 @@ def join(separator: str, separated: list[str]) -> str: 'a' >>> join(" ", ["You", "are", "amazing!"]) 'You are amazing!' + >>> join(",", ["", "", ""]) + ',,' This example should raise an exception for non-string elements: @@ -37,15 +39,33 @@ def join(separator: str, separated: list[str]) -> str: 'apple-banana-cherry' """ - joined = "" + # Check that all elements are strings for word_or_phrase in separated: + # If the element is not a string, raise an exception if not isinstance(word_or_phrase, str): raise Exception("join() accepts only strings") + + joined: str = "" + """ + The last element of the list is not followed by the separator. + So, we need to iterate through the list and join each element + with the separator except the last element. + """ + last_index: int = len(separated) - 1 + """ + Iterate through the list and join each element with the separator. + Except the last element, all other elements are followed by the separator. + """ + for word_or_phrase in separated[:last_index]: + # join the element with the separator. joined += word_or_phrase + separator - # Remove the trailing separator - # by stripping it from the result - return joined.strip(separator) + # If the list is not empty, join the last element. + if separated != []: + joined += separated[last_index] + + # Return the joined string. 
+ return joined if __name__ == "__main__": From d9092d88dd8b47323d14f87025195e0c76fe7889 Mon Sep 17 00:00:00 2001 From: Sankalpa Sarkar <137193167+sanks011@users.noreply.github.com> Date: Sun, 29 Dec 2024 19:23:31 +0530 Subject: [PATCH 172/260] fixes requirements error (#12438) * fixes join.py action * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fixes split.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed two requirements * Custom Implementation of join.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated join.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update split.py * Update join.py * Update join.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 4cc83f44987d..b104505e01bc 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ beautifulsoup4 -fake_useragent +fake-useragent imageio keras lxml @@ -11,7 +11,7 @@ pillow requests rich scikit-learn -sphinx_pyproject +sphinx-pyproject statsmodels sympy tweepy From bfc804a41c6fb7f3c2e371b15d50ba4830bab3a7 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sun, 29 Dec 2024 18:41:28 +0300 Subject: [PATCH 173/260] Fix sphinx/build_docs warnings for physics/newtons_second_law_of_motion (#12480) * Fix sphinx/build_docs warnings for physics/newtons_second_law_of_motion * Fix * Fix * Fix review issue --- physics/newtons_second_law_of_motion.py | 83 ++++++++++++++----------- 1 file changed, 47 insertions(+), 36 deletions(-) diff --git a/physics/newtons_second_law_of_motion.py b/physics/newtons_second_law_of_motion.py index 53fab6ce78b9..4149e2494f31 
100644 --- a/physics/newtons_second_law_of_motion.py +++ b/physics/newtons_second_law_of_motion.py @@ -1,18 +1,22 @@ -""" -Description : -Newton's second law of motion pertains to the behavior of objects for which -all existing forces are not balanced. -The second law states that the acceleration of an object is dependent upon two variables -- the net force acting upon the object and the mass of the object. -The acceleration of an object depends directly -upon the net force acting upon the object, -and inversely upon the mass of the object. -As the force acting upon an object is increased, -the acceleration of the object is increased. -As the mass of an object is increased, the acceleration of the object is decreased. +r""" +Description: + Newton's second law of motion pertains to the behavior of objects for which + all existing forces are not balanced. + The second law states that the acceleration of an object is dependent upon + two variables - the net force acting upon the object and the mass of the object. + The acceleration of an object depends directly + upon the net force acting upon the object, + and inversely upon the mass of the object. + As the force acting upon an object is increased, + the acceleration of the object is increased. + As the mass of an object is increased, the acceleration of the object is decreased. 
+ Source: https://www.physicsclassroom.com/class/newtlaws/Lesson-3/Newton-s-Second-Law -Formulation: Fnet = m • a -Diagrammatic Explanation: + +Formulation: F_net = m • a + +Diagrammatic Explanation:: + Forces are unbalanced | | @@ -26,35 +30,42 @@ / \ / \ / \ - __________________ ____ ________________ - |The acceleration | |The acceleration | - |depends directly | |depends inversely | - |on the net Force | |upon the object's | - |_________________| |mass_______________| -Units: -1 Newton = 1 kg X meters / (seconds^2) + __________________ ____________________ + | The acceleration | | The acceleration | + | depends directly | | depends inversely | + | on the net force | | upon the object's | + | | | mass | + |__________________| |____________________| + +Units: 1 Newton = 1 kg • meters/seconds^2 + How to use? -Inputs: - ___________________________________________________ - |Name | Units | Type | - |-------------|-------------------------|-----------| - |mass | (in kgs) | float | - |-------------|-------------------------|-----------| - |acceleration | (in meters/(seconds^2)) | float | - |_____________|_________________________|___________| - -Output: - ___________________________________________________ - |Name | Units | Type | - |-------------|-------------------------|-----------| - |force | (in Newtons) | float | - |_____________|_________________________|___________| + +Inputs:: + + ______________ _____________________ ___________ + | Name | Units | Type | + |--------------|---------------------|-----------| + | mass | in kgs | float | + |--------------|---------------------|-----------| + | acceleration | in meters/seconds^2 | float | + |______________|_____________________|___________| + +Output:: + + ______________ _______________________ ___________ + | Name | Units | Type | + |--------------|-----------------------|-----------| + | force | in Newtons | float | + |______________|_______________________|___________| """ def newtons_second_law_of_motion(mass: 
float, acceleration: float) -> float: """ + Calculates force from `mass` and `acceleration` + >>> newtons_second_law_of_motion(10, 10) 100 >>> newtons_second_law_of_motion(2.0, 1) From c93288389d220297f972137293f4565c62131516 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sun, 29 Dec 2024 18:16:45 +0100 Subject: [PATCH 174/260] [pre-commit.ci] pre-commit autoupdate (#12466) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.8.3 → v0.8.4](https://github.com/astral-sh/ruff-pre-commit/compare/v0.8.3...v0.8.4) - [github.com/pre-commit/mirrors-mypy: v1.13.0 → v1.14.0](https://github.com/pre-commit/mirrors-mypy/compare/v1.13.0...v1.14.0) * Update convert_number_to_words.py * Update convert_number_to_words.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 4 ++-- conversions/convert_number_to_words.py | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0c8108ac55be..71ac72c29b5f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.8.3 + rev: v0.8.4 hooks: - id: ruff - id: ruff-format @@ -47,7 +47,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.13.0 + rev: v1.14.0 hooks: - id: mypy args: diff --git a/conversions/convert_number_to_words.py b/conversions/convert_number_to_words.py index dbab44c72e1f..6aa43738b9fe 100644 --- a/conversions/convert_number_to_words.py +++ b/conversions/convert_number_to_words.py @@ -1,5 +1,5 @@ from enum import Enum -from typing import ClassVar, Literal +from typing import Literal class 
NumberingSystem(Enum): @@ -54,7 +54,7 @@ def max_value(cls, system: str) -> int: class NumberWords(Enum): - ONES: ClassVar[dict[int, str]] = { + ONES = { # noqa: RUF012 0: "", 1: "one", 2: "two", @@ -67,7 +67,7 @@ class NumberWords(Enum): 9: "nine", } - TEENS: ClassVar[dict[int, str]] = { + TEENS = { # noqa: RUF012 0: "ten", 1: "eleven", 2: "twelve", @@ -80,7 +80,7 @@ class NumberWords(Enum): 9: "nineteen", } - TENS: ClassVar[dict[int, str]] = { + TENS = { # noqa: RUF012 2: "twenty", 3: "thirty", 4: "forty", From bfb0447efb73dd049c6a56331cea36cb1345686b Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sun, 29 Dec 2024 20:29:48 +0300 Subject: [PATCH 175/260] Fix sphinx/build_docs warnings for maths/zellers_congruence (#12481) * Fix sphinx/build_docs warnings for maths/zellers_congruence * Fix --- maths/zellers_congruence.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/maths/zellers_congruence.py b/maths/zellers_congruence.py index 483fb000f86b..b958ed3b8659 100644 --- a/maths/zellers_congruence.py +++ b/maths/zellers_congruence.py @@ -4,13 +4,14 @@ def zeller(date_input: str) -> str: """ - Zellers Congruence Algorithm - Find the day of the week for nearly any Gregorian or Julian calendar date + | Zellers Congruence Algorithm + | Find the day of the week for nearly any Gregorian or Julian calendar date >>> zeller('01-31-2010') 'Your date 01-31-2010, is a Sunday!' - Validate out of range month + Validate out of range month: + >>> zeller('13-31-2010') Traceback (most recent call last): ... @@ -21,6 +22,7 @@ def zeller(date_input: str) -> str: ValueError: invalid literal for int() with base 10: '.2' Validate out of range date: + >>> zeller('01-33-2010') Traceback (most recent call last): ... @@ -31,30 +33,35 @@ def zeller(date_input: str) -> str: ValueError: invalid literal for int() with base 10: '.4' Validate second separator: + >>> zeller('01-31*2010') Traceback (most recent call last): ... 
ValueError: Date separator must be '-' or '/' Validate first separator: + >>> zeller('01^31-2010') Traceback (most recent call last): ... ValueError: Date separator must be '-' or '/' Validate out of range year: + >>> zeller('01-31-8999') Traceback (most recent call last): ... ValueError: Year out of range. There has to be some sort of limit...right? Test null input: + >>> zeller() Traceback (most recent call last): ... TypeError: zeller() missing 1 required positional argument: 'date_input' - Test length of date_input: + Test length of `date_input`: + >>> zeller('') Traceback (most recent call last): ... From ce036db2131626b86b94ab87854c82a9bc6c3d0e Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sun, 29 Dec 2024 23:01:15 +0300 Subject: [PATCH 176/260] Fix sphinx/build_docs warnings for physics/speeds_of_gas_molecules (#12471) * Fix sphinx/build_docs warnings for physics/speeds_of_gas_molecules * Fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix * Fix review issue * Fix * Fix * Fix --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- physics/speeds_of_gas_molecules.py | 36 ++++++++++++++++-------------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/physics/speeds_of_gas_molecules.py b/physics/speeds_of_gas_molecules.py index a50d1c0f6d76..42f90a9fd6f3 100644 --- a/physics/speeds_of_gas_molecules.py +++ b/physics/speeds_of_gas_molecules.py @@ -4,43 +4,43 @@ distribution is a probability distribution that describes the distribution of speeds of particles in an ideal gas. 
-The distribution is given by the following equation: +The distribution is given by the following equation:: ------------------------------------------------- | f(v) = (M/2πRT)^(3/2) * 4πv^2 * e^(-Mv^2/2RT) | ------------------------------------------------- where: - f(v) is the fraction of molecules with a speed v - M is the molar mass of the gas in kg/mol - R is the gas constant - T is the absolute temperature + * ``f(v)`` is the fraction of molecules with a speed ``v`` + * ``M`` is the molar mass of the gas in kg/mol + * ``R`` is the gas constant + * ``T`` is the absolute temperature More information about the Maxwell-Boltzmann distribution can be found here: https://en.wikipedia.org/wiki/Maxwell%E2%80%93Boltzmann_distribution The average speed can be calculated by integrating the Maxwell-Boltzmann distribution -from 0 to infinity and dividing by the total number of molecules. The result is: +from 0 to infinity and dividing by the total number of molecules. The result is:: - --------------------- - | vavg = √(8RT/πM) | - --------------------- + ---------------------- + | v_avg = √(8RT/πM) | + ---------------------- The most probable speed is the speed at which the Maxwell-Boltzmann distribution is at its maximum. This can be found by differentiating the Maxwell-Boltzmann -distribution with respect to v and setting the result equal to zero. The result is: +distribution with respect to ``v`` and setting the result equal to zero. The result is:: - --------------------- - | vmp = √(2RT/M) | - --------------------- + ---------------------- + | v_mp = √(2RT/M) | + ---------------------- The root-mean-square speed is another measure of the average speed of the molecules in a gas. It is calculated by taking the square root -of the average of the squares of the speeds of the molecules. The result is: +of the average of the squares of the speeds of the molecules. 
The result is:: - --------------------- - | vrms = √(3RT/M) | - --------------------- + ---------------------- + | v_rms = √(3RT/M) | + ---------------------- Here we have defined functions to calculate the average and most probable speeds of molecules in a gas given the @@ -57,6 +57,7 @@ def avg_speed_of_molecule(temperature: float, molar_mass: float) -> float: and returns the average speed of a molecule in the gas (in m/s). Examples: + >>> avg_speed_of_molecule(273, 0.028) # nitrogen at 273 K 454.3488755020387 >>> avg_speed_of_molecule(300, 0.032) # oxygen at 300 K @@ -84,6 +85,7 @@ def mps_speed_of_molecule(temperature: float, molar_mass: float) -> float: and returns the most probable speed of a molecule in the gas (in m/s). Examples: + >>> mps_speed_of_molecule(273, 0.028) # nitrogen at 273 K 402.65620701908966 >>> mps_speed_of_molecule(300, 0.032) # oxygen at 300 K From 3622e940c9db74ebac06a5b12f83fd638d7c5511 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sun, 29 Dec 2024 23:31:53 +0300 Subject: [PATCH 177/260] Fix sphinx/build_docs warnings for other (#12482) * Fix sphinx/build_docs warnings for other * Fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- other/bankers_algorithm.py | 16 +++-- other/davis_putnam_logemann_loveland.py | 94 ++++++++++++++----------- other/scoring_algorithm.py | 30 ++++---- 3 files changed, 77 insertions(+), 63 deletions(-) diff --git a/other/bankers_algorithm.py b/other/bankers_algorithm.py index d4254f479a4f..b1da851fc0f3 100644 --- a/other/bankers_algorithm.py +++ b/other/bankers_algorithm.py @@ -10,9 +10,10 @@ predetermined maximum possible amounts of all resources, and then makes a "s-state" check to test for possible deadlock conditions for all other pending activities, before deciding whether allocation should be allowed to continue. 
-[Source] Wikipedia -[Credit] Rosetta Code C implementation helped very much. - (https://rosettacode.org/wiki/Banker%27s_algorithm) + +| [Source] Wikipedia +| [Credit] Rosetta Code C implementation helped very much. +| (https://rosettacode.org/wiki/Banker%27s_algorithm) """ from __future__ import annotations @@ -75,7 +76,7 @@ def __available_resources(self) -> list[int]: def __need(self) -> list[list[int]]: """ Implement safety checker that calculates the needs by ensuring that - max_claim[i][j] - alloc_table[i][j] <= avail[j] + ``max_claim[i][j] - alloc_table[i][j] <= avail[j]`` """ return [ list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource)) @@ -86,7 +87,9 @@ def __need_index_manager(self) -> dict[int, list[int]]: """ This function builds an index control dictionary to track original ids/indices of processes when altered during execution of method "main" - Return: {0: [a: int, b: int], 1: [c: int, d: int]} + + :Return: {0: [a: int, b: int], 1: [c: int, d: int]} + >>> index_control = BankersAlgorithm( ... test_claim_vector, test_allocated_res_table, test_maximum_claim_table ... )._BankersAlgorithm__need_index_manager() @@ -100,7 +103,8 @@ def __need_index_manager(self) -> dict[int, list[int]]: def main(self, **kwargs) -> None: """ Utilize various methods in this class to simulate the Banker's algorithm - Return: None + :Return: None + >>> BankersAlgorithm(test_claim_vector, test_allocated_res_table, ... test_maximum_claim_table).main(describe=True) Allocated Resource Table diff --git a/other/davis_putnam_logemann_loveland.py b/other/davis_putnam_logemann_loveland.py index 0f3100b1bc2e..e95bf371a817 100644 --- a/other/davis_putnam_logemann_loveland.py +++ b/other/davis_putnam_logemann_loveland.py @@ -17,13 +17,15 @@ class Clause: """ - A clause represented in Conjunctive Normal Form. - A clause is a set of literals, either complemented or otherwise. + | A clause represented in Conjunctive Normal Form. 
+ | A clause is a set of literals, either complemented or otherwise. + For example: - {A1, A2, A3'} is the clause (A1 v A2 v A3') - {A5', A2', A1} is the clause (A5' v A2' v A1) + * {A1, A2, A3'} is the clause (A1 v A2 v A3') + * {A5', A2', A1} is the clause (A5' v A2' v A1) Create model + >>> clause = Clause(["A1", "A2'", "A3"]) >>> clause.evaluate({"A1": True}) True @@ -39,6 +41,7 @@ def __init__(self, literals: list[str]) -> None: def __str__(self) -> str: """ To print a clause as in Conjunctive Normal Form. + >>> str(Clause(["A1", "A2'", "A3"])) "{A1 , A2' , A3}" """ @@ -47,6 +50,7 @@ def __str__(self) -> str: def __len__(self) -> int: """ To print a clause as in Conjunctive Normal Form. + >>> len(Clause([])) 0 >>> len(Clause(["A1", "A2'", "A3"])) @@ -72,11 +76,13 @@ def assign(self, model: dict[str, bool | None]) -> None: def evaluate(self, model: dict[str, bool | None]) -> bool | None: """ Evaluates the clause with the assignments in model. + This has the following steps: - 1. Return True if both a literal and its complement exist in the clause. - 2. Return True if a single literal has the assignment True. - 3. Return None(unable to complete evaluation) if a literal has no assignment. - 4. Compute disjunction of all values assigned in clause. + 1. Return ``True`` if both a literal and its complement exist in the clause. + 2. Return ``True`` if a single literal has the assignment ``True``. + 3. Return ``None`` (unable to complete evaluation) + if a literal has no assignment. + 4. Compute disjunction of all values assigned in clause. """ for literal in self.literals: symbol = literal.rstrip("'") if literal.endswith("'") else literal + "'" @@ -92,10 +98,10 @@ def evaluate(self, model: dict[str, bool | None]) -> bool | None: class Formula: """ - A formula represented in Conjunctive Normal Form. - A formula is a set of clauses. 
- For example, - {{A1, A2, A3'}, {A5', A2', A1}} is ((A1 v A2 v A3') and (A5' v A2' v A1)) + | A formula represented in Conjunctive Normal Form. + | A formula is a set of clauses. + | For example, + | {{A1, A2, A3'}, {A5', A2', A1}} is ((A1 v A2 v A3') and (A5' v A2' v A1)) """ def __init__(self, clauses: Iterable[Clause]) -> None: @@ -107,7 +113,8 @@ def __init__(self, clauses: Iterable[Clause]) -> None: def __str__(self) -> str: """ To print a formula as in Conjunctive Normal Form. - str(Formula([Clause(["A1", "A2'", "A3"]), Clause(["A5'", "A2'", "A1"])])) + + >>> str(Formula([Clause(["A1", "A2'", "A3"]), Clause(["A5'", "A2'", "A1"])])) "{{A1 , A2' , A3} , {A5' , A2' , A1}}" """ return "{" + " , ".join(str(clause) for clause in self.clauses) + "}" @@ -115,8 +122,8 @@ def __str__(self) -> str: def generate_clause() -> Clause: """ - Randomly generate a clause. - All literals have the name Ax, where x is an integer from 1 to 5. + | Randomly generate a clause. + | All literals have the name Ax, where x is an integer from ``1`` to ``5``. """ literals = [] no_of_literals = random.randint(1, 5) @@ -149,11 +156,12 @@ def generate_formula() -> Formula: def generate_parameters(formula: Formula) -> tuple[list[Clause], list[str]]: """ - Return the clauses and symbols from a formula. - A symbol is the uncomplemented form of a literal. + | Return the clauses and symbols from a formula. + | A symbol is the uncomplemented form of a literal. + For example, - Symbol of A3 is A3. - Symbol of A5' is A5. + * Symbol of A3 is A3. + * Symbol of A5' is A5. >>> formula = Formula([Clause(["A1", "A2'", "A3"]), Clause(["A5'", "A2'", "A1"])]) >>> clauses, symbols = generate_parameters(formula) @@ -177,21 +185,20 @@ def find_pure_symbols( clauses: list[Clause], symbols: list[str], model: dict[str, bool | None] ) -> tuple[list[str], dict[str, bool | None]]: """ - Return pure symbols and their values to satisfy clause. 
- Pure symbols are symbols in a formula that exist only - in one form, either complemented or otherwise. - For example, - { { A4 , A3 , A5' , A1 , A3' } , { A4 } , { A3 } } has - pure symbols A4, A5' and A1. + | Return pure symbols and their values to satisfy clause. + | Pure symbols are symbols in a formula that exist only in one form, + | either complemented or otherwise. + | For example, + | {{A4 , A3 , A5' , A1 , A3'} , {A4} , {A3}} has pure symbols A4, A5' and A1. + This has the following steps: - 1. Ignore clauses that have already evaluated to be True. - 2. Find symbols that occur only in one form in the rest of the clauses. - 3. Assign value True or False depending on whether the symbols occurs - in normal or complemented form respectively. + 1. Ignore clauses that have already evaluated to be ``True``. + 2. Find symbols that occur only in one form in the rest of the clauses. + 3. Assign value ``True`` or ``False`` depending on whether the symbols occurs + in normal or complemented form respectively. >>> formula = Formula([Clause(["A1", "A2'", "A3"]), Clause(["A5'", "A2'", "A1"])]) >>> clauses, symbols = generate_parameters(formula) - >>> pure_symbols, values = find_pure_symbols(clauses, symbols, {}) >>> pure_symbols ['A1', 'A2', 'A3', 'A5'] @@ -231,20 +238,21 @@ def find_unit_clauses( ) -> tuple[list[str], dict[str, bool | None]]: """ Returns the unit symbols and their values to satisfy clause. + Unit symbols are symbols in a formula that are: - - Either the only symbol in a clause - - Or all other literals in that clause have been assigned False + - Either the only symbol in a clause + - Or all other literals in that clause have been assigned ``False`` + This has the following steps: - 1. Find symbols that are the only occurrences in a clause. - 2. Find symbols in a clause where all other literals are assigned False. - 3. Assign True or False depending on whether the symbols occurs in - normal or complemented form respectively. + 1. 
Find symbols that are the only occurrences in a clause. + 2. Find symbols in a clause where all other literals are assigned ``False``. + 3. Assign ``True`` or ``False`` depending on whether the symbols occurs in + normal or complemented form respectively. >>> clause1 = Clause(["A4", "A3", "A5'", "A1", "A3'"]) >>> clause2 = Clause(["A4"]) >>> clause3 = Clause(["A3"]) >>> clauses, symbols = generate_parameters(Formula([clause1, clause2, clause3])) - >>> unit_clauses, values = find_unit_clauses(clauses, {}) >>> unit_clauses ['A4', 'A3'] @@ -278,16 +286,16 @@ def dpll_algorithm( clauses: list[Clause], symbols: list[str], model: dict[str, bool | None] ) -> tuple[bool | None, dict[str, bool | None] | None]: """ - Returns the model if the formula is satisfiable, else None + Returns the model if the formula is satisfiable, else ``None`` + This has the following steps: - 1. If every clause in clauses is True, return True. - 2. If some clause in clauses is False, return False. - 3. Find pure symbols. - 4. Find unit symbols. + 1. If every clause in clauses is ``True``, return ``True``. + 2. If some clause in clauses is ``False``, return ``False``. + 3. Find pure symbols. + 4. Find unit symbols. >>> formula = Formula([Clause(["A4", "A3", "A5'", "A1", "A3'"]), Clause(["A4"])]) >>> clauses, symbols = generate_parameters(formula) - >>> soln, model = dpll_algorithm(clauses, symbols, {}) >>> soln True diff --git a/other/scoring_algorithm.py b/other/scoring_algorithm.py index af04f432e433..0185d7a2e0c0 100644 --- a/other/scoring_algorithm.py +++ b/other/scoring_algorithm.py @@ -1,25 +1,26 @@ """ -developed by: markmelnic -original repo: https://github.com/markmelnic/Scoring-Algorithm +| developed by: markmelnic +| original repo: https://github.com/markmelnic/Scoring-Algorithm Analyse data using a range based percentual proximity algorithm and calculate the linear maximum likelihood estimation. 
The basic principle is that all values supplied will be broken -down to a range from 0 to 1 and each column's score will be added +down to a range from ``0`` to ``1`` and each column's score will be added up to get the total score. -========== Example for data of vehicles -price|mileage|registration_year -20k |60k |2012 -22k |50k |2011 -23k |90k |2015 -16k |210k |2010 +:: + + price|mileage|registration_year + 20k |60k |2012 + 22k |50k |2011 + 23k |90k |2015 + 16k |210k |2010 We want the vehicle with the lowest price, lowest mileage but newest registration year. Thus the weights for each column are as follows: -[0, 0, 1] +``[0, 0, 1]`` """ @@ -97,10 +98,11 @@ def procentual_proximity( source_data: list[list[float]], weights: list[int] ) -> list[list[float]]: """ - weights - int list - possible values - 0 / 1 - 0 if lower values have higher weight in the data set - 1 if higher values have higher weight in the data set + | `weights` - ``int`` list + | possible values - ``0`` / ``1`` + + * ``0`` if lower values have higher weight in the data set + * ``1`` if higher values have higher weight in the data set >>> procentual_proximity([[20, 60, 2012],[23, 90, 2015],[22, 50, 2011]], [0, 0, 1]) [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]] From 94b3777936101bcc592fc5ef143ac08ad49195e7 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 30 Dec 2024 00:35:34 +0300 Subject: [PATCH 178/260] Fix sphinx/build_docs warnings for linear_algebra (#12483) * Fix sphinx/build_docs warnings for linear_algebra/ * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- linear_algebra/gaussian_elimination.py | 28 +++++++++++++------ linear_algebra/lu_decomposition.py | 20 +++++++------ .../src/gaussian_elimination_pivoting.py | 7 +++-- linear_algebra/src/rank_of_matrix.py | 6 +++- 
linear_algebra/src/schur_complement.py | 13 +++++---- linear_algebra/src/transformations_2d.py | 14 ++++++---- 6 files changed, 54 insertions(+), 34 deletions(-) diff --git a/linear_algebra/gaussian_elimination.py b/linear_algebra/gaussian_elimination.py index 724773c0db98..6f4075b710fd 100644 --- a/linear_algebra/gaussian_elimination.py +++ b/linear_algebra/gaussian_elimination.py @@ -1,6 +1,6 @@ """ -Gaussian elimination method for solving a system of linear equations. -Gaussian elimination - https://en.wikipedia.org/wiki/Gaussian_elimination +| Gaussian elimination method for solving a system of linear equations. +| Gaussian elimination - https://en.wikipedia.org/wiki/Gaussian_elimination """ import numpy as np @@ -13,12 +13,17 @@ def retroactive_resolution( ) -> NDArray[float64]: """ This function performs a retroactive linear system resolution - for triangular matrix + for triangular matrix Examples: - 2x1 + 2x2 - 1x3 = 5 2x1 + 2x2 = -1 - 0x1 - 2x2 - 1x3 = -7 0x1 - 2x2 = -1 - 0x1 + 0x2 + 5x3 = 15 + 1. + * 2x1 + 2x2 - 1x3 = 5 + * 0x1 - 2x2 - 1x3 = -7 + * 0x1 + 0x2 + 5x3 = 15 + 2. + * 2x1 + 2x2 = -1 + * 0x1 - 2x2 = -1 + >>> gaussian_elimination([[2, 2, -1], [0, -2, -1], [0, 0, 5]], [[5], [-7], [15]]) array([[2.], [2.], @@ -45,9 +50,14 @@ def gaussian_elimination( This function performs Gaussian elimination method Examples: - 1x1 - 4x2 - 2x3 = -2 1x1 + 2x2 = 5 - 5x1 + 2x2 - 2x3 = -3 5x1 + 2x2 = 5 - 1x1 - 1x2 + 0x3 = 4 + 1. + * 1x1 - 4x2 - 2x3 = -2 + * 5x1 + 2x2 - 2x3 = -3 + * 1x1 - 1x2 + 0x3 = 4 + 2. 
+ * 1x1 + 2x2 = 5 + * 5x1 + 2x2 = 5 + >>> gaussian_elimination([[1, -4, -2], [5, 2, -2], [1, -1, 0]], [[-2], [-3], [4]]) array([[ 2.3 ], [-1.7 ], diff --git a/linear_algebra/lu_decomposition.py b/linear_algebra/lu_decomposition.py index 3620674835cd..3d89b53a48fb 100644 --- a/linear_algebra/lu_decomposition.py +++ b/linear_algebra/lu_decomposition.py @@ -2,13 +2,14 @@ Lower-upper (LU) decomposition factors a matrix as a product of a lower triangular matrix and an upper triangular matrix. A square matrix has an LU decomposition under the following conditions: + - If the matrix is invertible, then it has an LU decomposition if and only - if all of its leading principal minors are non-zero (see - https://en.wikipedia.org/wiki/Minor_(linear_algebra) for an explanation of - leading principal minors of a matrix). + if all of its leading principal minors are non-zero (see + https://en.wikipedia.org/wiki/Minor_(linear_algebra) for an explanation of + leading principal minors of a matrix). - If the matrix is singular (i.e., not invertible) and it has a rank of k - (i.e., it has k linearly independent columns), then it has an LU - decomposition if its first k leading principal minors are non-zero. + (i.e., it has k linearly independent columns), then it has an LU + decomposition if its first k leading principal minors are non-zero. This algorithm will simply attempt to perform LU decomposition on any square matrix and raise an error if no such decomposition exists. @@ -25,6 +26,7 @@ def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray """ Perform LU decomposition on a given matrix and raises an error if the matrix isn't square or if no such decomposition exists + >>> matrix = np.array([[2, -2, 1], [0, 1, 2], [5, 3, 1]]) >>> lower_mat, upper_mat = lower_upper_decomposition(matrix) >>> lower_mat @@ -45,7 +47,7 @@ def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray array([[ 4. , 3. ], [ 0. 
, -1.5]]) - # Matrix is not square + >>> # Matrix is not square >>> matrix = np.array([[2, -2, 1], [0, 1, 2]]) >>> lower_mat, upper_mat = lower_upper_decomposition(matrix) Traceback (most recent call last): @@ -54,14 +56,14 @@ def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray [[ 2 -2 1] [ 0 1 2]] - # Matrix is invertible, but its first leading principal minor is 0 + >>> # Matrix is invertible, but its first leading principal minor is 0 >>> matrix = np.array([[0, 1], [1, 0]]) >>> lower_mat, upper_mat = lower_upper_decomposition(matrix) Traceback (most recent call last): ... ArithmeticError: No LU decomposition exists - # Matrix is singular, but its first leading principal minor is 1 + >>> # Matrix is singular, but its first leading principal minor is 1 >>> matrix = np.array([[1, 0], [1, 0]]) >>> lower_mat, upper_mat = lower_upper_decomposition(matrix) >>> lower_mat @@ -71,7 +73,7 @@ def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray array([[1., 0.], [0., 0.]]) - # Matrix is singular, but its first leading principal minor is 0 + >>> # Matrix is singular, but its first leading principal minor is 0 >>> matrix = np.array([[0, 1], [0, 1]]) >>> lower_mat, upper_mat = lower_upper_decomposition(matrix) Traceback (most recent call last): diff --git a/linear_algebra/src/gaussian_elimination_pivoting.py b/linear_algebra/src/gaussian_elimination_pivoting.py index efc1ddd64a2e..540f57b0cff6 100644 --- a/linear_algebra/src/gaussian_elimination_pivoting.py +++ b/linear_algebra/src/gaussian_elimination_pivoting.py @@ -6,17 +6,18 @@ def solve_linear_system(matrix: np.ndarray) -> np.ndarray: Solve a linear system of equations using Gaussian elimination with partial pivoting Args: - - matrix: Coefficient matrix with the last column representing the constants. + - `matrix`: Coefficient matrix with the last column representing the constants. Returns: - - Solution vector. + - Solution vector. 
Raises: - - ValueError: If the matrix is not correct (i.e., singular). + - ``ValueError``: If the matrix is not correct (i.e., singular). https://courses.engr.illinois.edu/cs357/su2013/lect.htm Lecture 7 Example: + >>> A = np.array([[2, 1, -1], [-3, -1, 2], [-2, 1, 2]], dtype=float) >>> B = np.array([8, -11, -3], dtype=float) >>> solution = solve_linear_system(np.column_stack((A, B))) diff --git a/linear_algebra/src/rank_of_matrix.py b/linear_algebra/src/rank_of_matrix.py index 7ff3c1699a69..2c4fe2a8d1da 100644 --- a/linear_algebra/src/rank_of_matrix.py +++ b/linear_algebra/src/rank_of_matrix.py @@ -8,11 +8,15 @@ def rank_of_matrix(matrix: list[list[int | float]]) -> int: """ Finds the rank of a matrix. + Args: - matrix: The matrix as a list of lists. + `matrix`: The matrix as a list of lists. + Returns: The rank of the matrix. + Example: + >>> matrix1 = [[1, 2, 3], ... [4, 5, 6], ... [7, 8, 9]] diff --git a/linear_algebra/src/schur_complement.py b/linear_algebra/src/schur_complement.py index 7c79bb70abfc..74ac75e3fce2 100644 --- a/linear_algebra/src/schur_complement.py +++ b/linear_algebra/src/schur_complement.py @@ -12,13 +12,14 @@ def schur_complement( ) -> np.ndarray: """ Schur complement of a symmetric matrix X given as a 2x2 block matrix - consisting of matrices A, B and C. - Matrix A must be quadratic and non-singular. - In case A is singular, a pseudo-inverse may be provided using - the pseudo_inv argument. + consisting of matrices `A`, `B` and `C`. + Matrix `A` must be quadratic and non-singular. + In case `A` is singular, a pseudo-inverse may be provided using + the `pseudo_inv` argument. 
+ + | Link to Wiki: https://en.wikipedia.org/wiki/Schur_complement + | See also Convex Optimization - Boyd and Vandenberghe, A.5.5 - Link to Wiki: https://en.wikipedia.org/wiki/Schur_complement - See also Convex Optimization - Boyd and Vandenberghe, A.5.5 >>> import numpy as np >>> a = np.array([[1, 2], [2, 1]]) >>> b = np.array([[0, 3], [3, 0]]) diff --git a/linear_algebra/src/transformations_2d.py b/linear_algebra/src/transformations_2d.py index b4185cd2848f..5dee59024752 100644 --- a/linear_algebra/src/transformations_2d.py +++ b/linear_algebra/src/transformations_2d.py @@ -3,13 +3,15 @@ I have added the codes for reflection, projection, scaling and rotation 2D matrices. +.. code-block:: python + scaling(5) = [[5.0, 0.0], [0.0, 5.0]] - rotation(45) = [[0.5253219888177297, -0.8509035245341184], - [0.8509035245341184, 0.5253219888177297]] -projection(45) = [[0.27596319193541496, 0.446998331800279], - [0.446998331800279, 0.7240368080645851]] -reflection(45) = [[0.05064397763545947, 0.893996663600558], - [0.893996663600558, 0.7018070490682369]] + rotation(45) = [[0.5253219888177297, -0.8509035245341184], + [0.8509035245341184, 0.5253219888177297]] + projection(45) = [[0.27596319193541496, 0.446998331800279], + [0.446998331800279, 0.7240368080645851]] + reflection(45) = [[0.05064397763545947, 0.893996663600558], + [0.893996663600558, 0.7018070490682369]] """ from math import cos, sin From f45e392cf6e94259eca8c47b13cd3ae22bcd901e Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 30 Dec 2024 12:56:24 +0300 Subject: [PATCH 179/260] Fix sphinx/build_docs warnings for ciphers (#12485) * Fix sphinx/build_docs warnings for ciphers * Fix --- ciphers/autokey.py | 7 +- ciphers/caesar_cipher.py | 77 ++++++++++------- ciphers/decrypt_caesar_with_chi_squared.py | 99 +++++++++++----------- ciphers/enigma_machine2.py | 64 +++++++------- ciphers/rsa_factorization.py | 13 +-- ciphers/simple_keyword_cypher.py | 10 ++- ciphers/trifid_cipher.py | 25 +++--- 7 files changed, 170 
insertions(+), 125 deletions(-) diff --git a/ciphers/autokey.py b/ciphers/autokey.py index 05d8c066b139..7751a32d7546 100644 --- a/ciphers/autokey.py +++ b/ciphers/autokey.py @@ -1,5 +1,6 @@ """ https://en.wikipedia.org/wiki/Autokey_cipher + An autokey cipher (also known as the autoclave cipher) is a cipher that incorporates the message (the plaintext) into the key. The key is generated from the message in some automated fashion, @@ -10,8 +11,9 @@ def encrypt(plaintext: str, key: str) -> str: """ - Encrypt a given plaintext (string) and key (string), returning the + Encrypt a given `plaintext` (string) and `key` (string), returning the encrypted ciphertext. + >>> encrypt("hello world", "coffee") 'jsqqs avvwo' >>> encrypt("coffee is good as python", "TheAlgorithms") @@ -74,8 +76,9 @@ def encrypt(plaintext: str, key: str) -> str: def decrypt(ciphertext: str, key: str) -> str: """ - Decrypt a given ciphertext (string) and key (string), returning the decrypted + Decrypt a given `ciphertext` (string) and `key` (string), returning the decrypted ciphertext. 
+ >>> decrypt("jsqqs avvwo", "coffee") 'hello world' >>> decrypt("vvjfpk wj ohvp su ddylsv", "TheAlgorithms") diff --git a/ciphers/caesar_cipher.py b/ciphers/caesar_cipher.py index d19b9a337221..9c096fe8a7da 100644 --- a/ciphers/caesar_cipher.py +++ b/ciphers/caesar_cipher.py @@ -7,24 +7,29 @@ def encrypt(input_string: str, key: int, alphabet: str | None = None) -> str: """ encrypt ======= + Encodes a given string with the caesar cipher and returns the encoded message Parameters: ----------- - * input_string: the plain-text that needs to be encoded - * key: the number of letters to shift the message by + + * `input_string`: the plain-text that needs to be encoded + * `key`: the number of letters to shift the message by Optional: - * alphabet (None): the alphabet used to encode the cipher, if not + + * `alphabet` (``None``): the alphabet used to encode the cipher, if not specified, the standard english alphabet with upper and lowercase letters is used Returns: + * A string containing the encoded cipher-text More on the caesar cipher ========================= + The caesar cipher is named after Julius Caesar who used it when sending secret military messages to his troops. This is a simple substitution cipher where every character in the plain-text is shifted by a certain number known @@ -32,26 +37,28 @@ def encrypt(input_string: str, key: int, alphabet: str | None = None) -> str: Example: Say we have the following message: - "Hello, captain" + ``Hello, captain`` And our alphabet is made up of lower and uppercase letters: - "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + ``abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ`` - And our shift is "2" + And our shift is ``2`` - We can then encode the message, one letter at a time. "H" would become "J", - since "J" is two letters away, and so on. If the shift is ever two large, or + We can then encode the message, one letter at a time. ``H`` would become ``J``, + since ``J`` is two letters away, and so on. 
If the shift is ever two large, or our letter is at the end of the alphabet, we just start at the beginning - ("Z" would shift to "a" then "b" and so on). + (``Z`` would shift to ``a`` then ``b`` and so on). - Our final message would be "Jgnnq, ecrvckp" + Our final message would be ``Jgnnq, ecrvckp`` Further reading =============== + * https://en.m.wikipedia.org/wiki/Caesar_cipher Doctests ======== + >>> encrypt('The quick brown fox jumps over the lazy dog', 8) 'bpm yCqks jzwEv nwF rCuxA wDmz Bpm tiHG lwo' @@ -85,23 +92,28 @@ def decrypt(input_string: str, key: int, alphabet: str | None = None) -> str: """ decrypt ======= + Decodes a given string of cipher-text and returns the decoded plain-text Parameters: ----------- - * input_string: the cipher-text that needs to be decoded - * key: the number of letters to shift the message backwards by to decode + + * `input_string`: the cipher-text that needs to be decoded + * `key`: the number of letters to shift the message backwards by to decode Optional: - * alphabet (None): the alphabet used to decode the cipher, if not + + * `alphabet` (``None``): the alphabet used to decode the cipher, if not specified, the standard english alphabet with upper and lowercase letters is used Returns: + * A string containing the decoded plain-text More on the caesar cipher ========================= + The caesar cipher is named after Julius Caesar who used it when sending secret military messages to his troops. 
This is a simple substitution cipher where very character in the plain-text is shifted by a certain number known @@ -110,27 +122,29 @@ def decrypt(input_string: str, key: int, alphabet: str | None = None) -> str: Example: Say we have the following cipher-text: - "Jgnnq, ecrvckp" + ``Jgnnq, ecrvckp`` And our alphabet is made up of lower and uppercase letters: - "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + ``abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ`` - And our shift is "2" + And our shift is ``2`` To decode the message, we would do the same thing as encoding, but in - reverse. The first letter, "J" would become "H" (remember: we are decoding) - because "H" is two letters in reverse (to the left) of "J". We would - continue doing this. A letter like "a" would shift back to the end of - the alphabet, and would become "Z" or "Y" and so on. + reverse. The first letter, ``J`` would become ``H`` (remember: we are decoding) + because ``H`` is two letters in reverse (to the left) of ``J``. We would + continue doing this. A letter like ``a`` would shift back to the end of + the alphabet, and would become ``Z`` or ``Y`` and so on. 
- Our final message would be "Hello, captain" + Our final message would be ``Hello, captain`` Further reading =============== + * https://en.m.wikipedia.org/wiki/Caesar_cipher Doctests ======== + >>> decrypt('bpm yCqks jzwEv nwF rCuxA wDmz Bpm tiHG lwo', 8) 'The quick brown fox jumps over the lazy dog' @@ -150,41 +164,44 @@ def brute_force(input_string: str, alphabet: str | None = None) -> dict[int, str """ brute_force =========== + Returns all the possible combinations of keys and the decoded strings in the form of a dictionary Parameters: ----------- - * input_string: the cipher-text that needs to be used during brute-force + + * `input_string`: the cipher-text that needs to be used during brute-force Optional: - * alphabet: (None): the alphabet used to decode the cipher, if not + + * `alphabet` (``None``): the alphabet used to decode the cipher, if not specified, the standard english alphabet with upper and lowercase letters is used More about brute force ====================== + Brute force is when a person intercepts a message or password, not knowing the key and tries every single combination. This is easy with the caesar cipher since there are only all the letters in the alphabet. The more complex the cipher, the larger amount of time it will take to do brute force Ex: - Say we have a 5 letter alphabet (abcde), for simplicity and we intercepted the - following message: - - "dbc" - + Say we have a ``5`` letter alphabet (``abcde``), for simplicity and we intercepted + the following message: ``dbc``, we could then just write out every combination: - ecd... and so on, until we reach a combination that makes sense: - "cab" + ``ecd``... and so on, until we reach a combination that makes sense: + ``cab`` Further reading =============== + * https://en.wikipedia.org/wiki/Brute_force Doctests ======== + >>> brute_force("jFyuMy xIH'N vLONy zILwy Gy!")[20] "Please don't brute force me!" 
diff --git a/ciphers/decrypt_caesar_with_chi_squared.py b/ciphers/decrypt_caesar_with_chi_squared.py index 10832203e531..fb95c0f90628 100644 --- a/ciphers/decrypt_caesar_with_chi_squared.py +++ b/ciphers/decrypt_caesar_with_chi_squared.py @@ -11,33 +11,31 @@ def decrypt_caesar_with_chi_squared( """ Basic Usage =========== + Arguments: - * ciphertext (str): the text to decode (encoded with the caesar cipher) + * `ciphertext` (str): the text to decode (encoded with the caesar cipher) Optional Arguments: - * cipher_alphabet (list): the alphabet used for the cipher (each letter is - a string separated by commas) - * frequencies_dict (dict): a dictionary of word frequencies where keys are - the letters and values are a percentage representation of the frequency as - a decimal/float - * case_sensitive (bool): a boolean value: True if the case matters during - decryption, False if it doesn't + * `cipher_alphabet` (list): the alphabet used for the cipher (each letter is + a string separated by commas) + * `frequencies_dict` (dict): a dictionary of word frequencies where keys are + the letters and values are a percentage representation of the frequency as + a decimal/float + * `case_sensitive` (bool): a boolean value: ``True`` if the case matters during + decryption, ``False`` if it doesn't Returns: - * A tuple in the form of: - ( - most_likely_cipher, - most_likely_cipher_chi_squared_value, - decoded_most_likely_cipher - ) + * A tuple in the form of: + (`most_likely_cipher`, `most_likely_cipher_chi_squared_value`, + `decoded_most_likely_cipher`) - where... - - most_likely_cipher is an integer representing the shift of the smallest - chi-squared statistic (most likely key) - - most_likely_cipher_chi_squared_value is a float representing the - chi-squared statistic of the most likely shift - - decoded_most_likely_cipher is a string with the decoded cipher - (decoded by the most_likely_cipher key) + where... 
+ - `most_likely_cipher` is an integer representing the shift of the smallest + chi-squared statistic (most likely key) + - `most_likely_cipher_chi_squared_value` is a float representing the + chi-squared statistic of the most likely shift + - `decoded_most_likely_cipher` is a string with the decoded cipher + (decoded by the most_likely_cipher key) The Chi-squared test @@ -45,52 +43,57 @@ def decrypt_caesar_with_chi_squared( The caesar cipher ----------------- + The caesar cipher is a very insecure encryption algorithm, however it has been used since Julius Caesar. The cipher is a simple substitution cipher where each character in the plain text is replaced by a character in the alphabet a certain number of characters after the original character. The number of characters away is called the shift or key. For example: - Plain text: hello - Key: 1 - Cipher text: ifmmp - (each letter in hello has been shifted one to the right in the eng. alphabet) + | Plain text: ``hello`` + | Key: ``1`` + | Cipher text: ``ifmmp`` + | (each letter in ``hello`` has been shifted one to the right in the eng. alphabet) As you can imagine, this doesn't provide lots of security. In fact decrypting ciphertext by brute-force is extremely easy even by hand. However - one way to do that is the chi-squared test. + one way to do that is the chi-squared test. The chi-squared test - ------------------- + -------------------- + Each letter in the english alphabet has a frequency, or the amount of times it shows up compared to other letters (usually expressed as a decimal representing the percentage likelihood). The most common letter in the - english language is "e" with a frequency of 0.11162 or 11.162%. The test is - completed in the following fashion. + english language is ``e`` with a frequency of ``0.11162`` or ``11.162%``. + The test is completed in the following fashion. 1. 
The ciphertext is decoded in a brute force way (every combination of the - 26 possible combinations) + ``26`` possible combinations) 2. For every combination, for each letter in the combination, the average amount of times the letter should appear the message is calculated by - multiplying the total number of characters by the frequency of the letter + multiplying the total number of characters by the frequency of the letter. + + | For example: + | In a message of ``100`` characters, ``e`` should appear around ``11.162`` + times. - For example: - In a message of 100 characters, e should appear around 11.162 times. + 3. Then, to calculate the margin of error (the amount of times the letter + SHOULD appear with the amount of times the letter DOES appear), we use + the chi-squared test. The following formula is used: - 3. Then, to calculate the margin of error (the amount of times the letter - SHOULD appear with the amount of times the letter DOES appear), we use - the chi-squared test. The following formula is used: + Let: + - n be the number of times the letter actually appears + - p be the predicted value of the number of times the letter should + appear (see item ``2``) + - let v be the chi-squared test result (referred to here as chi-squared + value/statistic) - Let: - - n be the number of times the letter actually appears - - p be the predicted value of the number of times the letter should - appear (see #2) - - let v be the chi-squared test result (referred to here as chi-squared - value/statistic) + :: - (n - p)^2 - --------- = v - p + (n - p)^2 + --------- = v + p 4. Each chi squared value for each letter is then added up to the total. The total is the chi-squared statistic for that encryption key. @@ -98,16 +101,16 @@ def decrypt_caesar_with_chi_squared( to be the decoded answer. 
Further Reading - ================ + =============== - * http://practicalcryptography.com/cryptanalysis/text-characterisation/chi-squared- - statistic/ + * http://practicalcryptography.com/cryptanalysis/text-characterisation/chi-squared-statistic/ * https://en.wikipedia.org/wiki/Letter_frequency * https://en.wikipedia.org/wiki/Chi-squared_test * https://en.m.wikipedia.org/wiki/Caesar_cipher Doctests ======== + >>> decrypt_caesar_with_chi_squared( ... 'dof pz aol jhlzhy jpwoly zv wvwbshy? pa pz avv lhzf av jyhjr!' ... ) # doctest: +NORMALIZE_WHITESPACE diff --git a/ciphers/enigma_machine2.py b/ciphers/enigma_machine2.py index 163aa7172c11..e42fdd82ed41 100644 --- a/ciphers/enigma_machine2.py +++ b/ciphers/enigma_machine2.py @@ -1,14 +1,16 @@ """ -Wikipedia: https://en.wikipedia.org/wiki/Enigma_machine -Video explanation: https://youtu.be/QwQVMqfoB2E -Also check out Numberphile's and Computerphile's videos on this topic +| Wikipedia: https://en.wikipedia.org/wiki/Enigma_machine +| Video explanation: https://youtu.be/QwQVMqfoB2E +| Also check out Numberphile's and Computerphile's videos on this topic -This module contains function 'enigma' which emulates +This module contains function ``enigma`` which emulates the famous Enigma machine from WWII. 
+ Module includes: -- enigma function + +- ``enigma`` function - showcase of function usage -- 9 randomly generated rotors +- ``9`` randomly generated rotors - reflector (aka static rotor) - original alphabet @@ -73,7 +75,7 @@ def _validator( rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str ) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]: """ - Checks if the values can be used for the 'enigma' function + Checks if the values can be used for the ``enigma`` function >>> _validator((1,1,1), (rotor1, rotor2, rotor3), 'POLAND') ((1, 1, 1), ('EGZWVONAHDCLFQMSIPJBYUKXTR', 'FOBHMDKEXQNRAULPGSJVTYICZW', \ @@ -83,7 +85,7 @@ def _validator( :param rotpos: rotor_positon :param rotsel: rotor_selection :param pb: plugb -> validated and transformed - :return: (rotpos, rotsel, pb) + :return: (`rotpos`, `rotsel`, `pb`) """ # Checks if there are 3 unique rotors @@ -118,9 +120,10 @@ def _plugboard(pbstring: str) -> dict[str, str]: >>> _plugboard('POLAND') {'P': 'O', 'O': 'P', 'L': 'A', 'A': 'L', 'N': 'D', 'D': 'N'} - In the code, 'pb' stands for 'plugboard' + In the code, ``pb`` stands for ``plugboard`` Pairs can be separated by spaces + :param pbstring: string containing plugboard setting for the Enigma machine :return: dictionary containing converted pairs """ @@ -168,31 +171,34 @@ def enigma( plugb: str = "", ) -> str: """ - The only difference with real-world enigma is that I allowed string input. + The only difference with real-world enigma is that ``I`` allowed string input. All characters are converted to uppercase. (non-letter symbol are ignored) - How it works: - (for every letter in the message) + + | How it works: + | (for every letter in the message) - Input letter goes into the plugboard. - If it is connected to another one, switch it. + If it is connected to another one, switch it. + + - Letter goes through ``3`` rotors. + Each rotor can be represented as ``2`` sets of symbol, where one is shuffled. 
+ Each symbol from the first set has corresponding symbol in + the second set and vice versa. - - Letter goes through 3 rotors. - Each rotor can be represented as 2 sets of symbol, where one is shuffled. - Each symbol from the first set has corresponding symbol in - the second set and vice versa. + example:: - example: - | ABCDEFGHIJKLMNOPQRSTUVWXYZ | e.g. F=D and D=F - | VKLEPDBGRNWTFCJOHQAMUZYIXS | + | ABCDEFGHIJKLMNOPQRSTUVWXYZ | e.g. F=D and D=F + | VKLEPDBGRNWTFCJOHQAMUZYIXS | - Symbol then goes through reflector (static rotor). - There it is switched with paired symbol - The reflector can be represented as2 sets, each with half of the alphanet. - There are usually 10 pairs of letters. + There it is switched with paired symbol. + The reflector can be represented as ``2`` sets, each with half of the alphanet. + There are usually ``10`` pairs of letters. + + Example:: - Example: - | ABCDEFGHIJKLM | e.g. E is paired to X - | ZYXWVUTSRQPON | so when E goes in X goes out and vice versa + | ABCDEFGHIJKLM | e.g. E is paired to X + | ZYXWVUTSRQPON | so when E goes in X goes out and vice versa - Letter then goes through the rotors again @@ -211,9 +217,9 @@ def enigma( :param text: input message - :param rotor_position: tuple with 3 values in range 1..26 - :param rotor_selection: tuple with 3 rotors () - :param plugb: string containing plugboard configuration (default '') + :param rotor_position: tuple with ``3`` values in range ``1``.. ``26`` + :param rotor_selection: tuple with ``3`` rotors + :param plugb: string containing plugboard configuration (default ``''``) :return: en/decrypted string """ diff --git a/ciphers/rsa_factorization.py b/ciphers/rsa_factorization.py index 0a358a4fc2d4..585b21fac856 100644 --- a/ciphers/rsa_factorization.py +++ b/ciphers/rsa_factorization.py @@ -3,8 +3,10 @@ The program can efficiently factor RSA prime number given the private key d and public key e. 
-Source: on page 3 of https://crypto.stanford.edu/~dabo/papers/RSA-survey.pdf -More readable source: https://www.di-mgt.com.au/rsa_factorize_n.html + +| Source: on page ``3`` of https://crypto.stanford.edu/~dabo/papers/RSA-survey.pdf +| More readable source: https://www.di-mgt.com.au/rsa_factorize_n.html + large number can take minutes to factor, therefore are not included in doctest. """ @@ -17,13 +19,14 @@ def rsafactor(d: int, e: int, n: int) -> list[int]: """ This function returns the factors of N, where p*q=N - Return: [p, q] + + Return: [p, q] We call N the RSA modulus, e the encryption exponent, and d the decryption exponent. The pair (N, e) is the public key. As its name suggests, it is public and is used to - encrypt messages. + encrypt messages. The pair (N, d) is the secret key or private key and is known only to the recipient - of encrypted messages. + of encrypted messages. >>> rsafactor(3, 16971, 25777) [149, 173] diff --git a/ciphers/simple_keyword_cypher.py b/ciphers/simple_keyword_cypher.py index 9dc624e7762c..bde137d826c3 100644 --- a/ciphers/simple_keyword_cypher.py +++ b/ciphers/simple_keyword_cypher.py @@ -1,9 +1,11 @@ def remove_duplicates(key: str) -> str: """ Removes duplicate alphabetic characters in a keyword (letter is ignored after its - first appearance). + first appearance). + :param key: Keyword to use :return: String with duplicates removed + >>> remove_duplicates('Hello World!!') 'Helo Wrd' """ @@ -18,6 +20,7 @@ def remove_duplicates(key: str) -> str: def create_cipher_map(key: str) -> dict[str, str]: """ Returns a cipher map given a keyword. + :param key: keyword to use :return: dictionary cipher map """ @@ -43,9 +46,11 @@ def create_cipher_map(key: str) -> dict[str, str]: def encipher(message: str, cipher_map: dict[str, str]) -> str: """ Enciphers a message given a cipher map. 
+ :param message: Message to encipher :param cipher_map: Cipher map :return: enciphered string + >>> encipher('Hello World!!', create_cipher_map('Goodbye!!')) 'CYJJM VMQJB!!' """ @@ -55,9 +60,11 @@ def encipher(message: str, cipher_map: dict[str, str]) -> str: def decipher(message: str, cipher_map: dict[str, str]) -> str: """ Deciphers a message given a cipher map + :param message: Message to decipher :param cipher_map: Dictionary mapping to use :return: Deciphered string + >>> cipher_map = create_cipher_map('Goodbye!!') >>> decipher(encipher('Hello World!!', cipher_map), cipher_map) 'HELLO WORLD!!' @@ -70,6 +77,7 @@ def decipher(message: str, cipher_map: dict[str, str]) -> str: def main() -> None: """ Handles I/O + :return: void """ message = input("Enter message to encode or decode: ").strip() diff --git a/ciphers/trifid_cipher.py b/ciphers/trifid_cipher.py index 16b9faf67688..9613cee0669d 100644 --- a/ciphers/trifid_cipher.py +++ b/ciphers/trifid_cipher.py @@ -22,7 +22,7 @@ def __encrypt_part(message_part: str, character_to_number: dict[str, str]) -> str: """ - Arrange the triagram value of each letter of 'message_part' vertically and join + Arrange the triagram value of each letter of `message_part` vertically and join them horizontally. >>> __encrypt_part('ASK', TEST_CHARACTER_TO_NUMBER) @@ -65,8 +65,8 @@ def __prepare( """ A helper function that generates the triagrams and assigns each letter of the alphabet to its corresponding triagram and stores this in a dictionary - ("character_to_number" and "number_to_character") after confirming if the - alphabet's length is 27. + (`character_to_number` and `number_to_character`) after confirming if the + alphabet's length is ``27``. >>> test = __prepare('I aM a BOy','abCdeFghijkLmnopqrStuVwxYZ+') >>> expected = ('IAMABOY','ABCDEFGHIJKLMNOPQRSTUVWXYZ+', @@ -75,24 +75,28 @@ def __prepare( True Testing with incomplete alphabet + >>> __prepare('I aM a BOy','abCdeFghijkLmnopqrStuVw') Traceback (most recent call last): ... 
KeyError: 'Length of alphabet has to be 27.' Testing with extra long alphabets + >>> __prepare('I aM a BOy','abCdeFghijkLmnopqrStuVwxyzzwwtyyujjgfd') Traceback (most recent call last): ... KeyError: 'Length of alphabet has to be 27.' Testing with punctuations that are not in the given alphabet + >>> __prepare('am i a boy?','abCdeFghijkLmnopqrStuVwxYZ+') Traceback (most recent call last): ... ValueError: Each message character has to be included in alphabet! Testing with numbers + >>> __prepare(500,'abCdeFghijkLmnopqrStuVwxYZ+') Traceback (most recent call last): ... @@ -130,9 +134,9 @@ def encrypt_message( PARAMETERS ---------- - * message: The message you want to encrypt. - * alphabet (optional): The characters to be used for the cipher . - * period (optional): The number of characters you want in a group whilst + * `message`: The message you want to encrypt. + * `alphabet` (optional): The characters to be used for the cipher . + * `period` (optional): The number of characters you want in a group whilst encrypting. >>> encrypt_message('I am a boy') @@ -169,20 +173,21 @@ def decrypt_message( decrypt_message =============== - Decrypts a trifid_cipher encrypted message . + Decrypts a trifid_cipher encrypted message. PARAMETERS ---------- - * message: The message you want to decrypt . - * alphabet (optional): The characters used for the cipher. - * period (optional): The number of characters used in grouping when it + * `message`: The message you want to decrypt. + * `alphabet` (optional): The characters used for the cipher. + * `period` (optional): The number of characters used in grouping when it was encrypted. 
>>> decrypt_message('BCDGBQY') 'IAMABOY' Decrypting with your own alphabet and period + >>> decrypt_message('FMJFVOISSUFTFPUFEQQC','FELIXMARDSTBCGHJKNOPQUVWYZ+',5) 'AIDETOILECIELTAIDERA' """ From 68b4c6b4793867126f71ebf2a399402b02472edb Mon Sep 17 00:00:00 2001 From: mahdi tavasoli Date: Mon, 30 Dec 2024 13:52:20 +0330 Subject: [PATCH 180/260] fix is_ip_v4_address_valid.py (#12394) * fix is_ip_v4_address_valid * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update is_ip_v4_address_valid.py --------- Co-authored-by: m.tavasoli Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- maths/is_ip_v4_address_valid.py | 37 +++++++++++++++++++++++++-------- 1 file changed, 28 insertions(+), 9 deletions(-) diff --git a/maths/is_ip_v4_address_valid.py b/maths/is_ip_v4_address_valid.py index 0ae8e021ead1..305afabffed3 100644 --- a/maths/is_ip_v4_address_valid.py +++ b/maths/is_ip_v4_address_valid.py @@ -1,13 +1,15 @@ """ +wiki: https://en.wikipedia.org/wiki/IPv4 + Is IP v4 address valid? A valid IP address must be four octets in the form of A.B.C.D, -where A,B,C and D are numbers from 0-254 -for example: 192.168.23.1, 172.254.254.254 are valid IP address - 192.168.255.0, 255.192.3.121 are invalid IP address +where A, B, C and D are numbers from 0-255 +for example: 192.168.23.1, 172.255.255.255 are valid IP address + 192.168.256.0, 256.192.3.121 are invalid IP address """ -def is_ip_v4_address_valid(ip_v4_address: str) -> bool: +def is_ip_v4_address_valid(ip: str) -> bool: """ print "Valid IP address" If IP is valid. 
or @@ -16,13 +18,13 @@ def is_ip_v4_address_valid(ip_v4_address: str) -> bool: >>> is_ip_v4_address_valid("192.168.0.23") True - >>> is_ip_v4_address_valid("192.255.15.8") + >>> is_ip_v4_address_valid("192.256.15.8") False >>> is_ip_v4_address_valid("172.100.0.8") True - >>> is_ip_v4_address_valid("254.255.0.255") + >>> is_ip_v4_address_valid("255.256.0.256") False >>> is_ip_v4_address_valid("1.2.33333333.4") @@ -45,12 +47,29 @@ def is_ip_v4_address_valid(ip_v4_address: str) -> bool: >>> is_ip_v4_address_valid("1.2.3.") False + + >>> is_ip_v4_address_valid("1.2.3.05") + False """ - octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()] - return len(octets) == 4 and all(0 <= int(octet) <= 254 for octet in octets) + octets = ip.split(".") + if len(octets) != 4: + return False + + for octet in octets: + if not octet.isdigit(): + return False + + number = int(octet) + if len(str(number)) != len(octet): + return False + + if not 0 <= number <= 255: + return False + + return True if __name__ == "__main__": ip = input().strip() valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid" - print(f"{ip} is a {valid_or_invalid} IP v4 address.") + print(f"{ip} is a {valid_or_invalid} IPv4 address.") From 2ca96b7c8ec2134a7282bed13f1cc93358c13c45 Mon Sep 17 00:00:00 2001 From: jperezr <122382210+MRJPEREZR@users.noreply.github.com> Date: Mon, 30 Dec 2024 11:37:21 +0100 Subject: [PATCH 181/260] current_stock_price test added (#12390) * adding test to web_programming/current_stock_price * adding test to web_programming/current_stock_price * Update current_stock_price.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update current_stock_price.py --------- Co-authored-by: Maxim Smolskiy Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- web_programming/current_stock_price.py | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff 
--git a/web_programming/current_stock_price.py b/web_programming/current_stock_price.py index 9567c05b0558..d0a65e9aac84 100644 --- a/web_programming/current_stock_price.py +++ b/web_programming/current_stock_price.py @@ -1,14 +1,30 @@ import requests from bs4 import BeautifulSoup +""" +Get the HTML code of finance yahoo and select the current qsp-price +Current AAPL stock price is 228.43 +Current AMZN stock price is 201.85 +Current IBM stock price is 210.30 +Current GOOG stock price is 177.86 +Current MSFT stock price is 414.82 +Current ORCL stock price is 188.87 +""" + def stock_price(symbol: str = "AAPL") -> str: + """ + >>> stock_price("EEEE") + '-' + >>> isinstance(float(stock_price("GOOG")),float) + True + """ url = f"https://finance.yahoo.com/quote/%7Bsymbol%7D?p={symbol}" yahoo_finance_source = requests.get( url, headers={"USER-AGENT": "Mozilla/5.0"}, timeout=10 ).text soup = BeautifulSoup(yahoo_finance_source, "html.parser") - specific_fin_streamer_tag = soup.find("fin-streamer", {"data-test": "qsp-price"}) + specific_fin_streamer_tag = soup.find("fin-streamer", {"data-testid": "qsp-price"}) if specific_fin_streamer_tag: text = specific_fin_streamer_tag.get_text() @@ -18,5 +34,9 @@ def stock_price(symbol: str = "AAPL") -> str: # Search for the symbol at https://finance.yahoo.com/lookup if __name__ == "__main__": + from doctest import testmod + + testmod() + for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split(): print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}") From 24923ee635973a05f7713dd672fea07361fa0466 Mon Sep 17 00:00:00 2001 From: Shikhar Maheshwari <83123897+shikhar-sm@users.noreply.github.com> Date: Mon, 30 Dec 2024 16:21:10 +0530 Subject: [PATCH 182/260] Add doctest to maths/numerical_analysis/intersection.py (#12148) --- maths/numerical_analysis/intersection.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/maths/numerical_analysis/intersection.py b/maths/numerical_analysis/intersection.py index 
826c0ead0a00..325abeaca996 100644 --- a/maths/numerical_analysis/intersection.py +++ b/maths/numerical_analysis/intersection.py @@ -42,6 +42,11 @@ def intersection(function: Callable[[float], float], x0: float, x1: float) -> fl def f(x: float) -> float: + """ + function is f(x) = x^3 - 2x - 5 + >>> f(2) + -1.0 + """ return math.pow(x, 3) - (2 * x) - 5 From da587d06ac88e338e7db8f10fa8ca2ae556e7bae Mon Sep 17 00:00:00 2001 From: Sankabapur <152031570+Parthjhalani07@users.noreply.github.com> Date: Mon, 30 Dec 2024 16:29:03 +0530 Subject: [PATCH 183/260] Added doctest to /maths/power_using_recursion.py (#11994) --- maths/power_using_recursion.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/maths/power_using_recursion.py b/maths/power_using_recursion.py index 29283ca0f67c..eb775b161ae8 100644 --- a/maths/power_using_recursion.py +++ b/maths/power_using_recursion.py @@ -38,6 +38,14 @@ def power(base: int, exponent: int) -> float: Traceback (most recent call last): ... RecursionError: maximum recursion depth exceeded + >>> power(0, 0) + 1 + >>> power(0, 1) + 0 + >>> power(5,6) + 15625 + >>> power(23, 12) + 21914624432020321 """ return base * power(base, (exponent - 1)) if exponent else 1 From 493a7c153c1ca1805e60c109842ee1a1ee63cde2 Mon Sep 17 00:00:00 2001 From: Jeel Rupapara Date: Mon, 30 Dec 2024 16:40:44 +0530 Subject: [PATCH 184/260] feat: add testcase of assemble_transformation (#11810) --- strings/min_cost_string_conversion.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/strings/min_cost_string_conversion.py b/strings/min_cost_string_conversion.py index 93791e2a7ed3..87eb5189e16a 100644 --- a/strings/min_cost_string_conversion.py +++ b/strings/min_cost_string_conversion.py @@ -91,6 +91,14 @@ def assemble_transformation(ops: list[list[str]], i: int, j: int) -> list[str]: >>> y1 = len(ops1[0]) - 1 >>> assemble_transformation(ops1, x1, y1) [] + + >>> ops2 = [['0', 'I1', 'I2', 'I3'], + ... ['D1', 'C1', 'I2', 'I3'], + ... 
['D2', 'D2', 'R23', 'R23']] + >>> x2 = len(ops2) - 1 + >>> y2 = len(ops2[0]) - 1 + >>> assemble_transformation(ops2, x2, y2) + ['C1', 'I2', 'R23'] """ if i == 0 and j == 0: return [] From 7fa9b4bf1bc9822517bb0046aebc2e8b2997d3e1 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 30 Dec 2024 14:52:03 +0300 Subject: [PATCH 185/260] Fix sphinx/build_docs warnings for dynamic_programming (#12484) * Fix sphinx/build_docs warnings for dynamic_programming * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix * Fix * Fix * Fix * Fix * Fix --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- dynamic_programming/all_construct.py | 7 +- dynamic_programming/combination_sum_iv.py | 23 +- dynamic_programming/fizz_buzz.py | 11 +- dynamic_programming/knapsack.py | 40 ++-- .../longest_common_substring.py | 14 +- .../longest_increasing_subsequence.py | 13 +- .../matrix_chain_multiplication.py | 89 ++++---- dynamic_programming/max_product_subarray.py | 3 +- .../minimum_squares_to_represent_a_number.py | 1 + dynamic_programming/regex_match.py | 22 +- dynamic_programming/rod_cutting.py | 83 ++++--- dynamic_programming/subset_generation.py | 63 +++--- dynamic_programming/viterbi.py | 212 ++++++++---------- 13 files changed, 295 insertions(+), 286 deletions(-) diff --git a/dynamic_programming/all_construct.py b/dynamic_programming/all_construct.py index 5d585fc7fcec..ca00f2beb06a 100644 --- a/dynamic_programming/all_construct.py +++ b/dynamic_programming/all_construct.py @@ -8,9 +8,10 @@ def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]: """ - returns the list containing all the possible - combinations a string(target) can be constructed from - the given list of substrings(word_bank) + returns the list containing all the possible + combinations a string(`target`) can be constructed from + the given list of substrings(`word_bank`) + >>> 
all_construct("hello", ["he", "l", "o"]) [['he', 'l', 'l', 'o']] >>> all_construct("purple",["purp","p","ur","le","purpl"]) diff --git a/dynamic_programming/combination_sum_iv.py b/dynamic_programming/combination_sum_iv.py index 113c06a27a9e..ed8dcd88e6fd 100644 --- a/dynamic_programming/combination_sum_iv.py +++ b/dynamic_programming/combination_sum_iv.py @@ -1,24 +1,25 @@ """ Question: -You are given an array of distinct integers and you have to tell how many -different ways of selecting the elements from the array are there such that -the sum of chosen elements is equal to the target number tar. + You are given an array of distinct integers and you have to tell how many + different ways of selecting the elements from the array are there such that + the sum of chosen elements is equal to the target number tar. Example Input: -N = 3 -target = 5 -array = [1, 2, 5] + * N = 3 + * target = 5 + * array = [1, 2, 5] Output: -9 + 9 Approach: -The basic idea is to go over recursively to find the way such that the sum -of chosen elements is “tar”. For every element, we have two choices - 1. Include the element in our set of chosen elements. - 2. Don't include the element in our set of chosen elements. + The basic idea is to go over recursively to find the way such that the sum + of chosen elements is `target`. For every element, we have two choices + + 1. Include the element in our set of chosen elements. + 2. Don't include the element in our set of chosen elements. """ diff --git a/dynamic_programming/fizz_buzz.py b/dynamic_programming/fizz_buzz.py index e29116437a93..0cb48897875b 100644 --- a/dynamic_programming/fizz_buzz.py +++ b/dynamic_programming/fizz_buzz.py @@ -3,11 +3,12 @@ def fizz_buzz(number: int, iterations: int) -> str: """ - Plays FizzBuzz. - Prints Fizz if number is a multiple of 3. - Prints Buzz if its a multiple of 5. - Prints FizzBuzz if its a multiple of both 3 and 5 or 15. - Else Prints The Number Itself. + | Plays FizzBuzz. 
+ | Prints Fizz if number is a multiple of ``3``. + | Prints Buzz if its a multiple of ``5``. + | Prints FizzBuzz if its a multiple of both ``3`` and ``5`` or ``15``. + | Else Prints The Number Itself. + >>> fizz_buzz(1,7) '1 2 Fizz 4 Buzz Fizz 7 ' >>> fizz_buzz(1,0) diff --git a/dynamic_programming/knapsack.py b/dynamic_programming/knapsack.py index 489b5ada450a..28c5b19dbe36 100644 --- a/dynamic_programming/knapsack.py +++ b/dynamic_programming/knapsack.py @@ -11,7 +11,7 @@ def mf_knapsack(i, wt, val, j): """ This code involves the concept of memory functions. Here we solve the subproblems which are needed unlike the below example - F is a 2D array with -1s filled up + F is a 2D array with ``-1`` s filled up """ global f # a global dp table for knapsack if f[i][j] < 0: @@ -45,22 +45,24 @@ def knapsack_with_example_solution(w: int, wt: list, val: list): the several possible optimal subsets. Parameters - --------- + ---------- - W: int, the total maximum weight for the given knapsack problem. - wt: list, the vector of weights for all items where wt[i] is the weight - of the i-th item. - val: list, the vector of values for all items where val[i] is the value - of the i-th item + * `w`: int, the total maximum weight for the given knapsack problem. + * `wt`: list, the vector of weights for all items where ``wt[i]`` is the weight + of the ``i``-th item. + * `val`: list, the vector of values for all items where ``val[i]`` is the value + of the ``i``-th item Returns ------- - optimal_val: float, the optimal value for the given knapsack problem - example_optional_set: set, the indices of one of the optimal subsets - which gave rise to the optimal value. + + * `optimal_val`: float, the optimal value for the given knapsack problem + * `example_optional_set`: set, the indices of one of the optimal subsets + which gave rise to the optimal value. 
Examples - ------- + -------- + >>> knapsack_with_example_solution(10, [1, 3, 5, 2], [10, 20, 100, 22]) (142, {2, 3, 4}) >>> knapsack_with_example_solution(6, [4, 3, 2, 3], [3, 2, 4, 4]) @@ -104,19 +106,19 @@ def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set): a filled DP table and the vector of weights Parameters - --------- - - dp: list of list, the table of a solved integer weight dynamic programming problem + ---------- - wt: list or tuple, the vector of weights of the items - i: int, the index of the item under consideration - j: int, the current possible maximum weight - optimal_set: set, the optimal subset so far. This gets modified by the function. + * `dp`: list of list, the table of a solved integer weight dynamic programming + problem + * `wt`: list or tuple, the vector of weights of the items + * `i`: int, the index of the item under consideration + * `j`: int, the current possible maximum weight + * `optimal_set`: set, the optimal subset so far. This gets modified by the function. Returns ------- - None + ``None`` """ # for the current item i at a maximum weight j to be part of an optimal subset, # the optimal value at (i, j) must be greater than the optimal value at (i-1, j). diff --git a/dynamic_programming/longest_common_substring.py b/dynamic_programming/longest_common_substring.py index e2f944a5e336..ea5233eb2d17 100644 --- a/dynamic_programming/longest_common_substring.py +++ b/dynamic_programming/longest_common_substring.py @@ -1,15 +1,19 @@ """ -Longest Common Substring Problem Statement: Given two sequences, find the -longest common substring present in both of them. A substring is -necessarily continuous. -Example: "abcdef" and "xabded" have two longest common substrings, "ab" or "de". -Therefore, algorithm should return any one of them. +Longest Common Substring Problem Statement: + Given two sequences, find the + longest common substring present in both of them. A substring is + necessarily continuous. 
+ +Example: + ``abcdef`` and ``xabded`` have two longest common substrings, ``ab`` or ``de``. + Therefore, algorithm should return any one of them. """ def longest_common_substring(text1: str, text2: str) -> str: """ Finds the longest common substring between two strings. + >>> longest_common_substring("", "") '' >>> longest_common_substring("a","") diff --git a/dynamic_programming/longest_increasing_subsequence.py b/dynamic_programming/longest_increasing_subsequence.py index 2a78e2e7ad1d..d839757f6da5 100644 --- a/dynamic_programming/longest_increasing_subsequence.py +++ b/dynamic_programming/longest_increasing_subsequence.py @@ -4,11 +4,13 @@ This is a pure Python implementation of Dynamic Programming solution to the longest increasing subsequence of a given sequence. -The problem is : -Given an array, to find the longest and increasing sub-array in that given array and -return it. -Example: [10, 22, 9, 33, 21, 50, 41, 60, 80] as input will return - [10, 22, 33, 41, 60, 80] as output +The problem is: + Given an array, to find the longest and increasing sub-array in that given array and + return it. + +Example: + ``[10, 22, 9, 33, 21, 50, 41, 60, 80]`` as input will return + ``[10, 22, 33, 41, 60, 80]`` as output """ from __future__ import annotations @@ -17,6 +19,7 @@ def longest_subsequence(array: list[int]) -> list[int]: # This function is recursive """ Some examples + >>> longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]) [10, 22, 33, 41, 60, 80] >>> longest_subsequence([4, 8, 7, 5, 1, 12, 2, 3, 9]) diff --git a/dynamic_programming/matrix_chain_multiplication.py b/dynamic_programming/matrix_chain_multiplication.py index da6e525ce816..10e136b9f0db 100644 --- a/dynamic_programming/matrix_chain_multiplication.py +++ b/dynamic_programming/matrix_chain_multiplication.py @@ -1,42 +1,48 @@ """ -Find the minimum number of multiplications needed to multiply chain of matrices. 
-Reference: https://www.geeksforgeeks.org/matrix-chain-multiplication-dp-8/ +| Find the minimum number of multiplications needed to multiply chain of matrices. +| Reference: https://www.geeksforgeeks.org/matrix-chain-multiplication-dp-8/ -The algorithm has interesting real-world applications. Example: -1. Image transformations in Computer Graphics as images are composed of matrix. -2. Solve complex polynomial equations in the field of algebra using least processing - power. -3. Calculate overall impact of macroeconomic decisions as economic equations involve a - number of variables. -4. Self-driving car navigation can be made more accurate as matrix multiplication can - accurately determine position and orientation of obstacles in short time. +The algorithm has interesting real-world applications. -Python doctests can be run with the following command: -python -m doctest -v matrix_chain_multiply.py +Example: + 1. Image transformations in Computer Graphics as images are composed of matrix. + 2. Solve complex polynomial equations in the field of algebra using least processing + power. + 3. Calculate overall impact of macroeconomic decisions as economic equations involve a + number of variables. + 4. Self-driving car navigation can be made more accurate as matrix multiplication can + accurately determine position and orientation of obstacles in short time. -Given a sequence arr[] that represents chain of 2D matrices such that the dimension of -the ith matrix is arr[i-1]*arr[i]. -So suppose arr = [40, 20, 30, 10, 30] means we have 4 matrices of dimensions -40*20, 20*30, 30*10 and 10*30. +Python doctests can be run with the following command:: -matrix_chain_multiply() returns an integer denoting minimum number of multiplications to -multiply the chain. + python -m doctest -v matrix_chain_multiply.py + +Given a sequence ``arr[]`` that represents chain of 2D matrices such that the dimension +of the ``i`` th matrix is ``arr[i-1]*arr[i]``. 
+So suppose ``arr = [40, 20, 30, 10, 30]`` means we have ``4`` matrices of dimensions +``40*20``, ``20*30``, ``30*10`` and ``10*30``. + +``matrix_chain_multiply()`` returns an integer denoting minimum number of +multiplications to multiply the chain. We do not need to perform actual multiplication here. We only need to decide the order in which to perform the multiplication. Hints: -1. Number of multiplications (ie cost) to multiply 2 matrices -of size m*p and p*n is m*p*n. -2. Cost of matrix multiplication is associative ie (M1*M2)*M3 != M1*(M2*M3) -3. Matrix multiplication is not commutative. So, M1*M2 does not mean M2*M1 can be done. -4. To determine the required order, we can try different combinations. + 1. Number of multiplications (ie cost) to multiply ``2`` matrices + of size ``m*p`` and ``p*n`` is ``m*p*n``. + 2. Cost of matrix multiplication is not associative ie ``(M1*M2)*M3 != M1*(M2*M3)`` + 3. Matrix multiplication is not commutative. So, ``M1*M2`` does not mean ``M2*M1`` + can be done. + 4. To determine the required order, we can try different combinations. + So, this problem has overlapping sub-problems and can be solved using recursion. We use Dynamic Programming for optimal time complexity. Example input: -arr = [40, 20, 30, 10, 30] -output: 26000 + ``arr = [40, 20, 30, 10, 30]`` +output: + ``26000`` """ from collections.abc import Iterator @@ -50,25 +56,25 @@ def matrix_chain_multiply(arr: list[int]) -> int: Find the minimum number of multiplcations required to multiply the chain of matrices Args: - arr: The input array of integers. + `arr`: The input array of integers. 
Returns: Minimum number of multiplications needed to multiply the chain Examples: - >>> matrix_chain_multiply([1, 2, 3, 4, 3]) - 30 - >>> matrix_chain_multiply([10]) - 0 - >>> matrix_chain_multiply([10, 20]) - 0 - >>> matrix_chain_multiply([19, 2, 19]) - 722 - >>> matrix_chain_multiply(list(range(1, 100))) - 323398 - - # >>> matrix_chain_multiply(list(range(1, 251))) - # 2626798 + + >>> matrix_chain_multiply([1, 2, 3, 4, 3]) + 30 + >>> matrix_chain_multiply([10]) + 0 + >>> matrix_chain_multiply([10, 20]) + 0 + >>> matrix_chain_multiply([19, 2, 19]) + 722 + >>> matrix_chain_multiply(list(range(1, 100))) + 323398 + >>> # matrix_chain_multiply(list(range(1, 251))) + # 2626798 """ if len(arr) < 2: return 0 @@ -93,8 +99,10 @@ def matrix_chain_multiply(arr: list[int]) -> int: def matrix_chain_order(dims: list[int]) -> int: """ Source: https://en.wikipedia.org/wiki/Matrix_chain_multiplication + The dynamic programming solution is faster than cached the recursive solution and can handle larger inputs. + >>> matrix_chain_order([1, 2, 3, 4, 3]) 30 >>> matrix_chain_order([10]) @@ -105,8 +113,7 @@ def matrix_chain_order(dims: list[int]) -> int: 722 >>> matrix_chain_order(list(range(1, 100))) 323398 - - # >>> matrix_chain_order(list(range(1, 251))) # Max before RecursionError is raised + >>> # matrix_chain_order(list(range(1, 251))) # Max before RecursionError is raised # 2626798 """ diff --git a/dynamic_programming/max_product_subarray.py b/dynamic_programming/max_product_subarray.py index 425859bc03e3..6f4f38e38942 100644 --- a/dynamic_programming/max_product_subarray.py +++ b/dynamic_programming/max_product_subarray.py @@ -1,9 +1,10 @@ def max_product_subarray(numbers: list[int]) -> int: """ Returns the maximum product that can be obtained by multiplying a - contiguous subarray of the given integer list `nums`. + contiguous subarray of the given integer list `numbers`. 
Example: + >>> max_product_subarray([2, 3, -2, 4]) 6 >>> max_product_subarray((-2, 0, -1)) diff --git a/dynamic_programming/minimum_squares_to_represent_a_number.py b/dynamic_programming/minimum_squares_to_represent_a_number.py index bf5849f5bcb3..98c0602fa831 100644 --- a/dynamic_programming/minimum_squares_to_represent_a_number.py +++ b/dynamic_programming/minimum_squares_to_represent_a_number.py @@ -5,6 +5,7 @@ def minimum_squares_to_represent_a_number(number: int) -> int: """ Count the number of minimum squares to represent a number + >>> minimum_squares_to_represent_a_number(25) 1 >>> minimum_squares_to_represent_a_number(37) diff --git a/dynamic_programming/regex_match.py b/dynamic_programming/regex_match.py index 200a882831c0..e94d82093c8b 100644 --- a/dynamic_programming/regex_match.py +++ b/dynamic_programming/regex_match.py @@ -1,23 +1,25 @@ """ Regex matching check if a text matches pattern or not. Pattern: - '.' Matches any single character. - '*' Matches zero or more of the preceding element. + + 1. ``.`` Matches any single character. + 2. ``*`` Matches zero or more of the preceding element. + More info: https://medium.com/trick-the-interviwer/regular-expression-matching-9972eb74c03 """ def recursive_match(text: str, pattern: str) -> bool: - """ + r""" Recursive matching algorithm. - Time complexity: O(2 ^ (|text| + |pattern|)) - Space complexity: Recursion depth is O(|text| + |pattern|). + | Time complexity: O(2^(\|text\| + \|pattern\|)) + | Space complexity: Recursion depth is O(\|text\| + \|pattern\|). :param text: Text to match. :param pattern: Pattern to match. - :return: True if text matches pattern, False otherwise. + :return: ``True`` if `text` matches `pattern`, ``False`` otherwise. >>> recursive_match('abc', 'a.c') True @@ -48,15 +50,15 @@ def recursive_match(text: str, pattern: str) -> bool: def dp_match(text: str, pattern: str) -> bool: - """ + r""" Dynamic programming matching algorithm. 
- Time complexity: O(|text| * |pattern|) - Space complexity: O(|text| * |pattern|) + | Time complexity: O(\|text\| * \|pattern\|) + | Space complexity: O(\|text\| * \|pattern\|) :param text: Text to match. :param pattern: Pattern to match. - :return: True if text matches pattern, False otherwise. + :return: ``True`` if `text` matches `pattern`, ``False`` otherwise. >>> dp_match('abc', 'a.c') True diff --git a/dynamic_programming/rod_cutting.py b/dynamic_programming/rod_cutting.py index f80fa440ae86..d12c759dc928 100644 --- a/dynamic_programming/rod_cutting.py +++ b/dynamic_programming/rod_cutting.py @@ -1,7 +1,7 @@ """ This module provides two implementations for the rod-cutting problem: -1. A naive recursive implementation which has an exponential runtime -2. Two dynamic programming implementations which have quadratic runtime + 1. A naive recursive implementation which has an exponential runtime + 2. Two dynamic programming implementations which have quadratic runtime The rod-cutting problem is the problem of finding the maximum possible revenue obtainable from a rod of length ``n`` given a list of prices for each integral piece @@ -20,18 +20,21 @@ def naive_cut_rod_recursive(n: int, prices: list): Runtime: O(2^n) Arguments - ------- - n: int, the length of the rod - prices: list, the prices for each piece of rod. ``p[i-i]`` is the - price for a rod of length ``i`` + --------- + + * `n`: int, the length of the rod + * `prices`: list, the prices for each piece of rod. ``p[i-i]`` is the + price for a rod of length ``i`` Returns ------- - The maximum revenue obtainable for a rod of length n given the list of prices + + The maximum revenue obtainable for a rod of length `n` given the list of prices for each piece. 
Examples -------- + >>> naive_cut_rod_recursive(4, [1, 5, 8, 9]) 10 >>> naive_cut_rod_recursive(10, [1, 5, 8, 9, 10, 17, 17, 20, 24, 30]) @@ -54,28 +57,30 @@ def top_down_cut_rod(n: int, prices: list): """ Constructs a top-down dynamic programming solution for the rod-cutting problem via memoization. This function serves as a wrapper for - _top_down_cut_rod_recursive + ``_top_down_cut_rod_recursive`` Runtime: O(n^2) Arguments - -------- - n: int, the length of the rod - prices: list, the prices for each piece of rod. ``p[i-i]`` is the - price for a rod of length ``i`` + --------- - Note - ---- - For convenience and because Python's lists using 0-indexing, length(max_rev) = - n + 1, to accommodate for the revenue obtainable from a rod of length 0. + * `n`: int, the length of the rod + * `prices`: list, the prices for each piece of rod. ``p[i-i]`` is the + price for a rod of length ``i`` + + .. note:: + For convenience and because Python's lists using ``0``-indexing, ``length(max_rev) + = n + 1``, to accommodate for the revenue obtainable from a rod of length ``0``. Returns ------- - The maximum revenue obtainable for a rod of length n given the list of prices + + The maximum revenue obtainable for a rod of length `n` given the list of prices for each piece. Examples - ------- + -------- + >>> top_down_cut_rod(4, [1, 5, 8, 9]) 10 >>> top_down_cut_rod(10, [1, 5, 8, 9, 10, 17, 17, 20, 24, 30]) @@ -94,16 +99,18 @@ def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list): Runtime: O(n^2) Arguments - -------- - n: int, the length of the rod - prices: list, the prices for each piece of rod. ``p[i-i]`` is the - price for a rod of length ``i`` - max_rev: list, the computed maximum revenue for a piece of rod. - ``max_rev[i]`` is the maximum revenue obtainable for a rod of length ``i`` + --------- + + * `n`: int, the length of the rod + * `prices`: list, the prices for each piece of rod. 
``p[i-i]`` is the + price for a rod of length ``i`` + * `max_rev`: list, the computed maximum revenue for a piece of rod. + ``max_rev[i]`` is the maximum revenue obtainable for a rod of length ``i`` Returns ------- - The maximum revenue obtainable for a rod of length n given the list of prices + + The maximum revenue obtainable for a rod of length `n` given the list of prices for each piece. """ if max_rev[n] >= 0: @@ -130,18 +137,21 @@ def bottom_up_cut_rod(n: int, prices: list): Runtime: O(n^2) Arguments - ---------- - n: int, the maximum length of the rod. - prices: list, the prices for each piece of rod. ``p[i-i]`` is the - price for a rod of length ``i`` + --------- + + * `n`: int, the maximum length of the rod. + * `prices`: list, the prices for each piece of rod. ``p[i-i]`` is the + price for a rod of length ``i`` Returns ------- - The maximum revenue obtainable from cutting a rod of length n given + + The maximum revenue obtainable from cutting a rod of length `n` given the prices for each piece of rod p. Examples - ------- + -------- + >>> bottom_up_cut_rod(4, [1, 5, 8, 9]) 10 >>> bottom_up_cut_rod(10, [1, 5, 8, 9, 10, 17, 17, 20, 24, 30]) @@ -168,13 +178,12 @@ def _enforce_args(n: int, prices: list): """ Basic checks on the arguments to the rod-cutting algorithms - n: int, the length of the rod - prices: list, the price list for each piece of rod. - - Throws ValueError: + * `n`: int, the length of the rod + * `prices`: list, the price list for each piece of rod. - if n is negative or there are fewer items in the price list than the length of - the rod + Throws ``ValueError``: + if `n` is negative or there are fewer items in the price list than the length of + the rod """ if n < 0: msg = f"n must be greater than or equal to 0. 
Got n = {n}" diff --git a/dynamic_programming/subset_generation.py b/dynamic_programming/subset_generation.py index d490bca737ba..08daaac6f88a 100644 --- a/dynamic_programming/subset_generation.py +++ b/dynamic_programming/subset_generation.py @@ -1,38 +1,41 @@ def subset_combinations(elements: list[int], n: int) -> list: """ Compute n-element combinations from a given list using dynamic programming. + Args: - elements: The list of elements from which combinations will be generated. - n: The number of elements in each combination. + * `elements`: The list of elements from which combinations will be generated. + * `n`: The number of elements in each combination. + Returns: - A list of tuples, each representing a combination of n elements. - >>> subset_combinations(elements=[10, 20, 30, 40], n=2) - [(10, 20), (10, 30), (10, 40), (20, 30), (20, 40), (30, 40)] - >>> subset_combinations(elements=[1, 2, 3], n=1) - [(1,), (2,), (3,)] - >>> subset_combinations(elements=[1, 2, 3], n=3) - [(1, 2, 3)] - >>> subset_combinations(elements=[42], n=1) - [(42,)] - >>> subset_combinations(elements=[6, 7, 8, 9], n=4) - [(6, 7, 8, 9)] - >>> subset_combinations(elements=[10, 20, 30, 40, 50], n=0) - [()] - >>> subset_combinations(elements=[1, 2, 3, 4], n=2) - [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] - >>> subset_combinations(elements=[1, 'apple', 3.14], n=2) - [(1, 'apple'), (1, 3.14), ('apple', 3.14)] - >>> subset_combinations(elements=['single'], n=0) - [()] - >>> subset_combinations(elements=[], n=9) - [] - >>> from itertools import combinations - >>> all(subset_combinations(items, n) == list(combinations(items, n)) - ... for items, n in ( - ... ([10, 20, 30, 40], 2), ([1, 2, 3], 1), ([1, 2, 3], 3), ([42], 1), - ... ([6, 7, 8, 9], 4), ([10, 20, 30, 40, 50], 1), ([1, 2, 3, 4], 2), - ... ([1, 'apple', 3.14], 2), (['single'], 0), ([], 9))) - True + A list of tuples, each representing a combination of `n` elements. 
+ + >>> subset_combinations(elements=[10, 20, 30, 40], n=2) + [(10, 20), (10, 30), (10, 40), (20, 30), (20, 40), (30, 40)] + >>> subset_combinations(elements=[1, 2, 3], n=1) + [(1,), (2,), (3,)] + >>> subset_combinations(elements=[1, 2, 3], n=3) + [(1, 2, 3)] + >>> subset_combinations(elements=[42], n=1) + [(42,)] + >>> subset_combinations(elements=[6, 7, 8, 9], n=4) + [(6, 7, 8, 9)] + >>> subset_combinations(elements=[10, 20, 30, 40, 50], n=0) + [()] + >>> subset_combinations(elements=[1, 2, 3, 4], n=2) + [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + >>> subset_combinations(elements=[1, 'apple', 3.14], n=2) + [(1, 'apple'), (1, 3.14), ('apple', 3.14)] + >>> subset_combinations(elements=['single'], n=0) + [()] + >>> subset_combinations(elements=[], n=9) + [] + >>> from itertools import combinations + >>> all(subset_combinations(items, n) == list(combinations(items, n)) + ... for items, n in ( + ... ([10, 20, 30, 40], 2), ([1, 2, 3], 1), ([1, 2, 3], 3), ([42], 1), + ... ([6, 7, 8, 9], 4), ([10, 20, 30, 40, 50], 1), ([1, 2, 3, 4], 2), + ... ([1, 'apple', 3.14], 2), (['single'], 0), ([], 9))) + True """ r = len(elements) if n > r: diff --git a/dynamic_programming/viterbi.py b/dynamic_programming/viterbi.py index 764d45dc2c05..5b78fa9e46d0 100644 --- a/dynamic_programming/viterbi.py +++ b/dynamic_programming/viterbi.py @@ -9,119 +9,102 @@ def viterbi( emission_probabilities: dict, ) -> list: """ - Viterbi Algorithm, to find the most likely path of - states from the start and the expected output. - https://en.wikipedia.org/wiki/Viterbi_algorithm - sdafads - Wikipedia example - >>> observations = ["normal", "cold", "dizzy"] - >>> states = ["Healthy", "Fever"] - >>> start_p = {"Healthy": 0.6, "Fever": 0.4} - >>> trans_p = { - ... "Healthy": {"Healthy": 0.7, "Fever": 0.3}, - ... "Fever": {"Healthy": 0.4, "Fever": 0.6}, - ... } - >>> emit_p = { - ... "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1}, - ... "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}, - ... 
} - >>> viterbi(observations, states, start_p, trans_p, emit_p) - ['Healthy', 'Healthy', 'Fever'] + Viterbi Algorithm, to find the most likely path of + states from the start and the expected output. - >>> viterbi((), states, start_p, trans_p, emit_p) - Traceback (most recent call last): - ... - ValueError: There's an empty parameter - - >>> viterbi(observations, (), start_p, trans_p, emit_p) - Traceback (most recent call last): - ... - ValueError: There's an empty parameter - - >>> viterbi(observations, states, {}, trans_p, emit_p) - Traceback (most recent call last): - ... - ValueError: There's an empty parameter - - >>> viterbi(observations, states, start_p, {}, emit_p) - Traceback (most recent call last): - ... - ValueError: There's an empty parameter - - >>> viterbi(observations, states, start_p, trans_p, {}) - Traceback (most recent call last): - ... - ValueError: There's an empty parameter - - >>> viterbi("invalid", states, start_p, trans_p, emit_p) - Traceback (most recent call last): - ... - ValueError: observations_space must be a list + https://en.wikipedia.org/wiki/Viterbi_algorithm - >>> viterbi(["valid", 123], states, start_p, trans_p, emit_p) - Traceback (most recent call last): - ... - ValueError: observations_space must be a list of strings - - >>> viterbi(observations, "invalid", start_p, trans_p, emit_p) - Traceback (most recent call last): - ... - ValueError: states_space must be a list - - >>> viterbi(observations, ["valid", 123], start_p, trans_p, emit_p) - Traceback (most recent call last): - ... - ValueError: states_space must be a list of strings - - >>> viterbi(observations, states, "invalid", trans_p, emit_p) - Traceback (most recent call last): - ... - ValueError: initial_probabilities must be a dict - - >>> viterbi(observations, states, {2:2}, trans_p, emit_p) - Traceback (most recent call last): - ... 
- ValueError: initial_probabilities all keys must be strings - - >>> viterbi(observations, states, {"a":2}, trans_p, emit_p) - Traceback (most recent call last): - ... - ValueError: initial_probabilities all values must be float + Wikipedia example - >>> viterbi(observations, states, start_p, "invalid", emit_p) - Traceback (most recent call last): - ... - ValueError: transition_probabilities must be a dict - - >>> viterbi(observations, states, start_p, {"a":2}, emit_p) - Traceback (most recent call last): - ... - ValueError: transition_probabilities all values must be dict - - >>> viterbi(observations, states, start_p, {2:{2:2}}, emit_p) - Traceback (most recent call last): - ... - ValueError: transition_probabilities all keys must be strings - - >>> viterbi(observations, states, start_p, {"a":{2:2}}, emit_p) - Traceback (most recent call last): - ... - ValueError: transition_probabilities all keys must be strings - - >>> viterbi(observations, states, start_p, {"a":{"b":2}}, emit_p) - Traceback (most recent call last): - ... - ValueError: transition_probabilities nested dictionary all values must be float - - >>> viterbi(observations, states, start_p, trans_p, "invalid") - Traceback (most recent call last): - ... - ValueError: emission_probabilities must be a dict - - >>> viterbi(observations, states, start_p, trans_p, None) - Traceback (most recent call last): - ... - ValueError: There's an empty parameter + >>> observations = ["normal", "cold", "dizzy"] + >>> states = ["Healthy", "Fever"] + >>> start_p = {"Healthy": 0.6, "Fever": 0.4} + >>> trans_p = { + ... "Healthy": {"Healthy": 0.7, "Fever": 0.3}, + ... "Fever": {"Healthy": 0.4, "Fever": 0.6}, + ... } + >>> emit_p = { + ... "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1}, + ... "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}, + ... 
} + >>> viterbi(observations, states, start_p, trans_p, emit_p) + ['Healthy', 'Healthy', 'Fever'] + >>> viterbi((), states, start_p, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: There's an empty parameter + >>> viterbi(observations, (), start_p, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: There's an empty parameter + >>> viterbi(observations, states, {}, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: There's an empty parameter + >>> viterbi(observations, states, start_p, {}, emit_p) + Traceback (most recent call last): + ... + ValueError: There's an empty parameter + >>> viterbi(observations, states, start_p, trans_p, {}) + Traceback (most recent call last): + ... + ValueError: There's an empty parameter + >>> viterbi("invalid", states, start_p, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: observations_space must be a list + >>> viterbi(["valid", 123], states, start_p, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: observations_space must be a list of strings + >>> viterbi(observations, "invalid", start_p, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: states_space must be a list + >>> viterbi(observations, ["valid", 123], start_p, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: states_space must be a list of strings + >>> viterbi(observations, states, "invalid", trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: initial_probabilities must be a dict + >>> viterbi(observations, states, {2:2}, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: initial_probabilities all keys must be strings + >>> viterbi(observations, states, {"a":2}, trans_p, emit_p) + Traceback (most recent call last): + ... 
+ ValueError: initial_probabilities all values must be float + >>> viterbi(observations, states, start_p, "invalid", emit_p) + Traceback (most recent call last): + ... + ValueError: transition_probabilities must be a dict + >>> viterbi(observations, states, start_p, {"a":2}, emit_p) + Traceback (most recent call last): + ... + ValueError: transition_probabilities all values must be dict + >>> viterbi(observations, states, start_p, {2:{2:2}}, emit_p) + Traceback (most recent call last): + ... + ValueError: transition_probabilities all keys must be strings + >>> viterbi(observations, states, start_p, {"a":{2:2}}, emit_p) + Traceback (most recent call last): + ... + ValueError: transition_probabilities all keys must be strings + >>> viterbi(observations, states, start_p, {"a":{"b":2}}, emit_p) + Traceback (most recent call last): + ... + ValueError: transition_probabilities nested dictionary all values must be float + >>> viterbi(observations, states, start_p, trans_p, "invalid") + Traceback (most recent call last): + ... + ValueError: emission_probabilities must be a dict + >>> viterbi(observations, states, start_p, trans_p, None) + Traceback (most recent call last): + ... + ValueError: There's an empty parameter """ _validation( @@ -213,7 +196,6 @@ def _validation( ... "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}, ... } >>> _validation(observations, states, start_p, trans_p, emit_p) - >>> _validation([], states, start_p, trans_p, emit_p) Traceback (most recent call last): ... @@ -242,7 +224,6 @@ def _validate_not_empty( """ >>> _validate_not_empty(["a"], ["b"], {"c":0.5}, ... {"d": {"e": 0.6}}, {"f": {"g": 0.7}}) - >>> _validate_not_empty(["a"], ["b"], {"c":0.5}, {}, {"f": {"g": 0.7}}) Traceback (most recent call last): ... @@ -267,12 +248,10 @@ def _validate_not_empty( def _validate_lists(observations_space: Any, states_space: Any) -> None: """ >>> _validate_lists(["a"], ["b"]) - >>> _validate_lists(1234, ["b"]) Traceback (most recent call last): ... 
ValueError: observations_space must be a list - >>> _validate_lists(["a"], [3]) Traceback (most recent call last): ... @@ -285,7 +264,6 @@ def _validate_lists(observations_space: Any, states_space: Any) -> None: def _validate_list(_object: Any, var_name: str) -> None: """ >>> _validate_list(["a"], "mock_name") - >>> _validate_list("a", "mock_name") Traceback (most recent call last): ... @@ -294,7 +272,6 @@ def _validate_list(_object: Any, var_name: str) -> None: Traceback (most recent call last): ... ValueError: mock_name must be a list of strings - """ if not isinstance(_object, list): msg = f"{var_name} must be a list" @@ -313,7 +290,6 @@ def _validate_dicts( ) -> None: """ >>> _validate_dicts({"c":0.5}, {"d": {"e": 0.6}}, {"f": {"g": 0.7}}) - >>> _validate_dicts("invalid", {"d": {"e": 0.6}}, {"f": {"g": 0.7}}) Traceback (most recent call last): ... @@ -339,7 +315,6 @@ def _validate_dicts( def _validate_nested_dict(_object: Any, var_name: str) -> None: """ >>> _validate_nested_dict({"a":{"b": 0.5}}, "mock_name") - >>> _validate_nested_dict("invalid", "mock_name") Traceback (most recent call last): ... @@ -367,7 +342,6 @@ def _validate_dict( ) -> None: """ >>> _validate_dict({"b": 0.5}, "mock_name", float) - >>> _validate_dict("invalid", "mock_name", float) Traceback (most recent call last): ... 
From a2be5adf673c32720bbfb649c368af8fd7ae9dbb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julia=20Arag=C3=A3o?= <101305675+juliaaragao@users.noreply.github.com> Date: Mon, 30 Dec 2024 13:36:55 +0100 Subject: [PATCH 186/260] Tests electronics/electric_conductivity.py #9943 (#12437) * Function conversion rectangular number to polar * #9943 : adding test to elelectronics/electric_conductivity.py * updating DIRECTORY.md * Apply suggestions from code review * updating DIRECTORY.md * Rename rec_to_pol.py to rectangular_to_polar.py * updating DIRECTORY.md * Update conversions/rectangular_to_polar.py * Update conversions/rectangular_to_polar.py --------- Co-authored-by: Julia Co-authored-by: juliaaragao Co-authored-by: Christian Clauss Co-authored-by: cclauss --- DIRECTORY.md | 1 + conversions/rectangular_to_polar.py | 32 ++++++++++++++++++++++++++++ electronics/electric_conductivity.py | 20 +++++++++++++++++ 3 files changed, 53 insertions(+) create mode 100644 conversions/rectangular_to_polar.py diff --git a/DIRECTORY.md b/DIRECTORY.md index d234d366df06..44d0414a37c8 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -169,6 +169,7 @@ * [Prefix Conversions](conversions/prefix_conversions.py) * [Prefix Conversions String](conversions/prefix_conversions_string.py) * [Pressure Conversions](conversions/pressure_conversions.py) + * [Rectangular To Polar](conversions/rectangular_to_polar.py) * [Rgb Cmyk Conversion](conversions/rgb_cmyk_conversion.py) * [Rgb Hsv Conversion](conversions/rgb_hsv_conversion.py) * [Roman Numerals](conversions/roman_numerals.py) diff --git a/conversions/rectangular_to_polar.py b/conversions/rectangular_to_polar.py new file mode 100644 index 000000000000..bed97d7410ec --- /dev/null +++ b/conversions/rectangular_to_polar.py @@ -0,0 +1,32 @@ +import math + + +def rectangular_to_polar(real: float, img: float) -> tuple[float, float]: + """ + https://en.wikipedia.org/wiki/Polar_coordinate_system + + >>> rectangular_to_polar(5,-5) + (7.07, -45.0) + >>> 
rectangular_to_polar(-1,1) + (1.41, 135.0) + >>> rectangular_to_polar(-1,-1) + (1.41, -135.0) + >>> rectangular_to_polar(1e-10,1e-10) + (0.0, 45.0) + >>> rectangular_to_polar(-1e-10,1e-10) + (0.0, 135.0) + >>> rectangular_to_polar(9.75,5.93) + (11.41, 31.31) + >>> rectangular_to_polar(10000,99999) + (100497.76, 84.29) + """ + + mod = round(math.sqrt((real**2) + (img**2)), 2) + ang = round(math.degrees(math.atan2(img, real)), 2) + return (mod, ang) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() diff --git a/electronics/electric_conductivity.py b/electronics/electric_conductivity.py index 11f2a607d214..65bb6c5ceaf0 100644 --- a/electronics/electric_conductivity.py +++ b/electronics/electric_conductivity.py @@ -21,6 +21,26 @@ def electric_conductivity( ('conductivity', 5.12672e-14) >>> electric_conductivity(conductivity=1000, electron_conc=0, mobility=1200) ('electron_conc', 5.201506356240767e+18) + >>> electric_conductivity(conductivity=-10, electron_conc=100, mobility=0) + Traceback (most recent call last): + ... + ValueError: Conductivity cannot be negative + >>> electric_conductivity(conductivity=50, electron_conc=-10, mobility=0) + Traceback (most recent call last): + ... + ValueError: Electron concentration cannot be negative + >>> electric_conductivity(conductivity=50, electron_conc=0, mobility=-10) + Traceback (most recent call last): + ... + ValueError: mobility cannot be negative + >>> electric_conductivity(conductivity=50, electron_conc=0, mobility=0) + Traceback (most recent call last): + ... + ValueError: You cannot supply more or less than 2 values + >>> electric_conductivity(conductivity=50, electron_conc=200, mobility=300) + Traceback (most recent call last): + ... 
+ ValueError: You cannot supply more or less than 2 values """ if (conductivity, electron_conc, mobility).count(0) != 1: raise ValueError("You cannot supply more or less than 2 values") From f24ddba5b2600486f7c3a4c5807cf2aeed421870 Mon Sep 17 00:00:00 2001 From: Matej <83732219+IsxImattI@users.noreply.github.com> Date: Mon, 30 Dec 2024 16:04:28 +0100 Subject: [PATCH 187/260] Implemented doctests for geometry-related classes (#12368) * Implemented doctests for geometry-related classes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Removed unused noqa directive * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * refactored sudoku_solver.py * refactored sudoku_solver.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * context manager for file handling changed too in from_file function --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- data_structures/arrays/sudoku_solver.py | 5 +++-- geometry/geometry.py | 29 +++++++++++++++++++++++++ 2 files changed, 32 insertions(+), 2 deletions(-) diff --git a/data_structures/arrays/sudoku_solver.py b/data_structures/arrays/sudoku_solver.py index 7e38e1465728..fd1a4f3e37b8 100644 --- a/data_structures/arrays/sudoku_solver.py +++ b/data_structures/arrays/sudoku_solver.py @@ -23,7 +23,7 @@ def cross(items_a, items_b): + [cross(rs, cs) for rs in ("ABC", "DEF", "GHI") for cs in ("123", "456", "789")] ) units = {s: [u for u in unitlist if s in u] for s in squares} -peers = {s: set(sum(units[s], [])) - {s} for s in squares} # noqa: RUF017 +peers = {s: {x for u in units[s] for x in u} - {s} for s in squares} def test(): @@ -172,7 +172,8 @@ def unitsolved(unit): def from_file(filename, sep="\n"): "Parse a file into a list of strings, separated by sep." 
- return open(filename).read().strip().split(sep) + with open(filename) as file: + return file.read().strip().split(sep) def random_puzzle(assignments=17): diff --git a/geometry/geometry.py b/geometry/geometry.py index 9e353dee17a7..a0be8eb3befc 100644 --- a/geometry/geometry.py +++ b/geometry/geometry.py @@ -48,6 +48,18 @@ class Side: Side(length=5, angle=Angle(degrees=45.6), next_side=None) >>> Side(5, Angle(45.6), Side(1, Angle(2))) # doctest: +ELLIPSIS Side(length=5, angle=Angle(degrees=45.6), next_side=Side(length=1, angle=Angle(d... + >>> Side(-1) + Traceback (most recent call last): + ... + TypeError: length must be a positive numeric value. + >>> Side(5, None) + Traceback (most recent call last): + ... + TypeError: angle must be an Angle object. + >>> Side(5, Angle(90), "Invalid next_side") + Traceback (most recent call last): + ... + TypeError: next_side must be a Side or None. """ length: float @@ -162,6 +174,19 @@ class Polygon: >>> Polygon() Polygon(sides=[]) + >>> polygon = Polygon() + >>> polygon.add_side(Side(5)).get_side(0) + Side(length=5, angle=Angle(degrees=90), next_side=None) + >>> polygon.get_side(1) + Traceback (most recent call last): + ... + IndexError: list index out of range + >>> polygon.set_side(0, Side(10)).get_side(0) + Side(length=10, angle=Angle(degrees=90), next_side=None) + >>> polygon.set_side(1, Side(10)) + Traceback (most recent call last): + ... + IndexError: list assignment index out of range """ sides: list[Side] = field(default_factory=list) @@ -207,6 +232,10 @@ class Rectangle(Polygon): 30 >>> rectangle_one.area() 50 + >>> Rectangle(-5, 10) + Traceback (most recent call last): + ... + TypeError: length must be a positive numeric value. 
""" def __init__(self, short_side_length: float, long_side_length: float) -> None: From 77425364c87908bf061ad78b770ec840086b4efb Mon Sep 17 00:00:00 2001 From: SUDO_USER <110802232+AtharvMalusare@users.noreply.github.com> Date: Mon, 30 Dec 2024 20:42:04 +0530 Subject: [PATCH 188/260] Intensity_based_Segmentation (#12491) * Add files via upload * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update intensity-based_segmentation.py * Update and rename intensity-based_segmentation.py to intensity_based_segmentation.py * Update intensity_based_segmentation.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review * [0, 1, 1]], dtype=int32) --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../intensity_based_segmentation.py | 62 +++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 computer_vision/intensity_based_segmentation.py diff --git a/computer_vision/intensity_based_segmentation.py b/computer_vision/intensity_based_segmentation.py new file mode 100644 index 000000000000..7f2b1141acc4 --- /dev/null +++ b/computer_vision/intensity_based_segmentation.py @@ -0,0 +1,62 @@ +# Source: "https://www.ijcse.com/docs/IJCSE11-02-03-117.pdf" + +# Importing necessary libraries +import matplotlib.pyplot as plt +import numpy as np +from PIL import Image + + +def segment_image(image: np.ndarray, thresholds: list[int]) -> np.ndarray: + """ + Performs image segmentation based on intensity thresholds. + + Args: + image: Input grayscale image as a 2D array. + thresholds: Intensity thresholds to define segments. + + Returns: + A labeled 2D array where each region corresponds to a threshold range. 
+ + Example: + >>> img = np.array([[80, 120, 180], [40, 90, 150], [20, 60, 100]]) + >>> segment_image(img, [50, 100, 150]) + array([[1, 2, 3], + [0, 1, 2], + [0, 1, 1]], dtype=int32) + """ + # Initialize segmented array with zeros + segmented = np.zeros_like(image, dtype=np.int32) + + # Assign labels based on thresholds + for i, threshold in enumerate(thresholds): + segmented[image > threshold] = i + 1 + + return segmented + + +if __name__ == "__main__": + # Load the image + image_path = "path_to_image" # Replace with your image path + original_image = Image.open(image_path).convert("L") + image_array = np.array(original_image) + + # Define thresholds + thresholds = [50, 100, 150, 200] + + # Perform segmentation + segmented_image = segment_image(image_array, thresholds) + + # Display the results + plt.figure(figsize=(10, 5)) + + plt.subplot(1, 2, 1) + plt.title("Original Image") + plt.imshow(image_array, cmap="gray") + plt.axis("off") + + plt.subplot(1, 2, 2) + plt.title("Segmented Image") + plt.imshow(segmented_image, cmap="tab20") + plt.axis("off") + + plt.show() From 75c5c411133f7e0f339c8d68c7c76c8054eb4249 Mon Sep 17 00:00:00 2001 From: Scarfinos <158184182+Scarfinos@users.noreply.github.com> Date: Mon, 30 Dec 2024 16:12:26 +0100 Subject: [PATCH 189/260] #9943 : Adding coverage test for basic_graphs.py (#12354) * #9943 : Adding coverage test for basic_graphs.py * #9943 : Adding coverage test for basic_graphs.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Solve problem of line too long --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- graphs/basic_graphs.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/graphs/basic_graphs.py b/graphs/basic_graphs.py index 567fa65040ae..286e9b195796 100644 --- a/graphs/basic_graphs.py +++ b/graphs/basic_graphs.py @@ -77,6 +77,14 @@ def initialize_weighted_undirected_graph( def 
dfs(g, s): + """ + >>> dfs({1: [2, 3], 2: [4, 5], 3: [], 4: [], 5: []}, 1) + 1 + 2 + 4 + 5 + 3 + """ vis, _s = {s}, [s] print(s) while _s: @@ -104,6 +112,17 @@ def dfs(g, s): def bfs(g, s): + """ + >>> bfs({1: [2, 3], 2: [4, 5], 3: [6, 7], 4: [], 5: [8], 6: [], 7: [], 8: []}, 1) + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + """ vis, q = {s}, deque([s]) print(s) while q: @@ -128,6 +147,19 @@ def bfs(g, s): def dijk(g, s): + """ + dijk({1: [(2, 7), (3, 9), (6, 14)], + 2: [(1, 7), (3, 10), (4, 15)], + 3: [(1, 9), (2, 10), (4, 11), (6, 2)], + 4: [(2, 15), (3, 11), (5, 6)], + 5: [(4, 6), (6, 9)], + 6: [(1, 14), (3, 2), (5, 9)]}, 1) + 7 + 9 + 11 + 20 + 20 + """ dist, known, path = {s: 0}, set(), {s: 0} while True: if len(known) == len(g) - 1: From 7e55fb6474a06ecf0000fe11494fe5eefeeb54ab Mon Sep 17 00:00:00 2001 From: Jeffrey Yancey Date: Mon, 30 Dec 2024 12:00:30 -0700 Subject: [PATCH 190/260] - Implemented `find_lanczos_eigenvectors` to approximate the largest eigenvalues and corresponding eigenvectors of a graph based on its adjacency list. (#11906) - Utilized `lanczos_iteration` to construct tridiagonal matrices, optimized for large, sparse matrices. - Added `multiply_matrix_vector` for efficient matrix-vector multiplication using adjacency lists. - Included `validate_adjacency_list` for input validation. - Supports varied graph analysis applications, particularly for analyzing graph centrality. - Included type hints, comprehensive docstrings, and doctests. - PEP-8 compliant, with optimized handling of inputs and outputs. This module provides essential tools for eigenvalue-based graph analysis, ideal for centrality insights and structural assessments. 
"""
Lanczos Method for Finding Eigenvalues and Eigenvectors of a Graph.

This module demonstrates the Lanczos method to approximate the largest eigenvalues
and corresponding eigenvectors of a symmetric matrix represented as a graph's
adjacency list. The method efficiently handles large, sparse matrices by converting
the graph to a tridiagonal matrix, whose eigenvalues and eigenvectors are then
computed.

Key Functions:
- `find_lanczos_eigenvectors`: Computes the k largest eigenvalues and vectors.
- `lanczos_iteration`: Constructs the tridiagonal matrix and orthonormal basis vectors.
- `multiply_matrix_vector`: Multiplies an adjacency list graph with a vector.

Complexity:
- Time: O(k * n), where k is the number of eigenvalues and n is the matrix size.
- Space: O(n), due to sparse representation and tridiagonal matrix structure.

Further Reading:
- Lanczos Algorithm: https://en.wikipedia.org/wiki/Lanczos_algorithm
- Eigenvector Centrality: https://en.wikipedia.org/wiki/Eigenvector_centrality
"""

import numpy as np


def validate_adjacency_list(graph: list[list[int]]) -> None:
    """Validates the adjacency list format for the graph.

    Note: neighbors must be plain ints in ``range(len(graph))``; the previous
    ``list[list[int | None]]`` annotation was misleading because ``None`` is
    always rejected below.

    Args:
        graph: A list of lists where each sublist contains the neighbors of a node.

    Raises:
        ValueError: If the graph is not a list of lists, or if any node has
            invalid neighbors (e.g., out-of-range or non-integer values).

    >>> validate_adjacency_list([[1, 2], [0], [0, 1]])
    >>> validate_adjacency_list([[]])  # No neighbors, valid case
    >>> validate_adjacency_list([[1], [2], [-1]])  # Invalid neighbor
    Traceback (most recent call last):
        ...
    ValueError: Invalid neighbor -1 in node 2 adjacency list.
    """
    if not isinstance(graph, list):
        raise ValueError("Graph should be a list of lists.")

    for node_index, neighbors in enumerate(graph):
        if not isinstance(neighbors, list):
            no_neighbors_message: str = (
                f"Node {node_index} should have a list of neighbors."
            )
            raise ValueError(no_neighbors_message)
        for neighbor_index in neighbors:
            # A neighbor is valid only if it is an int naming an existing node.
            if (
                not isinstance(neighbor_index, int)
                or neighbor_index < 0
                or neighbor_index >= len(graph)
            ):
                invalid_neighbor_message: str = (
                    f"Invalid neighbor {neighbor_index} in node {node_index} "
                    f"adjacency list."
                )
                raise ValueError(invalid_neighbor_message)


def lanczos_iteration(
    graph: list[list[int]], num_eigenvectors: int
) -> tuple[np.ndarray, np.ndarray]:
    """Constructs the tridiagonal matrix and orthonormal basis vectors using the
    Lanczos method.

    Args:
        graph: The graph represented as a list of adjacency lists.
        num_eigenvectors: The number of largest eigenvalues and eigenvectors
            to approximate.

    Returns:
        A tuple containing:
        - tridiagonal_matrix: A (num_eigenvectors x num_eigenvectors) symmetric
          matrix.
        - orthonormal_basis: A (num_nodes x num_eigenvectors) matrix of orthonormal
          basis vectors.

    Raises:
        ValueError: If num_eigenvectors is less than 1 or greater than the number of
            nodes.

    >>> graph = [[1, 2], [0, 2], [0, 1]]
    >>> T, Q = lanczos_iteration(graph, 2)
    >>> T.shape == (2, 2) and Q.shape == (3, 2)
    True
    """
    num_nodes: int = len(graph)
    if not (1 <= num_eigenvectors <= num_nodes):
        raise ValueError(
            "Number of eigenvectors must be between 1 and the number of "
            "nodes in the graph."
        )

    orthonormal_basis: np.ndarray = np.zeros((num_nodes, num_eigenvectors))
    tridiagonal_matrix: np.ndarray = np.zeros((num_eigenvectors, num_eigenvectors))

    # Start from a random unit vector; entries in [0, 1) guarantee a nonzero norm.
    rng = np.random.default_rng()
    initial_vector: np.ndarray = rng.random(num_nodes)
    initial_vector /= np.sqrt(np.dot(initial_vector, initial_vector))
    orthonormal_basis[:, 0] = initial_vector

    prev_beta: float = 0.0
    for iter_index in range(num_eigenvectors):
        result_vector: np.ndarray = multiply_matrix_vector(
            graph, orthonormal_basis[:, iter_index]
        )
        # Three-term recurrence: orthogonalize against the two previous vectors.
        if iter_index > 0:
            result_vector -= prev_beta * orthonormal_basis[:, iter_index - 1]
        alpha_value: float = np.dot(orthonormal_basis[:, iter_index], result_vector)
        result_vector -= alpha_value * orthonormal_basis[:, iter_index]

        prev_beta = np.sqrt(np.dot(result_vector, result_vector))
        # Guard against breakdown: only extend the basis for a non-negligible beta.
        if iter_index < num_eigenvectors - 1 and prev_beta > 1e-10:
            orthonormal_basis[:, iter_index + 1] = result_vector / prev_beta
        tridiagonal_matrix[iter_index, iter_index] = alpha_value
        if iter_index < num_eigenvectors - 1:
            tridiagonal_matrix[iter_index, iter_index + 1] = prev_beta
            tridiagonal_matrix[iter_index + 1, iter_index] = prev_beta
    return tridiagonal_matrix, orthonormal_basis


def multiply_matrix_vector(graph: list[list[int]], vector: np.ndarray) -> np.ndarray:
    """Performs multiplication of a graph's adjacency list representation with a vector.

    Args:
        graph: The adjacency list of the graph.
        vector: A 1D numpy array representing the vector to multiply.

    Returns:
        A numpy array representing the product of the adjacency list and the vector.

    Raises:
        ValueError: If the vector's length does not match the number of nodes in the
            graph.

    >>> multiply_matrix_vector([[1, 2], [0, 2], [0, 1]], np.array([1, 1, 1]))
    array([2., 2., 2.])
    >>> multiply_matrix_vector([[1, 2], [0, 2], [0, 1]], np.array([0, 1, 0]))
    array([1., 0., 1.])
    """
    num_nodes: int = len(graph)
    if vector.shape[0] != num_nodes:
        raise ValueError("Vector length must match the number of nodes in the graph.")

    result: np.ndarray = np.zeros(num_nodes)
    for node_index, neighbors in enumerate(graph):
        # Each adjacency-matrix row has a 1 for every neighbor, so the row
        # product is simply the sum of the neighbors' vector entries.
        for neighbor_index in neighbors:
            result[node_index] += vector[neighbor_index]
    return result


def find_lanczos_eigenvectors(
    graph: list[list[int]], num_eigenvectors: int
) -> tuple[np.ndarray, np.ndarray]:
    """Computes the largest eigenvalues and their corresponding eigenvectors using the
    Lanczos method.

    Args:
        graph: The graph as a list of adjacency lists.
        num_eigenvectors: Number of largest eigenvalues and eigenvectors to compute.

    Returns:
        A tuple containing:
        - eigenvalues: 1D array of the largest eigenvalues in descending order.
        - eigenvectors: 2D array where each column is an eigenvector corresponding
          to an eigenvalue.

    Raises:
        ValueError: If the graph format is invalid or num_eigenvectors is out of bounds.

    >>> eigenvalues, eigenvectors = find_lanczos_eigenvectors(
    ...     [[1, 2], [0, 2], [0, 1]], 2
    ... )
    >>> len(eigenvalues) == 2 and eigenvectors.shape[1] == 2
    True
    """
    validate_adjacency_list(graph)
    tridiagonal_matrix, orthonormal_basis = lanczos_iteration(graph, num_eigenvectors)
    # eigh returns ascending Ritz values; reverse for descending order and map
    # the tridiagonal eigenvectors back through the Lanczos basis.
    eigenvalues, eigenvectors = np.linalg.eigh(tridiagonal_matrix)
    return eigenvalues[::-1], np.dot(orthonormal_basis, eigenvectors[:, ::-1])
+ """ + import doctest + + doctest.testmod() + + +if __name__ == "__main__": + main() From 8921b56a8517cdc9455d764d7cddb10b2d7f2145 Mon Sep 17 00:00:00 2001 From: Melih Mehmet Sahin Date: Mon, 30 Dec 2024 19:53:50 +0000 Subject: [PATCH 191/260] Adding tests to monotonic_array.py (#12073) * Contributes to #9943 by adding tests to monotonic_array.py Addeded doctest in the if __name__. Checks for negaitves and an array of same integers * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- data_structures/arrays/monotonic_array.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/data_structures/arrays/monotonic_array.py b/data_structures/arrays/monotonic_array.py index c50a21530814..342d443a9cfc 100644 --- a/data_structures/arrays/monotonic_array.py +++ b/data_structures/arrays/monotonic_array.py @@ -9,6 +9,16 @@ def is_monotonic(nums: list[int]) -> bool: True >>> is_monotonic([1, 3, 2]) False + >>> is_monotonic([1,2,3,4,5,6,5]) + False + >>> is_monotonic([-3,-2,-1]) + True + >>> is_monotonic([-5,-6,-7]) + True + >>> is_monotonic([0,0,0]) + True + >>> is_monotonic([-100,0,100]) + True """ return all(nums[i] <= nums[i + 1] for i in range(len(nums) - 1)) or all( nums[i] >= nums[i + 1] for i in range(len(nums) - 1) @@ -21,3 +31,7 @@ def is_monotonic(nums: list[int]) -> bool: print(is_monotonic([1, 2, 2, 3])) # Output: True print(is_monotonic([6, 5, 4, 4])) # Output: True print(is_monotonic([1, 3, 2])) # Output: False + + import doctest + + doctest.testmod() From 5942059cb571b213e5ec82fe9b45e5a9bef4864b Mon Sep 17 00:00:00 2001 From: Giulio Tantaro Date: Mon, 30 Dec 2024 21:03:31 +0100 Subject: [PATCH 192/260] add doctest for quick_sort_3_partition (#11779) --- sorts/quick_sort_3_partition.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/sorts/quick_sort_3_partition.py 
b/sorts/quick_sort_3_partition.py index 1a6db6a364f0..279b9a68f5a6 100644 --- a/sorts/quick_sort_3_partition.py +++ b/sorts/quick_sort_3_partition.py @@ -1,4 +1,27 @@ def quick_sort_3partition(sorting: list, left: int, right: int) -> None: + """ " + Python implementation of quick sort algorithm with 3-way partition. + The idea of 3-way quick sort is based on "Dutch National Flag algorithm". + + :param sorting: sort list + :param left: left endpoint of sorting + :param right: right endpoint of sorting + :return: None + + Examples: + >>> array1 = [5, -1, -1, 5, 5, 24, 0] + >>> quick_sort_3partition(array1, 0, 6) + >>> array1 + [-1, -1, 0, 5, 5, 5, 24] + >>> array2 = [9, 0, 2, 6] + >>> quick_sort_3partition(array2, 0, 3) + >>> array2 + [0, 2, 6, 9] + >>> array3 = [] + >>> quick_sort_3partition(array3, 0, 0) + >>> array3 + [] + """ if right <= left: return a = i = left From 8767d1d72436b8aff89f9c11d045ad95bec02ba4 Mon Sep 17 00:00:00 2001 From: Rodrigo Castro Date: Mon, 30 Dec 2024 21:36:41 -0300 Subject: [PATCH 193/260] add some documentation for heap sort (#9949) * add some documentation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix typing * Update heap_sort.py * Update heap_sort.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- sorts/heap_sort.py | 47 +++++++++++++++++++++++++++------------------- 1 file changed, 28 insertions(+), 19 deletions(-) diff --git a/sorts/heap_sort.py b/sorts/heap_sort.py index 4dca879bd89c..44ee1d4b39f1 100644 --- a/sorts/heap_sort.py +++ b/sorts/heap_sort.py @@ -1,17 +1,22 @@ """ -This is a pure Python implementation of the heap sort algorithm. - -For doctests run following command: -python -m doctest -v heap_sort.py -or -python3 -m doctest -v heap_sort.py - -For manual testing run: -python heap_sort.py +A pure Python implementation of the heap sort algorithm. 
""" -def heapify(unsorted, index, heap_size): +def heapify(unsorted: list[int], index: int, heap_size: int) -> None: + """ + :param unsorted: unsorted list containing integers numbers + :param index: index + :param heap_size: size of the heap + :return: None + >>> unsorted = [1, 4, 3, 5, 2] + >>> heapify(unsorted, 0, len(unsorted)) + >>> unsorted + [4, 5, 3, 1, 2] + >>> heapify(unsorted, 0, len(unsorted)) + >>> unsorted + [5, 4, 3, 1, 2] + """ largest = index left_index = 2 * index + 1 right_index = 2 * index + 2 @@ -22,26 +27,26 @@ def heapify(unsorted, index, heap_size): largest = right_index if largest != index: - unsorted[largest], unsorted[index] = unsorted[index], unsorted[largest] + unsorted[largest], unsorted[index] = (unsorted[index], unsorted[largest]) heapify(unsorted, largest, heap_size) -def heap_sort(unsorted): +def heap_sort(unsorted: list[int]) -> list[int]: """ - Pure implementation of the heap sort algorithm in Python - :param collection: some mutable ordered collection with heterogeneous - comparable items inside + A pure Python implementation of the heap sort algorithm + + :param collection: a mutable ordered collection of heterogeneous comparable items :return: the same collection ordered by ascending Examples: >>> heap_sort([0, 5, 3, 2, 2]) [0, 2, 2, 3, 5] - >>> heap_sort([]) [] - >>> heap_sort([-2, -5, -45]) [-45, -5, -2] + >>> heap_sort([3, 7, 9, 28, 123, -5, 8, -30, -200, 0, 4]) + [-200, -30, -5, 0, 3, 4, 7, 8, 9, 28, 123] """ n = len(unsorted) for i in range(n // 2 - 1, -1, -1): @@ -53,6 +58,10 @@ def heap_sort(unsorted): if __name__ == "__main__": + import doctest + + doctest.testmod() user_input = input("Enter numbers separated by a comma:\n").strip() - unsorted = [int(item) for item in user_input.split(",")] - print(heap_sort(unsorted)) + if user_input: + unsorted = [int(item) for item in user_input.split(",")] + print(f"{heap_sort(unsorted) = }") From 8439fa8d1da94370250d153cd57f9bdcc382a062 Mon Sep 17 00:00:00 2001 From: Paarth Goyal 
"""
Period of a simple pendulum.

A simple pendulum is a point mass (the bob) hanging from a massless string of
length L, swinging in a vertical plane under gravity. For small swing angles
the period is independent of both the amplitude and the bob's mass and is
approximated by

    T ~= 2 * pi * sqrt(L / g)

where L is the string length in metres and g is the gravitational
acceleration (scipy's standard value, ~9.80665 m/s^2).

Reference: https://byjus.com/jee/simple-pendulum/
"""

from math import pi

from scipy.constants import g


def period_of_pendulum(length: float) -> float:
    """
    Return the small-angle period of a simple pendulum of the given length.

    :param length: string length in metres; must be non-negative
    :return: the period in seconds
    :raises ValueError: if ``length`` is negative

    >>> period_of_pendulum(1.23)
    2.2252155506257845
    >>> period_of_pendulum(2.37)
    3.0888278441908574
    >>> period_of_pendulum(5.63)
    4.76073193364765
    >>> period_of_pendulum(-12)
    Traceback (most recent call last):
        ...
    ValueError: The length should be non-negative
    >>> period_of_pendulum(0)
    0.0
    """
    if length < 0:
        raise ValueError("The length should be non-negative")
    # T = 2 * pi * sqrt(L / g), kept in ** 0.5 form for exact float parity.
    period = 2 * pi * (length / g) ** 0.5
    return period


if __name__ == "__main__":
    import doctest

    doctest.testmod()
+142,7 @@ * [Haralick Descriptors](computer_vision/haralick_descriptors.py) * [Harris Corner](computer_vision/harris_corner.py) * [Horn Schunck](computer_vision/horn_schunck.py) + * [Intensity Based Segmentation](computer_vision/intensity_based_segmentation.py) * [Mean Threshold](computer_vision/mean_threshold.py) * [Mosaic Augmentation](computer_vision/mosaic_augmentation.py) * [Pooling Functions](computer_vision/pooling_functions.py) @@ -507,6 +508,7 @@ * [Kahns Algorithm Long](graphs/kahns_algorithm_long.py) * [Kahns Algorithm Topo](graphs/kahns_algorithm_topo.py) * [Karger](graphs/karger.py) + * [Lanczos Eigenvectors](graphs/lanczos_eigenvectors.py) * [Markov Chain](graphs/markov_chain.py) * [Matching Min Vertex Cover](graphs/matching_min_vertex_cover.py) * [Minimum Path Sum](graphs/minimum_path_sum.py) @@ -886,6 +888,7 @@ * [N Body Simulation](physics/n_body_simulation.py) * [Newtons Law Of Gravitation](physics/newtons_law_of_gravitation.py) * [Newtons Second Law Of Motion](physics/newtons_second_law_of_motion.py) + * [Period Of Pendulum](physics/period_of_pendulum.py) * [Photoelectric Effect](physics/photoelectric_effect.py) * [Potential Energy](physics/potential_energy.py) * [Rainfall Intensity](physics/rainfall_intensity.py) diff --git a/maths/trapezoidal_rule.py b/maths/trapezoidal_rule.py index 0186629ee378..21b10b239b5f 100644 --- a/maths/trapezoidal_rule.py +++ b/maths/trapezoidal_rule.py @@ -1,28 +1,25 @@ """ Numerical integration or quadrature for a smooth function f with known values at x_i - -This method is the classical approach of suming 'Equally Spaced Abscissas' - -method 1: -"extended trapezoidal rule" -int(f) = dx/2 * (f1 + 2f2 + ... + fn) - """ -def method_1(boundary, steps): +def trapezoidal_rule(boundary, steps): """ - Apply the extended trapezoidal rule to approximate the integral of function f(x) - over the interval defined by 'boundary' with the number of 'steps'. 
- - Args: - boundary (list of floats): A list containing the start and end values [a, b]. - steps (int): The number of steps or subintervals. - Returns: - float: Approximation of the integral of f(x) over [a, b]. - Examples: - >>> method_1([0, 1], 10) - 0.3349999999999999 + Implements the extended trapezoidal rule for numerical integration. + The function f(x) is provided below. + + :param boundary: List containing the lower and upper bounds of integration [a, b] + :param steps: The number of steps (intervals) used in the approximation + :return: The numerical approximation of the integral + + >>> abs(trapezoidal_rule([0, 1], 10) - 0.33333) < 0.01 + True + >>> abs(trapezoidal_rule([0, 1], 100) - 0.33333) < 0.01 + True + >>> abs(trapezoidal_rule([0, 2], 1000) - 2.66667) < 0.01 + True + >>> abs(trapezoidal_rule([1, 2], 1000) - 2.33333) < 0.01 + True """ h = (boundary[1] - boundary[0]) / steps a = boundary[0] @@ -31,7 +28,6 @@ def method_1(boundary, steps): y = 0.0 y += (h / 2.0) * f(a) for i in x_i: - # print(i) y += h * f(i) y += (h / 2.0) * f(b) return y @@ -39,49 +35,66 @@ def method_1(boundary, steps): def make_points(a, b, h): """ - Generates points between 'a' and 'b' with step size 'h', excluding the end points. - Args: - a (float): Start value - b (float): End value - h (float): Step size - Examples: + Generates points between a and b with step size h for trapezoidal integration. 
+ + :param a: The lower bound of integration + :param b: The upper bound of integration + :param h: The step size + :yield: The next x-value in the range (a, b) + + >>> list(make_points(0, 1, 0.1)) # doctest: +NORMALIZE_WHITESPACE + [0.1, 0.2, 0.30000000000000004, 0.4, 0.5, 0.6, 0.7, 0.7999999999999999, \ + 0.8999999999999999] >>> list(make_points(0, 10, 2.5)) [2.5, 5.0, 7.5] - >>> list(make_points(0, 10, 2)) [2, 4, 6, 8] - >>> list(make_points(1, 21, 5)) [6, 11, 16] - >>> list(make_points(1, 5, 2)) [3] - >>> list(make_points(1, 4, 3)) [] """ x = a + h while x <= (b - h): yield x - x = x + h + x += h -def f(x): # enter your function here +def f(x): """ - Example: - >>> f(2) - 4 + This is the function to integrate, f(x) = (x - 0)^2 = x^2. + + :param x: The input value + :return: The value of f(x) + + >>> f(0) + 0 + >>> f(1) + 1 + >>> f(0.5) + 0.25 """ - y = (x - 0) * (x - 0) - return y + return x**2 def main(): - a = 0.0 # Lower bound of integration - b = 1.0 # Upper bound of integration - steps = 10.0 # define number of steps or resolution - boundary = [a, b] # define boundary of integration - y = method_1(boundary, steps) + """ + Main function to test the trapezoidal rule. 
+ :a: Lower bound of integration + :b: Upper bound of integration + :steps: define number of steps or resolution + :boundary: define boundary of integration + + >>> main() + y = 0.3349999999999999 + """ + a = 0.0 + b = 1.0 + steps = 10.0 + boundary = [a, b] + y = trapezoidal_rule(boundary, steps) print(f"y = {y}") From 91a22c2e36477623b1f81518ff18c6f8617f81fb Mon Sep 17 00:00:00 2001 From: SEIKH NABAB UDDIN <93948993+nababuddin@users.noreply.github.com> Date: Tue, 31 Dec 2024 07:39:14 +0530 Subject: [PATCH 196/260] Create digital differential analyzer_line.py (#10929) * Create DDA_line_drawing.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Rename DDA_line_drawing.py to digital differential analyzer_line_drawing.py * Rename DDA_line_drawing.py to digital_differential_analyzer_line_drawing.py * Update digital_differential_analyzer_line_drawing.py * Update digital_differential_analyzer_line_drawing.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update digital_differential_analyzer_line_drawing.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update digital_differential_analyzer_line_drawing.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update digital_differential_analyzer_line_drawing.py * Update digital_differential_analyzer_line_drawing.py * Update digital_differential_analyzer_line_drawing.py * Update digital_differential_analyzer_line_drawing.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review Co-authored-by: Tianyi Zheng * Update and rename digital_differential_analyzer_line_drawing.py to digital_differential_analyzer_line.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update 
def digital_differential_analyzer_line(
    p1: tuple[int, int], p2: tuple[int, int]
) -> list[tuple[int, int]]:
    """
    Draws a line between two points using the DDA algorithm.

    Args:
    - p1: Coordinates of the starting point.
    - p2: Coordinates of the ending point.
    Returns:
    - List of coordinate points that form the line (the starting point itself
      is excluded, matching the original behaviour).

    >>> digital_differential_analyzer_line((1, 1), (4, 4))
    [(2, 2), (3, 3), (4, 4)]
    >>> digital_differential_analyzer_line((2, 3), (2, 3))
    []
    """
    x1, y1 = p1
    x2, y2 = p2
    dx = x2 - x1
    dy = y2 - y1
    steps = max(abs(dx), abs(dy))
    # Bug fix: coincident endpoints previously raised ZeroDivisionError.
    if steps == 0:
        return []
    x_increment = dx / steps
    y_increment = dy / steps
    coordinates: list[tuple[int, int]] = []
    x: float = x1
    y: float = y1
    for _ in range(steps):
        x += x_increment
        y += y_increment
        # round() already returns an int for floats in Python 3.
        coordinates.append((round(x), round(y)))
    return coordinates
"""
The Geometric Mean of n numbers is defined as the n-th root of the product
of those numbers. It is used to measure the central tendency of the numbers.
https://en.wikipedia.org/wiki/Geometric_mean
"""


def compute_geometric_mean(*args: int) -> float:
    """
    Return the geometric mean of the argument numbers.
    >>> compute_geometric_mean(2,8)
    4.0
    >>> compute_geometric_mean('a', 4)
    Traceback (most recent call last):
        ...
    TypeError: Not a Number
    >>> compute_geometric_mean(5, 125)
    25.0
    >>> compute_geometric_mean(1, 0)
    0.0
    >>> compute_geometric_mean(1, 5, 25, 5)
    5.0
    >>> compute_geometric_mean(2, -2)
    Traceback (most recent call last):
        ...
    ArithmeticError: Cannot Compute Geometric Mean for these numbers.
    >>> compute_geometric_mean(-5, 25, 1)
    -5.0
    """
    # Reject any non-numeric argument up front.
    for number in args:
        if not isinstance(number, (int, float)):
            raise TypeError("Not a Number")

    product = 1
    for number in args:
        product *= number

    # An even root of a negative product is not real-valued.
    if product < 0 and len(args) % 2 == 0:
        raise ArithmeticError("Cannot Compute Geometric Mean for these numbers.")

    mean = abs(product) ** (1 / len(args))
    # Python would produce a complex root for a negative base with an odd
    # exponent, so take the real odd root via the absolute value and negate.
    if product < 0:
        mean = -mean

    # Floating-point arithmetic can give e.g. 64 ** (1/3) == 3.9999999...;
    # snap to the nearest integer when it reproduces the product exactly.
    candidate = float(round(mean))
    if candidate ** len(args) == product:
        mean = candidate
    return mean


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="compute_geometric_mean")
    print(compute_geometric_mean(-3, -27))
Curve](graphics/bezier_curve.py) + * [Digital Differential Analyzer Line](graphics/digital_differential_analyzer_line.py) * [Vector3 For 2D Rendering](graphics/vector3_for_2d_rendering.py) ## Graphs @@ -663,6 +664,7 @@ * [Gamma](maths/gamma.py) * [Gaussian](maths/gaussian.py) * [Gcd Of N Numbers](maths/gcd_of_n_numbers.py) + * [Geometric Mean](maths/geometric_mean.py) * [Germain Primes](maths/germain_primes.py) * [Greatest Common Divisor](maths/greatest_common_divisor.py) * [Hardy Ramanujanalgo](maths/hardy_ramanujanalgo.py) From b653aee627a95de423f1cad97f283de904271ff7 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sun, 12 Jan 2025 19:05:08 +0300 Subject: [PATCH 199/260] Fix ruff (#12515) * Empty commit * Fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix * Fix * Fix * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: MaximSmolskiy --- DIRECTORY.md | 4 ++-- ciphers/{base64.py => base64_cipher.py} | 0 project_euler/problem_002/sol4.py | 2 +- strings/{wave.py => wave_string.py} | 0 4 files changed, 3 insertions(+), 3 deletions(-) rename ciphers/{base64.py => base64_cipher.py} (100%) rename strings/{wave.py => wave_string.py} (100%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 3f0a5dbb140f..aad6c72aa8ee 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -86,7 +86,7 @@ * [Baconian Cipher](ciphers/baconian_cipher.py) * [Base16](ciphers/base16.py) * [Base32](ciphers/base32.py) - * [Base64](ciphers/base64.py) + * [Base64 Cipher](ciphers/base64_cipher.py) * [Base85](ciphers/base85.py) * [Beaufort Cipher](ciphers/beaufort_cipher.py) * [Bifid](ciphers/bifid.py) @@ -1331,7 +1331,7 @@ * [Title](strings/title.py) * [Top K Frequent Words](strings/top_k_frequent_words.py) * [Upper](strings/upper.py) - * [Wave](strings/wave.py) + * [Wave String](strings/wave_string.py) * [Wildcard Pattern 
Matching](strings/wildcard_pattern_matching.py) * [Word Occurrence](strings/word_occurrence.py) * [Word Patterns](strings/word_patterns.py) diff --git a/ciphers/base64.py b/ciphers/base64_cipher.py similarity index 100% rename from ciphers/base64.py rename to ciphers/base64_cipher.py diff --git a/project_euler/problem_002/sol4.py b/project_euler/problem_002/sol4.py index 3a2e4fce341c..a13d34fd760e 100644 --- a/project_euler/problem_002/sol4.py +++ b/project_euler/problem_002/sol4.py @@ -61,7 +61,7 @@ def solution(n: int = 4000000) -> int: if n <= 0: raise ValueError("Parameter n must be greater than or equal to one.") getcontext().prec = 100 - phi = (Decimal(5) ** Decimal(0.5) + 1) / Decimal(2) + phi = (Decimal(5) ** Decimal("0.5") + 1) / Decimal(2) index = (math.floor(math.log(n * (phi + 2), phi) - 1) // 3) * 3 + 2 num = Decimal(round(phi ** Decimal(index + 1))) / (phi + 2) diff --git a/strings/wave.py b/strings/wave_string.py similarity index 100% rename from strings/wave.py rename to strings/wave_string.py From 4c92de5e03310811a376058e110db8d615769087 Mon Sep 17 00:00:00 2001 From: Sanjay Muthu Date: Mon, 13 Jan 2025 05:05:22 +0530 Subject: [PATCH 200/260] Fix dynamic_programming/longest_increasing_subsequence.py (#12517) * Fix #12510 * Added the doctest mentioned in the issue * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed Grammer Mistake * Update longest_increasing_subsequence.py * Update longest_increasing_subsequence.py * Update longest_increasing_subsequence.py * Update longest_increasing_subsequence.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- dynamic_programming/longest_increasing_subsequence.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/dynamic_programming/longest_increasing_subsequence.py b/dynamic_programming/longest_increasing_subsequence.py index 
d839757f6da5..1863a882c41e 100644 --- a/dynamic_programming/longest_increasing_subsequence.py +++ b/dynamic_programming/longest_increasing_subsequence.py @@ -24,8 +24,10 @@ def longest_subsequence(array: list[int]) -> list[int]: # This function is recu [10, 22, 33, 41, 60, 80] >>> longest_subsequence([4, 8, 7, 5, 1, 12, 2, 3, 9]) [1, 2, 3, 9] + >>> longest_subsequence([28, 26, 12, 23, 35, 39]) + [12, 23, 35, 39] >>> longest_subsequence([9, 8, 7, 6, 5, 7]) - [8] + [5, 7] >>> longest_subsequence([1, 1, 1]) [1, 1, 1] >>> longest_subsequence([]) @@ -44,7 +46,7 @@ def longest_subsequence(array: list[int]) -> list[int]: # This function is recu while not is_found and i < array_length: if array[i] < pivot: is_found = True - temp_array = [element for element in array[i:] if element >= array[i]] + temp_array = array[i:] temp_array = longest_subsequence(temp_array) if len(temp_array) > len(longest_subseq): longest_subseq = temp_array From 787aa5d3b59640b2d9161b56ca8fde763597efe4 Mon Sep 17 00:00:00 2001 From: Siddhant <87547498+Siddhant231xyz@users.noreply.github.com> Date: Sun, 12 Jan 2025 20:54:23 -0500 Subject: [PATCH 201/260] doctest all_combinations.py (#12506) * doctest in all_combinations.py * added doctest in all_combinations.py * doctests in all_combinations.py * add doctest all_combinations.py * add --------- Co-authored-by: Siddhant Jain --- backtracking/all_combinations.py | 34 ++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/backtracking/all_combinations.py b/backtracking/all_combinations.py index 390decf3a05b..1d15c6263e14 100644 --- a/backtracking/all_combinations.py +++ b/backtracking/all_combinations.py @@ -12,6 +12,8 @@ def combination_lists(n: int, k: int) -> list[list[int]]: """ + Generates all possible combinations of k numbers out of 1 ... n using itertools. 
+ >>> combination_lists(n=4, k=2) [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]] """ @@ -20,6 +22,8 @@ def combination_lists(n: int, k: int) -> list[list[int]]: def generate_all_combinations(n: int, k: int) -> list[list[int]]: """ + Generates all possible combinations of k numbers out of 1 ... n using backtracking. + >>> generate_all_combinations(n=4, k=2) [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]] >>> generate_all_combinations(n=0, k=0) @@ -34,6 +38,14 @@ def generate_all_combinations(n: int, k: int) -> list[list[int]]: ValueError: n must not be negative >>> generate_all_combinations(n=5, k=4) [[1, 2, 3, 4], [1, 2, 3, 5], [1, 2, 4, 5], [1, 3, 4, 5], [2, 3, 4, 5]] + >>> generate_all_combinations(n=3, k=3) + [[1, 2, 3]] + >>> generate_all_combinations(n=3, k=1) + [[1], [2], [3]] + >>> generate_all_combinations(n=1, k=0) + [[]] + >>> generate_all_combinations(n=1, k=1) + [[1]] >>> from itertools import combinations >>> all(generate_all_combinations(n, k) == combination_lists(n, k) ... for n in range(1, 6) for k in range(1, 6)) @@ -56,6 +68,28 @@ def create_all_state( current_list: list[int], total_list: list[list[int]], ) -> None: + """ + Helper function to recursively build all combinations. 
+ + >>> create_all_state(1, 4, 2, [], result := []) + >>> result + [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]] + >>> create_all_state(1, 3, 3, [], result := []) + >>> result + [[1, 2, 3]] + >>> create_all_state(2, 2, 1, [1], result := []) + >>> result + [[1, 2]] + >>> create_all_state(1, 0, 0, [], result := []) + >>> result + [[]] + >>> create_all_state(1, 4, 0, [1, 2], result := []) + >>> result + [[1, 2]] + >>> create_all_state(5, 4, 2, [1, 2], result := []) + >>> result + [] + """ if level == 0: total_list.append(current_list[:]) return From cfcc84edf7d14cb56f52ba6fbd8c8deb2e9a7852 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 13 Jan 2025 23:49:07 +0300 Subject: [PATCH 202/260] Fix build (#12516) * Empty commit * Fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix * Apply suggestions from code review --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- web_programming/current_stock_price.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/web_programming/current_stock_price.py b/web_programming/current_stock_price.py index d0a65e9aac84..573e1f575c8e 100644 --- a/web_programming/current_stock_price.py +++ b/web_programming/current_stock_price.py @@ -15,7 +15,7 @@ def stock_price(symbol: str = "AAPL") -> str: """ >>> stock_price("EEEE") - '-' + '- ' >>> isinstance(float(stock_price("GOOG")),float) True """ @@ -24,12 +24,10 @@ def stock_price(symbol: str = "AAPL") -> str: url, headers={"USER-AGENT": "Mozilla/5.0"}, timeout=10 ).text soup = BeautifulSoup(yahoo_finance_source, "html.parser") - specific_fin_streamer_tag = soup.find("fin-streamer", {"data-testid": "qsp-price"}) - if specific_fin_streamer_tag: - text = specific_fin_streamer_tag.get_text() - return text - return "No tag with the specified data-test attribute found." 
+ if specific_fin_streamer_tag := soup.find("span", {"data-testid": "qsp-price"}): + return specific_fin_streamer_tag.get_text() + return "No tag with the specified data-testid attribute found." # Search for the symbol at https://finance.yahoo.com/lookup From 4fe50bc1fcf82fceb61839bae314720c092c0692 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 21:52:12 +0100 Subject: [PATCH 203/260] [pre-commit.ci] pre-commit autoupdate -- ruff 2025 stable format (#12521) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.8.6 → v0.9.1](https://github.com/astral-sh/ruff-pre-commit/compare/v0.8.6...v0.9.1) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update maths/dual_number_automatic_differentiation.py * Update maths/dual_number_automatic_differentiation.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dual_number_automatic_differentiation.py * Update dual_number_automatic_differentiation.py * No tag with the specified data-test attribute found. 
--------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 2 +- ciphers/base64_cipher.py | 12 +++--- ciphers/caesar_cipher.py | 2 +- computer_vision/flip_augmentation.py | 2 +- computer_vision/mosaic_augmentation.py | 2 +- .../hashing/number_theory/prime_numbers.py | 6 +-- data_structures/heap/min_heap.py | 6 +-- data_structures/kd_tree/tests/test_kdtree.py | 12 +++--- .../suffix_tree/tests/test_suffix_tree.py | 24 +++++------ dynamic_programming/climbing_stairs.py | 6 +-- .../iterating_through_submasks.py | 6 +-- .../matrix_chain_multiplication.py | 2 +- .../linear_discriminant_analysis.py | 4 +- .../dual_number_automatic_differentiation.py | 6 +-- maths/max_sum_sliding_window.py | 4 +- .../integration_by_simpson_approx.py | 12 +++--- maths/prime_check.py | 12 +++--- maths/primelib.py | 42 +++++++++---------- matrix/matrix_based_game.py | 2 +- neural_network/input_data.py | 6 +-- scripts/validate_solutions.py | 6 +-- strings/jaro_winkler.py | 4 +- web_programming/fetch_anime_and_play.py | 4 +- 23 files changed, 93 insertions(+), 91 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ec1dbca3a41c..3b1dd9658d7f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.8.6 + rev: v0.9.1 hooks: - id: ruff - id: ruff-format diff --git a/ciphers/base64_cipher.py b/ciphers/base64_cipher.py index 2b950b1be37d..038d13963d95 100644 --- a/ciphers/base64_cipher.py +++ b/ciphers/base64_cipher.py @@ -105,13 +105,13 @@ def base64_decode(encoded_data: str) -> bytes: # Check if the encoded string contains non base64 characters if padding: - assert all( - char in B64_CHARSET for char in encoded_data[:-padding] - ), "Invalid base64 character(s) found." 
+ assert all(char in B64_CHARSET for char in encoded_data[:-padding]), ( + "Invalid base64 character(s) found." + ) else: - assert all( - char in B64_CHARSET for char in encoded_data - ), "Invalid base64 character(s) found." + assert all(char in B64_CHARSET for char in encoded_data), ( + "Invalid base64 character(s) found." + ) # Check the padding assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding" diff --git a/ciphers/caesar_cipher.py b/ciphers/caesar_cipher.py index 9c096fe8a7da..1cf4d67cbaed 100644 --- a/ciphers/caesar_cipher.py +++ b/ciphers/caesar_cipher.py @@ -225,7 +225,7 @@ def brute_force(input_string: str, alphabet: str | None = None) -> dict[int, str if __name__ == "__main__": while True: - print(f'\n{"-" * 10}\n Menu\n{"-" * 10}') + print(f"\n{'-' * 10}\n Menu\n{'-' * 10}") print(*["1.Encrypt", "2.Decrypt", "3.BruteForce", "4.Quit"], sep="\n") # get user input diff --git a/computer_vision/flip_augmentation.py b/computer_vision/flip_augmentation.py index 77a8cbd7b14f..7301424824df 100644 --- a/computer_vision/flip_augmentation.py +++ b/computer_vision/flip_augmentation.py @@ -33,7 +33,7 @@ def main() -> None: file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0] file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}" cv2.imwrite(f"{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85]) - print(f"Success {index+1}/{len(new_images)} with {file_name}") + print(f"Success {index + 1}/{len(new_images)} with {file_name}") annos_list = [] for anno in new_annos[index]: obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}" diff --git a/computer_vision/mosaic_augmentation.py b/computer_vision/mosaic_augmentation.py index cd923dfe095f..d881347121ea 100644 --- a/computer_vision/mosaic_augmentation.py +++ b/computer_vision/mosaic_augmentation.py @@ -41,7 +41,7 @@ def main() -> None: file_name = path.split(os.sep)[-1].rsplit(".", 1)[0] file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}" cv2.imwrite(f"{file_root}.jpg", 
new_image, [cv2.IMWRITE_JPEG_QUALITY, 85]) - print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}") + print(f"Succeeded {index + 1}/{NUMBER_IMAGES} with {file_name}") annos_list = [] for anno in new_annos: width = anno[3] - anno[1] diff --git a/data_structures/hashing/number_theory/prime_numbers.py b/data_structures/hashing/number_theory/prime_numbers.py index 2549a1477b2b..82071b5e9f09 100644 --- a/data_structures/hashing/number_theory/prime_numbers.py +++ b/data_structures/hashing/number_theory/prime_numbers.py @@ -32,9 +32,9 @@ def is_prime(number: int) -> bool: """ # precondition - assert isinstance(number, int) and ( - number >= 0 - ), "'number' must been an int and positive" + assert isinstance(number, int) and (number >= 0), ( + "'number' must been an int and positive" + ) if 1 < number < 4: # 2 and 3 are primes diff --git a/data_structures/heap/min_heap.py b/data_structures/heap/min_heap.py index ce7ed570a58d..577b98d788a1 100644 --- a/data_structures/heap/min_heap.py +++ b/data_structures/heap/min_heap.py @@ -124,9 +124,9 @@ def is_empty(self): return len(self.heap) == 0 def decrease_key(self, node, new_value): - assert ( - self.heap[self.idx_of_element[node]].val > new_value - ), "newValue must be less that current value" + assert self.heap[self.idx_of_element[node]].val > new_value, ( + "newValue must be less that current value" + ) node.val = new_value self.heap_dict[node.name] = new_value self.sift_up(self.idx_of_element[node]) diff --git a/data_structures/kd_tree/tests/test_kdtree.py b/data_structures/kd_tree/tests/test_kdtree.py index dce5e4f34ff4..d6a4a66dd24d 100644 --- a/data_structures/kd_tree/tests/test_kdtree.py +++ b/data_structures/kd_tree/tests/test_kdtree.py @@ -48,14 +48,14 @@ def test_build_kdtree(num_points, cube_size, num_dimensions, depth, expected_res assert kdtree is not None, "Expected a KDNode, got None" # Check if root has correct dimensions - assert ( - len(kdtree.point) == num_dimensions - ), f"Expected point dimension 
{num_dimensions}, got {len(kdtree.point)}" + assert len(kdtree.point) == num_dimensions, ( + f"Expected point dimension {num_dimensions}, got {len(kdtree.point)}" + ) # Check that the tree is balanced to some extent (simplistic check) - assert isinstance( - kdtree, KDNode - ), f"Expected KDNode instance, got {type(kdtree)}" + assert isinstance(kdtree, KDNode), ( + f"Expected KDNode instance, got {type(kdtree)}" + ) def test_nearest_neighbour_search(): diff --git a/data_structures/suffix_tree/tests/test_suffix_tree.py b/data_structures/suffix_tree/tests/test_suffix_tree.py index 45c6790ac48a..c9dbe199d19d 100644 --- a/data_structures/suffix_tree/tests/test_suffix_tree.py +++ b/data_structures/suffix_tree/tests/test_suffix_tree.py @@ -22,18 +22,18 @@ def test_search_existing_patterns(self) -> None: patterns = ["ana", "ban", "na"] for pattern in patterns: with self.subTest(pattern=pattern): - assert self.suffix_tree.search( - pattern - ), f"Pattern '{pattern}' should be found." + assert self.suffix_tree.search(pattern), ( + f"Pattern '{pattern}' should be found." + ) def test_search_non_existing_patterns(self) -> None: """Test searching for patterns that do not exist in the suffix tree.""" patterns = ["xyz", "apple", "cat"] for pattern in patterns: with self.subTest(pattern=pattern): - assert not self.suffix_tree.search( - pattern - ), f"Pattern '{pattern}' should not be found." + assert not self.suffix_tree.search(pattern), ( + f"Pattern '{pattern}' should not be found." + ) def test_search_empty_pattern(self) -> None: """Test searching for an empty pattern.""" @@ -41,18 +41,18 @@ def test_search_empty_pattern(self) -> None: def test_search_full_text(self) -> None: """Test searching for the full text.""" - assert self.suffix_tree.search( - self.text - ), "The full text should be found in the suffix tree." + assert self.suffix_tree.search(self.text), ( + "The full text should be found in the suffix tree." 
+ ) def test_search_substrings(self) -> None: """Test searching for substrings of the full text.""" substrings = ["ban", "ana", "a", "na"] for substring in substrings: with self.subTest(substring=substring): - assert self.suffix_tree.search( - substring - ), f"Substring '{substring}' should be found." + assert self.suffix_tree.search(substring), ( + f"Substring '{substring}' should be found." + ) if __name__ == "__main__": diff --git a/dynamic_programming/climbing_stairs.py b/dynamic_programming/climbing_stairs.py index d6273d025f08..38bdb427eedc 100644 --- a/dynamic_programming/climbing_stairs.py +++ b/dynamic_programming/climbing_stairs.py @@ -25,9 +25,9 @@ def climb_stairs(number_of_steps: int) -> int: ... AssertionError: number_of_steps needs to be positive integer, your input -7 """ - assert ( - isinstance(number_of_steps, int) and number_of_steps > 0 - ), f"number_of_steps needs to be positive integer, your input {number_of_steps}" + assert isinstance(number_of_steps, int) and number_of_steps > 0, ( + f"number_of_steps needs to be positive integer, your input {number_of_steps}" + ) if number_of_steps == 1: return 1 previous, current = 1, 1 diff --git a/dynamic_programming/iterating_through_submasks.py b/dynamic_programming/iterating_through_submasks.py index 372dd2c74a71..efab6dacff3f 100644 --- a/dynamic_programming/iterating_through_submasks.py +++ b/dynamic_programming/iterating_through_submasks.py @@ -37,9 +37,9 @@ def list_of_submasks(mask: int) -> list[int]: """ - assert ( - isinstance(mask, int) and mask > 0 - ), f"mask needs to be positive integer, your input {mask}" + assert isinstance(mask, int) and mask > 0, ( + f"mask needs to be positive integer, your input {mask}" + ) """ first submask iterated will be mask itself then operation will be performed diff --git a/dynamic_programming/matrix_chain_multiplication.py b/dynamic_programming/matrix_chain_multiplication.py index 10e136b9f0db..4c0c771f9092 100644 --- 
a/dynamic_programming/matrix_chain_multiplication.py +++ b/dynamic_programming/matrix_chain_multiplication.py @@ -134,7 +134,7 @@ def elapsed_time(msg: str) -> Iterator: start = perf_counter_ns() yield - print(f"Finished: {msg} in {(perf_counter_ns() - start) / 10 ** 9} seconds.") + print(f"Finished: {msg} in {(perf_counter_ns() - start) / 10**9} seconds.") if __name__ == "__main__": diff --git a/machine_learning/linear_discriminant_analysis.py b/machine_learning/linear_discriminant_analysis.py index 86f28aef671a..8528ccbbae51 100644 --- a/machine_learning/linear_discriminant_analysis.py +++ b/machine_learning/linear_discriminant_analysis.py @@ -322,7 +322,7 @@ def main(): user_count = valid_input( input_type=int, condition=lambda x: x > 0, - input_msg=(f"Enter The number of instances for class_{i+1}: "), + input_msg=(f"Enter The number of instances for class_{i + 1}: "), err_msg="Number of instances should be positive!", ) counts.append(user_count) @@ -333,7 +333,7 @@ def main(): for a in range(n_classes): user_mean = valid_input( input_type=float, - input_msg=(f"Enter the value of mean for class_{a+1}: "), + input_msg=(f"Enter the value of mean for class_{a + 1}: "), err_msg="This is an invalid value.", ) user_means.append(user_mean) diff --git a/maths/dual_number_automatic_differentiation.py b/maths/dual_number_automatic_differentiation.py index f98997c8be4d..09aeb17a4aea 100644 --- a/maths/dual_number_automatic_differentiation.py +++ b/maths/dual_number_automatic_differentiation.py @@ -17,10 +17,8 @@ def __init__(self, real, rank): self.duals = rank def __repr__(self): - return ( - f"{self.real}+" - f"{'+'.join(str(dual)+'E'+str(n+1)for n,dual in enumerate(self.duals))}" - ) + s = "+".join(f"{dual}E{n}" for n, dual in enumerate(self.duals, 1)) + return f"{self.real}+{s}" def reduce(self): cur = self.duals.copy() diff --git a/maths/max_sum_sliding_window.py b/maths/max_sum_sliding_window.py index 090117429604..c7492978a6c9 100644 --- 
a/maths/max_sum_sliding_window.py +++ b/maths/max_sum_sliding_window.py @@ -43,4 +43,6 @@ def max_sum_in_array(array: list[int], k: int) -> int: testmod() array = [randint(-1000, 1000) for i in range(100)] k = randint(0, 110) - print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}") + print( + f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array, k)}" + ) diff --git a/maths/numerical_analysis/integration_by_simpson_approx.py b/maths/numerical_analysis/integration_by_simpson_approx.py index 934299997aac..043f3a9a72af 100644 --- a/maths/numerical_analysis/integration_by_simpson_approx.py +++ b/maths/numerical_analysis/integration_by_simpson_approx.py @@ -88,18 +88,18 @@ def simpson_integration(function, a: float, b: float, precision: int = 4) -> flo AssertionError: precision should be positive integer your input : -1 """ - assert callable( - function - ), f"the function(object) passed should be callable your input : {function}" + assert callable(function), ( + f"the function(object) passed should be callable your input : {function}" + ) assert isinstance(a, (float, int)), f"a should be float or integer your input : {a}" assert isinstance(function(a), (float, int)), ( "the function should return integer or float return type of your function, " f"{type(a)}" ) assert isinstance(b, (float, int)), f"b should be float or integer your input : {b}" - assert ( - isinstance(precision, int) and precision > 0 - ), f"precision should be positive integer your input : {precision}" + assert isinstance(precision, int) and precision > 0, ( + f"precision should be positive integer your input : {precision}" + ) # just applying the formula of simpson for approximate integration written in # mentioned article in first comment of this file and above this function diff --git a/maths/prime_check.py b/maths/prime_check.py index f1bc4def2469..a757c4108f24 100644 --- a/maths/prime_check.py +++ b/maths/prime_check.py @@ -73,12 +73,12 @@ def 
test_primes(self): def test_not_primes(self): with pytest.raises(ValueError): is_prime(-19) - assert not is_prime( - 0 - ), "Zero doesn't have any positive factors, primes must have exactly two." - assert not is_prime( - 1 - ), "One only has 1 positive factor, primes must have exactly two." + assert not is_prime(0), ( + "Zero doesn't have any positive factors, primes must have exactly two." + ) + assert not is_prime(1), ( + "One only has 1 positive factor, primes must have exactly two." + ) assert not is_prime(2 * 2) assert not is_prime(2 * 3) assert not is_prime(3 * 3) diff --git a/maths/primelib.py b/maths/primelib.py index a26b0eaeb328..3a966e5cd936 100644 --- a/maths/primelib.py +++ b/maths/primelib.py @@ -66,9 +66,9 @@ def is_prime(number: int) -> bool: """ # precondition - assert isinstance(number, int) and ( - number >= 0 - ), "'number' must been an int and positive" + assert isinstance(number, int) and (number >= 0), ( + "'number' must been an int and positive" + ) status = True @@ -254,9 +254,9 @@ def greatest_prime_factor(number): """ # precondition - assert isinstance(number, int) and ( - number >= 0 - ), "'number' must been an int and >= 0" + assert isinstance(number, int) and (number >= 0), ( + "'number' must been an int and >= 0" + ) ans = 0 @@ -296,9 +296,9 @@ def smallest_prime_factor(number): """ # precondition - assert isinstance(number, int) and ( - number >= 0 - ), "'number' must been an int and >= 0" + assert isinstance(number, int) and (number >= 0), ( + "'number' must been an int and >= 0" + ) ans = 0 @@ -399,9 +399,9 @@ def goldbach(number): """ # precondition - assert ( - isinstance(number, int) and (number > 2) and is_even(number) - ), "'number' must been an int, even and > 2" + assert isinstance(number, int) and (number > 2) and is_even(number), ( + "'number' must been an int, even and > 2" + ) ans = [] # this list will returned @@ -525,9 +525,9 @@ def kg_v(number1, number2): done.append(n) # precondition - assert isinstance(ans, int) and 
( - ans >= 0 - ), "'ans' must been from type int and positive" + assert isinstance(ans, int) and (ans >= 0), ( + "'ans' must been from type int and positive" + ) return ans @@ -574,9 +574,9 @@ def get_prime(n): ans += 1 # precondition - assert isinstance(ans, int) and is_prime( - ans - ), "'ans' must been a prime number and from type int" + assert isinstance(ans, int) and is_prime(ans), ( + "'ans' must been a prime number and from type int" + ) return ans @@ -705,9 +705,9 @@ def is_perfect_number(number): """ # precondition - assert isinstance(number, int) and ( - number > 1 - ), "'number' must been an int and >= 1" + assert isinstance(number, int) and (number > 1), ( + "'number' must been an int and >= 1" + ) divisors = get_divisors(number) diff --git a/matrix/matrix_based_game.py b/matrix/matrix_based_game.py index 1ff0cbe93435..6181086c6704 100644 --- a/matrix/matrix_based_game.py +++ b/matrix/matrix_based_game.py @@ -273,7 +273,7 @@ def process_game(size: int, matrix: list[str], moves: list[tuple[int, int]]) -> size = int(input("Enter the size of the matrix: ")) validate_matrix_size(size) print(f"Enter the {size} rows of the matrix:") - matrix = [input(f"Row {i+1}: ") for i in range(size)] + matrix = [input(f"Row {i + 1}: ") for i in range(size)] validate_matrix_content(matrix, size) moves_input = input("Enter the moves (e.g., '0 0, 1 1'): ") moves = parse_moves(moves_input) diff --git a/neural_network/input_data.py b/neural_network/input_data.py index 72debabb566a..3a8628f939f8 100644 --- a/neural_network/input_data.py +++ b/neural_network/input_data.py @@ -160,9 +160,9 @@ def __init__( self._num_examples = 10000 self.one_hot = one_hot else: - assert ( - images.shape[0] == labels.shape[0] - ), f"images.shape: {images.shape} labels.shape: {labels.shape}" + assert images.shape[0] == labels.shape[0], ( + f"images.shape: {images.shape} labels.shape: {labels.shape}" + ) self._num_examples = images.shape[0] # Convert shape from [num examples, rows, columns, depth] 
diff --git a/scripts/validate_solutions.py b/scripts/validate_solutions.py index 325c245e0d77..df5d01086bbe 100755 --- a/scripts/validate_solutions.py +++ b/scripts/validate_solutions.py @@ -94,6 +94,6 @@ def test_project_euler(solution_path: pathlib.Path) -> None: solution_module = convert_path_to_module(solution_path) answer = str(solution_module.solution()) answer = hashlib.sha256(answer.encode()).hexdigest() - assert ( - answer == expected - ), f"Expected solution to {problem_number} to have hash {expected}, got {answer}" + assert answer == expected, ( + f"Expected solution to {problem_number} to have hash {expected}, got {answer}" + ) diff --git a/strings/jaro_winkler.py b/strings/jaro_winkler.py index cae2068fabc1..0ce5d83b3c41 100644 --- a/strings/jaro_winkler.py +++ b/strings/jaro_winkler.py @@ -33,7 +33,9 @@ def get_matched_characters(_str1: str, _str2: str) -> str: right = int(min(i + limit + 1, len(_str2))) if char in _str2[left:right]: matched.append(char) - _str2 = f"{_str2[0:_str2.index(char)]} {_str2[_str2.index(char) + 1:]}" + _str2 = ( + f"{_str2[0 : _str2.index(char)]} {_str2[_str2.index(char) + 1 :]}" + ) return "".join(matched) diff --git a/web_programming/fetch_anime_and_play.py b/web_programming/fetch_anime_and_play.py index fd7c3a3a7381..e56b7124eeb5 100644 --- a/web_programming/fetch_anime_and_play.py +++ b/web_programming/fetch_anime_and_play.py @@ -165,7 +165,7 @@ def get_anime_episode(episode_endpoint: str) -> list: print(f"Found {len(anime_list)} results: ") for i, anime in enumerate(anime_list): anime_title = anime["title"] - print(f"{i+1}. {anime_title}") + print(f"{i + 1}. {anime_title}") anime_choice = int(input("\nPlease choose from the following list: ").strip()) chosen_anime = anime_list[anime_choice - 1] @@ -177,7 +177,7 @@ def get_anime_episode(episode_endpoint: str) -> list: else: print(f"Found {len(episode_list)} results: ") for i, episode in enumerate(episode_list): - print(f"{i+1}. {episode['title']}") + print(f"{i + 1}. 
{episode['title']}") episode_choice = int(input("\nChoose an episode by serial no: ").strip()) chosen_episode = episode_list[episode_choice - 1] From f04d308431266759dce36265d8701dfb106932af Mon Sep 17 00:00:00 2001 From: Sanjay Muthu Date: Wed, 15 Jan 2025 02:19:04 +0530 Subject: [PATCH 204/260] Create longest_increasing_subsequence_iterative.py (#12524) * Create longest_increasing_subsequence_iterative.py * Update longest_increasing_subsequence_iterative.py * Update longest_increasing_subsequence_iterative.py --------- Co-authored-by: Maxim Smolskiy --- ...ongest_increasing_subsequence_iterative.py | 72 +++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 dynamic_programming/longest_increasing_subsequence_iterative.py diff --git a/dynamic_programming/longest_increasing_subsequence_iterative.py b/dynamic_programming/longest_increasing_subsequence_iterative.py new file mode 100644 index 000000000000..665c86a35d2e --- /dev/null +++ b/dynamic_programming/longest_increasing_subsequence_iterative.py @@ -0,0 +1,72 @@ +""" +Author : Sanjay Muthu + +This is a pure Python implementation of Dynamic Programming solution to the longest +increasing subsequence of a given sequence. + +The problem is: + Given an array, to find the longest and increasing sub-array in that given array and + return it. 
+ +Example: + ``[10, 22, 9, 33, 21, 50, 41, 60, 80]`` as input will return + ``[10, 22, 33, 50, 60, 80]`` as output +""" + +from __future__ import annotations + +import copy + + +def longest_subsequence(array: list[int]) -> list[int]: + """ + Some examples + + >>> longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]) + [10, 22, 33, 50, 60, 80] + >>> longest_subsequence([4, 8, 7, 5, 1, 12, 2, 3, 9]) + [1, 2, 3, 9] + >>> longest_subsequence([9, 8, 7, 6, 5, 7]) + [7, 7] + >>> longest_subsequence([28, 26, 12, 23, 35, 39]) + [12, 23, 35, 39] + >>> longest_subsequence([1, 1, 1]) + [1, 1, 1] + >>> longest_subsequence([]) + [] + """ + n = len(array) + # The longest increasing subsequence ending at array[i] + longest_increasing_subsequence = [] + for i in range(n): + longest_increasing_subsequence.append([array[i]]) + + for i in range(1, n): + for prev in range(i): + # If array[prev] is less than or equal to array[i], then + # longest_increasing_subsequence[prev] + array[i] + # is a valid increasing subsequence + + # longest_increasing_subsequence[i] is only set to + # longest_increasing_subsequence[prev] + array[i] if the length is longer. 
+ + if array[prev] <= array[i] and len( + longest_increasing_subsequence[prev] + ) + 1 > len(longest_increasing_subsequence[i]): + longest_increasing_subsequence[i] = copy.copy( + longest_increasing_subsequence[prev] + ) + longest_increasing_subsequence[i].append(array[i]) + + result: list[int] = [] + for i in range(n): + if len(longest_increasing_subsequence[i]) > len(result): + result = longest_increasing_subsequence[i] + + return result + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 0040ad47f928f299dadbb97c5cea00bc1daf8c75 Mon Sep 17 00:00:00 2001 From: aydinomer00 <109145643+aydinomer00@users.noreply.github.com> Date: Wed, 15 Jan 2025 00:24:36 +0300 Subject: [PATCH 205/260] Add butterfly pattern implementation (#12493) * Add butterfly pattern implementation * Add butterfly pattern implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add finalized butterfly pattern implementation and test * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Delete graphics/test_butterfly_pattern.py * Update butterfly_pattern.py * Update butterfly_pattern.py * Update butterfly_pattern.py * Update butterfly_pattern.py * Update butterfly_pattern.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- graphics/butterfly_pattern.py | 46 +++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 graphics/butterfly_pattern.py diff --git a/graphics/butterfly_pattern.py b/graphics/butterfly_pattern.py new file mode 100644 index 000000000000..7913b03a7e95 --- /dev/null +++ b/graphics/butterfly_pattern.py @@ -0,0 +1,46 @@ +def butterfly_pattern(n: int) -> str: + """ + Creates a butterfly pattern of size n and returns it as a string. 
+ + >>> print(butterfly_pattern(3)) + * * + ** ** + ***** + ** ** + * * + >>> print(butterfly_pattern(5)) + * * + ** ** + *** *** + **** **** + ********* + **** **** + *** *** + ** ** + * * + """ + result = [] + + # Upper part + for i in range(1, n): + left_stars = "*" * i + spaces = " " * (2 * (n - i) - 1) + right_stars = "*" * i + result.append(left_stars + spaces + right_stars) + + # Middle part + result.append("*" * (2 * n - 1)) + + # Lower part + for i in range(n - 1, 0, -1): + left_stars = "*" * i + spaces = " " * (2 * (n - i) - 1) + right_stars = "*" * i + result.append(left_stars + spaces + right_stars) + + return "\n".join(result) + + +if __name__ == "__main__": + n = int(input("Enter the size of the butterfly pattern: ")) + print(butterfly_pattern(n)) From 533767ff46bbcf5c594ff8196894ae2e8130bc3e Mon Sep 17 00:00:00 2001 From: Nguyen Thi Thanh Minh <140883075+minh-swinburne@users.noreply.github.com> Date: Sat, 18 Jan 2025 10:07:44 +0700 Subject: [PATCH 206/260] Doomsday Algorithm: Fix leap year check (#12396) * Fix leap year check Replace `!=` in `(year % 400) != 0` (line 49) with `==` Justification: Years that are divisible by 100 (centurian == 100) but not by 400 (year % 400 != 0) are skipped and NOT leap year. 
* Update parentheses Correct the parentheses to make clear the precedence of the conditional check * Update other/doomsday.py Co-authored-by: Tianyi Zheng --------- Co-authored-by: Tianyi Zheng --- other/doomsday.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/other/doomsday.py b/other/doomsday.py index d8fe261156a1..be3b18eeecaa 100644 --- a/other/doomsday.py +++ b/other/doomsday.py @@ -46,7 +46,7 @@ def get_week_day(year: int, month: int, day: int) -> str: ) % 7 day_anchor = ( DOOMSDAY_NOT_LEAP[month - 1] - if (year % 4 != 0) or (centurian == 0 and (year % 400) == 0) + if year % 4 != 0 or (centurian == 0 and year % 400 != 0) else DOOMSDAY_LEAP[month - 1] ) week_day = (dooms_day + day - day_anchor) % 7 From 91ebea1d99735ee2798b01ebcea0fc06e9a6af49 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sun, 19 Jan 2025 08:33:35 +0100 Subject: [PATCH 207/260] Sphinx runs on ubuntu 24.04 arm (#12530) * Speed up our Sphinx GitHub Action with ARM # `runs-on: ubuntu-24.04-arm` https://docs.github.com/en/actions/using-github-hosted-runners/using-github-hosted-runners/about-github-hosted-runners#supported-runners-and-hardware-resources * updating DIRECTORY.md --------- Co-authored-by: cclauss --- .github/workflows/sphinx.yml | 2 +- DIRECTORY.md | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/sphinx.yml b/.github/workflows/sphinx.yml index d02435d98028..16ff284a74f2 100644 --- a/.github/workflows/sphinx.yml +++ b/.github/workflows/sphinx.yml @@ -23,7 +23,7 @@ concurrency: jobs: build_docs: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04-arm steps: - uses: actions/checkout@v4 - uses: astral-sh/setup-uv@v5 diff --git a/DIRECTORY.md b/DIRECTORY.md index aad6c72aa8ee..941e30dfe721 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -377,6 +377,7 @@ * [Longest Common Subsequence](dynamic_programming/longest_common_subsequence.py) * [Longest Common Substring](dynamic_programming/longest_common_substring.py) * [Longest 
Increasing Subsequence](dynamic_programming/longest_increasing_subsequence.py) + * [Longest Increasing Subsequence Iterative](dynamic_programming/longest_increasing_subsequence_iterative.py) * [Longest Increasing Subsequence O Nlogn](dynamic_programming/longest_increasing_subsequence_o_nlogn.py) * [Longest Palindromic Subsequence](dynamic_programming/longest_palindromic_subsequence.py) * [Matrix Chain Multiplication](dynamic_programming/matrix_chain_multiplication.py) @@ -462,6 +463,7 @@ ## Graphics * [Bezier Curve](graphics/bezier_curve.py) + * [Butterfly Pattern](graphics/butterfly_pattern.py) * [Digital Differential Analyzer Line](graphics/digital_differential_analyzer_line.py) * [Vector3 For 2D Rendering](graphics/vector3_for_2d_rendering.py) From 1f74db0c06df7557e7ae3a17ebcc303f753f824e Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 21:22:02 +0100 Subject: [PATCH 208/260] [pre-commit.ci] pre-commit autoupdate (#12536) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.9.1 → v0.9.2](https://github.com/astral-sh/ruff-pre-commit/compare/v0.9.1...v0.9.2) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3b1dd9658d7f..c4480f47faa1 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.9.1 + rev: v0.9.2 hooks: - id: ruff - id: ruff-format From 9fb51b4169e0f7a4952e9eb460b91f4d7ffb819f Mon Sep 17 00:00:00 2001 From: Ronald Ngounou <74538524+ronaldngounou@users.noreply.github.com> Date: Thu, 23 Jan 2025 09:02:46 +0100 Subject: [PATCH 209/260] Update docstrings in the functions definitions. 
(#11797) --- data_structures/arrays/sudoku_solver.py | 61 +++++++++++++++++-------- 1 file changed, 43 insertions(+), 18 deletions(-) diff --git a/data_structures/arrays/sudoku_solver.py b/data_structures/arrays/sudoku_solver.py index fd1a4f3e37b8..e1714e57ece8 100644 --- a/data_structures/arrays/sudoku_solver.py +++ b/data_structures/arrays/sudoku_solver.py @@ -9,7 +9,9 @@ def cross(items_a, items_b): - "Cross product of elements in A and elements in B." + """ + Cross product of elements in A and elements in B. + """ return [a + b for a in items_a for b in items_b] @@ -27,7 +29,7 @@ def cross(items_a, items_b): def test(): - "A set of unit tests." + """A set of unit tests.""" assert len(squares) == 81 assert len(unitlist) == 27 assert all(len(units[s]) == 3 for s in squares) @@ -47,8 +49,10 @@ def test(): def parse_grid(grid): - """Convert grid to a dict of possible values, {square: digits}, or - return False if a contradiction is detected.""" + """ + Convert grid to a dict of possible values, {square: digits}, or + return False if a contradiction is detected. + """ ## To start, every square can be any digit; then assign values from the grid. values = {s: digits for s in squares} for s, d in grid_values(grid).items(): @@ -58,15 +62,19 @@ def parse_grid(grid): def grid_values(grid): - "Convert grid into a dict of {square: char} with '0' or '.' for empties." + """ + Convert grid into a dict of {square: char} with '0' or '.' for empties. + """ chars = [c for c in grid if c in digits or c in "0."] assert len(chars) == 81 return dict(zip(squares, chars)) def assign(values, s, d): - """Eliminate all the other values (except d) from values[s] and propagate. - Return values, except return False if a contradiction is detected.""" + """ + Eliminate all the other values (except d) from values[s] and propagate. + Return values, except return False if a contradiction is detected. 
+ """ other_values = values[s].replace(d, "") if all(eliminate(values, s, d2) for d2 in other_values): return values @@ -75,8 +83,10 @@ def assign(values, s, d): def eliminate(values, s, d): - """Eliminate d from values[s]; propagate when values or places <= 2. - Return values, except return False if a contradiction is detected.""" + """ + Eliminate d from values[s]; propagate when values or places <= 2. + Return values, except return False if a contradiction is detected. + """ if d not in values[s]: return values ## Already eliminated values[s] = values[s].replace(d, "") @@ -99,7 +109,9 @@ def eliminate(values, s, d): def display(values): - "Display these values as a 2-D grid." + """ + Display these values as a 2-D grid. + """ width = 1 + max(len(values[s]) for s in squares) line = "+".join(["-" * (width * 3)] * 3) for r in rows: @@ -114,11 +126,14 @@ def display(values): def solve(grid): + """ + Solve the grid. + """ return search(parse_grid(grid)) def some(seq): - "Return some element of seq that is true." + """Return some element of seq that is true.""" for e in seq: if e: return e @@ -126,7 +141,9 @@ def some(seq): def search(values): - "Using depth-first search and propagation, try all possible values." + """ + Using depth-first search and propagation, try all possible values. + """ if values is False: return False ## Failed earlier if all(len(values[s]) == 1 for s in squares): @@ -137,9 +154,11 @@ def search(values): def solve_all(grids, name="", showif=0.0): - """Attempt to solve a sequence of grids. Report results. + """ + Attempt to solve a sequence of grids. Report results. When showif is a number of seconds, display puzzles that take longer. - When showif is None, don't display any puzzles.""" + When showif is None, don't display any puzzles. + """ def time_solve(grid): start = time.monotonic() @@ -162,7 +181,9 @@ def time_solve(grid): def solved(values): - "A puzzle is solved if each unit is a permutation of the digits 1 to 9." 
+ """ + A puzzle is solved if each unit is a permutation of the digits 1 to 9. + """ def unitsolved(unit): return {values[s] for s in unit} == set(digits) @@ -177,9 +198,11 @@ def from_file(filename, sep="\n"): def random_puzzle(assignments=17): - """Make a random puzzle with N or more assignments. Restart on contradictions. + """ + Make a random puzzle with N or more assignments. Restart on contradictions. Note the resulting puzzle is not guaranteed to be solvable, but empirically - about 99.8% of them are solvable. Some have multiple solutions.""" + about 99.8% of them are solvable. Some have multiple solutions. + """ values = {s: digits for s in squares} for s in shuffled(squares): if not assign(values, s, random.choice(values[s])): @@ -191,7 +214,9 @@ def random_puzzle(assignments=17): def shuffled(seq): - "Return a randomly shuffled copy of the input sequence." + """ + Return a randomly shuffled copy of the input sequence. + """ seq = list(seq) random.shuffle(seq) return seq From c666db3729b6d9f73e2f7756a3974f53279caa50 Mon Sep 17 00:00:00 2001 From: Vijayalaxmi Wakode Date: Fri, 24 Jan 2025 03:31:47 +0530 Subject: [PATCH 210/260] Add Doc test bubble sort (#12070) * The string manipulation - replace() * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update replace.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updating DIRECTORY.md * Add doc test to bubble_sort * Update DIRECTORY.md * Delete strings/replace.py * Update bubble_sort.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: vijayalaxmi777 Co-authored-by: Maxim Smolskiy --- sorts/bubble_sort.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sorts/bubble_sort.py b/sorts/bubble_sort.py index bdf85c70dd35..9ec3d5384f38 100644 --- a/sorts/bubble_sort.py +++ b/sorts/bubble_sort.py @@ -85,6 +85,8 @@ def 
bubble_sort_recursive(collection: list[Any]) -> list[Any]: [1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7] >>> bubble_sort_recursive([1, 3.3, 5, 7.7, 2, 4.4, 6]) [1, 2, 3.3, 4.4, 5, 6, 7.7] + >>> bubble_sort_recursive(['a', 'Z', 'B', 'C', 'A', 'c']) + ['A', 'B', 'C', 'Z', 'a', 'c'] >>> import random >>> collection_arg = random.sample(range(-50, 50), 100) >>> bubble_sort_recursive(collection_arg) == sorted(collection_arg) From 13e4d3e76cfaa74d8b14314d319fb6c089aa051e Mon Sep 17 00:00:00 2001 From: Rachel Spears <103690982+Rosepetal2022@users.noreply.github.com> Date: Thu, 23 Jan 2025 21:59:36 -0800 Subject: [PATCH 211/260] Fix error in avl_tree del_node function (#11510) * fixed error in del_node function * Update avl_tree.py --------- Co-authored-by: Maxim Smolskiy --- data_structures/binary_tree/avl_tree.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/data_structures/binary_tree/avl_tree.py b/data_structures/binary_tree/avl_tree.py index 9fca7237404c..8558305eefe4 100644 --- a/data_structures/binary_tree/avl_tree.py +++ b/data_structures/binary_tree/avl_tree.py @@ -221,6 +221,10 @@ def del_node(root: MyNode, data: Any) -> MyNode | None: else: root.set_right(del_node(right_child, data)) + # Re-fetch left_child and right_child references + left_child = root.get_left() + right_child = root.get_right() + if get_height(right_child) - get_height(left_child) == 2: assert right_child is not None if get_height(right_child.get_right()) > get_height(right_child.get_left()): From 6c92c5a539276d387b85eedc89be1f888962647d Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 27 Jan 2025 22:05:20 +0100 Subject: [PATCH 212/260] [pre-commit.ci] pre-commit autoupdate (#12542) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.9.2 → 
v0.9.3](https://github.com/astral-sh/ruff-pre-commit/compare/v0.9.2...v0.9.3) - [github.com/codespell-project/codespell: v2.3.0 → v2.4.0](https://github.com/codespell-project/codespell/compare/v2.3.0...v2.4.0) * Update trifid_cipher.py * Update pyproject.toml * Update trifid_cipher.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 4 ++-- ciphers/trifid_cipher.py | 4 ++-- pyproject.toml | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c4480f47faa1..e34b563b05dd 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,13 +16,13 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.9.2 + rev: v0.9.3 hooks: - id: ruff - id: ruff-format - repo: https://github.com/codespell-project/codespell - rev: v2.3.0 + rev: v2.4.0 hooks: - id: codespell additional_dependencies: diff --git a/ciphers/trifid_cipher.py b/ciphers/trifid_cipher.py index 9613cee0669d..13a47e9dd03b 100644 --- a/ciphers/trifid_cipher.py +++ b/ciphers/trifid_cipher.py @@ -88,7 +88,7 @@ def __prepare( ... KeyError: 'Length of alphabet has to be 27.' - Testing with punctuations that are not in the given alphabet + Testing with punctuation not in the given alphabet >>> __prepare('am i a boy?','abCdeFghijkLmnopqrStuVwxYZ+') Traceback (most recent call last): @@ -128,7 +128,7 @@ def encrypt_message( encrypt_message =============== - Encrypts a message using the trifid_cipher. Any punctuatuions that + Encrypts a message using the trifid_cipher. Any punctuatuion chars that would be used should be added to the alphabet. 
PARAMETERS diff --git a/pyproject.toml b/pyproject.toml index 7b7176705c44..2135f1f5825a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -159,7 +159,7 @@ lint.pylint.max-returns = 8 # default: 6 lint.pylint.max-statements = 88 # default: 50 [tool.codespell] -ignore-words-list = "3rt,ans,bitap,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,toi,zar" +ignore-words-list = "3rt,abd,aer,ans,bitap,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,toi,zar" skip = "./.*,*.json,*.lock,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_names.txt,pyproject.toml,strings/dictionary.txt,strings/words.txt" [tool.pytest.ini_options] From e59d819d091efdb30e385f4ecfe9ab5d36c3be71 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 5 Feb 2025 20:47:41 +0100 Subject: [PATCH 213/260] [pre-commit.ci] pre-commit autoupdate (#12554) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.9.3 → v0.9.4](https://github.com/astral-sh/ruff-pre-commit/compare/v0.9.3...v0.9.4) - [github.com/codespell-project/codespell: v2.4.0 → v2.4.1](https://github.com/codespell-project/codespell/compare/v2.4.0...v2.4.1) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e34b563b05dd..d9477e216b96 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,13 +16,13 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.9.3 + rev: v0.9.4 hooks: - id: ruff - id: ruff-format - repo: https://github.com/codespell-project/codespell - rev: v2.4.0 + rev: v2.4.1 hooks: - id: codespell additional_dependencies: From 338cbafe0d5b07d57f83060ea0f9ba3a6c1155e7 Mon Sep 17 
00:00:00 2001 From: lighting9999 <120090117+lighting9999@users.noreply.github.com> Date: Mon, 10 Feb 2025 01:51:18 +0800 Subject: [PATCH 214/260] Improve power.py (#12567) * Fix And Add power.py To fix the inaccuracies and allow handling of negative exponents and bases, the key issue lies in how negative numbers are handled in the power calculation, especially when dividing. ## Example Output: ```python >>> power(4, 6) 4096 >>> power(2, 3) 8 >>> power(-2, 3) -8 >>> power(2, -3) 0.125 >>> power(-2, -3) -0.125 ``` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update power.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update power.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update power.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update power.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- divide_and_conquer/power.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/divide_and_conquer/power.py b/divide_and_conquer/power.py index faf6a3476d40..492ee6dd12f0 100644 --- a/divide_and_conquer/power.py +++ b/divide_and_conquer/power.py @@ -1,4 +1,4 @@ -def actual_power(a: int, b: int): +def actual_power(a: int, b: int) -> int: """ Function using divide and conquer to calculate a^b. It only works for integer a,b. 
@@ -19,10 +19,12 @@ def actual_power(a: int, b: int): """ if b == 0: return 1 + half = actual_power(a, b // 2) + if (b % 2) == 0: - return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2)) + return half * half else: - return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2)) + return a * half * half def power(a: int, b: int) -> float: @@ -43,9 +45,9 @@ def power(a: int, b: int) -> float: -0.125 """ if b < 0: - return 1 / actual_power(a, b) + return 1 / actual_power(a, -b) return actual_power(a, b) if __name__ == "__main__": - print(power(-2, -3)) + print(power(-2, -3)) # output -0.125 From 738253e80030ffdd35ac57ff64cda816f85eda71 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 14 Feb 2025 10:05:23 +0100 Subject: [PATCH 215/260] git mv data_structures/queue data_structures/queues (#12577) Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 18 +++++++++--------- data_structures/{queue => queues}/__init__.py | 0 .../{queue => queues}/circular_queue.py | 10 +++++++--- .../circular_queue_linked_list.py | 0 .../{queue => queues}/double_ended_queue.py | 0 .../{queue => queues}/linked_queue.py | 0 .../priority_queue_using_list.py | 6 +++--- .../{queue => queues}/queue_by_list.py | 0 .../{queue => queues}/queue_by_two_stacks.py | 0 .../{queue => queues}/queue_on_pseudo_stack.py | 0 11 files changed, 21 insertions(+), 17 deletions(-) rename data_structures/{queue => queues}/__init__.py (100%) rename data_structures/{queue => queues}/circular_queue.py (87%) rename data_structures/{queue => queues}/circular_queue_linked_list.py (100%) rename data_structures/{queue => queues}/double_ended_queue.py (100%) rename data_structures/{queue => queues}/linked_queue.py (100%) rename data_structures/{queue => queues}/priority_queue_using_list.py (96%) rename data_structures/{queue => queues}/queue_by_list.py (100%) rename data_structures/{queue => 
queues}/queue_by_two_stacks.py (100%) rename data_structures/{queue => queues}/queue_on_pseudo_stack.py (100%) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d9477e216b96..a603109fd79f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.9.4 + rev: v0.9.6 hooks: - id: ruff - id: ruff-format @@ -47,7 +47,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.14.1 + rev: v1.15.0 hooks: - id: mypy args: diff --git a/DIRECTORY.md b/DIRECTORY.md index 941e30dfe721..a535f12cb59a 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -275,15 +275,15 @@ * [Singly Linked List](data_structures/linked_list/singly_linked_list.py) * [Skip List](data_structures/linked_list/skip_list.py) * [Swap Nodes](data_structures/linked_list/swap_nodes.py) - * Queue - * [Circular Queue](data_structures/queue/circular_queue.py) - * [Circular Queue Linked List](data_structures/queue/circular_queue_linked_list.py) - * [Double Ended Queue](data_structures/queue/double_ended_queue.py) - * [Linked Queue](data_structures/queue/linked_queue.py) - * [Priority Queue Using List](data_structures/queue/priority_queue_using_list.py) - * [Queue By List](data_structures/queue/queue_by_list.py) - * [Queue By Two Stacks](data_structures/queue/queue_by_two_stacks.py) - * [Queue On Pseudo Stack](data_structures/queue/queue_on_pseudo_stack.py) + * Queues + * [Circular Queue](data_structures/queues/circular_queue.py) + * [Circular Queue Linked List](data_structures/queues/circular_queue_linked_list.py) + * [Double Ended Queue](data_structures/queues/double_ended_queue.py) + * [Linked Queue](data_structures/queues/linked_queue.py) + * [Priority Queue Using List](data_structures/queues/priority_queue_using_list.py) + * [Queue By List](data_structures/queues/queue_by_list.py) + * [Queue By Two 
Stacks](data_structures/queues/queue_by_two_stacks.py) + * [Queue On Pseudo Stack](data_structures/queues/queue_on_pseudo_stack.py) * Stacks * [Balanced Parentheses](data_structures/stacks/balanced_parentheses.py) * [Dijkstras Two Stack Algorithm](data_structures/stacks/dijkstras_two_stack_algorithm.py) diff --git a/data_structures/queue/__init__.py b/data_structures/queues/__init__.py similarity index 100% rename from data_structures/queue/__init__.py rename to data_structures/queues/__init__.py diff --git a/data_structures/queue/circular_queue.py b/data_structures/queues/circular_queue.py similarity index 87% rename from data_structures/queue/circular_queue.py rename to data_structures/queues/circular_queue.py index f2fb4c01e467..efbf1efdc42d 100644 --- a/data_structures/queue/circular_queue.py +++ b/data_structures/queues/circular_queue.py @@ -17,7 +17,9 @@ def __len__(self) -> int: >>> len(cq) 0 >>> cq.enqueue("A") # doctest: +ELLIPSIS - >> cq.array + ['A', None, None, None, None] >>> len(cq) 1 """ @@ -51,11 +53,13 @@ def enqueue(self, data): as an index. 
>>> cq = CircularQueue(5) >>> cq.enqueue("A") # doctest: +ELLIPSIS - >> (cq.size, cq.first()) (1, 'A') >>> cq.enqueue("B") # doctest: +ELLIPSIS - >> cq.array + ['A', 'B', None, None, None] >>> (cq.size, cq.first()) (2, 'A') """ diff --git a/data_structures/queue/circular_queue_linked_list.py b/data_structures/queues/circular_queue_linked_list.py similarity index 100% rename from data_structures/queue/circular_queue_linked_list.py rename to data_structures/queues/circular_queue_linked_list.py diff --git a/data_structures/queue/double_ended_queue.py b/data_structures/queues/double_ended_queue.py similarity index 100% rename from data_structures/queue/double_ended_queue.py rename to data_structures/queues/double_ended_queue.py diff --git a/data_structures/queue/linked_queue.py b/data_structures/queues/linked_queue.py similarity index 100% rename from data_structures/queue/linked_queue.py rename to data_structures/queues/linked_queue.py diff --git a/data_structures/queue/priority_queue_using_list.py b/data_structures/queues/priority_queue_using_list.py similarity index 96% rename from data_structures/queue/priority_queue_using_list.py rename to data_structures/queues/priority_queue_using_list.py index f61b5e8e664d..15e56c557069 100644 --- a/data_structures/queue/priority_queue_using_list.py +++ b/data_structures/queues/priority_queue_using_list.py @@ -59,12 +59,12 @@ class FixedPriorityQueue: >>> fpq.dequeue() Traceback (most recent call last): ... - data_structures.queue.priority_queue_using_list.UnderFlowError: All queues are empty + data_structures.queues.priority_queue_using_list.UnderFlowError: All queues are empty >>> print(fpq) Priority 0: [] Priority 1: [] Priority 2: [] - """ + """ # noqa: E501 def __init__(self): self.queues = [ @@ -141,7 +141,7 @@ class ElementPriorityQueue: >>> epq.dequeue() Traceback (most recent call last): ... 
- data_structures.queue.priority_queue_using_list.UnderFlowError: The queue is empty + data_structures.queues.priority_queue_using_list.UnderFlowError: The queue is empty >>> print(epq) [] """ diff --git a/data_structures/queue/queue_by_list.py b/data_structures/queues/queue_by_list.py similarity index 100% rename from data_structures/queue/queue_by_list.py rename to data_structures/queues/queue_by_list.py diff --git a/data_structures/queue/queue_by_two_stacks.py b/data_structures/queues/queue_by_two_stacks.py similarity index 100% rename from data_structures/queue/queue_by_two_stacks.py rename to data_structures/queues/queue_by_two_stacks.py diff --git a/data_structures/queue/queue_on_pseudo_stack.py b/data_structures/queues/queue_on_pseudo_stack.py similarity index 100% rename from data_structures/queue/queue_on_pseudo_stack.py rename to data_structures/queues/queue_on_pseudo_stack.py From a5aed92b4c20fd3e99c6e7a9202afcc9cf502883 Mon Sep 17 00:00:00 2001 From: Maxim Evtush <154841002+maximevtush@users.noreply.github.com> Date: Thu, 20 Feb 2025 21:09:01 +0100 Subject: [PATCH 216/260] fix: typo in data_structures/linked_list/from_sequence.py (#12584) --- data_structures/linked_list/from_sequence.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data_structures/linked_list/from_sequence.py b/data_structures/linked_list/from_sequence.py index 94b44f15037f..fa43f4d10e08 100644 --- a/data_structures/linked_list/from_sequence.py +++ b/data_structures/linked_list/from_sequence.py @@ -1,4 +1,4 @@ -# Recursive Prorgam to create a Linked List from a sequence and +# Recursive Program to create a Linked List from a sequence and # print a string representation of it. 
From 183fa06f40e80c6e86ceda6e7c7d23eaf91507ac Mon Sep 17 00:00:00 2001 From: sector <104625848+infrablue1@users.noreply.github.com> Date: Sat, 22 Feb 2025 16:16:29 +0800 Subject: [PATCH 217/260] Fix n-queens problem (#12583) * Fix n-queens problem * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update n_queens.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update n_queens.py * Update n_queens.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- backtracking/n_queens.py | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/backtracking/n_queens.py b/backtracking/n_queens.py index 81668b17a0ac..d10181f319b3 100644 --- a/backtracking/n_queens.py +++ b/backtracking/n_queens.py @@ -27,21 +27,28 @@ def is_safe(board: list[list[int]], row: int, column: int) -> bool: >>> is_safe([[0, 0, 0], [0, 0, 0], [0, 0, 0]], 1, 1) True + >>> is_safe([[0, 1, 0], [0, 0, 0], [0, 0, 0]], 1, 1) + False >>> is_safe([[1, 0, 0], [0, 0, 0], [0, 0, 0]], 1, 1) False + >>> is_safe([[0, 0, 1], [0, 0, 0], [0, 0, 0]], 1, 1) + False """ n = len(board) # Size of the board - # Check if there is any queen in the same row, column, - # left upper diagonal, and right upper diagonal + # Check if there is any queen in the same upper column, + # left upper diagonal and right upper diagonal return ( - all(board[i][j] != 1 for i, j in zip(range(row, -1, -1), range(column, n))) + all(board[i][j] != 1 for i, j in zip(range(row), [column] * row)) + and all( + board[i][j] != 1 + for i, j in zip(range(row - 1, -1, -1), range(column - 1, -1, -1)) + ) and all( - board[i][j] != 1 for i, j in zip(range(row, -1, -1), range(column, -1, -1)) + board[i][j] != 1 + for i, j in zip(range(row - 1, -1, -1), range(column + 1, n)) ) - and all(board[i][j] != 1 for i, j in zip(range(row, n), 
range(column, n))) - and all(board[i][j] != 1 for i, j in zip(range(row, n), range(column, -1, -1))) ) From 114d4283b98e52396e2460c802f18d45eeacd90c Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 24 Feb 2025 19:27:10 +0100 Subject: [PATCH 218/260] [pre-commit.ci] pre-commit autoupdate (#12591) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.9.6 → v0.9.7](https://github.com/astral-sh/ruff-pre-commit/compare/v0.9.6...v0.9.7) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a603109fd79f..8de90b11767f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.9.6 + rev: v0.9.7 hooks: - id: ruff - id: ruff-format From f528ce350b366ce40e0494fc94da65cfd4509c7d Mon Sep 17 00:00:00 2001 From: Sanjay Muthu Date: Thu, 27 Feb 2025 17:01:08 +0530 Subject: [PATCH 219/260] Added dynamic_programming/range_sum_query.py (#12592) * Create prefix_sum.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix pre-commit and ruff errors * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Rename prefix_sum.py to range_sum_query.py * Refactor description * Fix * Refactor code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- dynamic_programming/range_sum_query.py | 92 ++++++++++++++++++++++++++ 1 file changed, 92 insertions(+) 
create mode 100644 dynamic_programming/range_sum_query.py diff --git a/dynamic_programming/range_sum_query.py b/dynamic_programming/range_sum_query.py new file mode 100644 index 000000000000..484fcf785fda --- /dev/null +++ b/dynamic_programming/range_sum_query.py @@ -0,0 +1,92 @@ +""" +Author: Sanjay Muthu + +This is an implementation of the Dynamic Programming solution to the Range Sum Query. + +The problem statement is: + Given an array and q queries, + each query stating you to find the sum of elements from l to r (inclusive) + +Example: + arr = [1, 4, 6, 2, 61, 12] + queries = 3 + l_1 = 2, r_1 = 5 + l_2 = 1, r_2 = 5 + l_3 = 3, r_3 = 4 + + as input will return + + [81, 85, 63] + + as output + +0-indexing: +NOTE: 0-indexing means the indexing of the array starts from 0 +Example: a = [1, 2, 3, 4, 5, 6] + Here, the 0th index of a is 1, + the 1st index of a is 2, + and so forth + +Time Complexity: O(N + Q) +* O(N) pre-calculation time to calculate the prefix sum array +* and O(1) time per each query = O(1 * Q) = O(Q) time + +Space Complexity: O(N) +* O(N) to store the prefix sum + +Algorithm: +So, first we calculate the prefix sum (dp) of the array. +The prefix sum of the index i is the sum of all elements indexed +from 0 to i (inclusive). +The prefix sum of the index i is the prefix sum of index (i - 1) + the current element. +So, the state of the dp is dp[i] = dp[i - 1] + a[i]. + +After we calculate the prefix sum, +for each query [l, r] +the answer is dp[r] - dp[l - 1] (we need to be careful because l might be 0). +For example take this array: + [4, 2, 1, 6, 3] +The prefix sum calculated for this array would be: + [4, 4 + 2, 4 + 2 + 1, 4 + 2 + 1 + 6, 4 + 2 + 1 + 6 + 3] + ==> [4, 6, 7, 13, 16] +If the query was l = 3, r = 4, +the answer would be 6 + 3 = 9 but this would require O(r - l + 1) time ≈ O(N) time + +If we use prefix sums we can find it in O(1) by using the formula +prefix[r] - prefix[l - 1]. 
+This formula works because prefix[r] is the sum of elements from [0, r] +and prefix[l - 1] is the sum of elements from [0, l - 1], +so if we do prefix[r] - prefix[l - 1] it will be +[0, r] - [0, l - 1] = [0, l - 1] + [l, r] - [0, l - 1] = [l, r] +""" + + +def prefix_sum(array: list[int], queries: list[tuple[int, int]]) -> list[int]: + """ + >>> prefix_sum([1, 4, 6, 2, 61, 12], [(2, 5), (1, 5), (3, 4)]) + [81, 85, 63] + >>> prefix_sum([4, 2, 1, 6, 3], [(3, 4), (1, 3), (0, 2)]) + [9, 9, 7] + """ + # The prefix sum array + dp = [0] * len(array) + dp[0] = array[0] + for i in range(1, len(array)): + dp[i] = dp[i - 1] + array[i] + + # See Algorithm section (Line 44) + result = [] + for query in queries: + left, right = query + res = dp[right] + if left > 0: + res -= dp[left - 1] + result.append(res) + + return result + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 8826ad3a4d75f7a4e1d7b1a682682528c2c73672 Mon Sep 17 00:00:00 2001 From: PARIKSHIT SINGH <90330646+parikshit2111@users.noreply.github.com> Date: Sun, 2 Mar 2025 16:33:12 +0530 Subject: [PATCH 220/260] feat: Implement Principal Component Analysis (PCA) (#12596) - Added PCA implementation with dataset standardization. - Used Singular Value Decomposition (SVD) for computing principal components. - Fixed import sorting to comply with PEP 8 (Ruff I001). - Ensured type hints and docstrings for better readability. - Added doctests to validate correctness. - Passed all Ruff checks and automated tests. 
--- DIRECTORY.md | 2 + .../principle_component_analysis.py | 85 +++++++++++++++++++ 2 files changed, 87 insertions(+) create mode 100644 machine_learning/principle_component_analysis.py diff --git a/DIRECTORY.md b/DIRECTORY.md index a535f12cb59a..ab3259b9a766 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -395,6 +395,7 @@ * [Minimum Tickets Cost](dynamic_programming/minimum_tickets_cost.py) * [Optimal Binary Search Tree](dynamic_programming/optimal_binary_search_tree.py) * [Palindrome Partitioning](dynamic_programming/palindrome_partitioning.py) + * [Range Sum Query](dynamic_programming/range_sum_query.py) * [Regex Match](dynamic_programming/regex_match.py) * [Rod Cutting](dynamic_programming/rod_cutting.py) * [Smith Waterman](dynamic_programming/smith_waterman.py) @@ -608,6 +609,7 @@ * [Mfcc](machine_learning/mfcc.py) * [Multilayer Perceptron Classifier](machine_learning/multilayer_perceptron_classifier.py) * [Polynomial Regression](machine_learning/polynomial_regression.py) + * [Principle Component Analysis](machine_learning/principle_component_analysis.py) * [Scoring Functions](machine_learning/scoring_functions.py) * [Self Organizing Map](machine_learning/self_organizing_map.py) * [Sequential Minimum Optimization](machine_learning/sequential_minimum_optimization.py) diff --git a/machine_learning/principle_component_analysis.py b/machine_learning/principle_component_analysis.py new file mode 100644 index 000000000000..46ccdb968494 --- /dev/null +++ b/machine_learning/principle_component_analysis.py @@ -0,0 +1,85 @@ +""" +Principal Component Analysis (PCA) is a dimensionality reduction technique +used in machine learning. It transforms high-dimensional data into a lower-dimensional +representation while retaining as much variance as possible. + +This implementation follows best practices, including: +- Standardizing the dataset. +- Computing principal components using Singular Value Decomposition (SVD). +- Returning transformed data and explained variance ratio. 
+""" + +import doctest + +import numpy as np +from sklearn.datasets import load_iris +from sklearn.decomposition import PCA +from sklearn.preprocessing import StandardScaler + + +def collect_dataset() -> tuple[np.ndarray, np.ndarray]: + """ + Collects the dataset (Iris dataset) and returns feature matrix and target values. + + :return: Tuple containing feature matrix (X) and target labels (y) + + Example: + >>> X, y = collect_dataset() + >>> X.shape + (150, 4) + >>> y.shape + (150,) + """ + data = load_iris() + return np.array(data.data), np.array(data.target) + + +def apply_pca(data_x: np.ndarray, n_components: int) -> tuple[np.ndarray, np.ndarray]: + """ + Applies Principal Component Analysis (PCA) to reduce dimensionality. + + :param data_x: Original dataset (features) + :param n_components: Number of principal components to retain + :return: Tuple containing transformed dataset and explained variance ratio + + Example: + >>> X, _ = collect_dataset() + >>> transformed_X, variance = apply_pca(X, 2) + >>> transformed_X.shape + (150, 2) + >>> len(variance) == 2 + True + """ + # Standardizing the dataset + scaler = StandardScaler() + data_x_scaled = scaler.fit_transform(data_x) + + # Applying PCA + pca = PCA(n_components=n_components) + principal_components = pca.fit_transform(data_x_scaled) + + return principal_components, pca.explained_variance_ratio_ + + +def main() -> None: + """ + Driver function to execute PCA and display results. 
+ """ + data_x, data_y = collect_dataset() + + # Number of principal components to retain + n_components = 2 + + # Apply PCA + transformed_data, variance_ratio = apply_pca(data_x, n_components) + + print("Transformed Dataset (First 5 rows):") + print(transformed_data[:5]) + + print("\nExplained Variance Ratio:") + print(variance_ratio) + + +if __name__ == "__main__": + doctest.testmod() + main() From fff34ed528a7c1af373aeae68693d67639ff616b Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 3 Mar 2025 19:10:41 +0100 Subject: [PATCH 221/260] [pre-commit.ci] pre-commit autoupdate (#12599) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.9.7 → v0.9.9](https://github.com/astral-sh/ruff-pre-commit/compare/v0.9.7...v0.9.9) - [github.com/tox-dev/pyproject-fmt: v2.5.0 → v2.5.1](https://github.com/tox-dev/pyproject-fmt/compare/v2.5.0...v2.5.1) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8de90b11767f..a0952928a775 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.9.7 + rev: v0.9.9 hooks: - id: ruff - id: ruff-format @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "v2.5.0" + rev: "v2.5.1" hooks: - id: pyproject-fmt From a415a953c3f1bb741f14a4ba06f067e6d94653ed Mon Sep 17 00:00:00 2001 From: Ankana Pari <143877643+ankana2113@users.noreply.github.com> Date: Sun, 9 Mar 2025 03:05:07 +0530 Subject: [PATCH 222/260] Add largest rectangle histogram (#12269) * added ridge regression * added ridge regression * [pre-commit.ci] auto fixes from pre-commit.com hooks for 
more information, see https://pre-commit.ci * added ridge regression * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ridge regression * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * resolved errors * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * resolved conflicts * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added doctests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ruff and minor checks * minor chenges * minor checks * minor checks * minor changes * descriptive names * Fix ruff check in loss_functions.py * fixed pre-commit issues * added largest rectangle histogram function * added largest rectangle histogram function * Update frequent_pattern_growth.py * Update loss_functions.py * Delete machine_learning/ridge_regression/__init__.py * Delete machine_learning/ridge_regression/ADRvsRating.csv * Delete machine_learning/ridge_regression/ridge_regression.py * Delete machine_learning/ridge_regression/test_ridge_regression.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- .../stacks/largest_rectangle_histogram.py | 39 +++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 data_structures/stacks/largest_rectangle_histogram.py diff --git a/data_structures/stacks/largest_rectangle_histogram.py b/data_structures/stacks/largest_rectangle_histogram.py new file mode 100644 index 000000000000..7575bd9f628d --- /dev/null +++ b/data_structures/stacks/largest_rectangle_histogram.py @@ -0,0 +1,39 @@ +def largest_rectangle_area(heights: list[int]) -> int: + """ + Inputs an array of integers representing the heights of bars, + and returns the area of the largest rectangle that can be formed + + 
>>> largest_rectangle_area([2, 1, 5, 6, 2, 3]) + 10 + + >>> largest_rectangle_area([2, 4]) + 4 + + >>> largest_rectangle_area([6, 2, 5, 4, 5, 1, 6]) + 12 + + >>> largest_rectangle_area([1]) + 1 + """ + stack: list[int] = [] + max_area = 0 + heights = [*heights, 0] # make a new list by appending the sentinel 0 + n = len(heights) + + for i in range(n): + # make sure the stack remains in increasing order + while stack and heights[i] < heights[stack[-1]]: + h = heights[stack.pop()] # height of the bar + # if stack is empty, it means entire width can be taken from index 0 to i-1 + w = i if not stack else i - stack[-1] - 1 # calculate width + max_area = max(max_area, h * w) + + stack.append(i) + + return max_area + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 4fbd350b6e08aeb22741d694ce2e64182c66ac92 Mon Sep 17 00:00:00 2001 From: PAUL ADUTWUM Date: Sat, 8 Mar 2025 16:47:04 -0500 Subject: [PATCH 223/260] Improved test coverage in decimal_to_fraction.py (#12608) * Imporved test coverage in decimal_to_fraction.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update decimal_to_fraction.py * Update decimal_to_fraction.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- maths/decimal_to_fraction.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/maths/decimal_to_fraction.py b/maths/decimal_to_fraction.py index 2aa8e3c3dfd6..7f1299b33c5c 100644 --- a/maths/decimal_to_fraction.py +++ b/maths/decimal_to_fraction.py @@ -16,6 +16,20 @@ def decimal_to_fraction(decimal: float | str) -> tuple[int, int]: >>> decimal_to_fraction("78td") Traceback (most recent call last): ValueError: Please enter a valid number + >>> decimal_to_fraction(0) + (0, 1) + >>> decimal_to_fraction(-2.5) + (-5, 2) + >>> decimal_to_fraction(0.125) + (1, 8) + >>> decimal_to_fraction(1000000.25) + (4000001, 4) + >>> 
decimal_to_fraction(1.3333) + (13333, 10000) + >>> decimal_to_fraction("1.23e2") + (123, 1) + >>> decimal_to_fraction("0.500") + (1, 2) """ try: decimal = float(decimal) From e3fb5309da98e2d07699ae39eb0a55836a063532 Mon Sep 17 00:00:00 2001 From: PAUL ADUTWUM Date: Sat, 8 Mar 2025 16:52:20 -0500 Subject: [PATCH 224/260] Improve decimal_to_fraction.py (#12611) * Update decimal_to_fraction.py * Update decimal_to_fraction.py --------- Co-authored-by: Maxim Smolskiy --- maths/decimal_to_fraction.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/maths/decimal_to_fraction.py b/maths/decimal_to_fraction.py index 7f1299b33c5c..be42b9fb3b5a 100644 --- a/maths/decimal_to_fraction.py +++ b/maths/decimal_to_fraction.py @@ -48,8 +48,8 @@ def decimal_to_fraction(decimal: float | str) -> tuple[int, int]: if remainder == 0: break dividend, divisor = divisor, remainder - numerator, denominator = numerator / divisor, denominator / divisor - return int(numerator), int(denominator) + numerator, denominator = numerator // divisor, denominator // divisor + return numerator, denominator if __name__ == "__main__": From 23eb17462940e20b830aacce5d2eb80113a7f973 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 11 Mar 2025 08:51:28 +0100 Subject: [PATCH 225/260] [pre-commit.ci] pre-commit autoupdate (#12614) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.9.9 → v0.9.10](https://github.com/astral-sh/ruff-pre-commit/compare/v0.9.9...v0.9.10) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] --- .pre-commit-config.yaml | 2 +- DIRECTORY.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 
a0952928a775..32580f8c7398 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.9.9 + rev: v0.9.10 hooks: - id: ruff - id: ruff-format diff --git a/DIRECTORY.md b/DIRECTORY.md index ab3259b9a766..1c02c191bd14 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -289,6 +289,7 @@ * [Dijkstras Two Stack Algorithm](data_structures/stacks/dijkstras_two_stack_algorithm.py) * [Infix To Postfix Conversion](data_structures/stacks/infix_to_postfix_conversion.py) * [Infix To Prefix Conversion](data_structures/stacks/infix_to_prefix_conversion.py) + * [Largest Rectangle Histogram](data_structures/stacks/largest_rectangle_histogram.py) * [Lexicographical Numbers](data_structures/stacks/lexicographical_numbers.py) * [Next Greater Element](data_structures/stacks/next_greater_element.py) * [Postfix Evaluation](data_structures/stacks/postfix_evaluation.py) From 7ce998b91c45090bd9c4cdfac6ed0220497b4810 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 11 Mar 2025 17:29:13 +0300 Subject: [PATCH 226/260] Fix some RUF012 per file ignores (#11399) * updating DIRECTORY.md * Fix some RUF012 per file ignores * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix * Fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix * Improve * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Improve * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: MaximSmolskiy Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- other/lfu_cache.py | 18 ++++++++++-------- other/lru_cache.py | 18 ++++++++++-------- pyproject.toml | 3 --- 3 files changed, 20 insertions(+), 19 deletions(-) diff --git a/other/lfu_cache.py 
b/other/lfu_cache.py index 788fdf19bb60..5a143c739b9d 100644 --- a/other/lfu_cache.py +++ b/other/lfu_cache.py @@ -196,9 +196,6 @@ class LFUCache(Generic[T, U]): CacheInfo(hits=196, misses=100, capacity=100, current_size=100) """ - # class variable to map the decorator functions to their respective instance - decorator_function_to_instance_map: dict[Callable[[T], U], LFUCache[T, U]] = {} - def __init__(self, capacity: int): self.list: DoubleLinkedList[T, U] = DoubleLinkedList() self.capacity = capacity @@ -291,18 +288,23 @@ def decorator( """ def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]: + # variable to map the decorator functions to their respective instance + decorator_function_to_instance_map: dict[ + Callable[[T], U], LFUCache[T, U] + ] = {} + def cache_decorator_wrapper(*args: T) -> U: - if func not in cls.decorator_function_to_instance_map: - cls.decorator_function_to_instance_map[func] = LFUCache(size) + if func not in decorator_function_to_instance_map: + decorator_function_to_instance_map[func] = LFUCache(size) - result = cls.decorator_function_to_instance_map[func].get(args[0]) + result = decorator_function_to_instance_map[func].get(args[0]) if result is None: result = func(*args) - cls.decorator_function_to_instance_map[func].put(args[0], result) + decorator_function_to_instance_map[func].put(args[0], result) return result def cache_info() -> LFUCache[T, U]: - return cls.decorator_function_to_instance_map[func] + return decorator_function_to_instance_map[func] setattr(cache_decorator_wrapper, "cache_info", cache_info) # noqa: B010 diff --git a/other/lru_cache.py b/other/lru_cache.py index 1e5eeac45b4e..4f0c843c86cc 100644 --- a/other/lru_cache.py +++ b/other/lru_cache.py @@ -209,9 +209,6 @@ class LRUCache(Generic[T, U]): CacheInfo(hits=194, misses=99, capacity=100, current size=99) """ - # class variable to map the decorator functions to their respective instance - decorator_function_to_instance_map: dict[Callable[[T], U], 
LRUCache[T, U]] = {} - def __init__(self, capacity: int): self.list: DoubleLinkedList[T, U] = DoubleLinkedList() self.capacity = capacity @@ -308,18 +305,23 @@ def decorator( """ def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]: + # variable to map the decorator functions to their respective instance + decorator_function_to_instance_map: dict[ + Callable[[T], U], LRUCache[T, U] + ] = {} + def cache_decorator_wrapper(*args: T) -> U: - if func not in cls.decorator_function_to_instance_map: - cls.decorator_function_to_instance_map[func] = LRUCache(size) + if func not in decorator_function_to_instance_map: + decorator_function_to_instance_map[func] = LRUCache(size) - result = cls.decorator_function_to_instance_map[func].get(args[0]) + result = decorator_function_to_instance_map[func].get(args[0]) if result is None: result = func(*args) - cls.decorator_function_to_instance_map[func].put(args[0], result) + decorator_function_to_instance_map[func].put(args[0], result) return result def cache_info() -> LRUCache[T, U]: - return cls.decorator_function_to_instance_map[func] + return decorator_function_to_instance_map[func] setattr(cache_decorator_wrapper, "cache_info", cache_info) # noqa: B010 diff --git a/pyproject.toml b/pyproject.toml index 2135f1f5825a..4a76c4ad6d11 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -135,9 +135,6 @@ lint.per-file-ignores."machine_learning/sequential_minimum_optimization.py" = [ lint.per-file-ignores."matrix/sherman_morrison.py" = [ "SIM103", ] -lint.per-file-ignores."other/l*u_cache.py" = [ - "RUF012", -] lint.per-file-ignores."physics/newtons_second_law_of_motion.py" = [ "BLE001", ] From edf7c372a9a6a3e01a33ef92021d958029e99319 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 18 Mar 2025 09:53:49 +0100 Subject: [PATCH 227/260] [pre-commit.ci] pre-commit autoupdate (#12623) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.9.10 → v0.11.0](https://github.com/astral-sh/ruff-pre-commit/compare/v0.9.10...v0.11.0) - [github.com/abravalheri/validate-pyproject: v0.23 → v0.24](https://github.com/abravalheri/validate-pyproject/compare/v0.23...v0.24) * Fix ruff issues --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 4 ++-- conversions/prefix_conversions_string.py | 4 ++-- data_structures/arrays/sudoku_solver.py | 4 ++-- graphics/digital_differential_analyzer_line.py | 2 +- graphs/minimum_spanning_tree_prims2.py | 4 ++-- hashes/enigma_machine.py | 4 ++-- linear_algebra/src/test_linear_algebra.py | 2 +- maths/primelib.py | 2 +- other/davis_putnam_logemann_loveland.py | 2 +- other/quine.py | 2 +- project_euler/problem_028/sol1.py | 2 +- pyproject.toml | 1 + scripts/validate_filenames.py | 17 +++++++---------- sorts/external_sort.py | 2 +- strings/frequency_finder.py | 2 +- 15 files changed, 26 insertions(+), 28 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 32580f8c7398..5deb66a5e5a2 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.9.10 + rev: v0.11.0 hooks: - id: ruff - id: ruff-format @@ -42,7 +42,7 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.23 + rev: v0.24 hooks: - id: validate-pyproject diff --git a/conversions/prefix_conversions_string.py b/conversions/prefix_conversions_string.py index 9344c9672a1f..c5fef49874ca 100644 --- a/conversions/prefix_conversions_string.py +++ b/conversions/prefix_conversions_string.py @@ -53,7 +53,7 @@ class SIUnit(Enum): yocto = -24 @classmethod - def get_positive(cls: type[T]) -> dict: + def get_positive(cls) -> 
dict: """ Returns a dictionary with only the elements of this enum that has a positive value @@ -68,7 +68,7 @@ def get_positive(cls: type[T]) -> dict: return {unit.name: unit.value for unit in cls if unit.value > 0} @classmethod - def get_negative(cls: type[T]) -> dict: + def get_negative(cls) -> dict: """ Returns a dictionary with only the elements of this enum that has a negative value diff --git a/data_structures/arrays/sudoku_solver.py b/data_structures/arrays/sudoku_solver.py index e1714e57ece8..4c722f12fd6e 100644 --- a/data_structures/arrays/sudoku_solver.py +++ b/data_structures/arrays/sudoku_solver.py @@ -54,7 +54,7 @@ def parse_grid(grid): return False if a contradiction is detected. """ ## To start, every square can be any digit; then assign values from the grid. - values = {s: digits for s in squares} + values = dict.fromkeys(squares, digits) for s, d in grid_values(grid).items(): if d in digits and not assign(values, s, d): return False ## (Fail if we can't assign d to square s.) @@ -203,7 +203,7 @@ def random_puzzle(assignments=17): Note the resulting puzzle is not guaranteed to be solvable, but empirically about 99.8% of them are solvable. Some have multiple solutions. 
""" - values = {s: digits for s in squares} + values = dict.fromkeys(squares, digits) for s in shuffled(squares): if not assign(values, s, random.choice(values[s])): break diff --git a/graphics/digital_differential_analyzer_line.py b/graphics/digital_differential_analyzer_line.py index a51cb0b8dc37..f7269ab09856 100644 --- a/graphics/digital_differential_analyzer_line.py +++ b/graphics/digital_differential_analyzer_line.py @@ -29,7 +29,7 @@ def digital_differential_analyzer_line( for _ in range(steps): x += x_increment y += y_increment - coordinates.append((int(round(x)), int(round(y)))) + coordinates.append((round(x), round(y))) return coordinates diff --git a/graphs/minimum_spanning_tree_prims2.py b/graphs/minimum_spanning_tree_prims2.py index cc918f81dfe8..6870cc80f844 100644 --- a/graphs/minimum_spanning_tree_prims2.py +++ b/graphs/minimum_spanning_tree_prims2.py @@ -239,8 +239,8 @@ def prims_algo( 13 """ # prim's algorithm for minimum spanning tree - dist: dict[T, int] = {node: maxsize for node in graph.connections} - parent: dict[T, T | None] = {node: None for node in graph.connections} + dist: dict[T, int] = dict.fromkeys(graph.connections, maxsize) + parent: dict[T, T | None] = dict.fromkeys(graph.connections) priority_queue: MinPriorityQueue[T] = MinPriorityQueue() for node, weight in dist.items(): diff --git a/hashes/enigma_machine.py b/hashes/enigma_machine.py index d95437d12c34..0da8e4113de9 100644 --- a/hashes/enigma_machine.py +++ b/hashes/enigma_machine.py @@ -15,12 +15,12 @@ def rotator(): gear_one.append(i) del gear_one[0] gear_one_pos += 1 - if gear_one_pos % int(len(alphabets)) == 0: + if gear_one_pos % len(alphabets) == 0: i = gear_two[0] gear_two.append(i) del gear_two[0] gear_two_pos += 1 - if gear_two_pos % int(len(alphabets)) == 0: + if gear_two_pos % len(alphabets) == 0: i = gear_three[0] gear_three.append(i) del gear_three[0] diff --git a/linear_algebra/src/test_linear_algebra.py b/linear_algebra/src/test_linear_algebra.py index 
fc5f90fd5cbe..5209c152013e 100644 --- a/linear_algebra/src/test_linear_algebra.py +++ b/linear_algebra/src/test_linear_algebra.py @@ -181,7 +181,7 @@ def test_component_matrix(self) -> None: test for Matrix method component() """ a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) - assert a.component(2, 1) == 7, 0.01 + assert a.component(2, 1) == 7, "0.01" def test__add__matrix(self) -> None: """ diff --git a/maths/primelib.py b/maths/primelib.py index 3a966e5cd936..9f031efc50a9 100644 --- a/maths/primelib.py +++ b/maths/primelib.py @@ -76,7 +76,7 @@ def is_prime(number: int) -> bool: if number <= 1: status = False - for divisor in range(2, int(round(sqrt(number))) + 1): + for divisor in range(2, round(sqrt(number)) + 1): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: diff --git a/other/davis_putnam_logemann_loveland.py b/other/davis_putnam_logemann_loveland.py index e95bf371a817..7d0bcce15a29 100644 --- a/other/davis_putnam_logemann_loveland.py +++ b/other/davis_putnam_logemann_loveland.py @@ -36,7 +36,7 @@ def __init__(self, literals: list[str]) -> None: Represent the literals and an assignment in a clause." 
""" # Assign all literals to None initially - self.literals: dict[str, bool | None] = {literal: None for literal in literals} + self.literals: dict[str, bool | None] = dict.fromkeys(literals) def __str__(self) -> str: """ diff --git a/other/quine.py b/other/quine.py index 08e885bc1ce7..0fc78333fed1 100644 --- a/other/quine.py +++ b/other/quine.py @@ -1,5 +1,5 @@ #!/bin/python3 -# ruff: noqa +# ruff: noqa: PLC3002 """ Quine: diff --git a/project_euler/problem_028/sol1.py b/project_euler/problem_028/sol1.py index 1ea5d4fcafd4..0a4648af36c4 100644 --- a/project_euler/problem_028/sol1.py +++ b/project_euler/problem_028/sol1.py @@ -37,7 +37,7 @@ def solution(n: int = 1001) -> int: """ total = 1 - for i in range(1, int(ceil(n / 2.0))): + for i in range(1, ceil(n / 2.0)): odd = 2 * i + 1 even = 2 * i total = total + 4 * odd**2 - 6 * even diff --git a/pyproject.toml b/pyproject.toml index 4a76c4ad6d11..60f8d4ffc96f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -119,6 +119,7 @@ lint.ignore = [ "PT018", # Assertion should be broken down into multiple parts "S101", # Use of `assert` detected -- DO NOT FIX "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME + "SIM905", # Consider using a list literal instead of `str.split` -- DO NOT FIX "SLF001", # Private member accessed: `_Iterator` -- FIX ME "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX ] diff --git a/scripts/validate_filenames.py b/scripts/validate_filenames.py index e76b4dbfe288..80399673cced 100755 --- a/scripts/validate_filenames.py +++ b/scripts/validate_filenames.py @@ -9,28 +9,25 @@ filepaths = list(good_file_paths()) assert filepaths, "good_file_paths() failed!" 
-upper_files = [file for file in filepaths if file != file.lower()] -if upper_files: +if upper_files := [file for file in filepaths if file != file.lower()]: print(f"{len(upper_files)} files contain uppercase characters:") print("\n".join(upper_files) + "\n") -space_files = [file for file in filepaths if " " in file] -if space_files: +if space_files := [file for file in filepaths if " " in file]: print(f"{len(space_files)} files contain space characters:") print("\n".join(space_files) + "\n") -hyphen_files = [file for file in filepaths if "-" in file] -if hyphen_files: +if hyphen_files := [ + file for file in filepaths if "-" in file and "/site-packages/" not in file +]: print(f"{len(hyphen_files)} files contain hyphen characters:") print("\n".join(hyphen_files) + "\n") -nodir_files = [file for file in filepaths if os.sep not in file] -if nodir_files: +if nodir_files := [file for file in filepaths if os.sep not in file]: print(f"{len(nodir_files)} files are not in a directory:") print("\n".join(nodir_files) + "\n") -bad_files = len(upper_files + space_files + hyphen_files + nodir_files) -if bad_files: +if bad_files := len(upper_files + space_files + hyphen_files + nodir_files): import sys sys.exit(bad_files) diff --git a/sorts/external_sort.py b/sorts/external_sort.py index 3fa7cacc0592..cfddee4fe7f8 100644 --- a/sorts/external_sort.py +++ b/sorts/external_sort.py @@ -61,7 +61,7 @@ def __init__(self, files): self.files = files self.empty = set() self.num_buffers = len(files) - self.buffers = {i: None for i in range(self.num_buffers)} + self.buffers = dict.fromkeys(range(self.num_buffers)) def get_dict(self): return { diff --git a/strings/frequency_finder.py b/strings/frequency_finder.py index e5afee891bd9..98720dc36d6e 100644 --- a/strings/frequency_finder.py +++ b/strings/frequency_finder.py @@ -36,7 +36,7 @@ def get_letter_count(message: str) -> dict[str, int]: - letter_count = {letter: 0 for letter in string.ascii_uppercase} + letter_count = 
dict.fromkeys(string.ascii_uppercase, 0) for letter in message.upper(): if letter in LETTERS: letter_count[letter] += 1 From 580273eeca28c30a8a5da114800d21b89fdfb930 Mon Sep 17 00:00:00 2001 From: Pranjay kumar <110048711+pranjaykumar926@users.noreply.github.com> Date: Thu, 20 Mar 2025 05:03:46 +0530 Subject: [PATCH 228/260] Improve prefix_sum.py (#12560) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update prefix_sum.py Index Validation for get_sum Raises ValueError if start or end is out of range or start > end. Handles cases where the array is empty. ✅ Empty Array Support If an empty array is passed, get_sum raises an appropriate error instead of failing unexpectedly. ✅ Optimized contains_sum Initialization Initializes sums with {0} for efficient subarray sum checking. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update prefix_sum.py * Update prefix_sum.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update prefix_sum.py * Update prefix_sum.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- data_structures/arrays/prefix_sum.py | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/data_structures/arrays/prefix_sum.py b/data_structures/arrays/prefix_sum.py index 2243a5308937..717b5f9d7e7e 100644 --- a/data_structures/arrays/prefix_sum.py +++ b/data_structures/arrays/prefix_sum.py @@ -30,11 +30,29 @@ def get_sum(self, start: int, end: int) -> int: 5 >>> PrefixSum([1,2,3]).get_sum(2, 2) 3 + >>> PrefixSum([]).get_sum(0, 0) + Traceback (most recent call last): + ... + ValueError: The array is empty. + >>> PrefixSum([1,2,3]).get_sum(-1, 2) + Traceback (most recent call last): + ... + ValueError: Invalid range specified. 
>>> PrefixSum([1,2,3]).get_sum(2, 3) Traceback (most recent call last): ... - IndexError: list index out of range + ValueError: Invalid range specified. + >>> PrefixSum([1,2,3]).get_sum(2, 1) + Traceback (most recent call last): + ... + ValueError: Invalid range specified. """ + if not self.prefix_sum: + raise ValueError("The array is empty.") + + if start < 0 or end >= len(self.prefix_sum) or start > end: + raise ValueError("Invalid range specified.") + if start == 0: return self.prefix_sum[end] From e3773dbec1504de17047c4fe013c0f1aaef20b38 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 24 Mar 2025 20:05:41 +0100 Subject: [PATCH 229/260] [pre-commit.ci] pre-commit autoupdate (#12631) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.11.0 → v0.11.2](https://github.com/astral-sh/ruff-pre-commit/compare/v0.11.0...v0.11.2) - [github.com/abravalheri/validate-pyproject: v0.24 → v0.24.1](https://github.com/abravalheri/validate-pyproject/compare/v0.24...v0.24.1) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 5deb66a5e5a2..0fc8b2b14e07 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.11.0 + rev: v0.11.2 hooks: - id: ruff - id: ruff-format @@ -42,7 +42,7 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.24 + rev: v0.24.1 hooks: - id: validate-pyproject From 74b540ad73bd3b1187ed6e3c89bb8f309ef543fd Mon Sep 17 00:00:00 2001 From: Tony Dang <62843153+Dang-Hoang-Tung@users.noreply.github.com> Date: Sat, 29 Mar 2025 08:13:47 +0000 Subject: [PATCH 
230/260] Genetic Algorithm: Fix bug in multi-threading (#12644) * Fix bug in multi-threading - Multi-threading (despite being commented out) had a tiny bug: missing target argument (2nd argument). - Commented out code was also slightly hard to understand, added (Option 1/2) in comments to clarify where a user may choose between 2 implementations. * Update basic_string.py --------- Co-authored-by: Maxim Smolskiy --- genetic_algorithm/basic_string.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/genetic_algorithm/basic_string.py b/genetic_algorithm/basic_string.py index a906ce85a779..b75491d9a949 100644 --- a/genetic_algorithm/basic_string.py +++ b/genetic_algorithm/basic_string.py @@ -144,18 +144,18 @@ def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, # Random population created. Now it's time to evaluate. - # Adding a bit of concurrency can make everything faster, + # (Option 1) Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: - # futures = {executor.submit(evaluate, item) for item in population} + # futures = {executor.submit(evaluate, item, target) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. - # We just need to call evaluate for every item inside the population. + # (Option 2) We just need to call evaluate for every item inside the population. population_score = [evaluate(item, target) for item in population] # Check if there is a matching evolution. 
From f10a5cbfccc5ee9ddb5ddd9906591ecaad58f672 Mon Sep 17 00:00:00 2001 From: Isidro Date: Mon, 31 Mar 2025 23:09:14 +0200 Subject: [PATCH 231/260] prefix_evaluation: Add alternative recursive implementation (#12646) * prefix_evaluation: Add alternative recursive implementation * improve doc * better variable name calc->operators * Update prefix_evaluation.py * Update prefix_evaluation.py * Update prefix_evaluation.py * Update prefix_evaluation.py --------- Co-authored-by: Maxim Smolskiy --- data_structures/stacks/prefix_evaluation.py | 39 +++++++++++++++++++-- 1 file changed, 36 insertions(+), 3 deletions(-) diff --git a/data_structures/stacks/prefix_evaluation.py b/data_structures/stacks/prefix_evaluation.py index f48eca23d7b5..03a70d884725 100644 --- a/data_structures/stacks/prefix_evaluation.py +++ b/data_structures/stacks/prefix_evaluation.py @@ -1,8 +1,9 @@ """ -Python3 program to evaluate a prefix expression. +Program to evaluate a prefix expression. +https://en.wikipedia.org/wiki/Polish_notation """ -calc = { +operators = { "+": lambda x, y: x + y, "-": lambda x, y: x - y, "*": lambda x, y: x * y, @@ -31,6 +32,10 @@ def evaluate(expression): 21 >>> evaluate("/ * 10 2 + 4 1 ") 4.0 + >>> evaluate("2") + 2 + >>> evaluate("+ * 2 3 / 8 4") + 8.0 """ stack = [] @@ -45,11 +50,39 @@ def evaluate(expression): # push the result onto the stack again o1 = stack.pop() o2 = stack.pop() - stack.append(calc[c](o1, o2)) + stack.append(operators[c](o1, o2)) return stack.pop() +def evaluate_recursive(expression: list[str]): + """ + Alternative recursive implementation + + >>> evaluate_recursive(['2']) + 2 + >>> expression = ['+', '*', '2', '3', '/', '8', '4'] + >>> evaluate_recursive(expression) + 8.0 + >>> expression + [] + >>> evaluate_recursive(['+', '9', '*', '2', '6']) + 21 + >>> evaluate_recursive(['/', '*', '10', '2', '+', '4', '1']) + 4.0 + """ + + op = expression.pop(0) + if is_operand(op): + return int(op) + + operation = operators[op] + + a = 
evaluate_recursive(expression) + b = evaluate_recursive(expression) + return operation(a, b) + + # Driver code if __name__ == "__main__": test_expression = "+ 9 * 2 6" From baab802965c37fa1740054a559cad8c119b2ee35 Mon Sep 17 00:00:00 2001 From: Isidro Date: Tue, 1 Apr 2025 20:55:14 +0200 Subject: [PATCH 232/260] doubly linked list: add dataclass and typing (#12647) * Node is a dataclass * fix mypy errors * LinkedList is a dataclass * fix mypy errors * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py --------- Co-authored-by: Maxim Smolskiy --- .../linked_list/doubly_linked_list_two.py | 62 +++++++++---------- 1 file changed, 29 insertions(+), 33 deletions(-) diff --git a/data_structures/linked_list/doubly_linked_list_two.py b/data_structures/linked_list/doubly_linked_list_two.py index e993cc5a20af..3d3bfb0cde30 100644 --- a/data_structures/linked_list/doubly_linked_list_two.py +++ b/data_structures/linked_list/doubly_linked_list_two.py @@ -9,25 +9,19 @@ Delete operation is more efficient """ +from dataclasses import dataclass +from typing import Self + +@dataclass class Node: - def __init__(self, data: int, previous=None, next_node=None): - self.data = data - self.previous = previous - self.next = next_node + data: int + previous: Self | None = None + next: Self | None = None def __str__(self) -> str: return f"{self.data}" - def get_data(self) -> int: - return self.data - - def get_next(self): - return self.next - - def get_previous(self): - return self.previous - class LinkedListIterator: def __init__(self, head): @@ -40,30 +34,30 @@ def __next__(self): if not self.current: raise StopIteration else: - value = self.current.get_data() - 
self.current = self.current.get_next() + value = self.current.data + self.current = self.current.next return value +@dataclass class LinkedList: - def __init__(self): - self.head = None # First node in list - self.tail = None # Last node in list + head: Node | None = None # First node in list + tail: Node | None = None # Last node in list def __str__(self): current = self.head nodes = [] while current is not None: - nodes.append(current.get_data()) - current = current.get_next() + nodes.append(current.data) + current = current.next return " ".join(str(node) for node in nodes) def __contains__(self, value: int): current = self.head while current: - if current.get_data() == value: + if current.data == value: return True - current = current.get_next() + current = current.next return False def __iter__(self): @@ -71,12 +65,12 @@ def __iter__(self): def get_head_data(self): if self.head: - return self.head.get_data() + return self.head.data return None def get_tail_data(self): if self.tail: - return self.tail.get_data() + return self.tail.data return None def set_head(self, node: Node) -> None: @@ -103,18 +97,20 @@ def insert_before_node(self, node: Node, node_to_insert: Node) -> None: node_to_insert.next = node node_to_insert.previous = node.previous - if node.get_previous() is None: + if node.previous is None: self.head = node_to_insert else: node.previous.next = node_to_insert node.previous = node_to_insert - def insert_after_node(self, node: Node, node_to_insert: Node) -> None: + def insert_after_node(self, node: Node | None, node_to_insert: Node) -> None: + assert node is not None + node_to_insert.previous = node node_to_insert.next = node.next - if node.get_next() is None: + if node.next is None: self.tail = node_to_insert else: node.next.previous = node_to_insert @@ -136,27 +132,27 @@ def insert_at_position(self, position: int, value: int) -> None: def get_node(self, item: int) -> Node: node = self.head while node: - if node.get_data() == item: + if node.data == 
item: return node - node = node.get_next() + node = node.next raise Exception("Node not found") def delete_value(self, value): if (node := self.get_node(value)) is not None: if node == self.head: - self.head = self.head.get_next() + self.head = self.head.next if node == self.tail: - self.tail = self.tail.get_previous() + self.tail = self.tail.previous self.remove_node_pointers(node) @staticmethod def remove_node_pointers(node: Node) -> None: - if node.get_next(): + if node.next: node.next.previous = node.previous - if node.get_previous(): + if node.previous: node.previous.next = node.next node.next = None From 0c8cf8e9871a5f91182d767adf173dccf87c2c0f Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Wed, 2 Apr 2025 10:23:55 +0300 Subject: [PATCH 233/260] Fix bug for data_structures/linked_list/doubly_linked_list_two.py (#12651) * Fix bug for data_structures/linked_list/doubly_linked_list_two.py * Fix * Fix * Fix * Fix * Fix * Fix * Fix * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py --- .../linked_list/doubly_linked_list_two.py | 27 ++++++++++++++----- 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/data_structures/linked_list/doubly_linked_list_two.py b/data_structures/linked_list/doubly_linked_list_two.py index 3d3bfb0cde30..8c93cddd5d31 100644 --- a/data_structures/linked_list/doubly_linked_list_two.py +++ b/data_structures/linked_list/doubly_linked_list_two.py @@ -81,8 +81,9 @@ def set_head(self, node: Node) -> None: self.insert_before_node(self.head, node) def set_tail(self, node: Node) -> None: - if self.head is None: - self.set_head(node) + if self.tail is None: + self.head = 
node + self.tail = node else: self.insert_after_node(self.tail, node) @@ -104,9 +105,7 @@ def insert_before_node(self, node: Node, node_to_insert: Node) -> None: node.previous = node_to_insert - def insert_after_node(self, node: Node | None, node_to_insert: Node) -> None: - assert node is not None - + def insert_after_node(self, node: Node, node_to_insert: Node) -> None: node_to_insert.previous = node node_to_insert.next = node.next @@ -127,7 +126,7 @@ def insert_at_position(self, position: int, value: int) -> None: return current_position += 1 node = node.next - self.insert_after_node(self.tail, new_node) + self.set_tail(new_node) def get_node(self, item: int) -> Node: node = self.head @@ -237,6 +236,22 @@ def create_linked_list() -> None: 7 8 9 + >>> linked_list = LinkedList() + >>> linked_list.insert_at_position(position=1, value=10) + >>> str(linked_list) + '10' + >>> linked_list.insert_at_position(position=2, value=20) + >>> str(linked_list) + '10 20' + >>> linked_list.insert_at_position(position=1, value=30) + >>> str(linked_list) + '30 10 20' + >>> linked_list.insert_at_position(position=3, value=40) + >>> str(linked_list) + '30 10 40 20' + >>> linked_list.insert_at_position(position=5, value=50) + >>> str(linked_list) + '30 10 40 20 50' """ From 5afe02994eb0aafb0b462fec32fe5f6ecedf7305 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 7 Apr 2025 23:20:19 +0200 Subject: [PATCH 234/260] [pre-commit.ci] pre-commit autoupdate (#12661) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.11.2 → v0.11.4](https://github.com/astral-sh/ruff-pre-commit/compare/v0.11.2...v0.11.4) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml 
index 0fc8b2b14e07..20065c433062 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.11.2 + rev: v0.11.4 hooks: - id: ruff - id: ruff-format From a4576dc2a42cbfc7585a7ce6f28917cb97f83c45 Mon Sep 17 00:00:00 2001 From: Kim Date: Wed, 9 Apr 2025 15:24:37 +0900 Subject: [PATCH 235/260] fix: correct typo "util" to "until" (#12653) --- dynamic_programming/bitmask.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dynamic_programming/bitmask.py b/dynamic_programming/bitmask.py index a6e6a0cda7bf..4737a3419e8e 100644 --- a/dynamic_programming/bitmask.py +++ b/dynamic_programming/bitmask.py @@ -42,7 +42,7 @@ def count_ways_until(self, mask, task_no): return self.dp[mask][task_no] # Number of ways when we don't this task in the arrangement - total_ways_util = self.count_ways_until(mask, task_no + 1) + total_ways_until = self.count_ways_until(mask, task_no + 1) # now assign the tasks one by one to all possible persons and recursively # assign for the remaining tasks. @@ -54,10 +54,10 @@ def count_ways_until(self, mask, task_no): # assign this task to p and change the mask value. And recursively # assign tasks with the new mask value. - total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1) + total_ways_until += self.count_ways_until(mask | (1 << p), task_no + 1) # save the value. 
- self.dp[mask][task_no] = total_ways_util + self.dp[mask][task_no] = total_ways_until return self.dp[mask][task_no] From 4ed61418a8fef5a0fe3c5a05a49c7cbc5ac8298c Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 14 Apr 2025 19:55:55 +0200 Subject: [PATCH 236/260] [pre-commit.ci] pre-commit autoupdate (#12671) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.11.4 → v0.11.5](https://github.com/astral-sh/ruff-pre-commit/compare/v0.11.4...v0.11.5) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 20065c433062..8a8697ca778a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.11.4 + rev: v0.11.5 hooks: - id: ruff - id: ruff-format From cc621f1fddac7391389270d7bc38326507b4b495 Mon Sep 17 00:00:00 2001 From: parth-6945 <140963864+parth-6945@users.noreply.github.com> Date: Mon, 14 Apr 2025 23:31:29 +0530 Subject: [PATCH 237/260] Add find_unique_number algorithm to bit manipulation (#12654) * Add find_unique_number algorithm to bit manipulation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- bit_manipulation/find_unique_number.py | 37 ++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 bit_manipulation/find_unique_number.py diff --git a/bit_manipulation/find_unique_number.py b/bit_manipulation/find_unique_number.py new file mode 100644 index 000000000000..77970b4865d1 --- /dev/null +++ b/bit_manipulation/find_unique_number.py 
@@ -0,0 +1,37 @@ +def find_unique_number(arr: list[int]) -> int: + """ + Given a list of integers where every element appears twice except for one, + this function returns the element that appears only once using bitwise XOR. + + >>> find_unique_number([1, 1, 2, 2, 3]) + 3 + >>> find_unique_number([4, 5, 4, 6, 6]) + 5 + >>> find_unique_number([7]) + 7 + >>> find_unique_number([10, 20, 10]) + 20 + >>> find_unique_number([]) + Traceback (most recent call last): + ... + ValueError: input list must not be empty + >>> find_unique_number([1, 'a', 1]) + Traceback (most recent call last): + ... + TypeError: all elements must be integers + """ + if not arr: + raise ValueError("input list must not be empty") + if not all(isinstance(x, int) for x in arr): + raise TypeError("all elements must be integers") + + result = 0 + for num in arr: + result ^= num + return result + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From d123cbc649e0777717b02dc74b4466872941a8d5 Mon Sep 17 00:00:00 2001 From: Mindaugas <76015221+mindaugl@users.noreply.github.com> Date: Tue, 15 Apr 2025 02:30:25 +0800 Subject: [PATCH 238/260] Solution for the Euler Project Problem 122 (#12655) * Add initial version for euler project problem 122. * Add doctests and documentation for the project euler problem 122. 
* Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py --------- Co-authored-by: Maxim Smolskiy --- project_euler/problem_122/__init__.py | 0 project_euler/problem_122/sol1.py | 89 +++++++++++++++++++++++++++ 2 files changed, 89 insertions(+) create mode 100644 project_euler/problem_122/__init__.py create mode 100644 project_euler/problem_122/sol1.py diff --git a/project_euler/problem_122/__init__.py b/project_euler/problem_122/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_122/sol1.py b/project_euler/problem_122/sol1.py new file mode 100644 index 000000000000..cd8b1e67708c --- /dev/null +++ b/project_euler/problem_122/sol1.py @@ -0,0 +1,89 @@ +""" +Project Euler Problem 122: https://projecteuler.net/problem=122 + +Efficient Exponentiation + +The most naive way of computing n^15 requires fourteen multiplications: + + n x n x ... x n = n^15. + +But using a "binary" method you can compute it in six multiplications: + + n x n = n^2 + n^2 x n^2 = n^4 + n^4 x n^4 = n^8 + n^8 x n^4 = n^12 + n^12 x n^2 = n^14 + n^14 x n = n^15 + +However it is yet possible to compute it in only five multiplications: + + n x n = n^2 + n^2 x n = n^3 + n^3 x n^3 = n^6 + n^6 x n^6 = n^12 + n^12 x n^3 = n^15 + +We shall define m(k) to be the minimum number of multiplications to compute n^k; +for example m(15) = 5. + +Find sum_{k = 1}^200 m(k). + +It uses the fact that for rather small n, applicable for this problem, the solution +for each number can be formed by increasing the largest element. + +References: +- https://en.wikipedia.org/wiki/Addition_chain +""" + + +def solve(nums: list[int], goal: int, depth: int) -> bool: + """ + Checks if nums can have a sum equal to goal, given that length of nums does + not exceed depth. 
+ + >>> solve([1], 2, 2) + True + >>> solve([1], 2, 0) + False + """ + if len(nums) > depth: + return False + for el in nums: + if el + nums[-1] == goal: + return True + nums.append(el + nums[-1]) + if solve(nums=nums, goal=goal, depth=depth): + return True + del nums[-1] + return False + + +def solution(n: int = 200) -> int: + """ + Calculates sum of smallest number of multiplactions for each number up to + and including n. + + >>> solution(1) + 0 + >>> solution(2) + 1 + >>> solution(14) + 45 + >>> solution(15) + 50 + """ + total = 0 + for i in range(2, n + 1): + max_length = 0 + while True: + nums = [1] + max_length += 1 + if solve(nums=nums, goal=i, depth=max_length): + break + total += max_length + return total + + +if __name__ == "__main__": + print(f"{solution() = }") From 42820634f3795e7d7397ad2e688d091a79c1eb83 Mon Sep 17 00:00:00 2001 From: Naitik Dwivedi Date: Tue, 15 Apr 2025 00:27:13 +0530 Subject: [PATCH 239/260] Add matrix inversion algorithm using NumPy (#12657) * Create matrix_inversion.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update matrix_inversion.py * Update matrix_inversion.py * Update matrix_inversion.py * Update matrix_inversion.py * Update matrix_inversion.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- linear_algebra/matrix_inversion.py | 36 ++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 linear_algebra/matrix_inversion.py diff --git a/linear_algebra/matrix_inversion.py b/linear_algebra/matrix_inversion.py new file mode 100644 index 000000000000..50dae1c2e825 --- /dev/null +++ b/linear_algebra/matrix_inversion.py @@ -0,0 +1,36 @@ +import numpy as np + + +def invert_matrix(matrix: list[list[float]]) -> list[list[float]]: + """ + Returns the inverse of a square matrix using NumPy. + + Parameters: + matrix (list[list[float]]): A square matrix. 
+ + Returns: + list[list[float]]: Inverted matrix if invertible, else raises error. + + >>> invert_matrix([[4.0, 7.0], [2.0, 6.0]]) + [[0.6000000000000001, -0.7000000000000001], [-0.2, 0.4]] + >>> invert_matrix([[1.0, 2.0], [0.0, 0.0]]) + Traceback (most recent call last): + ... + ValueError: Matrix is not invertible + """ + np_matrix = np.array(matrix) + + try: + inv_matrix = np.linalg.inv(np_matrix) + except np.linalg.LinAlgError: + raise ValueError("Matrix is not invertible") + + return inv_matrix.tolist() + + +if __name__ == "__main__": + mat = [[4.0, 7.0], [2.0, 6.0]] + print("Original Matrix:") + print(mat) + print("Inverted Matrix:") + print(invert_matrix(mat)) From c585cb122718e219870ea7a2af110939b55e52f9 Mon Sep 17 00:00:00 2001 From: Mindaugas <76015221+mindaugl@users.noreply.github.com> Date: Fri, 18 Apr 2025 07:16:15 +0800 Subject: [PATCH 240/260] Solution for the Euler Project problem 136 (#12658) * Add initial version of file for the Euler project problem 136 solution. * Add documentation and tests for the Euler project problem 136 solution. 
* Update sol1.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update sol1.py * Update sol1.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py --------- Co-authored-by: Maxim Smolskiy Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- project_euler/problem_136/__init__.py | 0 project_euler/problem_136/sol1.py | 63 +++++++++++++++++++++++++++ 2 files changed, 63 insertions(+) create mode 100644 project_euler/problem_136/__init__.py create mode 100644 project_euler/problem_136/sol1.py diff --git a/project_euler/problem_136/__init__.py b/project_euler/problem_136/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_136/sol1.py b/project_euler/problem_136/sol1.py new file mode 100644 index 000000000000..688a9a5d7f24 --- /dev/null +++ b/project_euler/problem_136/sol1.py @@ -0,0 +1,63 @@ +""" +Project Euler Problem 136: https://projecteuler.net/problem=136 + +Singleton Difference + +The positive integers, x, y, and z, are consecutive terms of an arithmetic progression. +Given that n is a positive integer, the equation, x^2 - y^2 - z^2 = n, +has exactly one solution when n = 20: + 13^2 - 10^2 - 7^2 = 20. + +In fact there are twenty-five values of n below one hundred for which +the equation has a unique solution. + +How many values of n less than fifty million have exactly one solution? + +By change of variables + +x = y + delta +z = y - delta + +The expression can be rewritten: + +x^2 - y^2 - z^2 = y * (4 * delta - y) = n + +The algorithm loops over delta and y, which is restricted in upper and lower limits, +to count how many solutions each n has. +In the end it is counted how many n's have one solution. 
+""" + + +def solution(n_limit: int = 50 * 10**6) -> int: + """ + Define n count list and loop over delta, y to get the counts, then check + which n has count == 1. + + >>> solution(3) + 0 + >>> solution(10) + 3 + >>> solution(100) + 25 + >>> solution(110) + 27 + """ + n_sol = [0] * n_limit + + for delta in range(1, (n_limit + 1) // 4 + 1): + for y in range(4 * delta - 1, delta, -1): + n = y * (4 * delta - y) + if n >= n_limit: + break + n_sol[n] += 1 + + ans = 0 + for i in range(n_limit): + if n_sol[i] == 1: + ans += 1 + + return ans + + +if __name__ == "__main__": + print(f"{solution() = }") From a1aa6313e08657f0e9ae337afa81d6b6f95357c9 Mon Sep 17 00:00:00 2001 From: Samuel Willis <109305646+konsoleSam@users.noreply.github.com> Date: Thu, 17 Apr 2025 17:33:08 -0600 Subject: [PATCH 241/260] Adding time and a half pay calculator algorithm to financial folder (#12662) * Create time&half-pay.py * Update time&half-pay.py * Update time&half-pay.py * Rename time&half-pay.py to time_and_half_pay.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update time_and_half_pay.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update time_and_half_pay.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update time_and_half_pay.py * Update time_and_half_pay.py * Update time_and_half_pay.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- financial/time_and_half_pay.py | 40 ++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 financial/time_and_half_pay.py diff --git a/financial/time_and_half_pay.py b/financial/time_and_half_pay.py new file mode 100644 index 000000000000..c5dff1bc1ce1 --- /dev/null +++ b/financial/time_and_half_pay.py @@ -0,0 +1,40 @@ +""" +Calculate time and a half pay +""" + + 
+def pay(hours_worked: float, pay_rate: float, hours: float = 40) -> float: + """ + hours_worked = The total hours worked + pay_rate = Amount of money per hour + hours = Number of hours that must be worked before you receive time and a half + + >>> pay(41, 1) + 41.5 + >>> pay(65, 19) + 1472.5 + >>> pay(10, 1) + 10.0 + """ + # Check that all input parameters are float or integer + assert isinstance(hours_worked, (float, int)), ( + "Parameter 'hours_worked' must be of type 'int' or 'float'" + ) + assert isinstance(pay_rate, (float, int)), ( + "Parameter 'pay_rate' must be of type 'int' or 'float'" + ) + assert isinstance(hours, (float, int)), ( + "Parameter 'hours' must be of type 'int' or 'float'" + ) + + normal_pay = hours_worked * pay_rate + over_time = max(0, hours_worked - hours) + over_time_pay = over_time * pay_rate / 2 + return normal_pay + over_time_pay + + +if __name__ == "__main__": + # Test + import doctest + + doctest.testmod() From 9891d2bc3051ab4242ed041ea30edd10b94925bd Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 21 Apr 2025 19:54:11 +0200 Subject: [PATCH 242/260] [pre-commit.ci] pre-commit autoupdate (#12680) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.11.5 → v0.11.6](https://github.com/astral-sh/ruff-pre-commit/compare/v0.11.5...v0.11.6) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] --- .pre-commit-config.yaml | 2 +- DIRECTORY.md | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8a8697ca778a..8474deebb7ba 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: 
https://github.com/astral-sh/ruff-pre-commit - rev: v0.11.5 + rev: v0.11.6 hooks: - id: ruff - id: ruff-format diff --git a/DIRECTORY.md b/DIRECTORY.md index 1c02c191bd14..fa731e32ff23 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -40,6 +40,7 @@ * [Count Number Of One Bits](bit_manipulation/count_number_of_one_bits.py) * [Excess 3 Code](bit_manipulation/excess_3_code.py) * [Find Previous Power Of Two](bit_manipulation/find_previous_power_of_two.py) + * [Find Unique Number](bit_manipulation/find_unique_number.py) * [Gray Code Sequence](bit_manipulation/gray_code_sequence.py) * [Highest Set Bit](bit_manipulation/highest_set_bit.py) * [Index Of Rightmost Set Bit](bit_manipulation/index_of_rightmost_set_bit.py) @@ -442,6 +443,7 @@ * [Present Value](financial/present_value.py) * [Price Plus Tax](financial/price_plus_tax.py) * [Simple Moving Average](financial/simple_moving_average.py) + * [Time And Half Pay](financial/time_and_half_pay.py) ## Fractals * [Julia Sets](fractals/julia_sets.py) @@ -570,6 +572,7 @@ * [Gaussian Elimination](linear_algebra/gaussian_elimination.py) * [Jacobi Iteration Method](linear_algebra/jacobi_iteration_method.py) * [Lu Decomposition](linear_algebra/lu_decomposition.py) + * [Matrix Inversion](linear_algebra/matrix_inversion.py) * Src * [Conjugate Gradient](linear_algebra/src/conjugate_gradient.py) * [Gaussian Elimination Pivoting](linear_algebra/src/gaussian_elimination_pivoting.py) @@ -1153,6 +1156,8 @@ * [Sol1](project_euler/problem_120/sol1.py) * Problem 121 * [Sol1](project_euler/problem_121/sol1.py) + * Problem 122 + * [Sol1](project_euler/problem_122/sol1.py) * Problem 123 * [Sol1](project_euler/problem_123/sol1.py) * Problem 125 @@ -1163,6 +1168,8 @@ * [Sol1](project_euler/problem_131/sol1.py) * Problem 135 * [Sol1](project_euler/problem_135/sol1.py) + * Problem 136 + * [Sol1](project_euler/problem_136/sol1.py) * Problem 144 * [Sol1](project_euler/problem_144/sol1.py) * Problem 145 From 11a61d15dc3aebc69f153adca8568076a25f7110 Mon 
Sep 17 00:00:00 2001 From: Isidro Date: Mon, 21 Apr 2025 21:04:39 +0200 Subject: [PATCH 243/260] Generic type hint in DDL (#12677) * Generic type hint in DDL Instead of forcing int * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py --------- Co-authored-by: Maxim Smolskiy --- .../linked_list/doubly_linked_list_two.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/data_structures/linked_list/doubly_linked_list_two.py b/data_structures/linked_list/doubly_linked_list_two.py index 8c93cddd5d31..a7f639a6e289 100644 --- a/data_structures/linked_list/doubly_linked_list_two.py +++ b/data_structures/linked_list/doubly_linked_list_two.py @@ -10,12 +10,14 @@ """ from dataclasses import dataclass -from typing import Self +from typing import Self, TypeVar + +DataType = TypeVar("DataType") @dataclass -class Node: - data: int +class Node[DataType]: + data: DataType previous: Self | None = None next: Self | None = None @@ -52,7 +54,7 @@ def __str__(self): current = current.next return " ".join(str(node) for node in nodes) - def __contains__(self, value: int): + def __contains__(self, value: DataType): current = self.head while current: if current.data == value: @@ -87,7 +89,7 @@ def set_tail(self, node: Node) -> None: else: self.insert_after_node(self.tail, node) - def insert(self, value: int) -> None: + def insert(self, value: DataType) -> None: node = Node(value) if self.head is None: self.set_head(node) @@ -116,7 +118,7 @@ def insert_after_node(self, node: Node, node_to_insert: Node) -> None: node.next = node_to_insert - def insert_at_position(self, position: int, value: int) -> None: + def insert_at_position(self, position: int, value: DataType) -> None: current_position = 1 new_node = Node(value) node = self.head @@ -128,7 +130,7 @@ def insert_at_position(self, position: int, value: int) -> None: node = node.next self.set_tail(new_node) - def get_node(self, item: int) -> Node: + def get_node(self, item: DataType) -> Node: 
node = self.head while node: if node.data == item: From 29afed0df65fd84f53ad697ff1dbfc86c6e83631 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 25 Apr 2025 08:30:42 +0200 Subject: [PATCH 244/260] Bump astral-sh/setup-uv from 5 to 6 (#12683) * Bump astral-sh/setup-uv from 5 to 6 Bumps [astral-sh/setup-uv](https://github.com/astral-sh/setup-uv) from 5 to 6. - [Release notes](https://github.com/astral-sh/setup-uv/releases) - [Commits](https://github.com/astral-sh/setup-uv/compare/v5...v6) --- updated-dependencies: - dependency-name: astral-sh/setup-uv dependency-version: '6' dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * uv run pytest --ignore=web_programming/fetch_anime_and_play.py --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .github/workflows/build.yml | 3 ++- .github/workflows/project_euler.yml | 4 ++-- .github/workflows/ruff.yml | 2 +- .github/workflows/sphinx.yml | 2 +- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 62829b2b45a5..8b83cb41c79a 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: astral-sh/setup-uv@v5 + - uses: astral-sh/setup-uv@v6 with: enable-cache: true cache-dependency-glob: uv.lock @@ -30,6 +30,7 @@ jobs: --ignore=project_euler/ --ignore=quantum/q_fourier_transform.py --ignore=scripts/validate_solutions.py + --ignore=web_programming/fetch_anime_and_play.py --cov-report=term-missing:skip-covered --cov=. . 
- if: ${{ success() }} diff --git a/.github/workflows/project_euler.yml b/.github/workflows/project_euler.yml index 8d51ad8850cf..eaf4150e4eaa 100644 --- a/.github/workflows/project_euler.yml +++ b/.github/workflows/project_euler.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: astral-sh/setup-uv@v5 + - uses: astral-sh/setup-uv@v6 - uses: actions/setup-python@v5 with: python-version: 3.x @@ -25,7 +25,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: astral-sh/setup-uv@v5 + - uses: astral-sh/setup-uv@v6 - uses: actions/setup-python@v5 with: python-version: 3.x diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml index cfe127b3521f..ec9f0202bd7e 100644 --- a/.github/workflows/ruff.yml +++ b/.github/workflows/ruff.yml @@ -12,5 +12,5 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: astral-sh/setup-uv@v5 + - uses: astral-sh/setup-uv@v6 - run: uvx ruff check --output-format=github . 
diff --git a/.github/workflows/sphinx.yml b/.github/workflows/sphinx.yml index 16ff284a74f2..2010041d80c5 100644 --- a/.github/workflows/sphinx.yml +++ b/.github/workflows/sphinx.yml @@ -26,7 +26,7 @@ jobs: runs-on: ubuntu-24.04-arm steps: - uses: actions/checkout@v4 - - uses: astral-sh/setup-uv@v5 + - uses: astral-sh/setup-uv@v6 - uses: actions/setup-python@v5 with: python-version: 3.13 From 0a3a96534767c37a0f1561d0f5148b5d1d5e0272 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 28 Apr 2025 19:55:57 +0200 Subject: [PATCH 245/260] [pre-commit.ci] pre-commit autoupdate (#12692) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.11.6 → v0.11.7](https://github.com/astral-sh/ruff-pre-commit/compare/v0.11.6...v0.11.7) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8474deebb7ba..034493b10912 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.11.6 + rev: v0.11.7 hooks: - id: ruff - id: ruff-format From 145879b8b2546c74fc51446ac607823876a0f601 Mon Sep 17 00:00:00 2001 From: Mindaugas <76015221+mindaugl@users.noreply.github.com> Date: Mon, 5 May 2025 15:00:32 +0800 Subject: [PATCH 246/260] Add solution for the Euler project problem 164. (#12663) * Add solution for the Euler project problem 164. 
* Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update sol1.py --------- Co-authored-by: Maxim Smolskiy Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- project_euler/problem_164/__init__.py | 0 project_euler/problem_164/sol1.py | 65 +++++++++++++++++++++++++++ 2 files changed, 65 insertions(+) create mode 100644 project_euler/problem_164/__init__.py create mode 100644 project_euler/problem_164/sol1.py diff --git a/project_euler/problem_164/__init__.py b/project_euler/problem_164/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_164/sol1.py b/project_euler/problem_164/sol1.py new file mode 100644 index 000000000000..5387c89bd757 --- /dev/null +++ b/project_euler/problem_164/sol1.py @@ -0,0 +1,65 @@ +""" +Project Euler Problem 164: https://projecteuler.net/problem=164 + +Three Consecutive Digital Sum Limit + +How many 20 digit numbers n (without any leading zero) exist such that no three +consecutive digits of n have a sum greater than 9? + +Brute-force recursive solution with caching of intermediate results. +""" + + +def solve( + digit: int, prev1: int, prev2: int, sum_max: int, first: bool, cache: dict[str, int] +) -> int: + """ + Solve for remaining 'digit' digits, with previous 'prev1' digit, and + previous-previous 'prev2' digit, total sum of 'sum_max'. + Pass around 'cache' to store/reuse intermediate results. 
+ + >>> solve(digit=1, prev1=0, prev2=0, sum_max=9, first=True, cache={}) + 9 + >>> solve(digit=1, prev1=0, prev2=0, sum_max=9, first=False, cache={}) + 10 + """ + if digit == 0: + return 1 + + cache_str = f"{digit},{prev1},{prev2}" + if cache_str in cache: + return cache[cache_str] + + comb = 0 + for curr in range(sum_max - prev1 - prev2 + 1): + if first and curr == 0: + continue + + comb += solve( + digit=digit - 1, + prev1=curr, + prev2=prev1, + sum_max=sum_max, + first=False, + cache=cache, + ) + + cache[cache_str] = comb + return comb + + +def solution(n_digits: int = 20) -> int: + """ + Solves the problem for n_digits number of digits. + + >>> solution(2) + 45 + >>> solution(10) + 21838806 + """ + cache: dict[str, int] = {} + return solve(digit=n_digits, prev1=0, prev2=0, sum_max=9, first=True, cache=cache) + + +if __name__ == "__main__": + print(f"{solution(10) = }") From 40f4c510b6047d95d07f03c9915a53bbf84789e4 Mon Sep 17 00:00:00 2001 From: Mindaugas <76015221+mindaugl@users.noreply.github.com> Date: Mon, 5 May 2025 15:14:56 +0800 Subject: [PATCH 247/260] Add solution for the Euler problem 190 (#12664) * Add solution for the Euler project problem 164. * Add solution for the Euler project problem 190. 
* Delete project_euler/problem_164/sol1.py * Delete project_euler/problem_164/__init__.py * Update sol1.py * Update sol1.py * Update sol1.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: Maxim Smolskiy Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- project_euler/problem_190/__init__.py | 0 project_euler/problem_190/sol1.py | 48 +++++++++++++++++++++++++++ 2 files changed, 48 insertions(+) create mode 100644 project_euler/problem_190/__init__.py create mode 100644 project_euler/problem_190/sol1.py diff --git a/project_euler/problem_190/__init__.py b/project_euler/problem_190/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_190/sol1.py b/project_euler/problem_190/sol1.py new file mode 100644 index 000000000000..b18d45be16b4 --- /dev/null +++ b/project_euler/problem_190/sol1.py @@ -0,0 +1,48 @@ +""" +Project Euler Problem 190: https://projecteuler.net/problem=190 + +Maximising a Weighted Product + +Let S_m = (x_1, x_2, ..., x_m) be the m-tuple of positive real numbers with +x_1 + x_2 + ... + x_m = m for which P_m = x_1 * x_2^2 * ... * x_m^m is maximised. + +For example, it can be verified that |_ P_10 _| = 4112 +(|_ _| is the integer part function). + +Find Sum_{m=2}^15 = |_ P_m _|. + +Solution: +- Fix x_1 = m - x_2 - ... - x_m. +- Calculate partial derivatives of P_m wrt the x_2, ..., x_m. This gives that + x_2 = 2 * x_1, x_3 = 3 * x_1, ..., x_m = m * x_1. +- Calculate partial second order derivatives of P_m wrt the x_2, ..., x_m. + By plugging in the values from the previous step, can verify that solution is maximum. +""" + + +def solution(n: int = 15) -> int: + """ + Calculate sum of |_ P_m _| for m from 2 to n. 
+ + >>> solution(2) + 1 + >>> solution(3) + 2 + >>> solution(4) + 4 + >>> solution(5) + 10 + """ + total = 0 + for m in range(2, n + 1): + x1 = 2 / (m + 1) + p = 1.0 + for i in range(1, m + 1): + xi = i * x1 + p *= xi**i + total += int(p) + return total + + +if __name__ == "__main__": + print(f"{solution() = }") From 7ed7f042feeb3567ace384a00707d83c327310ab Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 5 May 2025 19:23:31 +0100 Subject: [PATCH 248/260] [pre-commit.ci] pre-commit autoupdate (#12708) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.11.7 → v0.11.8](https://github.com/astral-sh/ruff-pre-commit/compare/v0.11.7...v0.11.8) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] --- .pre-commit-config.yaml | 2 +- DIRECTORY.md | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 034493b10912..9e13416dc78d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.11.7 + rev: v0.11.8 hooks: - id: ruff - id: ruff-format diff --git a/DIRECTORY.md b/DIRECTORY.md index fa731e32ff23..04e09c29de97 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1174,6 +1174,8 @@ * [Sol1](project_euler/problem_144/sol1.py) * Problem 145 * [Sol1](project_euler/problem_145/sol1.py) + * Problem 164 + * [Sol1](project_euler/problem_164/sol1.py) * Problem 173 * [Sol1](project_euler/problem_173/sol1.py) * Problem 174 @@ -1184,6 +1186,8 @@ * [Sol1](project_euler/problem_187/sol1.py) * Problem 188 * [Sol1](project_euler/problem_188/sol1.py) + * Problem 190 + * [Sol1](project_euler/problem_190/sol1.py) 
* Problem 191 * [Sol1](project_euler/problem_191/sol1.py) * Problem 203 From d9d56b10464f6825dc762c1665716e76b70c5fa2 Mon Sep 17 00:00:00 2001 From: Mindaugas <76015221+mindaugl@users.noreply.github.com> Date: Wed, 7 May 2025 03:49:59 +0800 Subject: [PATCH 249/260] Add solution for the Euler project problem 345 (#12666) * Add solution for the Euler project problem 345. * Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update sol1.py * Update sol1.py * Update sol1.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: Maxim Smolskiy Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- project_euler/problem_345/__init__.py | 0 project_euler/problem_345/sol1.py | 117 ++++++++++++++++++++++++++ 2 files changed, 117 insertions(+) create mode 100644 project_euler/problem_345/__init__.py create mode 100644 project_euler/problem_345/sol1.py diff --git a/project_euler/problem_345/__init__.py b/project_euler/problem_345/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_345/sol1.py b/project_euler/problem_345/sol1.py new file mode 100644 index 000000000000..4234458c5ad5 --- /dev/null +++ b/project_euler/problem_345/sol1.py @@ -0,0 +1,117 @@ +""" +Project Euler Problem 345: https://projecteuler.net/problem=345 + +Matrix Sum + +We define the Matrix Sum of a matrix as the maximum possible sum of +matrix elements such that none of the selected elements share the same row or column. 
+ +For example, the Matrix Sum of the matrix below equals +3315 ( = 863 + 383 + 343 + 959 + 767): + 7 53 183 439 863 + 497 383 563 79 973 + 287 63 343 169 583 + 627 343 773 959 943 + 767 473 103 699 303 + +Find the Matrix Sum of: + 7 53 183 439 863 497 383 563 79 973 287 63 343 169 583 + 627 343 773 959 943 767 473 103 699 303 957 703 583 639 913 + 447 283 463 29 23 487 463 993 119 883 327 493 423 159 743 + 217 623 3 399 853 407 103 983 89 463 290 516 212 462 350 + 960 376 682 962 300 780 486 502 912 800 250 346 172 812 350 + 870 456 192 162 593 473 915 45 989 873 823 965 425 329 803 + 973 965 905 919 133 673 665 235 509 613 673 815 165 992 326 + 322 148 972 962 286 255 941 541 265 323 925 281 601 95 973 + 445 721 11 525 473 65 511 164 138 672 18 428 154 448 848 + 414 456 310 312 798 104 566 520 302 248 694 976 430 392 198 + 184 829 373 181 631 101 969 613 840 740 778 458 284 760 390 + 821 461 843 513 17 901 711 993 293 157 274 94 192 156 574 + 34 124 4 878 450 476 712 914 838 669 875 299 823 329 699 + 815 559 813 459 522 788 168 586 966 232 308 833 251 631 107 + 813 883 451 509 615 77 281 613 459 205 380 274 302 35 805 + +Brute force solution, with caching intermediate steps to speed up the calculation. 
+""" + +import numpy as np +from numpy.typing import NDArray + +MATRIX_1 = [ + "7 53 183 439 863", + "497 383 563 79 973", + "287 63 343 169 583", + "627 343 773 959 943", + "767 473 103 699 303", +] + +MATRIX_2 = [ + "7 53 183 439 863 497 383 563 79 973 287 63 343 169 583", + "627 343 773 959 943 767 473 103 699 303 957 703 583 639 913", + "447 283 463 29 23 487 463 993 119 883 327 493 423 159 743", + "217 623 3 399 853 407 103 983 89 463 290 516 212 462 350", + "960 376 682 962 300 780 486 502 912 800 250 346 172 812 350", + "870 456 192 162 593 473 915 45 989 873 823 965 425 329 803", + "973 965 905 919 133 673 665 235 509 613 673 815 165 992 326", + "322 148 972 962 286 255 941 541 265 323 925 281 601 95 973", + "445 721 11 525 473 65 511 164 138 672 18 428 154 448 848", + "414 456 310 312 798 104 566 520 302 248 694 976 430 392 198", + "184 829 373 181 631 101 969 613 840 740 778 458 284 760 390", + "821 461 843 513 17 901 711 993 293 157 274 94 192 156 574", + "34 124 4 878 450 476 712 914 838 669 875 299 823 329 699", + "815 559 813 459 522 788 168 586 966 232 308 833 251 631 107", + "813 883 451 509 615 77 281 613 459 205 380 274 302 35 805", +] + + +def solve(arr: NDArray, row: int, cols: set[int], cache: dict[str, int]) -> int: + """ + Finds the max sum for array `arr` starting with row index `row`, and with columns + included in `cols`. `cache` is used for caching intermediate results. 
+ + >>> solve(arr=np.array([[1, 2], [3, 4]]), row=0, cols={0, 1}, cache={}) + 5 + """ + + cache_id = f"{row}, {sorted(cols)}" + if cache_id in cache: + return cache[cache_id] + + if row == len(arr): + return 0 + + max_sum = 0 + for col in cols: + new_cols = cols - {col} + max_sum = max( + max_sum, + int(arr[row, col]) + + solve(arr=arr, row=row + 1, cols=new_cols, cache=cache), + ) + cache[cache_id] = max_sum + return max_sum + + +def solution(matrix_str: list[str] = MATRIX_2) -> int: + """ + Takes list of strings `matrix_str` to parse the matrix and calculates the max sum. + + >>> solution(["1 2", "3 4"]) + 5 + >>> solution(MATRIX_1) + 3315 + """ + + n = len(matrix_str) + arr = np.empty(shape=(n, n), dtype=int) + for row, matrix_row_str in enumerate(matrix_str): + matrix_row_list_str = matrix_row_str.split() + for col, elem_str in enumerate(matrix_row_list_str): + arr[row, col] = int(elem_str) + + cache: dict[str, int] = {} + return solve(arr=arr, row=0, cols=set(range(n)), cache=cache) + + +if __name__ == "__main__": + print(f"{solution() = }") From b720f24b89c328944f8a0d6c18db0e09d9bcffba Mon Sep 17 00:00:00 2001 From: Mindaugas <76015221+mindaugl@users.noreply.github.com> Date: Sat, 10 May 2025 19:13:07 +0800 Subject: [PATCH 250/260] Add solution for the Euler project problem 95. (#12669) * Add documentation and tests for the Euler project problem 95 solution. 
* Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update sol1.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update sol1.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update sol1.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py * Update sol1.py --------- Co-authored-by: Maxim Smolskiy Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- project_euler/problem_095/__init__.py | 0 project_euler/problem_095/sol1.py | 164 ++++++++++++++++++++++++++ 2 files changed, 164 insertions(+) create mode 100644 project_euler/problem_095/__init__.py create mode 100644 project_euler/problem_095/sol1.py diff 
--git a/project_euler/problem_095/__init__.py b/project_euler/problem_095/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_095/sol1.py b/project_euler/problem_095/sol1.py new file mode 100644 index 000000000000..82d84c5544de --- /dev/null +++ b/project_euler/problem_095/sol1.py @@ -0,0 +1,164 @@ +""" +Project Euler Problem 95: https://projecteuler.net/problem=95 + +Amicable Chains + +The proper divisors of a number are all the divisors excluding the number itself. +For example, the proper divisors of 28 are 1, 2, 4, 7, and 14. +As the sum of these divisors is equal to 28, we call it a perfect number. + +Interestingly the sum of the proper divisors of 220 is 284 and +the sum of the proper divisors of 284 is 220, forming a chain of two numbers. +For this reason, 220 and 284 are called an amicable pair. + +Perhaps less well known are longer chains. +For example, starting with 12496, we form a chain of five numbers: + 12496 -> 14288 -> 15472 -> 14536 -> 14264 (-> 12496 -> ...) + +Since this chain returns to its starting point, it is called an amicable chain. + +Find the smallest member of the longest amicable chain with +no element exceeding one million. + +Solution is doing the following: +- Get relevant prime numbers +- Iterate over product combination of prime numbers to generate all non-prime + numbers up to max number, by keeping track of prime factors +- Calculate the sum of factors for each number +- Iterate over found some factors to find longest chain +""" + +from math import isqrt + + +def generate_primes(max_num: int) -> list[int]: + """ + Calculates the list of primes up to and including `max_num`. 
+ + >>> generate_primes(6) + [2, 3, 5] + """ + are_primes = [True] * (max_num + 1) + are_primes[0] = are_primes[1] = False + for i in range(2, isqrt(max_num) + 1): + if are_primes[i]: + for j in range(i * i, max_num + 1, i): + are_primes[j] = False + + return [prime for prime, is_prime in enumerate(are_primes) if is_prime] + + +def multiply( + chain: list[int], + primes: list[int], + min_prime_idx: int, + prev_num: int, + max_num: int, + prev_sum: int, + primes_degrees: dict[int, int], +) -> None: + """ + Run over all prime combinations to generate non-prime numbers. + + >>> chain = [0] * 3 + >>> primes_degrees = {} + >>> multiply( + ... chain=chain, + ... primes=[2], + ... min_prime_idx=0, + ... prev_num=1, + ... max_num=2, + ... prev_sum=0, + ... primes_degrees=primes_degrees, + ... ) + >>> chain + [0, 0, 1] + >>> primes_degrees + {2: 1} + """ + + min_prime = primes[min_prime_idx] + num = prev_num * min_prime + + min_prime_degree = primes_degrees.get(min_prime, 0) + min_prime_degree += 1 + primes_degrees[min_prime] = min_prime_degree + + new_sum = prev_sum * min_prime + (prev_sum + prev_num) * (min_prime - 1) // ( + min_prime**min_prime_degree - 1 + ) + chain[num] = new_sum + + for prime_idx in range(min_prime_idx, len(primes)): + if primes[prime_idx] * num > max_num: + break + + multiply( + chain=chain, + primes=primes, + min_prime_idx=prime_idx, + prev_num=num, + max_num=max_num, + prev_sum=new_sum, + primes_degrees=primes_degrees.copy(), + ) + + +def find_longest_chain(chain: list[int], max_num: int) -> int: + """ + Finds the smallest element of longest chain + + >>> find_longest_chain(chain=[0, 0, 0, 0, 0, 0, 6], max_num=6) + 6 + """ + + max_len = 0 + min_elem = 0 + for start in range(2, len(chain)): + visited = {start} + elem = chain[start] + length = 1 + + while elem > 1 and elem <= max_num and elem not in visited: + visited.add(elem) + elem = chain[elem] + length += 1 + + if elem == start and length > max_len: + max_len = length + min_elem = start + + 
return min_elem + + +def solution(max_num: int = 1000000) -> int: + """ + Runs the calculation for numbers <= `max_num`. + + >>> solution(10) + 6 + >>> solution(200000) + 12496 + """ + + primes = generate_primes(max_num) + chain = [0] * (max_num + 1) + for prime_idx, prime in enumerate(primes): + if prime**2 > max_num: + break + + multiply( + chain=chain, + primes=primes, + min_prime_idx=prime_idx, + prev_num=1, + max_num=max_num, + prev_sum=0, + primes_degrees={}, + ) + + return find_longest_chain(chain=chain, max_num=max_num) + + +if __name__ == "__main__": + print(f"{solution() = }") From a728cc96ab4f05248ac3389365a26f01dfaf6f8e Mon Sep 17 00:00:00 2001 From: NidhaNureen <165757787+NidhaNureen@users.noreply.github.com> Date: Sat, 10 May 2025 23:32:45 +1200 Subject: [PATCH 251/260] Added/Improved doctests for lowest_common_ancestor.py (#12673) * added doctests to functions in lowest_common_ancestor.py * fixed doctests to be less excessive * Update lowest_common_ancestor.py * Update lowest_common_ancestor.py * Update lowest_common_ancestor.py * Update lowest_common_ancestor.py * Update lowest_common_ancestor.py * Update lowest_common_ancestor.py --------- Co-authored-by: Maxim Smolskiy --- .../binary_tree/lowest_common_ancestor.py | 54 +++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/data_structures/binary_tree/lowest_common_ancestor.py b/data_structures/binary_tree/lowest_common_ancestor.py index 651037703b95..ea0e31256903 100644 --- a/data_structures/binary_tree/lowest_common_ancestor.py +++ b/data_structures/binary_tree/lowest_common_ancestor.py @@ -15,6 +15,8 @@ def swap(a: int, b: int) -> tuple[int, int]: (4, 3) >>> swap(67, 12) (12, 67) + >>> swap(3,-4) + (-4, 3) """ a ^= b b ^= a @@ -25,6 +27,23 @@ def swap(a: int, b: int) -> tuple[int, int]: def create_sparse(max_node: int, parent: list[list[int]]) -> list[list[int]]: """ creating sparse table which saves each nodes 2^i-th parent + >>> max_node = 6 + >>> parent = [[0, 0, 1, 1, 2, 2, 3]] + 
[[0] * 7 for _ in range(19)] + >>> parent = create_sparse(max_node=max_node, parent=parent) + >>> parent[0] + [0, 0, 1, 1, 2, 2, 3] + >>> parent[1] + [0, 0, 0, 0, 1, 1, 1] + >>> parent[2] + [0, 0, 0, 0, 0, 0, 0] + + >>> max_node = 1 + >>> parent = [[0, 0]] + [[0] * 2 for _ in range(19)] + >>> parent = create_sparse(max_node=max_node, parent=parent) + >>> parent[0] + [0, 0] + >>> parent[1] + [0, 0] """ j = 1 while (1 << j) < max_node: @@ -38,6 +57,21 @@ def create_sparse(max_node: int, parent: list[list[int]]) -> list[list[int]]: def lowest_common_ancestor( u: int, v: int, level: list[int], parent: list[list[int]] ) -> int: + """ + Return the lowest common ancestor between u and v + + >>> level = [-1, 0, 1, 1, 2, 2, 2] + >>> parent = [[0, 0, 1, 1, 2, 2, 3],[0, 0, 0, 0, 1, 1, 1]] + \ + [[0] * 7 for _ in range(17)] + >>> lowest_common_ancestor(u=4, v=5, level=level, parent=parent) + 2 + >>> lowest_common_ancestor(u=4, v=6, level=level, parent=parent) + 1 + >>> lowest_common_ancestor(u=2, v=3, level=level, parent=parent) + 1 + >>> lowest_common_ancestor(u=6, v=6, level=level, parent=parent) + 6 + """ # u must be deeper in the tree than v if level[u] < level[v]: u, v = swap(u, v) @@ -68,6 +102,26 @@ def breadth_first_search( sets every nodes direct parent parent of root node is set to 0 calculates depth of each node from root node + >>> level = [-1] * 7 + >>> parent = [[0] * 7 for _ in range(20)] + >>> graph = {1: [2, 3], 2: [4, 5], 3: [6], 4: [], 5: [], 6: []} + >>> level, parent = breadth_first_search( + ... level=level, parent=parent, max_node=6, graph=graph, root=1) + >>> level + [-1, 0, 1, 1, 2, 2, 2] + >>> parent[0] + [0, 0, 1, 1, 2, 2, 3] + + + >>> level = [-1] * 2 + >>> parent = [[0] * 2 for _ in range(20)] + >>> graph = {1: []} + >>> level, parent = breadth_first_search( + ... 
level=level, parent=parent, max_node=1, graph=graph, root=1) + >>> level + [-1, 0] + >>> parent[0] + [0, 0] """ level[root] = 0 q: Queue[int] = Queue(maxsize=max_node) From 59c3c8bbf384ef05d3cb9862a41bba39bb098fe9 Mon Sep 17 00:00:00 2001 From: robohie Date: Sat, 10 May 2025 12:47:22 +0100 Subject: [PATCH 252/260] Add N Input AND Gate (#12717) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update and_gate.py J'ai nourri ce programme en ajoutant une porte And à n entrées. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update and_gate.py Commentaires en anglais * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update and_gate.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- boolean_algebra/and_gate.py | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/boolean_algebra/and_gate.py b/boolean_algebra/and_gate.py index 6ae66b5b0a77..650017b7ae10 100644 --- a/boolean_algebra/and_gate.py +++ b/boolean_algebra/and_gate.py @@ -1,8 +1,8 @@ """ -An AND Gate is a logic gate in boolean algebra which results to 1 (True) if both the -inputs are 1, and 0 (False) otherwise. +An AND Gate is a logic gate in boolean algebra which results to 1 (True) if all the +inputs are 1 (True), and 0 (False) otherwise. 
-Following is the truth table of an AND Gate: +Following is the truth table of a Two Input AND Gate: ------------------------------ | Input 1 | Input 2 | Output | ------------------------------ @@ -12,7 +12,7 @@ | 1 | 1 | 1 | ------------------------------ -Refer - https://www.geeksforgeeks.org/logic-gates-in-python/ +Refer - https://www.geeksforgeeks.org/logic-gates/ """ @@ -32,6 +32,18 @@ def and_gate(input_1: int, input_2: int) -> int: return int(input_1 and input_2) +def n_input_and_gate(inputs: list[int]) -> int: + """ + Calculate AND of a list of input values + + >>> n_input_and_gate([1, 0, 1, 1, 0]) + 0 + >>> n_input_and_gate([1, 1, 1, 1, 1]) + 1 + """ + return int(all(inputs)) + + if __name__ == "__main__": import doctest From 47a44abe23870ca0f7c8062601278645039b1c70 Mon Sep 17 00:00:00 2001 From: Alfredo Hernandez Baeza Date: Sat, 10 May 2025 07:57:43 -0400 Subject: [PATCH 253/260] Improve longest_common_substring.py (#12705) * Update longest_common_substring.py - Combined the ans_index and ans_length into a single tuple to track the best match (position + length) more cleanly. - Early exit for empty strings. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update longest_common_substring.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- dynamic_programming/longest_common_substring.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/dynamic_programming/longest_common_substring.py b/dynamic_programming/longest_common_substring.py index ea5233eb2d17..3ba83f3d9f03 100644 --- a/dynamic_programming/longest_common_substring.py +++ b/dynamic_programming/longest_common_substring.py @@ -43,22 +43,25 @@ def longest_common_substring(text1: str, text2: str) -> str: if not (isinstance(text1, str) and isinstance(text2, str)): raise ValueError("longest_common_substring() takes two strings for inputs") + if not text1 or not text2: + return "" + text1_length = len(text1) text2_length = len(text2) dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)] - ans_index = 0 - ans_length = 0 + end_pos = 0 + max_length = 0 for i in range(1, text1_length + 1): for j in range(1, text2_length + 1): if text1[i - 1] == text2[j - 1]: dp[i][j] = 1 + dp[i - 1][j - 1] - if dp[i][j] > ans_length: - ans_index = i - ans_length = dp[i][j] + if dp[i][j] > max_length: + end_pos = i + max_length = dp[i][j] - return text1[ans_index - ans_length : ans_index] + return text1[end_pos - max_length : end_pos] if __name__ == "__main__": From 131765574f5ccfaff4214a6f848412ce2fe4ab20 Mon Sep 17 00:00:00 2001 From: robohie Date: Sat, 10 May 2025 21:18:02 +0100 Subject: [PATCH 254/260] Fix error messages for horizontal_projectile_motion.py (#12722) * Update horizontal_projectile_motion.py This commit is about logic of this program. Changes made aim to allow a good understanding of what is done. 
* Update horizontal_projectile_motion.py --------- Co-authored-by: Maxim Smolskiy --- physics/horizontal_projectile_motion.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/physics/horizontal_projectile_motion.py b/physics/horizontal_projectile_motion.py index 60f21c2b39c4..32dc5eb36f35 100644 --- a/physics/horizontal_projectile_motion.py +++ b/physics/horizontal_projectile_motion.py @@ -17,7 +17,7 @@ """ # Importing packages -from math import radians as angle_to_radians +from math import radians as deg_to_rad from math import sin # Acceleration Constant on Earth (unit m/s^2) @@ -31,10 +31,10 @@ def check_args(init_velocity: float, angle: float) -> None: # Ensure valid instance if not isinstance(init_velocity, (int, float)): - raise TypeError("Invalid velocity. Should be a positive number.") + raise TypeError("Invalid velocity. Should be an integer or float.") if not isinstance(angle, (int, float)): - raise TypeError("Invalid angle. Range is 1-90 degrees.") + raise TypeError("Invalid angle. Should be an integer or float.") # Ensure valid angle if angle > 90 or angle < 1: @@ -71,7 +71,7 @@ def horizontal_distance(init_velocity: float, angle: float) -> float: ValueError: Invalid angle. Range is 1-90 degrees. """ check_args(init_velocity, angle) - radians = angle_to_radians(2 * angle) + radians = deg_to_rad(2 * angle) return round(init_velocity**2 * sin(radians) / g, 2) @@ -94,14 +94,14 @@ def max_height(init_velocity: float, angle: float) -> float: >>> max_height("a", 20) Traceback (most recent call last): ... - TypeError: Invalid velocity. Should be a positive number. + TypeError: Invalid velocity. Should be an integer or float. >>> horizontal_distance(30, "b") Traceback (most recent call last): ... - TypeError: Invalid angle. Range is 1-90 degrees. + TypeError: Invalid angle. Should be an integer or float. 
""" check_args(init_velocity, angle) - radians = angle_to_radians(angle) + radians = deg_to_rad(angle) return round(init_velocity**2 * sin(radians) ** 2 / (2 * g), 2) @@ -128,10 +128,10 @@ def total_time(init_velocity: float, angle: float) -> float: >>> total_time(30, "b") Traceback (most recent call last): ... - TypeError: Invalid angle. Range is 1-90 degrees. + TypeError: Invalid angle. Should be an integer or float. """ check_args(init_velocity, angle) - radians = angle_to_radians(angle) + radians = deg_to_rad(angle) return round(2 * init_velocity * sin(radians) / g, 2) From 95fb181f5a944427fdbc5766cbf4e1cb699d4a6d Mon Sep 17 00:00:00 2001 From: S Sajeev <167018420+SajeevSenthil@users.noreply.github.com> Date: Sun, 11 May 2025 02:13:39 +0530 Subject: [PATCH 255/260] Add escape velocity calculator using standard physics formula (#12721) * Added iterative solution for power calculation * Added iterative solution for power calculation * Added iterative solution for power calculation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added iterative solution for power calculation fixes #12709 * Added iterative solution for power calculation FIXES NUMBER 12709 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Escape velocity is the minimum speed an object must have to break free from a celestial body's gravitational pull without further propulsion. 
Takes input as the Mass of the Celestial body (M) and Radius fron the center of mass (M) * Fix: added header comment to escape_velocity.py * Trigger re-PR with a minor change * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix: resolve Ruff linter errors and add Wikipedia reference * Delete maths/power_using_iteration.py * Test doctests * Update escape_velocity.py * Update escape_velocity.py * Update escape_velocity.py * Update escape_velocity.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- physics/escape_velocity.py | 67 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 physics/escape_velocity.py diff --git a/physics/escape_velocity.py b/physics/escape_velocity.py new file mode 100644 index 000000000000..e54ed5e50798 --- /dev/null +++ b/physics/escape_velocity.py @@ -0,0 +1,67 @@ +import math + + +def escape_velocity(mass: float, radius: float) -> float: + """ + Calculates the escape velocity needed to break free from a celestial body's + gravitational field. + + The formula used is: + v = sqrt(2 * G * M / R) + + where: + v = escape velocity (m/s) + G = gravitational constant (6.67430 * 10^-11 m^3 kg^-1 s^-2) + M = mass of the celestial body (kg) + R = radius from the center of mass (m) + + Source: + https://en.wikipedia.org/wiki/Escape_velocity + + Args: + mass (float): Mass of the celestial body in kilograms. + radius (float): Radius from the center of mass in meters. + + Returns: + float: Escape velocity in meters per second, rounded to 3 decimal places. 
+ + Examples: + >>> escape_velocity(mass=5.972e24, radius=6.371e6) # Earth + 11185.978 + >>> escape_velocity(mass=7.348e22, radius=1.737e6) # Moon + 2376.307 + >>> escape_velocity(mass=1.898e27, radius=6.9911e7) # Jupiter + 60199.545 + >>> escape_velocity(mass=0, radius=1.0) + 0.0 + >>> escape_velocity(mass=1.0, radius=0) + Traceback (most recent call last): + ... + ZeroDivisionError: Radius cannot be zero. + """ + gravitational_constant = 6.67430e-11 # m^3 kg^-1 s^-2 + + if radius == 0: + raise ZeroDivisionError("Radius cannot be zero.") + + velocity = math.sqrt(2 * gravitational_constant * mass / radius) + return round(velocity, 3) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + print("Calculate escape velocity of a celestial body...\n") + + try: + mass = float(input("Enter mass of the celestial body (in kgs): ").strip()) + radius = float(input("Enter radius from the center of mass (in ms): ").strip()) + + velocity = escape_velocity(mass=mass, radius=radius) + print(f"Escape velocity is {velocity} m/s") + + except ValueError: + print("Invalid input. 
Please enter valid numeric values.") + except ZeroDivisionError as e: + print(e) From 1ea05feffec48250a3d6d0ccbd1378242d680cbc Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Mon, 12 May 2025 11:59:19 +0200 Subject: [PATCH 256/260] Prep for Python 3.14: Rename compression to data_compression (#12725) * Prep for Python 3.14: Rename compression to data_compression * updating DIRECTORY.md --------- Co-authored-by: cclauss --- DIRECTORY.md | 23 +++++++++++------- compression/image_data/__init__.py | 0 {compression => data_compression}/README.md | 0 {compression => data_compression}/__init__.py | 0 .../burrows_wheeler.py | 0 {compression => data_compression}/huffman.py | 0 .../image_data/PSNR-example-base.png | Bin .../image_data/PSNR-example-comp-10.jpg | Bin .../image_data/compressed_image.png | Bin .../image_data/example_image.jpg | Bin .../image_data/example_wikipedia_image.jpg | Bin .../image_data/original_image.png | Bin .../lempel_ziv.py | 0 .../lempel_ziv_decompress.py | 0 {compression => data_compression}/lz77.py | 0 .../peak_signal_to_noise_ratio.py | 0 .../run_length_encoding.py | 0 pyproject.toml | 2 +- 18 files changed, 15 insertions(+), 10 deletions(-) delete mode 100644 compression/image_data/__init__.py rename {compression => data_compression}/README.md (100%) rename {compression => data_compression}/__init__.py (100%) rename {compression => data_compression}/burrows_wheeler.py (100%) rename {compression => data_compression}/huffman.py (100%) rename {compression => data_compression}/image_data/PSNR-example-base.png (100%) rename {compression => data_compression}/image_data/PSNR-example-comp-10.jpg (100%) rename {compression => data_compression}/image_data/compressed_image.png (100%) rename {compression => data_compression}/image_data/example_image.jpg (100%) rename {compression => data_compression}/image_data/example_wikipedia_image.jpg (100%) rename {compression => data_compression}/image_data/original_image.png (100%) rename {compression => 
data_compression}/lempel_ziv.py (100%) rename {compression => data_compression}/lempel_ziv_decompress.py (100%) rename {compression => data_compression}/lz77.py (100%) rename {compression => data_compression}/peak_signal_to_noise_ratio.py (100%) rename {compression => data_compression}/run_length_encoding.py (100%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 04e09c29de97..85dcc243462e 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -128,15 +128,6 @@ * [Vigenere Cipher](ciphers/vigenere_cipher.py) * [Xor Cipher](ciphers/xor_cipher.py) -## Compression - * [Burrows Wheeler](compression/burrows_wheeler.py) - * [Huffman](compression/huffman.py) - * [Lempel Ziv](compression/lempel_ziv.py) - * [Lempel Ziv Decompress](compression/lempel_ziv_decompress.py) - * [Lz77](compression/lz77.py) - * [Peak Signal To Noise Ratio](compression/peak_signal_to_noise_ratio.py) - * [Run Length Encoding](compression/run_length_encoding.py) - ## Computer Vision * [Cnn Classification](computer_vision/cnn_classification.py) * [Flip Augmentation](computer_vision/flip_augmentation.py) @@ -181,6 +172,15 @@ * [Volume Conversions](conversions/volume_conversions.py) * [Weight Conversion](conversions/weight_conversion.py) +## Data Compression + * [Burrows Wheeler](data_compression/burrows_wheeler.py) + * [Huffman](data_compression/huffman.py) + * [Lempel Ziv](data_compression/lempel_ziv.py) + * [Lempel Ziv Decompress](data_compression/lempel_ziv_decompress.py) + * [Lz77](data_compression/lz77.py) + * [Peak Signal To Noise Ratio](data_compression/peak_signal_to_noise_ratio.py) + * [Run Length Encoding](data_compression/run_length_encoding.py) + ## Data Structures * Arrays * [Equilibrium Index In Array](data_structures/arrays/equilibrium_index_in_array.py) @@ -884,6 +884,7 @@ * [Centripetal Force](physics/centripetal_force.py) * [Coulombs Law](physics/coulombs_law.py) * [Doppler Frequency](physics/doppler_frequency.py) + * [Escape Velocity](physics/escape_velocity.py) * [Grahams 
Law](physics/grahams_law.py) * [Horizontal Projectile Motion](physics/horizontal_projectile_motion.py) * [Hubble Parameter](physics/hubble_parameter.py) @@ -1122,6 +1123,8 @@ * [Sol1](project_euler/problem_092/sol1.py) * Problem 094 * [Sol1](project_euler/problem_094/sol1.py) + * Problem 095 + * [Sol1](project_euler/problem_095/sol1.py) * Problem 097 * [Sol1](project_euler/problem_097/sol1.py) * Problem 099 @@ -1202,6 +1205,8 @@ * [Sol1](project_euler/problem_234/sol1.py) * Problem 301 * [Sol1](project_euler/problem_301/sol1.py) + * Problem 345 + * [Sol1](project_euler/problem_345/sol1.py) * Problem 493 * [Sol1](project_euler/problem_493/sol1.py) * Problem 551 diff --git a/compression/image_data/__init__.py b/compression/image_data/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/compression/README.md b/data_compression/README.md similarity index 100% rename from compression/README.md rename to data_compression/README.md diff --git a/compression/__init__.py b/data_compression/__init__.py similarity index 100% rename from compression/__init__.py rename to data_compression/__init__.py diff --git a/compression/burrows_wheeler.py b/data_compression/burrows_wheeler.py similarity index 100% rename from compression/burrows_wheeler.py rename to data_compression/burrows_wheeler.py diff --git a/compression/huffman.py b/data_compression/huffman.py similarity index 100% rename from compression/huffman.py rename to data_compression/huffman.py diff --git a/compression/image_data/PSNR-example-base.png b/data_compression/image_data/PSNR-example-base.png similarity index 100% rename from compression/image_data/PSNR-example-base.png rename to data_compression/image_data/PSNR-example-base.png diff --git a/compression/image_data/PSNR-example-comp-10.jpg b/data_compression/image_data/PSNR-example-comp-10.jpg similarity index 100% rename from compression/image_data/PSNR-example-comp-10.jpg rename to data_compression/image_data/PSNR-example-comp-10.jpg 
diff --git a/compression/image_data/compressed_image.png b/data_compression/image_data/compressed_image.png similarity index 100% rename from compression/image_data/compressed_image.png rename to data_compression/image_data/compressed_image.png diff --git a/compression/image_data/example_image.jpg b/data_compression/image_data/example_image.jpg similarity index 100% rename from compression/image_data/example_image.jpg rename to data_compression/image_data/example_image.jpg diff --git a/compression/image_data/example_wikipedia_image.jpg b/data_compression/image_data/example_wikipedia_image.jpg similarity index 100% rename from compression/image_data/example_wikipedia_image.jpg rename to data_compression/image_data/example_wikipedia_image.jpg diff --git a/compression/image_data/original_image.png b/data_compression/image_data/original_image.png similarity index 100% rename from compression/image_data/original_image.png rename to data_compression/image_data/original_image.png diff --git a/compression/lempel_ziv.py b/data_compression/lempel_ziv.py similarity index 100% rename from compression/lempel_ziv.py rename to data_compression/lempel_ziv.py diff --git a/compression/lempel_ziv_decompress.py b/data_compression/lempel_ziv_decompress.py similarity index 100% rename from compression/lempel_ziv_decompress.py rename to data_compression/lempel_ziv_decompress.py diff --git a/compression/lz77.py b/data_compression/lz77.py similarity index 100% rename from compression/lz77.py rename to data_compression/lz77.py diff --git a/compression/peak_signal_to_noise_ratio.py b/data_compression/peak_signal_to_noise_ratio.py similarity index 100% rename from compression/peak_signal_to_noise_ratio.py rename to data_compression/peak_signal_to_noise_ratio.py diff --git a/compression/run_length_encoding.py b/data_compression/run_length_encoding.py similarity index 100% rename from compression/run_length_encoding.py rename to data_compression/run_length_encoding.py diff --git 
a/pyproject.toml b/pyproject.toml index 60f8d4ffc96f..c320a2f2bbfe 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -187,9 +187,9 @@ autoapi_dirs = [ "boolean_algebra", "cellular_automata", "ciphers", - "compression", "computer_vision", "conversions", + "data_compression", "data_structures", "digital_image_processing", "divide_and_conquer", From 088c74e84026fcb77b3e3e0c15c83111a6979ae7 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Mon, 12 May 2025 12:45:53 +0200 Subject: [PATCH 257/260] Delete empty source directory (#12730) --- source/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 source/__init__.py diff --git a/source/__init__.py b/source/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 From 485f688d0683ca026959e1b68f12e5d221724860 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Mon, 12 May 2025 12:52:27 +0200 Subject: [PATCH 258/260] Add PEP723 header to scripts/validate_solutions.py (#12731) Enable `uv run scripts/validate_solutions.py` or `pipx run scripts/validate_solutions.py` * https://peps.python.org/pep-0723 * https://docs.astral.sh/uv/guides/scripts/#declaring-script-dependencies --- scripts/validate_solutions.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/scripts/validate_solutions.py b/scripts/validate_solutions.py index df5d01086bbe..c3f872203591 100755 --- a/scripts/validate_solutions.py +++ b/scripts/validate_solutions.py @@ -1,4 +1,13 @@ #!/usr/bin/env python3 + +# /// script +# requires-python = ">=3.13" +# dependencies = [ +# "pytest", +# "requests", +# ] +# /// + import hashlib import importlib.util import json From f721e598e5677ed368b7dbd119092eb409ab2fb0 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Mon, 12 May 2025 13:33:33 +0200 Subject: [PATCH 259/260] Add a proper shebang line to scripts/validate_filenames.py (#12733) --- scripts/validate_filenames.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/scripts/validate_filenames.py b/scripts/validate_filenames.py index 80399673cced..a7328e099dde 100755 --- a/scripts/validate_filenames.py +++ b/scripts/validate_filenames.py @@ -1,4 +1,5 @@ -#!python +#!/usr/bin/env python3 + import os try: From ee3a1732e007d32b5835635ac7c7fbb2b5464f53 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 12 May 2025 19:59:33 +0200 Subject: [PATCH 260/260] [pre-commit.ci] pre-commit autoupdate (#12736) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.11.8 → v0.11.9](https://github.com/astral-sh/ruff-pre-commit/compare/v0.11.8...v0.11.9) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9e13416dc78d..288e3f591403 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.11.8 + rev: v0.11.9 hooks: - id: ruff - id: ruff-format