diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 4256387e1209..be8582a6b221 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -2,3 +2,7 @@ 97c5ee99bc98dc475512e549b252b23a6e7e0997 # Use builtin generics and PEP 604 for type annotations wherever possible (#13427) 23ee1e7aff357e656e3102435ad0fe3b5074571e +# Use variable annotations (#10723) +f98f78216ba9d6ab68c8e69c19e9f3c7926c5efe +# run pyupgrade (#12711) +fc335cb16315964b923eb1927e3aad1516891c28 diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 64a2e6494c1b..a88773308d5e 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -2,6 +2,6 @@ contact_links: - about: "Please check the linked documentation page before filing new issues." name: "Common issues and solutions" url: "https://mypy.readthedocs.io/en/stable/common_issues.html" - - about: "Please ask and answer any questions on the mypy Gitter." + - about: "Please ask and answer any questions on the python/typing Gitter." name: "Questions or Chat" url: "https://gitter.im/python/typing" diff --git a/.github/workflows/build_wheels.yml b/.github/workflows/build_wheels.yml index da140375d603..e728d741d90d 100644 --- a/.github/workflows/build_wheels.yml +++ b/.github/workflows/build_wheels.yml @@ -13,7 +13,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-python@v4 with: - python-version: '3.7' + python-version: '3.11' - name: Trigger script env: WHEELS_PUSH_TOKEN: ${{ secrets.WHEELS_PUSH_TOKEN }} diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 8851f7fbb0f3..8beb293c2d76 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -20,13 +20,14 @@ jobs: env: TOXENV: docs TOX_SKIP_MISSING_INTERPRETERS: False + VERIFY_MYPY_ERROR_CODES: 1 steps: - uses: actions/checkout@v3 - uses: actions/setup-python@v4 with: - python-version: '3.7' + python-version: '3.8' - name: Install tox - run: pip install --upgrade 'setuptools!=50' tox==4.4.4 + run: pip install --upgrade 'setuptools!=50' tox==4.11.0 - name: Setup tox environment run: tox run -e ${{ env.TOXENV }} --notest - name: Test diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 81587f3ca747..76d9cc6ab570 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -26,77 +26,77 @@ jobs: fail-fast: false matrix: include: - - name: Test suite with py37-windows-64 - python: '3.7' - arch: x64 - os: windows-latest - toxenv: py37 - - name: Test suite with py38-ubuntu + # Make sure to run mypyc compiled unit tests for both + # the oldest and newest supported Python versions + - name: Test suite with py38-ubuntu, mypyc-compiled python: '3.8' arch: x64 os: ubuntu-latest toxenv: py tox_extra_args: "-n 2" - - name: Test suite with py39-ubuntu - python: '3.9' + test_mypyc: true + - name: Test suite with py38-windows-64 + python: '3.8' arch: x64 - os: ubuntu-latest - toxenv: py + os: windows-latest + toxenv: py38 tox_extra_args: "-n 2" - - name: Test suite with py37-ubuntu, mypyc-compiled - python: '3.7' + - name: Test suite with py39-ubuntu + python: '3.9' arch: x64 os: ubuntu-latest toxenv: py tox_extra_args: "-n 2" - test_mypyc: true - - name: Test suite with py310-ubuntu, mypyc-compiled + - name: Test suite with py310-ubuntu python: '3.10' arch: x64 os: ubuntu-latest toxenv: py tox_extra_args: "-n 2" - test_mypyc: true - - name: Test suite with py310-ubuntu - python: '3.10' + - name: Test suite with py311-ubuntu, mypyc-compiled + python: 
'3.11' arch: x64 os: ubuntu-latest toxenv: py tox_extra_args: "-n 2" - - name: Test suite with py311-ubuntu, mypyc-compiled - python: '3.11' + test_mypyc: true + - name: Test suite with py312-ubuntu, mypyc-compiled + python: '3.12-dev' arch: x64 os: ubuntu-latest toxenv: py tox_extra_args: "-n 2" test_mypyc: true - - name: mypyc runtime tests with py37-macos - python: '3.7' + + - name: mypyc runtime tests with py38-macos + python: '3.8.17' arch: x64 os: macos-latest toxenv: py tox_extra_args: "-n 2 mypyc/test/test_run.py mypyc/test/test_external.py" - - name: mypyc runtime tests with py37-debug-build-ubuntu - python: '3.7.13' + - name: mypyc runtime tests with py38-debug-build-ubuntu + python: '3.8.17' arch: x64 os: ubuntu-latest toxenv: py tox_extra_args: "-n 2 mypyc/test/test_run.py mypyc/test/test_external.py" debug_build: true - - name: Type check our own code (py37-ubuntu) - python: '3.7' + + - name: Type check our own code (py38-ubuntu) + python: '3.8' arch: x64 os: ubuntu-latest toxenv: type - - name: Type check our own code (py37-windows-64) - python: '3.7' + - name: Type check our own code (py38-windows-64) + python: '3.8' arch: x64 os: windows-latest toxenv: type + # We also run these checks with pre-commit in CI, # but it's useful to run them with tox too, # to ensure the tox env works as expected - - name: Formatting with Black + isort and code style with flake8 + - name: Formatting and code style with Black + ruff python: '3.10' arch: x64 os: ubuntu-latest @@ -130,7 +130,7 @@ jobs: ./misc/build-debug-python.sh $PYTHONVERSION $PYTHONDIR $VENV source $VENV/bin/activate - name: Install tox - run: pip install --upgrade 'setuptools!=50' tox==4.4.4 + run: pip install --upgrade 'setuptools!=50' tox==4.11.0 - name: Compiled with mypyc if: ${{ matrix.test_mypyc }} run: | @@ -141,24 +141,6 @@ jobs: - name: Test run: tox run -e ${{ matrix.toxenv }} --skip-pkg-install -- ${{ matrix.tox_extra_args }} - python-nightly: - runs-on: ubuntu-latest - name: Test suite with Python nightly - steps: - - uses: actions/checkout@v3 - - uses: actions/setup-python@v4 - with: - python-version: '3.12-dev' - - name: Install tox - run: pip install --upgrade 'setuptools!=50' tox==4.4.4 - - name: Setup tox environment - run: tox run -e py --notest - - name: Test - run: tox run -e py --skip-pkg-install -- "-n 2" - continue-on-error: true - - name: Mark as a success - run: exit 0 - python_32bits: runs-on: ubuntu-latest name: Test mypyc suite with 32-bit Python @@ -200,7 +182,7 @@ jobs: default: 3.11.1 command: python -c "import platform; print(f'{platform.architecture()=} {platform.machine()=}');" - name: Install tox - run: pip install --upgrade 'setuptools!=50' tox==4.4.4 + run: pip install --upgrade 'setuptools!=50' tox==4.11.0 - name: Setup tox environment run: tox run -e py --notest - name: Test diff --git a/.gitignore b/.gitignore index c6761f0ed736..6c35e3d89342 100644 --- a/.gitignore +++ b/.gitignore @@ -57,6 +57,5 @@ test_capi *.o *.a test_capi -/.mypyc-flake8-cache.json /mypyc/lib-rt/build/ /mypyc/lib-rt/*.so diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 92d827fba006..8650a2868cd6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -5,22 +5,14 @@ repos: hooks: - id: trailing-whitespace - id: end-of-file-fixer - - repo: https://github.com/psf/black - rev: 23.3.0 # must match test-requirements.txt + - repo: https://github.com/psf/black-pre-commit-mirror + rev: 23.7.0 # must match test-requirements.txt hooks: - id: black - - repo: https://github.com/pycqa/isort - 
rev: 5.12.0 # must match test-requirements.txt + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.0.281 # must match test-requirements.txt hooks: - - id: isort - - repo: https://github.com/pycqa/flake8 - rev: 6.0.0 # must match test-requirements.txt - hooks: - - id: flake8 - additional_dependencies: - - flake8-bugbear==23.3.23 # must match test-requirements.txt - - flake8-noqa==1.3.1 # must match test-requirements.txt - + - id: ruff + args: [--exit-non-zero-on-fix] ci: - # We run flake8 as part of our GitHub Actions suite in CI - skip: [flake8] + autoupdate_schedule: quarterly diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 000000000000..8ec33ee641ed --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,18 @@ +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +version: 2 + +build: + os: ubuntu-22.04 + tools: + python: "3.11" + +sphinx: + configuration: docs/source/conf.py + +formats: [pdf, htmlzip, epub] + +python: + install: + - requirements: docs/requirements-docs.txt diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d169d7f3159e..82e55f437e87 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -88,11 +88,8 @@ pytest -n0 -k 'test_name' # Run all test cases in the "test-data/unit/check-dataclasses.test" file pytest mypy/test/testcheck.py::TypeCheckSuite::check-dataclasses.test -# Run the linter -flake8 - -# Run formatters -black . && isort . +# Run the formatters and linters +python runtests.py lint ``` For an in-depth guide on running and writing tests, @@ -154,10 +151,6 @@ advice about good pull requests for open-source projects applies; we have [our own writeup](https://github.com/python/mypy/wiki/Good-Pull-Request) of this advice. -We are using `black` and `isort` to enforce a consistent coding style. -Run `black . && isort .` before your commits, otherwise you would receive -a CI failure. - Also, do not squash your commits after you have submitted a pull request, as this erases context during review. We will squash commits when the pull request is merged. diff --git a/LICENSE b/LICENSE index 991496cb4878..55d01ee19ad8 100644 --- a/LICENSE +++ b/LICENSE @@ -4,8 +4,8 @@ Mypy (and mypyc) are licensed under the terms of the MIT license, reproduced bel The MIT License -Copyright (c) 2012-2022 Jukka Lehtosalo and contributors -Copyright (c) 2015-2022 Dropbox, Inc. +Copyright (c) 2012-2023 Jukka Lehtosalo and contributors +Copyright (c) 2015-2023 Dropbox, Inc. 
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), diff --git a/MANIFEST.in b/MANIFEST.in index 1c26ae16fc78..b77b762b4852 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -39,9 +39,10 @@ graft test-data include conftest.py include runtests.py include pytest.ini +include tox.ini include LICENSE mypyc/README.md -exclude .gitmodules CONTRIBUTING.md CREDITS ROADMAP.md tox.ini action.yml .editorconfig +exclude .gitmodules CONTRIBUTING.md CREDITS ROADMAP.md action.yml .editorconfig exclude .git-blame-ignore-revs .pre-commit-config.yaml global-exclude *.py[cod] diff --git a/README.md b/README.md index 164957b1491a..8b1ebbc0f2cb 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ Mypy: Static Typing for Python [![Chat at https://gitter.im/python/typing](https://badges.gitter.im/python/typing.svg)](https://gitter.im/python/typing?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Checked with mypy](https://www.mypy-lang.org/static/mypy_badge.svg)](https://mypy-lang.org/) [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) -[![Imports: isort](https://img.shields.io/badge/%20imports-isort-%231674b1?style=flat&labelColor=ef8336)](https://pycqa.github.io/isort/) +[![Linting: Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/charliermarsh/ruff/main/assets/badge/v2.json)](https://github.com/astral-sh/ruff) Got a question? --------------- diff --git a/build-requirements.txt b/build-requirements.txt index 0b1e6d43103a..aac1b95eddf7 100644 --- a/build-requirements.txt +++ b/build-requirements.txt @@ -2,4 +2,3 @@ -r mypy-requirements.txt types-psutil types-setuptools -types-typed-ast>=1.5.8.5,<1.6.0 diff --git a/docs/source/additional_features.rst b/docs/source/additional_features.rst index 10122e9b2fa9..5dd136476eaa 100644 --- a/docs/source/additional_features.rst +++ b/docs/source/additional_features.rst @@ -9,10 +9,9 @@ of the previous sections. Dataclasses *********** -In Python 3.7, a new :py:mod:`dataclasses` module has been added to the standard library. -This module allows defining and customizing simple boilerplate-free classes. -They can be defined using the :py:func:`@dataclasses.dataclass -` decorator: +The :py:mod:`dataclasses` module allows defining and customizing simple +boilerplate-free classes. They can be defined using the +:py:func:`@dataclasses.dataclass ` decorator: .. code-block:: python diff --git a/docs/source/cheat_sheet_py3.rst b/docs/source/cheat_sheet_py3.rst index 31242d0ad0bc..fe5761ca6187 100644 --- a/docs/source/cheat_sheet_py3.rst +++ b/docs/source/cheat_sheet_py3.rst @@ -104,7 +104,7 @@ Functions print(value + "!" 
* excitement) # Note that arguments without a type are dynamically typed (treated as Any) - # and that functions without any annotations not checked + # and that functions without any annotations are not checked def untyped(x): x.anything() + 1 + "string" # no errors @@ -178,8 +178,6 @@ Classes class AuditedBankAccount(BankAccount): # You can optionally declare instance variables in the class body audit_log: list[str] - # This is an instance variable with a default value - auditor_name: str = "The Spanish Inquisition" def __init__(self, account_name: str, initial_balance: int = 0) -> None: super().__init__(account_name, initial_balance) diff --git a/docs/source/class_basics.rst b/docs/source/class_basics.rst index 82bbf00b830d..73f95f1c5658 100644 --- a/docs/source/class_basics.rst +++ b/docs/source/class_basics.rst @@ -210,7 +210,9 @@ override has a compatible signature: In order to ensure that your code remains correct when renaming methods, it can be helpful to explicitly mark a method as overriding a base -method. This can be done with the ``@override`` decorator. If the base +method. This can be done with the ``@override`` decorator. ``@override`` +can be imported from ``typing`` starting with Python 3.12 or from +``typing_extensions`` for use with older Python versions. If the base method is then renamed while the overriding method is not, mypy will show an error: @@ -233,6 +235,11 @@ show an error: def g(self, y: str) -> None: # Error: no corresponding base method found ... +.. note:: + + Use :ref:`--enable-error-code explicit-override ` to require + that method overrides use the ``@override`` decorator. Emit an error if it is missing. + You can also override a statically typed method with a dynamically typed one. This allows dynamically typed code to override methods defined in library classes without worrying about their type diff --git a/docs/source/command_line.rst b/docs/source/command_line.rst index 2809294092ab..727d500e2d4d 100644 --- a/docs/source/command_line.rst +++ b/docs/source/command_line.rst @@ -350,6 +350,34 @@ definitions or calls. This flag reports an error whenever a function with type annotations calls a function defined without annotations. +.. option:: --untyped-calls-exclude + + This flag allows to selectively disable :option:`--disallow-untyped-calls` + for functions and methods defined in specific packages, modules, or classes. + Note that each exclude entry acts as a prefix. For example (assuming there + are no type annotations for ``third_party_lib`` available): + + .. code-block:: python + + # mypy --disallow-untyped-calls + # --untyped-calls-exclude=third_party_lib.module_a + # --untyped-calls-exclude=foo.A + from third_party_lib.module_a import some_func + from third_party_lib.module_b import other_func + import foo + + some_func() # OK, function comes from module `third_party_lib.module_a` + other_func() # E: Call to untyped function "other_func" in typed context + + foo.A().meth() # OK, method was defined in class `foo.A` + foo.B().meth() # E: Call to untyped function "meth" in typed context + + # file foo.py + class A: + def meth(self): pass + class B: + def meth(self): pass + .. option:: --disallow-untyped-defs This flag reports an error whenever it encounters a function definition @@ -612,6 +640,34 @@ of the above sections. assert text is not None # OK, check against None is allowed as a special case. +.. option:: --extra-checks + + This flag enables additional checks that are technically correct but may be + impractical in real code. 
In particular, it prohibits partial overlap in + ``TypedDict`` updates, and makes arguments prepended via ``Concatenate`` + positional-only. For example: + + .. code-block:: python + + from typing import TypedDict + + class Foo(TypedDict): + a: int + + class Bar(TypedDict): + a: int + b: int + + def test(foo: Foo, bar: Bar) -> None: + # This is technically unsafe since foo can have a subtype of Foo at + # runtime, where type of key "b" is incompatible with int, see below + bar.update(foo) + + class Bad(Foo): + b: str + bad: Bad = {"a": 0, "b": "no"} + test(bad, bar) + .. option:: --strict This flag mode enables all optional error checking flags. You can see the diff --git a/docs/source/conf.py b/docs/source/conf.py index 80097ef5b3a8..683b2a6785b3 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -35,7 +35,7 @@ # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. -extensions = ["sphinx.ext.intersphinx"] +extensions = ["sphinx.ext.intersphinx", "docs.source.html_builder"] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] diff --git a/docs/source/config_file.rst b/docs/source/config_file.rst index 9e79ff99937b..c0798bbf03f1 100644 --- a/docs/source/config_file.rst +++ b/docs/source/config_file.rst @@ -490,7 +490,38 @@ section of the command line docs. :default: False Disallows calling functions without type annotations from functions with type - annotations. + annotations. Note that when used in per-module options, it enables/disables + this check **inside** the module(s) specified, not for functions that come + from that module(s), for example config like this: + + .. code-block:: ini + + [mypy] + disallow_untyped_calls = True + + [mypy-some.library.*] + disallow_untyped_calls = False + + will disable this check inside ``some.library``, not for your code that + imports ``some.library``. If you want to selectively disable this check for + all your code that imports ``some.library`` you should instead use + :confval:`untyped_calls_exclude`, for example: + + .. code-block:: ini + + [mypy] + disallow_untyped_calls = True + untyped_calls_exclude = some.library + +.. confval:: untyped_calls_exclude + + :type: comma-separated list of strings + + Selectively excludes functions and methods defined in specific packages, + modules, and classes from action of :confval:`disallow_untyped_calls`. + This also applies to all submodules of packages (i.e. everything inside + a given prefix). Note, this option does not support per-file configuration, + the exclusions list is defined globally for all your code. .. confval:: disallow_untyped_defs diff --git a/docs/source/error_code_list.rst b/docs/source/error_code_list.rst index 54dc31f2cfcb..1f75ac54d525 100644 --- a/docs/source/error_code_list.rst +++ b/docs/source/error_code_list.rst @@ -8,6 +8,8 @@ with default options. See :ref:`error-codes` for general documentation about error codes. :ref:`error-codes-optional` documents additional error codes that you can enable. +.. _code-attr-defined: + Check that attribute exists [attr-defined] ------------------------------------------ @@ -43,6 +45,8 @@ A reference to a missing attribute is given the ``Any`` type. In the above example, the type of ``non_existent`` will be ``Any``, which can be important if you silence the error. +.. 
_code-union-attr: + Check that attribute exists in each union item [union-attr] ----------------------------------------------------------- @@ -75,6 +79,8 @@ You can often work around these errors by using ``assert isinstance(obj, ClassNa or ``assert obj is not None`` to tell mypy that you know that the type is more specific than what mypy thinks. +.. _code-name-defined: + Check that name is defined [name-defined] ----------------------------------------- @@ -89,6 +95,7 @@ This example accidentally calls ``sort()`` instead of :py:func:`sorted`: x = sort([3, 2, 4]) # Error: Name "sort" is not defined [name-defined] +.. _code-used-before-def: Check that a variable is not used before it's defined [used-before-def] ----------------------------------------------------------------------- @@ -105,6 +112,7 @@ Example: print(x) # Error: Name "x" is used before definition [used-before-def] x = 123 +.. _code-call-arg: Check arguments in calls [call-arg] ----------------------------------- @@ -124,6 +132,8 @@ Example: greet('jack') # OK greet('jill', 'jack') # Error: Too many arguments for "greet" [call-arg] +.. _code-arg-type: + Check argument types [arg-type] ------------------------------- @@ -144,6 +154,8 @@ Example: # expected "list[int]" [arg-type] print(first(t)) +.. _code-call-overload: + Check calls to overloaded functions [call-overload] --------------------------------------------------- @@ -175,6 +187,8 @@ Example: # Error: No overload variant of "inc_maybe" matches argument type "float" [call-overload] inc_maybe(1.2) +.. _code-valid-type: + Check validity of types [valid-type] ------------------------------------ @@ -207,6 +221,8 @@ You can use :py:data:`~typing.Callable` as the type for callable objects: for x in objs: f(x) +.. _code-var-annotated: + Require annotation if variable type is unclear [var-annotated] -------------------------------------------------------------- @@ -239,6 +255,8 @@ To address this, we add an explicit annotation: reveal_type(Bundle().items) # list[str] +.. _code-override: + Check validity of overrides [override] -------------------------------------- @@ -275,6 +293,8 @@ Example: arg: bool) -> int: ... +.. _code-return: + Check that function returns a value [return] -------------------------------------------- @@ -303,6 +323,40 @@ Example: else: raise ValueError('not defined for zero') +.. _code-empty-body: + +Check that functions don't have empty bodies outside stubs [empty-body] +----------------------------------------------------------------------- + +This error code is similar to the ``[return]`` code but is emitted specifically +for functions and methods with empty bodies (if they are annotated with +non-trivial return type). Such a distinction exists because in some contexts +an empty body can be valid, for example for an abstract method or in a stub +file. Also old versions of mypy used to unconditionally allow functions with +empty bodies, so having a dedicated error code simplifies cross-version +compatibility. + +Note that empty bodies are allowed for methods in *protocols*, and such methods +are considered implicitly abstract: + +.. code-block:: python + + from abc import abstractmethod + from typing import Protocol + + class RegularABC: + @abstractmethod + def foo(self) -> int: + pass # OK + def bar(self) -> int: + pass # Error: Missing return statement [empty-body] + + class Proto(Protocol): + def bar(self) -> int: + pass # OK + +.. 
_code-return-value: + Check that return value is compatible [return-value] ---------------------------------------------------- @@ -317,6 +371,8 @@ Example: # Error: Incompatible return value type (got "int", expected "str") [return-value] return x + 1 +.. _code-assignment: + Check types in assignment statement [assignment] ------------------------------------------------ @@ -339,6 +395,8 @@ Example: # variable has type "str") [assignment] r.name = 5 +.. _code-method-assign: + Check that assignment target is not a method [method-assign] ------------------------------------------------------------ @@ -368,6 +426,8 @@ so only the second assignment will still generate an error. This error code is a subcode of the more general ``[assignment]`` code. +.. _code-type-var: + Check type variable values [type-var] ------------------------------------- @@ -390,6 +450,8 @@ Example: # Error: Value of type variable "T1" of "add" cannot be "str" [type-var] add('x', 'y') +.. _code-operator: + Check uses of various operators [operator] ------------------------------------------ @@ -404,6 +466,8 @@ Example: # Error: Unsupported operand types for + ("int" and "str") [operator] 1 + 'x' +.. _code-index: + Check indexing operations [index] --------------------------------- @@ -425,6 +489,8 @@ Example: # Error: Invalid index type "bytes" for "dict[str, int]"; expected type "str" [index] a[b'x'] = 4 +.. _code-list-item: + Check list items [list-item] ---------------------------- @@ -439,6 +505,8 @@ Example: # Error: List item 0 has incompatible type "int"; expected "str" [list-item] a: list[str] = [0] +.. _code-dict-item: + Check dict items [dict-item] ---------------------------- @@ -453,6 +521,8 @@ Example: # Error: Dict entry 0 has incompatible type "str": "str"; expected "str": "int" [dict-item] d: dict[str, int] = {'key': 'value'} +.. _code-typeddict-item: + Check TypedDict items [typeddict-item] -------------------------------------- @@ -477,6 +547,8 @@ Example: # TypedDict item "x" has type "int") [typeddict-item] p: Point = {'x': 1.2, 'y': 4} +.. _code-typeddict-unknown-key: + Check TypedDict Keys [typeddict-unknown-key] -------------------------------------------- @@ -533,6 +605,8 @@ runtime: This error code is a subcode of the wider ``[typeddict-item]`` code. +.. _code-has-type: + Check that type of target is known [has-type] --------------------------------------------- @@ -572,8 +646,20 @@ the issue: def set_y(self) -> None: self.y: int = self.x # Added annotation here -Check that import target can be found [import] ----------------------------------------------- +.. _code-import: + +Check for an issue with imports [import] +---------------------------------------- + +Mypy generates an error if it can't resolve an `import` statement. +This is a parent error code of `import-not-found` and `import-untyped` + +See :ref:`ignore-missing-imports` for how to work around these errors. + +.. _code-import-not-found: + +Check that import target can be found [import-not-found] +-------------------------------------------------------- Mypy generates an error if it can't find the source code or a stub file for an imported module. @@ -582,11 +668,33 @@ Example: .. code-block:: python - # Error: Cannot find implementation or library stub for module named 'acme' [import] - import acme + # Error: Cannot find implementation or library stub for module named "m0dule_with_typo" [import-not-found] + import m0dule_with_typo See :ref:`ignore-missing-imports` for how to work around these errors. +.. 
_code-import-untyped: + +Check that import target can be found [import-untyped] +-------------------------------------------------------- + +Mypy generates an error if it can find the source code for an imported module, +but that module does not provide type annotations (via :ref:`PEP 561 `). + +Example: + +.. code-block:: python + + # Error: Library stubs not installed for "bs4" [import-untyped] + import bs4 + # Error: Skipping analyzing "no_py_typed": module is installed, but missing library stubs or py.typed marker [import-untyped] + import no_py_typed + +In some cases, these errors can be fixed by installing an appropriate +stub package. See :ref:`ignore-missing-imports` for more details. + +.. _code-no-redef: + Check that each name is defined once [no-redef] ----------------------------------------------- @@ -613,6 +721,8 @@ Example: # (the first definition wins!) A('x') +.. _code-func-returns-value: + Check that called function returns a value [func-returns-value] --------------------------------------------------------------- @@ -635,6 +745,8 @@ returns ``None``: if f(): print("not false") +.. _code-abstract: + Check instantiation of abstract classes [abstract] -------------------------------------------------- @@ -666,6 +778,8 @@ Example: # Error: Cannot instantiate abstract class "Thing" with abstract attribute "save" [abstract] t = Thing() +.. _code-type-abstract: + Safe handling of abstract type object types [type-abstract] ----------------------------------------------------------- @@ -692,6 +806,8 @@ Example: # Error: Only concrete class can be given where "Type[Config]" is expected [type-abstract] make_many(Config, 5) +.. _code-safe-super: + Check that call to an abstract method via super is valid [safe-super] --------------------------------------------------------------------- @@ -714,10 +830,12 @@ will cause runtime errors, so mypy prevents you from doing so: Mypy considers the following as trivial bodies: a ``pass`` statement, a literal ellipsis ``...``, a docstring, and a ``raise NotImplementedError`` statement. +.. _code-valid-newtype: + Check the target of NewType [valid-newtype] ------------------------------------------- -The target of a :py:func:`NewType ` definition must be a class type. It can't +The target of a :py:class:`~typing.NewType` definition must be a class type. It can't be a union type, ``Any``, or various other special types. You can also get this error if the target has been imported from a @@ -738,6 +856,8 @@ To work around the issue, you can either give mypy access to the sources for ``acme`` or create a stub file for the module. See :ref:`ignore-missing-imports` for more information. +.. _code-exit-return: + Check the return type of __exit__ [exit-return] ----------------------------------------------- @@ -794,6 +914,8 @@ You can also use ``None``: def __exit__(self, exc, value, tb) -> None: # Also OK print('exit') +.. _code-name-match: + Check that naming is consistent [name-match] -------------------------------------------- @@ -807,6 +929,8 @@ consistently when using the call-based syntax. Example: # Error: First argument to namedtuple() should be "Point2D", not "Point" Point2D = NamedTuple("Point", [("x", int), ("y", int)]) +.. _code-literal-required: + Check that literal is used where expected [literal-required] ------------------------------------------------------------ @@ -836,6 +960,8 @@ or ``Literal`` variables. Example: # expected one of ("x", "y") [literal-required] p[key] +.. 
_code-no-overload-impl: + Check that overloaded functions have an implementation [no-overload-impl] ------------------------------------------------------------------------- @@ -858,6 +984,8 @@ implementation. def func(value): pass # actual implementation +.. _code-unused-coroutine: + Check that coroutine return value is used [unused-coroutine] ------------------------------------------------------------ @@ -881,6 +1009,41 @@ otherwise unused variable: _ = f() # No error +.. _code-top-level-await: + +Warn about top level await expressions [top-level-await] +-------------------------------------------------------- + +This error code is separate from the general ``[syntax]`` errors, because in +some environments (e.g. IPython) a top level ``await`` is allowed. In such +environments a user may want to use ``--disable-error-code=top-level-await``, +that allows to still have errors for other improper uses of ``await``, for +example: + +.. code-block:: python + + async def f() -> None: + ... + + top = await f() # Error: "await" outside function [top-level-await] + +.. _code-await-not-async: + +Warn about await expressions used outside of coroutines [await-not-async] +------------------------------------------------------------------------- + +``await`` must be used inside a coroutine. + +.. code-block:: python + + async def f() -> None: + ... + + def g() -> None: + await f() # Error: "await" outside coroutine ("async def") [await-not-async] + +.. _code-assert-type: + Check types in assert_type [assert-type] ---------------------------------------- @@ -895,6 +1058,8 @@ the provided type. assert_type([1], list[str]) # Error +.. _code-truthy-function: + Check that function isn't used in boolean context [truthy-function] ------------------------------------------------------------------- @@ -908,6 +1073,29 @@ Functions will always evaluate to true in boolean contexts. if f: # Error: Function "Callable[[], Any]" could always be true in boolean context [truthy-function] pass +.. _code-str-format: + +Check that string formatting/interpolation is type-safe [str-format] +-------------------------------------------------------------------- + +Mypy will check that f-strings, ``str.format()`` calls, and ``%`` interpolations +are valid (when corresponding template is a literal string). This includes +checking number and types of replacements, for example: + +.. code-block:: python + + # Error: Cannot find replacement for positional format specifier 1 [str-format] + "{} and {}".format("spam") + "{} and {}".format("spam", "eggs") # OK + # Error: Not all arguments converted during string formatting [str-format] + "{} and {}".format("spam", "eggs", "cheese") + + # Error: Incompatible types in string interpolation + # (expression has type "float", placeholder has type "int") [str-format] + "{:d}".format(3.14) + +.. _code-str-bytes-safe: + Check for implicit bytes coercions [str-bytes-safe] ------------------------------------------------------------------- @@ -926,6 +1114,30 @@ Warn about cases where a bytes object may be converted to a string in an unexpec print(f"The alphabet starts with {b!r}") # The alphabet starts with b'abc' print(f"The alphabet starts with {b.decode('utf-8')}") # The alphabet starts with abc +.. 
_code-annotation-unchecked: + +Notify about an annotation in an unchecked function [annotation-unchecked] +-------------------------------------------------------------------------- + +Sometimes a user may accidentally omit an annotation for a function, and mypy +will not check the body of this function (unless one uses +:option:`--check-untyped-defs ` or +:option:`--disallow-untyped-defs `). To avoid +such situations go unnoticed, mypy will show a note, if there are any type +annotations in an unchecked function: + +.. code-block:: python + + def test_assignment(): # "-> None" return annotation is missing + # Note: By default the bodies of untyped functions are not checked, + # consider using --check-untyped-defs [annotation-unchecked] + x: int = "no way" + +Note that mypy will still exit with return code ``0``, since such behaviour is +specified by :pep:`484`. + +.. _code-syntax: + Report syntax errors [syntax] ----------------------------- @@ -933,6 +1145,8 @@ If the code being checked is not syntactically valid, mypy issues a syntax error. Most, but not all, syntax errors are *blocking errors*: they can't be ignored with a ``# type: ignore`` comment. +.. _code-misc: + Miscellaneous checks [misc] --------------------------- diff --git a/docs/source/error_code_list2.rst b/docs/source/error_code_list2.rst index 8be2ac0b1d73..30fad0793771 100644 --- a/docs/source/error_code_list2.rst +++ b/docs/source/error_code_list2.rst @@ -15,6 +15,8 @@ error codes that are enabled by default. options by using a :ref:`configuration file ` or :ref:`command-line options `. +.. _code-type-arg: + Check that type arguments exist [type-arg] ------------------------------------------ @@ -34,6 +36,8 @@ Example: def remove_dups(items: list) -> list: ... +.. _code-no-untyped-def: + Check that every function has an annotation [no-untyped-def] ------------------------------------------------------------ @@ -62,6 +66,8 @@ Example: def __init__(self) -> None: self.value = 0 +.. _code-redundant-cast: + Check that cast is not redundant [redundant-cast] ------------------------------------------------- @@ -82,6 +88,8 @@ Example: # Error: Redundant cast to "int" [redundant-cast] return cast(int, x) +.. _code-redundant-self: + Check that methods do not have redundant Self annotations [redundant-self] -------------------------------------------------------------------------- @@ -104,6 +112,8 @@ Example: def copy(self: Self) -> Self: return type(self)() +.. _code-comparison-overlap: + Check that comparisons are overlapping [comparison-overlap] ----------------------------------------------------------- @@ -135,6 +145,8 @@ literal: def is_magic(x: bytes) -> bool: return x == b'magic' # OK +.. _code-no-untyped-call: + Check that no untyped functions are called [no-untyped-call] ------------------------------------------------------------ @@ -154,6 +166,7 @@ Example: def bad(): ... +.. _code-no-any-return: Check that function does not return Any value [no-any-return] ------------------------------------------------------------- @@ -175,6 +188,8 @@ Example: # Error: Returning Any from function declared to return "str" [no-any-return] return fields(x)[0] +.. _code-no-any-unimported: + Check that types have no Any components due to missing imports [no-any-unimported] ---------------------------------------------------------------------------------- @@ -195,6 +210,8 @@ that ``Cat`` falls back to ``Any`` in a type annotation: def feed(cat: Cat) -> None: ... +.. 
_code-unreachable: + Check that statement or expression is unreachable [unreachable] --------------------------------------------------------------- @@ -214,6 +231,8 @@ incorrect control flow or conditional checks that are accidentally always true o # Error: Statement is unreachable [unreachable] print('unreachable') +.. _code-redundant-expr: + Check that expression is redundant [redundant-expr] --------------------------------------------------- @@ -236,6 +255,34 @@ mypy generates an error if it thinks that an expression is redundant. [i for i in range(x) if isinstance(i, int)] +.. _code-possibly-undefined: + +Warn about variables that are defined only in some execution paths [possibly-undefined] +--------------------------------------------------------------------------------------- + +If you use :option:`--enable-error-code possibly-undefined `, +mypy generates an error if it cannot verify that a variable will be defined in +all execution paths. This includes situations when a variable definition +appears in a loop, in a conditional branch, in an except handler, etc. For +example: + +.. code-block:: python + + # Use "mypy --enable-error-code possibly-undefined ..." + + from typing import Iterable + + def test(values: Iterable[int], flag: bool) -> None: + if flag: + a = 1 + z = a + 1 # Error: Name "a" may be undefined [possibly-undefined] + + for v in values: + b = v + z = b + 1 # Error: Name "b" may be undefined [possibly-undefined] + +.. _code-truthy-bool: + Check that expression is not implicitly true in boolean context [truthy-bool] ----------------------------------------------------------------------------- @@ -259,6 +306,7 @@ Using an iterable value in a boolean context has a separate error code if foo: ... +.. _code-truthy-iterable: Check that iterable is not implicitly true in boolean context [truthy-iterable] ------------------------------------------------------------------------------- @@ -286,8 +334,7 @@ items`` check is actually valid. If that is the case, it is recommended to annotate ``items`` as ``Collection[int]`` instead of ``Iterable[int]``. - -.. _ignore-without-code: +.. _code-ignore-without-code: Check that ``# type: ignore`` include an error code [ignore-without-code] ------------------------------------------------------------------------- @@ -319,6 +366,8 @@ Example: # Error: "Foo" has no attribute "nme"; maybe "name"? f.nme = 42 # type: ignore[assignment] +.. _code-unused-awaitable: + Check that awaitable return value is used [unused-awaitable] ------------------------------------------------------------ @@ -348,6 +397,8 @@ silence the error: async def g() -> None: _ = asyncio.create_task(f()) # No error +.. _code-unused-ignore: + Check that ``# type: ignore`` comment is used [unused-ignore] ------------------------------------------------------------- @@ -391,3 +442,42 @@ Example: # The following will not generate an error on either # Python 3.8, or Python 3.9 42 + "testing..." # type: ignore + +.. _code-explicit-override: + +Check that ``@override`` is used when overriding a base class method [explicit-override] +---------------------------------------------------------------------------------------- + +If you use :option:`--enable-error-code explicit-override ` +mypy generates an error if you override a base class method without using the +``@override`` decorator. An error will not be emitted for overrides of ``__init__`` +or ``__new__``. See `PEP 698 `_. + +.. note:: + + Starting with Python 3.12, the ``@override`` decorator can be imported from ``typing``. 
+ To use it with older Python versions, import it from ``typing_extensions`` instead. + +Example: + +.. code-block:: python + + # Use "mypy --enable-error-code explicit-override ..." + + from typing import override + + class Parent: + def f(self, x: int) -> None: + pass + + def g(self, y: int) -> None: + pass + + + class Child(Parent): + def f(self, x: int) -> None: # Error: Missing @override decorator + pass + + @override + def g(self, y: int) -> None: + pass diff --git a/docs/source/error_codes.rst b/docs/source/error_codes.rst index c8a2728b5697..a71168cadf30 100644 --- a/docs/source/error_codes.rst +++ b/docs/source/error_codes.rst @@ -32,7 +32,7 @@ or config ``hide_error_codes = True`` to hide error codes. Error codes are shown prog.py:1: error: "str" has no attribute "trim" [attr-defined] It's also possible to require error codes for ``type: ignore`` comments. -See :ref:`ignore-without-code` for more information. +See :ref:`ignore-without-code` for more information. .. _silence-error-codes: @@ -43,11 +43,7 @@ Silencing errors based on error codes You can use a special comment ``# type: ignore[code, ...]`` to only ignore errors with a specific error code (or codes) on a particular line. This can be used even if you have not configured mypy to show -error codes. Currently it's only possible to disable arbitrary error -codes on individual lines using this comment. - -You can also use :option:`--disable-error-code ` -to disable specific error codes globally. +error codes. This example shows how to ignore an error about an imported name mypy thinks is undefined: @@ -58,17 +54,17 @@ thinks is undefined: # definition. from foolib import foo # type: ignore[attr-defined] - -Enabling specific error codes ------------------------------ +Enabling/disabling specific error codes globally +------------------------------------------------ There are command-line flags and config file settings for enabling certain optional error codes, such as :option:`--disallow-untyped-defs `, which enables the ``no-untyped-def`` error code. -You can use :option:`--enable-error-code ` to -enable specific error codes that don't have a dedicated command-line -flag or config file setting. +You can use :option:`--enable-error-code ` +and :option:`--disable-error-code ` +to enable or disable specific error codes that don't have a dedicated +command-line flag or config file setting. Per-module enabling/disabling error codes ----------------------------------------- @@ -107,8 +103,9 @@ still keep the other two error codes enabled. The overall logic is following: * Individual config sections *adjust* them per glob/module -* Inline ``# mypy: ...`` comments can further *adjust* them for a specific - module +* Inline ``# mypy: disable-error-code="..."`` comments can further + *adjust* them for a specific module. + For example: ``# mypy: disable-error-code="truthy-bool, ignore-without-code"`` So one can e.g. enable some code globally, disable it for all tests in the corresponding config section, and then re-enable it with an inline diff --git a/docs/source/extending_mypy.rst b/docs/source/extending_mypy.rst index daf863616334..506f548db687 100644 --- a/docs/source/extending_mypy.rst +++ b/docs/source/extending_mypy.rst @@ -159,7 +159,7 @@ This hook will be also called for instantiation of classes. This is a good choice if the return type is too complex to be expressed by regular python typing. -**get_function_signature_hook** is used to adjust the signature of a function. 
+**get_function_signature_hook()** is used to adjust the signature of a function. **get_method_hook()** is the same as ``get_function_hook()`` but for methods instead of module level functions. diff --git a/docs/source/getting_started.rst b/docs/source/getting_started.rst index 9b927097cfd2..463c73b2fe76 100644 --- a/docs/source/getting_started.rst +++ b/docs/source/getting_started.rst @@ -16,7 +16,7 @@ may not make much sense otherwise. Installing and running mypy *************************** -Mypy requires Python 3.7 or later to run. You can install mypy using pip: +Mypy requires Python 3.8 or later to run. You can install mypy using pip: .. code-block:: shell @@ -264,7 +264,7 @@ Python standard library. For example, here is a function which uses the from pathlib import Path def load_template(template_path: Path, name: str) -> str: - # Mypy knows that `file_path` has a `read_text` method that returns a str + # Mypy knows that `template_path` has a `read_text` method that returns a str template = template_path.read_text() # ...so it understands this line type checks return template.replace('USERNAME', name) diff --git a/docs/source/html_builder.py b/docs/source/html_builder.py new file mode 100644 index 000000000000..3064833b5631 --- /dev/null +++ b/docs/source/html_builder.py @@ -0,0 +1,62 @@ +from __future__ import annotations + +import json +import os +import textwrap +from pathlib import Path +from typing import Any + +from sphinx.addnodes import document +from sphinx.application import Sphinx +from sphinx.builders.html import StandaloneHTMLBuilder + + +class MypyHTMLBuilder(StandaloneHTMLBuilder): + def __init__(self, app: Sphinx) -> None: + super().__init__(app) + self._ref_to_doc = {} + + def write_doc(self, docname: str, doctree: document) -> None: + super().write_doc(docname, doctree) + self._ref_to_doc.update({_id: docname for _id in doctree.ids}) + + def _verify_error_codes(self) -> None: + from mypy.errorcodes import error_codes + + missing_error_codes = {c for c in error_codes if f"code-{c}" not in self._ref_to_doc} + if missing_error_codes: + raise ValueError( + f"Some error codes are not documented: {', '.join(sorted(missing_error_codes))}" + ) + + def _write_ref_redirector(self) -> None: + if os.getenv("VERIFY_MYPY_ERROR_CODES"): + self._verify_error_codes() + p = Path(self.outdir) / "_refs.html" + data = f""" + + + + + + """ + p.write_text(textwrap.dedent(data)) + + def finish(self) -> None: + super().finish() + self._write_ref_redirector() + + +def setup(app: Sphinx) -> dict[str, Any]: + app.add_builder(MypyHTMLBuilder, override=True) + + return {"version": "0.1", "parallel_read_safe": True, "parallel_write_safe": True} diff --git a/docs/source/installed_packages.rst b/docs/source/installed_packages.rst index b9a3b891c99c..fa4fae1a0b3e 100644 --- a/docs/source/installed_packages.rst +++ b/docs/source/installed_packages.rst @@ -119,7 +119,7 @@ The ``setup.py`` file could look like this: .. code-block:: python - from distutils.core import setup + from setuptools import setup setup( name="SuperPackageA", @@ -129,11 +129,6 @@ The ``setup.py`` file could look like this: packages=["package_a"] ) -.. note:: - - If you use :doc:`setuptools `, you must pass the option ``zip_safe=False`` to - ``setup()``, or mypy will not be able to find the installed package. - Some packages have a mix of stub files and runtime files. These packages also require a ``py.typed`` file. An example can be seen below: @@ -150,7 +145,7 @@ The ``setup.py`` file might look like this: .. 
code-block:: python - from distutils.core import setup + from setuptools import setup setup( name="SuperPackageB", @@ -180,7 +175,7 @@ The ``setup.py`` might look like this: .. code-block:: python - from distutils.core import setup + from setuptools import setup setup( name="SuperPackageC", diff --git a/docs/source/literal_types.rst b/docs/source/literal_types.rst index a66d300bd0fd..283bf7f9dba1 100644 --- a/docs/source/literal_types.rst +++ b/docs/source/literal_types.rst @@ -329,13 +329,10 @@ perform an exhaustiveness check, you need to update your code to use an .. code-block:: python from typing import Literal, NoReturn + from typing_extensions import assert_never PossibleValues = Literal['one', 'two'] - def assert_never(value: NoReturn) -> NoReturn: - # This also works at runtime as well - assert False, f'This code should never be reached, got: {value}' - def validate(x: PossibleValues) -> bool: if x == 'one': return True @@ -443,10 +440,7 @@ Let's start with a definition: from enum import Enum from typing import NoReturn - - def assert_never(value: NoReturn) -> NoReturn: - # This also works in runtime as well: - assert False, f'This code should never be reached, got: {value}' + from typing_extensions import assert_never class Direction(Enum): up = 'up' diff --git a/docs/source/more_types.rst b/docs/source/more_types.rst index 542ff1c57c71..4e6e9204fdca 100644 --- a/docs/source/more_types.rst +++ b/docs/source/more_types.rst @@ -2,7 +2,7 @@ More types ========== This section introduces a few additional kinds of types, including :py:data:`~typing.NoReturn`, -:py:func:`NewType `, and types for async code. It also discusses +:py:class:`~typing.NewType`, and types for async code. It also discusses how to give functions more precise types using overloads. All of these are only situationally useful, so feel free to skip this section and come back when you have a need for some of them. @@ -11,7 +11,7 @@ Here's a quick summary of what's covered here: * :py:data:`~typing.NoReturn` lets you tell mypy that a function never returns normally. -* :py:func:`NewType ` lets you define a variant of a type that is treated as a +* :py:class:`~typing.NewType` lets you define a variant of a type that is treated as a separate type by mypy but is identical to the original type at runtime. For example, you can have ``UserId`` as a variant of ``int`` that is just an ``int`` at runtime. @@ -75,7 +75,7 @@ certain values from base class instances. Example: ... However, this approach introduces some runtime overhead. To avoid this, the typing -module provides a helper object :py:func:`NewType ` that creates simple unique types with +module provides a helper object :py:class:`~typing.NewType` that creates simple unique types with almost zero runtime overhead. Mypy will treat the statement ``Derived = NewType('Derived', Base)`` as being roughly equivalent to the following definition: @@ -113,12 +113,12 @@ implicitly casting from ``UserId`` where ``int`` is expected. Examples: num: int = UserId(5) + 1 -:py:func:`NewType ` accepts exactly two arguments. The first argument must be a string literal +:py:class:`~typing.NewType` accepts exactly two arguments. The first argument must be a string literal containing the name of the new type and must equal the name of the variable to which the new type is assigned. The second argument must be a properly subclassable class, i.e., not a type construct like :py:data:`~typing.Union`, etc. 
-The callable returned by :py:func:`NewType ` accepts only one argument; this is equivalent to +The callable returned by :py:class:`~typing.NewType` accepts only one argument; this is equivalent to supporting only one constructor accepting an instance of the base class (see above). Example: @@ -139,12 +139,12 @@ Example: tcp_packet = TcpPacketId(127, 0) # Fails in type checker and at runtime You cannot use :py:func:`isinstance` or :py:func:`issubclass` on the object returned by -:py:func:`~typing.NewType`, nor can you subclass an object returned by :py:func:`~typing.NewType`. +:py:class:`~typing.NewType`, nor can you subclass an object returned by :py:class:`~typing.NewType`. .. note:: - Unlike type aliases, :py:func:`NewType ` will create an entirely new and - unique type when used. The intended purpose of :py:func:`NewType ` is to help you + Unlike type aliases, :py:class:`~typing.NewType` will create an entirely new and + unique type when used. The intended purpose of :py:class:`~typing.NewType` is to help you detect cases where you accidentally mixed together the old base type and the new derived type. @@ -160,7 +160,7 @@ You cannot use :py:func:`isinstance` or :py:func:`issubclass` on the object retu name_by_id(3) # ints and UserId are synonymous - But a similar example using :py:func:`NewType ` will not typecheck: + But a similar example using :py:class:`~typing.NewType` will not typecheck: .. code-block:: python diff --git a/docs/source/protocols.rst b/docs/source/protocols.rst index 95b870265f73..3336d77cb397 100644 --- a/docs/source/protocols.rst +++ b/docs/source/protocols.rst @@ -3,26 +3,24 @@ Protocols and structural subtyping ================================== -Mypy supports two ways of deciding whether two classes are compatible -as types: nominal subtyping and structural subtyping. - -*Nominal* subtyping is strictly based on the class hierarchy. If class ``D`` -inherits class ``C``, it's also a subtype of ``C``, and instances of -``D`` can be used when ``C`` instances are expected. This form of -subtyping is used by default in mypy, since it's easy to understand -and produces clear and concise error messages, and since it matches -how the native :py:func:`isinstance ` check works -- based on class +The Python type system supports two ways of deciding whether two objects are +compatible as types: nominal subtyping and structural subtyping. + +*Nominal* subtyping is strictly based on the class hierarchy. If class ``Dog`` +inherits class ``Animal``, it's a subtype of ``Animal``. Instances of ``Dog`` +can be used when ``Animal`` instances are expected. This form of subtyping +subtyping is what Python's type system predominantly uses: it's easy to +understand and produces clear and concise error messages, and matches how the +native :py:func:`isinstance ` check works -- based on class hierarchy. -*Structural* subtyping is based on the operations that can be performed with an object. Class ``D`` is -a structural subtype of class ``C`` if the former has all attributes -and methods of the latter, and with compatible types. +*Structural* subtyping is based on the operations that can be performed with an +object. Class ``Dog`` is a structural subtype of class ``Animal`` if the former +has all attributes and methods of the latter, and with compatible types. -Structural subtyping can be seen as a static equivalent of duck -typing, which is well known to Python programmers. Mypy provides -support for structural subtyping via protocol classes described -below. 
See :pep:`544` for the detailed specification of protocols -and structural subtyping in Python. +Structural subtyping can be seen as a static equivalent of duck typing, which is +well known to Python programmers. See :pep:`544` for the detailed specification +of protocols and structural subtyping in Python. .. _predefined_protocols: @@ -60,8 +58,7 @@ For example, ``IntList`` below is iterable, over ``int`` values: :ref:`predefined_protocols_reference` lists all protocols defined in :py:mod:`typing` and the signatures of the corresponding methods you need to define -to implement each protocol (the signatures can be left out, as always, but mypy -won't type check unannotated methods). +to implement each protocol. Simple user-defined protocols ***************************** @@ -89,18 +86,12 @@ class: for item in items: item.close() - close_all([Resource(), open('some/file')]) # Okay! + close_all([Resource(), open('some/file')]) # OK ``Resource`` is a subtype of the ``SupportsClose`` protocol since it defines a compatible ``close`` method. Regular file objects returned by :py:func:`open` are similarly compatible with the protocol, as they support ``close()``. -.. note:: - - The ``Protocol`` base class is provided in the ``typing_extensions`` - package for Python 3.4-3.7. Starting with Python 3.8, ``Protocol`` - is included in the ``typing`` module. - Defining subprotocols and subclassing protocols *********************************************** @@ -171,6 +162,13 @@ abstract: ExplicitSubclass() # error: Cannot instantiate abstract class 'ExplicitSubclass' # with abstract attributes 'attr' and 'method' +Similarly, explicitly assigning to a protocol instance can be a way to ask the +type checker to verify that your class implements a protocol: + +.. code-block:: python + + _proto: SomeProto = cast(ExplicitSubclass, None) + Invariance of protocol attributes ********************************* diff --git a/docs/source/runtime_troubles.rst b/docs/source/runtime_troubles.rst index 909215a774a9..66ab7b3a84c7 100644 --- a/docs/source/runtime_troubles.rst +++ b/docs/source/runtime_troubles.rst @@ -86,7 +86,7 @@ required to be valid Python syntax. For more details, see :pep:`563`. * :ref:`type aliases `; * :ref:`type narrowing `; - * type definitions (see :py:class:`~typing.TypeVar`, :py:func:`~typing.NewType`, :py:class:`~typing.NamedTuple`); + * type definitions (see :py:class:`~typing.TypeVar`, :py:class:`~typing.NewType`, :py:class:`~typing.NamedTuple`); * base classes. .. code-block:: python @@ -263,7 +263,7 @@ If your subclass is also generic, you can use the following: reveal_type(task_queue.get()) # Reveals str In Python 3.9, we can just inherit directly from ``Queue[str]`` or ``Queue[T]`` -since its :py:class:`queue.Queue` implements :py:meth:`__class_getitem__`, so +since its :py:class:`queue.Queue` implements :py:meth:`~object.__class_getitem__`, so the class object can be subscripted at runtime without issue. Using types defined in stubs but not at runtime diff --git a/docs/source/stubgen.rst b/docs/source/stubgen.rst index f06c9c066bb7..2de0743572e7 100644 --- a/docs/source/stubgen.rst +++ b/docs/source/stubgen.rst @@ -163,6 +163,11 @@ Additional flags Instead, only export imported names that are not referenced in the module that contains the import. +.. option:: --include-docstrings + + Include docstrings in stubs. This will add docstrings to Python function and + classes stubs and to C extension function stubs. + .. 
option:: --search-path PATH Specify module search directories, separated by colons (only used if diff --git a/docs/source/type_narrowing.rst b/docs/source/type_narrowing.rst index 72a816679140..4bc0fda70138 100644 --- a/docs/source/type_narrowing.rst +++ b/docs/source/type_narrowing.rst @@ -271,7 +271,7 @@ Generic TypeGuards else: reveal_type(names) # tuple[str, ...] -Typeguards with parameters +TypeGuards with parameters ~~~~~~~~~~~~~~~~~~~~~~~~~~ Type guard functions can accept extra arguments: @@ -293,7 +293,7 @@ Type guard functions can accept extra arguments: TypeGuards as methods ~~~~~~~~~~~~~~~~~~~~~ - A method can also serve as the ``TypeGuard``: +A method can also serve as a ``TypeGuard``: .. code-block:: python diff --git a/misc/analyze_cache.py b/misc/analyze_cache.py index 45c44139b473..33205f5132fc 100644 --- a/misc/analyze_cache.py +++ b/misc/analyze_cache.py @@ -6,8 +6,8 @@ import os import os.path from collections import Counter -from typing import Any, Dict, Iterable -from typing_extensions import Final, TypeAlias as _TypeAlias +from typing import Any, Dict, Final, Iterable +from typing_extensions import TypeAlias as _TypeAlias ROOT: Final = ".mypy_cache/3.5" diff --git a/misc/async_matrix.py b/misc/async_matrix.py deleted file mode 100644 index d4612dd81799..000000000000 --- a/misc/async_matrix.py +++ /dev/null @@ -1,149 +0,0 @@ -#!/usr/bin/env python3 -"""Test various combinations of generators/coroutines. - -This was used to cross-check the errors in the test case -testFullCoroutineMatrix in test-data/unit/check-async-await.test. -""" - -from __future__ import annotations - -import sys -from types import coroutine -from typing import Any, Awaitable, Generator, Iterator - -# The various things you might try to use in `await` or `yield from`. - - -def plain_generator() -> Generator[str, None, int]: - yield "a" - return 1 - - -async def plain_coroutine() -> int: - return 1 - - -@coroutine -def decorated_generator() -> Generator[str, None, int]: - yield "a" - return 1 - - -@coroutine -async def decorated_coroutine() -> int: - return 1 - - -class It(Iterator[str]): - stop = False - - def __iter__(self) -> It: - return self - - def __next__(self) -> str: - if self.stop: - raise StopIteration("end") - else: - self.stop = True - return "a" - - -def other_iterator() -> It: - return It() - - -class Aw(Awaitable[int]): - def __await__(self) -> Generator[str, Any, int]: - yield "a" - return 1 - - -def other_coroutine() -> Aw: - return Aw() - - -# The various contexts in which `await` or `yield from` might occur. - - -def plain_host_generator(func) -> Generator[str, None, None]: - yield "a" - x = 0 - f = func() - try: - x = yield from f # noqa: F841 - finally: - try: - f.close() - except AttributeError: - pass - - -async def plain_host_coroutine(func) -> None: - x = 0 - x = await func() # noqa: F841 - - -@coroutine -def decorated_host_generator(func) -> Generator[str, None, None]: - yield "a" - x = 0 - f = func() - try: - x = yield from f # noqa: F841 - finally: - try: - f.close() - except AttributeError: - pass - - -@coroutine -async def decorated_host_coroutine(func) -> None: - x = 0 - x = await func() # noqa: F841 - - -# Main driver. 
- - -def main() -> None: - verbose = "-v" in sys.argv - for host in [ - plain_host_generator, - plain_host_coroutine, - decorated_host_generator, - decorated_host_coroutine, - ]: - print() - print("==== Host:", host.__name__) - for func in [ - plain_generator, - plain_coroutine, - decorated_generator, - decorated_coroutine, - other_iterator, - other_coroutine, - ]: - print(" ---- Func:", func.__name__) - try: - f = host(func) - for i in range(10): - try: - x = f.send(None) - if verbose: - print(" yield:", x) - except StopIteration as e: - if verbose: - print(" stop:", e.value) - break - else: - if verbose: - print(" ???? still going") - except Exception as e: - print(" error:", repr(e)) - - -# Run main(). - -if __name__ == "__main__": - main() diff --git a/misc/build-debug-python.sh b/misc/build-debug-python.sh index f652d6ad9937..8dd1bff4c9ed 100755 --- a/misc/build-debug-python.sh +++ b/misc/build-debug-python.sh @@ -26,7 +26,7 @@ fi curl -O https://www.python.org/ftp/python/$VERSION/Python-$VERSION.tgz tar zxf Python-$VERSION.tgz cd Python-$VERSION -CPPFLAGS="$CPPFLAGS" LDFLAGS="$LDFLAGS" ./configure CFLAGS="-DPy_DEBUG -DPy_TRACE_REFS -DPYMALLOC_DEBUG" --with-pydebug --prefix=$PREFIX +CPPFLAGS="$CPPFLAGS" LDFLAGS="$LDFLAGS" ./configure CFLAGS="-DPy_DEBUG -DPy_TRACE_REFS -DPYMALLOC_DEBUG" --with-pydebug --prefix=$PREFIX --with-trace-refs make -j4 make install $PREFIX/bin/python3 -m pip install virtualenv diff --git a/misc/cherry-pick-typeshed.py b/misc/cherry-pick-typeshed.py index af08009c2a8f..7e3b8b56e65f 100644 --- a/misc/cherry-pick-typeshed.py +++ b/misc/cherry-pick-typeshed.py @@ -53,6 +53,7 @@ def main() -> None: "--index", "--directory=mypy/typeshed", "--exclude=**/tests/**", + "--exclude=**/test_cases/**", diff_file, ], check=True, diff --git a/misc/fix_annotate.py b/misc/fix_annotate.py deleted file mode 100644 index fc8ac27466d5..000000000000 --- a/misc/fix_annotate.py +++ /dev/null @@ -1,218 +0,0 @@ -"""Fixer for lib2to3 that inserts mypy annotations into all methods. - -The simplest way to run this is to copy it into lib2to3's "fixes" -subdirectory and then run "2to3 -f annotate" over your files. - -The fixer transforms e.g. - - def foo(self, bar, baz=12): - return bar + baz - -into - - def foo(self, bar, baz=12): - # type: (Any, int) -> Any - return bar + baz - -It does not do type inference but it recognizes some basic default -argument values such as numbers and strings (and assumes their type -implies the argument type). - -It also uses some basic heuristics to decide whether to ignore the -first argument: - - - always if it's named 'self' - - if there's a @classmethod decorator - -Finally, it knows that __init__() is supposed to return None. -""" - -from __future__ import annotations - -import os -import re -from lib2to3.fixer_base import BaseFix -from lib2to3.fixer_util import syms, token, touch_import -from lib2to3.patcomp import compile_pattern -from lib2to3.pytree import Leaf, Node - - -class FixAnnotate(BaseFix): - # This fixer is compatible with the bottom matcher. - BM_compatible = True - - # This fixer shouldn't run by default. - explicit = True - - # The pattern to match. 
- PATTERN = """ - funcdef< 'def' name=any parameters< '(' [args=any] ')' > ':' suite=any+ > - """ - - counter = None if not os.getenv("MAXFIXES") else int(os.getenv("MAXFIXES")) - - def transform(self, node, results): - if FixAnnotate.counter is not None: - if FixAnnotate.counter <= 0: - return - suite = results["suite"] - children = suite[0].children - - # NOTE: I've reverse-engineered the structure of the parse tree. - # It's always a list of nodes, the first of which contains the - # entire suite. Its children seem to be: - # - # [0] NEWLINE - # [1] INDENT - # [2...n-2] statements (the first may be a docstring) - # [n-1] DEDENT - # - # Comments before the suite are part of the INDENT's prefix. - # - # "Compact" functions (e.g. "def foo(x, y): return max(x, y)") - # have a different structure that isn't matched by PATTERN. - # - # print('-'*60) - # print(node) - # for i, ch in enumerate(children): - # print(i, repr(ch.prefix), repr(ch)) - # - # Check if there's already an annotation. - for ch in children: - if ch.prefix.lstrip().startswith("# type:"): - return # There's already a # type: comment here; don't change anything. - - # Compute the annotation - annot = self.make_annotation(node, results) - - # Insert '# type: {annot}' comment. - # For reference, see lib2to3/fixes/fix_tuple_params.py in stdlib. - if len(children) >= 2 and children[1].type == token.INDENT: - children[1].prefix = "{}# type: {}\n{}".format( - children[1].value, annot, children[1].prefix - ) - children[1].changed() - if FixAnnotate.counter is not None: - FixAnnotate.counter -= 1 - - # Also add 'from typing import Any' at the top. - if "Any" in annot: - touch_import("typing", "Any", node) - - def make_annotation(self, node, results): - name = results["name"] - assert isinstance(name, Leaf), repr(name) - assert name.type == token.NAME, repr(name) - decorators = self.get_decorators(node) - is_method = self.is_method(node) - if name.value == "__init__" or not self.has_return_exprs(node): - restype = "None" - else: - restype = "Any" - args = results.get("args") - argtypes = [] - if isinstance(args, Node): - children = args.children - elif isinstance(args, Leaf): - children = [args] - else: - children = [] - # Interpret children according to the following grammar: - # (('*'|'**')? NAME ['=' expr] ','?)* - stars = inferred_type = "" - in_default = False - at_start = True - for child in children: - if isinstance(child, Leaf): - if child.value in ("*", "**"): - stars += child.value - elif child.type == token.NAME and not in_default: - if not is_method or not at_start or "staticmethod" in decorators: - inferred_type = "Any" - else: - # Always skip the first argument if it's named 'self'. - # Always skip the first argument of a class method. - if child.value == "self" or "classmethod" in decorators: - pass - else: - inferred_type = "Any" - elif child.value == "=": - in_default = True - elif in_default and child.value != ",": - if child.type == token.NUMBER: - if re.match(r"\d+[lL]?$", child.value): - inferred_type = "int" - else: - inferred_type = "float" # TODO: complex? 
- elif child.type == token.STRING: - if child.value.startswith(("u", "U")): - inferred_type = "unicode" - else: - inferred_type = "str" - elif child.type == token.NAME and child.value in ("True", "False"): - inferred_type = "bool" - elif child.value == ",": - if inferred_type: - argtypes.append(stars + inferred_type) - # Reset - stars = inferred_type = "" - in_default = False - at_start = False - if inferred_type: - argtypes.append(stars + inferred_type) - return "(" + ", ".join(argtypes) + ") -> " + restype - - # The parse tree has a different shape when there is a single - # decorator vs. when there are multiple decorators. - DECORATED = "decorated< (d=decorator | decorators< dd=decorator+ >) funcdef >" - decorated = compile_pattern(DECORATED) - - def get_decorators(self, node): - """Return a list of decorators found on a function definition. - - This is a list of strings; only simple decorators - (e.g. @staticmethod) are returned. - - If the function is undecorated or only non-simple decorators - are found, return []. - """ - if node.parent is None: - return [] - results = {} - if not self.decorated.match(node.parent, results): - return [] - decorators = results.get("dd") or [results["d"]] - decs = [] - for d in decorators: - for child in d.children: - if isinstance(child, Leaf) and child.type == token.NAME: - decs.append(child.value) - return decs - - def is_method(self, node): - """Return whether the node occurs (directly) inside a class.""" - node = node.parent - while node is not None: - if node.type == syms.classdef: - return True - if node.type == syms.funcdef: - return False - node = node.parent - return False - - RETURN_EXPR = "return_stmt< 'return' any >" - return_expr = compile_pattern(RETURN_EXPR) - - def has_return_exprs(self, node): - """Traverse the tree below node looking for 'return expr'. - - Return True if at least 'return expr' is found, False if not. - (If both 'return' and 'return expr' are found, return True.) - """ - results = {} - if self.return_expr.match(node, results): - return True - return any( - child.type not in (syms.funcdef, syms.classdef) and self.has_return_exprs(child) - for child in node.children - ) diff --git a/misc/incremental_checker.py b/misc/incremental_checker.py index 85239b6462b8..4e42aef333bb 100755 --- a/misc/incremental_checker.py +++ b/misc/incremental_checker.py @@ -44,8 +44,8 @@ import textwrap import time from argparse import ArgumentParser, Namespace, RawDescriptionHelpFormatter -from typing import Any, Dict -from typing_extensions import Final, TypeAlias as _TypeAlias +from typing import Any, Dict, Final +from typing_extensions import TypeAlias as _TypeAlias CACHE_PATH: Final = ".incremental_checker_cache.json" MYPY_REPO_URL: Final = "https://github.com/python/mypy.git" diff --git a/misc/remove-eol-whitespace.sh b/misc/remove-eol-whitespace.sh deleted file mode 100644 index 5cf666997e34..000000000000 --- a/misc/remove-eol-whitespace.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/sh - -# Remove trailing whitespace from all non-binary files in a git repo. 
- -# From https://gist.github.com/dpaluy/3690668; originally from here: -# https://unix.stackexchange.com/questions/36233/how-to-skip-file-in-sed-if-it-contains-regex/36240#36240 - -git grep -I --name-only -z -e '' | xargs -0 sed -i -e 's/[ \t]\+\(\r\?\)$/\1/' diff --git a/misc/sync-typeshed.py b/misc/sync-typeshed.py index fc6cbc1d88e7..36967f86262e 100644 --- a/misc/sync-typeshed.py +++ b/misc/sync-typeshed.py @@ -179,13 +179,27 @@ def main() -> None: print("Created typeshed sync commit.") commits_to_cherry_pick = [ - "c844270a4", # LiteralString reverts - "9ebe5fd49", # sum reverts - "d1987191f", # ctypes reverts - "b1761f4c9", # ParamSpec for functools.wraps + "2f6b6e66c", # LiteralString reverts + "120af30e7", # sum reverts + "1866d28f1", # ctypes reverts + "3240da455", # ParamSpec for functools.wraps ] for commit in commits_to_cherry_pick: - subprocess.run(["git", "cherry-pick", commit], check=True) + try: + subprocess.run(["git", "cherry-pick", commit], check=True) + except subprocess.CalledProcessError: + if not sys.__stdin__.isatty(): + # We're in an automated context + raise + + # Allow the option to merge manually + print( + f"Commit {commit} failed to cherry pick." + " In a separate shell, please manually merge and continue cherry pick." + ) + rsp = input("Did you finish the cherry pick? [y/N]: ") + if rsp.lower() not in {"y", "yes"}: + raise print(f"Cherry-picked {commit}.") if args.make_pr: diff --git a/misc/test-stubgenc.sh b/misc/test-stubgenc.sh index 7da135f0bf16..7713e1b04e43 100755 --- a/misc/test-stubgenc.sh +++ b/misc/test-stubgenc.sh @@ -3,17 +3,33 @@ set -e set -x -cd "$(dirname $0)/.." +cd "$(dirname "$0")/.." # Install dependencies, demo project and mypy python -m pip install -r test-requirements.txt python -m pip install ./test-data/pybind11_mypy_demo python -m pip install . -# Remove expected stubs and generate new inplace -STUBGEN_OUTPUT_FOLDER=./test-data/pybind11_mypy_demo/stubgen -rm -rf $STUBGEN_OUTPUT_FOLDER/* -stubgen -p pybind11_mypy_demo -o $STUBGEN_OUTPUT_FOLDER +EXIT=0 -# Compare generated stubs to expected ones -git diff --exit-code $STUBGEN_OUTPUT_FOLDER +# performs the stubgenc test +# first argument is the test result folder +# everything else is passed to stubgen as its arguments +function stubgenc_test() { + # Remove expected stubs and generate new inplace + STUBGEN_OUTPUT_FOLDER=./test-data/pybind11_mypy_demo/$1 + rm -rf "${STUBGEN_OUTPUT_FOLDER:?}/*" + stubgen -o "$STUBGEN_OUTPUT_FOLDER" "${@:2}" + + # Compare generated stubs to expected ones + if ! git diff --exit-code "$STUBGEN_OUTPUT_FOLDER"; + then + EXIT=$? 
+ fi +} + +# create stubs without docstrings +stubgenc_test stubgen -p pybind11_mypy_demo +# create stubs with docstrings +stubgenc_test stubgen-include-docs -p pybind11_mypy_demo --include-docstrings +exit $EXIT diff --git a/mypy-requirements.txt b/mypy-requirements.txt index 9a55446eb05a..f81412be761e 100644 --- a/mypy-requirements.txt +++ b/mypy-requirements.txt @@ -1,5 +1,4 @@ # NOTE: this needs to be kept in sync with the "requires" list in pyproject.toml -typing_extensions>=3.10 +typing_extensions>=4.1.0 mypy_extensions>=1.0.0 -typed_ast>=1.4.0,<2; python_version<'3.8' tomli>=1.1.0; python_version<'3.11' diff --git a/mypy/applytype.py b/mypy/applytype.py index 55a51d4adbb6..884be287e33d 100644 --- a/mypy/applytype.py +++ b/mypy/applytype.py @@ -3,25 +3,22 @@ from typing import Callable, Sequence import mypy.subtypes -from mypy.expandtype import expand_type, expand_unpack_with_variables -from mypy.nodes import ARG_STAR, Context +from mypy.expandtype import expand_type +from mypy.nodes import Context from mypy.types import ( AnyType, CallableType, - Instance, - Parameters, ParamSpecType, PartialType, - TupleType, Type, TypeVarId, TypeVarLikeType, TypeVarTupleType, TypeVarType, + UninhabitedType, UnpackType, get_proper_type, ) -from mypy.typevartuples import find_unpack_in_list, replace_starargs def get_target_type( @@ -32,13 +29,15 @@ def get_target_type( context: Context, skip_unsatisfied: bool, ) -> Type | None: + p_type = get_proper_type(type) + if isinstance(p_type, UninhabitedType) and tvar.has_default(): + return tvar.default if isinstance(tvar, ParamSpecType): return type if isinstance(tvar, TypeVarTupleType): return type assert isinstance(tvar, TypeVarType) values = tvar.values - p_type = get_proper_type(type) if values: if isinstance(p_type, AnyType): return type @@ -105,66 +104,26 @@ def apply_generic_arguments( if target_type is not None: id_to_type[tvar.id] = target_type + # TODO: validate arg_kinds/arg_names for ParamSpec and TypeVarTuple replacements, + # not just type variable bounds above. param_spec = callable.param_spec() if param_spec is not None: nt = id_to_type.get(param_spec.id) if nt is not None: - nt = get_proper_type(nt) - if isinstance(nt, (CallableType, Parameters)): - callable = callable.expand_param_spec(nt) + # ParamSpec expansion is special-cased, so we need to always expand callable + # as a whole, not expanding arguments individually. + callable = expand_type(callable, id_to_type) + assert isinstance(callable, CallableType) + return callable.copy_modified( + variables=[tv for tv in tvars if tv.id not in id_to_type] + ) # Apply arguments to argument types. var_arg = callable.var_arg() if var_arg is not None and isinstance(var_arg.typ, UnpackType): - star_index = callable.arg_kinds.index(ARG_STAR) - callable = callable.copy_modified( - arg_types=( - [expand_type(at, id_to_type) for at in callable.arg_types[:star_index]] - + [callable.arg_types[star_index]] - + [expand_type(at, id_to_type) for at in callable.arg_types[star_index + 1 :]] - ) - ) - - unpacked_type = get_proper_type(var_arg.typ.type) - if isinstance(unpacked_type, TupleType): - # Assuming for now that because we convert prefixes to positional arguments, - # the first argument is always an unpack. - expanded_tuple = expand_type(unpacked_type, id_to_type) - if isinstance(expanded_tuple, TupleType): - # TODO: handle the case where the tuple has an unpack. This will - # hit an assert below. 
- expanded_unpack = find_unpack_in_list(expanded_tuple.items) - if expanded_unpack is not None: - callable = callable.copy_modified( - arg_types=( - callable.arg_types[:star_index] - + [expanded_tuple] - + callable.arg_types[star_index + 1 :] - ) - ) - else: - callable = replace_starargs(callable, expanded_tuple.items) - else: - # TODO: handle the case for if we get a variable length tuple. - assert False, f"mypy bug: unimplemented case, {expanded_tuple}" - elif isinstance(unpacked_type, TypeVarTupleType): - expanded_tvt = expand_unpack_with_variables(var_arg.typ, id_to_type) - if isinstance(expanded_tvt, list): - for t in expanded_tvt: - assert not isinstance(t, UnpackType) - callable = replace_starargs(callable, expanded_tvt) - else: - assert isinstance(expanded_tvt, Instance) - assert expanded_tvt.type.fullname == "builtins.tuple" - callable = callable.copy_modified( - arg_types=( - callable.arg_types[:star_index] - + [expanded_tvt.args[0]] - + callable.arg_types[star_index + 1 :] - ) - ) - else: - assert False, "mypy bug: unhandled case applying unpack" + callable = expand_type(callable, id_to_type) + assert isinstance(callable, CallableType) + return callable.copy_modified(variables=[tv for tv in tvars if tv.id not in id_to_type]) else: callable = callable.copy_modified( arg_types=[expand_type(at, id_to_type) for at in callable.arg_types] @@ -177,6 +136,9 @@ def apply_generic_arguments( type_guard = None # The callable may retain some type vars if only some were applied. + # TODO: move apply_poly() logic from checkexpr.py here when new inference + # becomes universally used (i.e. in all passes + in unification). + # With this new logic we can actually *add* some new free variables. remaining_tvars = [tv for tv in tvars if tv.id not in id_to_type] return callable.copy_modified( diff --git a/mypy/binder.py b/mypy/binder.py index 37c0b6bb9006..8a68f24f661e 100644 --- a/mypy/binder.py +++ b/mypy/binder.py @@ -42,13 +42,6 @@ def __init__(self, id: int, conditional_frame: bool = False) -> None: self.types: dict[Key, Type] = {} self.unreachable = False self.conditional_frame = conditional_frame - - # Should be set only if we're entering a frame where it's not - # possible to accurately determine whether or not contained - # statements will be unreachable or not. - # - # Long-term, we should improve mypy to the point where we no longer - # need this field. 
self.suppress_unreachable_warnings = False def __repr__(self) -> str: @@ -174,7 +167,6 @@ def is_unreachable(self) -> bool: return any(f.unreachable for f in self.frames) def is_unreachable_warning_suppressed(self) -> bool: - # TODO: See todo in 'is_unreachable' return any(f.suppress_unreachable_warnings for f in self.frames) def cleanse(self, expr: Expression) -> None: diff --git a/mypy/build.py b/mypy/build.py index c239afb56236..39629c2dc455 100644 --- a/mypy/build.py +++ b/mypy/build.py @@ -31,22 +31,20 @@ Callable, ClassVar, Dict, - Iterable, + Final, Iterator, Mapping, NamedTuple, NoReturn, Sequence, TextIO, - TypeVar, ) -from typing_extensions import Final, TypeAlias as _TypeAlias - -from mypy_extensions import TypedDict +from typing_extensions import TypeAlias as _TypeAlias, TypedDict import mypy.semanal_main from mypy.checker import TypeChecker from mypy.errors import CompileError, ErrorInfo, Errors, report_internal_error +from mypy.graph_utils import prepare_sccs, strongly_connected_components, topsort from mypy.indirection import TypeIndirectionVisitor from mypy.messages import MessageBuilder from mypy.nodes import Import, ImportAll, ImportBase, ImportFrom, MypyFile, SymbolTable, TypeInfo @@ -57,7 +55,6 @@ DecodeError, decode_python_encoding, get_mypy_comments, - get_top_two_prefixes, hash_digest, is_stub_package_file, is_sub_path, @@ -93,12 +90,7 @@ from mypy.plugins.default import DefaultPlugin from mypy.renaming import LimitedVariableRenameVisitor, VariableRenameVisitor from mypy.stats import dump_type_stats -from mypy.stubinfo import ( - is_legacy_bundled_package, - legacy_bundled_packages, - non_bundled_packages, - stub_package_name, -) +from mypy.stubinfo import legacy_bundled_packages, non_bundled_packages, stub_distribution_name from mypy.types import Type from mypy.typestate import reset_global_state, type_state from mypy.version import __version__ @@ -116,7 +108,10 @@ "types", "typing_extensions", "mypy_extensions", - "_importlib_modulespec", + "_typeshed", + "_collections_abc", + "collections", + "collections.abc", "sys", "abc", } @@ -341,7 +336,9 @@ class CacheMeta(NamedTuple): # Metadata for the fine-grained dependencies file associated with a module. -FgDepMeta = TypedDict("FgDepMeta", {"path": str, "mtime": int}) +class FgDepMeta(TypedDict): + path: str + mtime: int def cache_meta_from_dict(meta: dict[str, Any], data_json: str) -> CacheMeta: @@ -659,8 +656,6 @@ def __init__( for module in CORE_BUILTIN_MODULES: if options.use_builtins_fixtures: continue - if module == "_importlib_modulespec": - continue path = self.find_module_cache.find_module(module) if not isinstance(path, str): raise CompileError( @@ -2238,7 +2233,7 @@ def semantic_analysis_pass1(self) -> None: analyzer = SemanticAnalyzerPreAnalysis() with self.wrap_context(): analyzer.visit_file(self.tree, self.xpath, self.id, options) - self.manager.errors.set_unreachable_lines(self.xpath, self.tree.unreachable_lines) + self.manager.errors.set_skipped_lines(self.xpath, self.tree.skipped_lines) # TODO: Do this while constructing the AST? 
self.tree.names = SymbolTable() if not self.tree.is_stub: @@ -2637,7 +2632,7 @@ def find_module_and_diagnose( result.endswith(".pyi") # Stubs are always normal and not options.follow_imports_for_stubs # except when they aren't ) - or id in mypy.semanal_main.core_modules # core is always normal + or id in CORE_BUILTIN_MODULES # core is always normal ): follow_imports = "normal" if skip_diagnose: @@ -2664,14 +2659,18 @@ def find_module_and_diagnose( # search path or the module has not been installed. ignore_missing_imports = options.ignore_missing_imports - top_level, second_level = get_top_two_prefixes(id) + + id_components = id.split(".") # Don't honor a global (not per-module) ignore_missing_imports # setting for modules that used to have bundled stubs, as # otherwise updating mypy can silently result in new false # negatives. (Unless there are stubs but they are incomplete.) global_ignore_missing_imports = manager.options.ignore_missing_imports if ( - (is_legacy_bundled_package(top_level) or is_legacy_bundled_package(second_level)) + any( + ".".join(id_components[:i]) in legacy_bundled_packages + for i in range(len(id_components), 0, -1) + ) and global_ignore_missing_imports and not options.ignore_missing_imports_per_module and result is ModuleNotFoundReason.APPROVED_STUBS_NOT_INSTALLED @@ -2779,16 +2778,29 @@ def module_not_found( else: daemon = manager.options.fine_grained_incremental msg, notes = reason.error_message_templates(daemon) - errors.report(line, 0, msg.format(module=target), code=codes.IMPORT) - top_level, second_level = get_top_two_prefixes(target) - if second_level in legacy_bundled_packages or second_level in non_bundled_packages: - top_level = second_level + if reason == ModuleNotFoundReason.NOT_FOUND: + code = codes.IMPORT_NOT_FOUND + elif ( + reason == ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS + or reason == ModuleNotFoundReason.APPROVED_STUBS_NOT_INSTALLED + ): + code = codes.IMPORT_UNTYPED + else: + code = codes.IMPORT + errors.report(line, 0, msg.format(module=target), code=code) + + components = target.split(".") + for i in range(len(components), 0, -1): + module = ".".join(components[:i]) + if module in legacy_bundled_packages or module in non_bundled_packages: + break + for note in notes: if "{stub_dist}" in note: - note = note.format(stub_dist=stub_package_name(top_level)) - errors.report(line, 0, note, severity="note", only_once=True, code=codes.IMPORT) + note = note.format(stub_dist=stub_distribution_name(module)) + errors.report(line, 0, note, severity="note", only_once=True, code=code) if reason is ModuleNotFoundReason.APPROVED_STUBS_NOT_INSTALLED: - manager.missing_stub_packages.add(stub_package_name(top_level)) + manager.missing_stub_packages.add(stub_distribution_name(module)) errors.set_import_context(save_import_context) @@ -3076,7 +3088,7 @@ def load_graph( manager.errors.report( -1, -1, - "See https://mypy.readthedocs.io/en/stable/running_mypy.html#mapping-file-paths-to-modules " # noqa: E501 + "See https://mypy.readthedocs.io/en/stable/running_mypy.html#mapping-file-paths-to-modules " "for more info", severity="note", ) @@ -3164,7 +3176,7 @@ def load_graph( manager.errors.report( -1, 0, - "See https://mypy.readthedocs.io/en/stable/running_mypy.html#mapping-file-paths-to-modules " # noqa: E501 + "See https://mypy.readthedocs.io/en/stable/running_mypy.html#mapping-file-paths-to-modules " "for more info", severity="note", ) @@ -3465,15 +3477,8 @@ def sorted_components( edges = {id: deps_filtered(graph, vertices, id, pri_max) for id in vertices} 
sccs = list(strongly_connected_components(vertices, edges)) # Topsort. - sccsmap = {id: frozenset(scc) for scc in sccs for id in scc} - data: dict[AbstractSet[str], set[AbstractSet[str]]] = {} - for scc in sccs: - deps: set[AbstractSet[str]] = set() - for id in scc: - deps.update(sccsmap[x] for x in deps_filtered(graph, vertices, id, pri_max)) - data[frozenset(scc)] = deps res = [] - for ready in topsort(data): + for ready in topsort(prepare_sccs(sccs, edges)): # Sort the sets in ready by reversed smallest State.order. Examples: # # - If ready is [{x}, {y}], x.order == 1, y.order == 2, we get @@ -3498,100 +3503,6 @@ def deps_filtered(graph: Graph, vertices: AbstractSet[str], id: str, pri_max: in ] -def strongly_connected_components( - vertices: AbstractSet[str], edges: dict[str, list[str]] -) -> Iterator[set[str]]: - """Compute Strongly Connected Components of a directed graph. - - Args: - vertices: the labels for the vertices - edges: for each vertex, gives the target vertices of its outgoing edges - - Returns: - An iterator yielding strongly connected components, each - represented as a set of vertices. Each input vertex will occur - exactly once; vertices not part of a SCC are returned as - singleton sets. - - From https://code.activestate.com/recipes/578507/. - """ - identified: set[str] = set() - stack: list[str] = [] - index: dict[str, int] = {} - boundaries: list[int] = [] - - def dfs(v: str) -> Iterator[set[str]]: - index[v] = len(stack) - stack.append(v) - boundaries.append(index[v]) - - for w in edges[v]: - if w not in index: - yield from dfs(w) - elif w not in identified: - while index[w] < boundaries[-1]: - boundaries.pop() - - if boundaries[-1] == index[v]: - boundaries.pop() - scc = set(stack[index[v] :]) - del stack[index[v] :] - identified.update(scc) - yield scc - - for v in vertices: - if v not in index: - yield from dfs(v) - - -T = TypeVar("T") - - -def topsort(data: dict[T, set[T]]) -> Iterable[set[T]]: - """Topological sort. - - Args: - data: A map from vertices to all vertices that it has an edge - connecting it to. NOTE: This data structure - is modified in place -- for normalization purposes, - self-dependencies are removed and entries representing - orphans are added. - - Returns: - An iterator yielding sets of vertices that have an equivalent - ordering. - - Example: - Suppose the input has the following structure: - - {A: {B, C}, B: {D}, C: {D}} - - This is normalized to: - - {A: {B, C}, B: {D}, C: {D}, D: {}} - - The algorithm will yield the following values: - - {D} - {B, C} - {A} - - From https://code.activestate.com/recipes/577413/. - """ - # TODO: Use a faster algorithm? - for k, v in data.items(): - v.discard(k) # Ignore self dependencies. 
- for item in set.union(*data.values()) - set(data.keys()): - data[item] = set() - while True: - ready = {item for item, dep in data.items() if not dep} - if not ready: - break - yield ready - data = {item: (dep - ready) for item, dep in data.items() if item not in ready} - assert not data, f"A cyclic dependency exists amongst {data!r}" - - def missing_stubs_file(cache_dir: str) -> str: return os.path.join(cache_dir, "missing_stubs") diff --git a/mypy/checker.py b/mypy/checker.py index c1c31538b7de..a44601b83e21 100644 --- a/mypy/checker.py +++ b/mypy/checker.py @@ -9,6 +9,7 @@ AbstractSet, Callable, Dict, + Final, Generic, Iterable, Iterator, @@ -22,7 +23,7 @@ cast, overload, ) -from typing_extensions import Final, TypeAlias as _TypeAlias +from typing_extensions import TypeAlias as _TypeAlias import mypy.checkexpr from mypy import errorcodes as codes, message_registry, nodes, operators @@ -131,11 +132,13 @@ Var, WhileStmt, WithStmt, + YieldExpr, is_final_node, ) from mypy.options import Options from mypy.patterns import AsPattern, StarredPattern from mypy.plugin import CheckerPluginInterface, Plugin +from mypy.plugins import dataclasses as dataclasses_plugin from mypy.scope import Scope from mypy.semanal import is_trivial_body, refers_to_fullname, set_callable_name from mypy.semanal_enum import ENUM_BASES, ENUM_SPECIAL_PROPS @@ -213,7 +216,7 @@ is_literal_type, is_named_instance, ) -from mypy.types_utils import is_optional, remove_optional, store_argument_type, strip_type +from mypy.types_utils import is_overlapping_none, remove_optional, store_argument_type, strip_type from mypy.typetraverser import TypeTraverserVisitor from mypy.typevars import fill_typevars, fill_typevars_with_any, has_no_typevars from mypy.util import is_dunder, is_sunder, is_typeshed_file @@ -462,14 +465,14 @@ def check_first_pass(self) -> None: with self.tscope.module_scope(self.tree.fullname): with self.enter_partial_types(), self.binder.top_frame_context(): for d in self.tree.defs: - if ( - self.binder.is_unreachable() - and self.should_report_unreachable_issues() - and not self.is_raising_or_empty(d) - ): - self.msg.unreachable_statement(d) - break - self.accept(d) + if self.binder.is_unreachable(): + if not self.should_report_unreachable_issues(): + break + if not self.is_noop_for_reachability(d): + self.msg.unreachable_statement(d) + break + else: + self.accept(d) assert not self.current_node_deferred @@ -633,22 +636,69 @@ def _visit_overloaded_func_def(self, defn: OverloadedFuncDef) -> None: self.visit_decorator(defn.items[0]) for fdef in defn.items: assert isinstance(fdef, Decorator) - self.check_func_item(fdef.func, name=fdef.func.name, allow_empty=True) + if defn.is_property: + self.check_func_item(fdef.func, name=fdef.func.name, allow_empty=True) + else: + # Perform full check for real overloads to infer type of all decorated + # overload variants. 
+ self.visit_decorator_inner(fdef, allow_empty=True) if fdef.func.abstract_status in (IS_ABSTRACT, IMPLICITLY_ABSTRACT): num_abstract += 1 if num_abstract not in (0, len(defn.items)): self.fail(message_registry.INCONSISTENT_ABSTRACT_OVERLOAD, defn) if defn.impl: defn.impl.accept(self) + if not defn.is_property: + self.check_overlapping_overloads(defn) + if defn.type is None: + item_types = [] + for item in defn.items: + assert isinstance(item, Decorator) + item_type = self.extract_callable_type(item.var.type, item) + if item_type is not None: + item_types.append(item_type) + if item_types: + defn.type = Overloaded(item_types) + # Check override validity after we analyzed current definition. if defn.info: - found_base_method = self.check_method_override(defn) - if defn.is_explicit_override and found_base_method is False: + found_method_base_classes = self.check_method_override(defn) + if ( + defn.is_explicit_override + and not found_method_base_classes + and found_method_base_classes is not None + ): self.msg.no_overridable_method(defn.name, defn) + self.check_explicit_override_decorator(defn, found_method_base_classes, defn.impl) self.check_inplace_operator_method(defn) - if not defn.is_property: - self.check_overlapping_overloads(defn) return None + def extract_callable_type(self, inner_type: Type | None, ctx: Context) -> CallableType | None: + """Get type as seen by an overload item caller.""" + inner_type = get_proper_type(inner_type) + outer_type: CallableType | None = None + if inner_type is not None and not isinstance(inner_type, AnyType): + if isinstance(inner_type, CallableType): + outer_type = inner_type + elif isinstance(inner_type, Instance): + inner_call = get_proper_type( + analyze_member_access( + name="__call__", + typ=inner_type, + context=ctx, + is_lvalue=False, + is_super=False, + is_operator=True, + msg=self.msg, + original_type=inner_type, + chk=self, + ) + ) + if isinstance(inner_call, CallableType): + outer_type = inner_call + if outer_type is None: + self.msg.not_callable(inner_type, ctx) + return outer_type + def check_overlapping_overloads(self, defn: OverloadedFuncDef) -> None: # At this point we should have set the impl already, and all remaining # items are decorators @@ -672,40 +722,20 @@ def check_overlapping_overloads(self, defn: OverloadedFuncDef) -> None: # This can happen if we've got an overload with a different # decorator or if the implementation is untyped -- we gave up on the types. 
- inner_type = get_proper_type(inner_type) - if inner_type is not None and not isinstance(inner_type, AnyType): - if isinstance(inner_type, CallableType): - impl_type = inner_type - elif isinstance(inner_type, Instance): - inner_call = get_proper_type( - analyze_member_access( - name="__call__", - typ=inner_type, - context=defn.impl, - is_lvalue=False, - is_super=False, - is_operator=True, - msg=self.msg, - original_type=inner_type, - chk=self, - ) - ) - if isinstance(inner_call, CallableType): - impl_type = inner_call - if impl_type is None: - self.msg.not_callable(inner_type, defn.impl) + impl_type = self.extract_callable_type(inner_type, defn.impl) is_descriptor_get = defn.info and defn.name == "__get__" for i, item in enumerate(defn.items): - # TODO overloads involving decorators assert isinstance(item, Decorator) - sig1 = self.function_type(item.func) - assert isinstance(sig1, CallableType) + sig1 = self.extract_callable_type(item.var.type, item) + if sig1 is None: + continue for j, item2 in enumerate(defn.items[i + 1 :]): assert isinstance(item2, Decorator) - sig2 = self.function_type(item2.func) - assert isinstance(sig2, CallableType) + sig2 = self.extract_callable_type(item2.var.type, item2) + if sig2 is None: + continue if not are_argument_counts_overlapping(sig1, sig2): continue @@ -726,8 +756,10 @@ def check_overlapping_overloads(self, defn: OverloadedFuncDef) -> None: # def foo(x: str) -> str: ... # # See Python 2's map function for a concrete example of this kind of overload. + current_class = self.scope.active_class() + type_vars = current_class.defn.type_vars if current_class else [] with state.strict_optional_set(True): - if is_unsafe_overlapping_overload_signatures(sig1, sig2): + if is_unsafe_overlapping_overload_signatures(sig1, sig2, type_vars): self.msg.overloaded_signatures_overlap(i + 1, i + j + 2, item.func) if impl_type is not None: @@ -970,7 +1002,8 @@ def _visit_func_def(self, defn: FuncDef) -> None: # overload, the legality of the override has already # been typechecked, and decorated methods will be # checked when the decorator is. - self.check_method_override(defn) + found_method_base_classes = self.check_method_override(defn) + self.check_explicit_override_decorator(defn, found_method_base_classes) self.check_inplace_operator_method(defn) if defn.original_def: # Override previous definition. @@ -1044,6 +1077,9 @@ def check_func_item( if name == "__exit__": self.check__exit__return_type(defn) + if name == "__post_init__": + if dataclasses_plugin.is_processed_dataclass(defn.info): + dataclasses_plugin.check_post_init(self, defn, defn.info) @contextmanager def enter_attribute_inference_context(self) -> Iterator[None]: @@ -1058,6 +1094,7 @@ def check_func_def( """Type check a function definition.""" # Expand type variables with value restrictions to ordinary types. expanded = self.expand_typevars(defn, typ) + original_typ = typ for item, typ in expanded: old_binder = self.binder self.binder = ConditionalTypeBinder() @@ -1115,6 +1152,12 @@ def check_func_def( message_registry.RETURN_TYPE_CANNOT_BE_CONTRAVARIANT, typ.ret_type ) self.check_unbound_return_typevar(typ) + elif ( + isinstance(original_typ.ret_type, TypeVarType) and original_typ.ret_type.values + ): + # Since type vars with values are expanded, the return type is changed + # to a raw value. This is a hack to get it back. + self.check_unbound_return_typevar(original_typ) # Check that Generator functions have the appropriate return type. 
if defn.is_generator: @@ -1159,11 +1202,10 @@ def check_func_def( isinstance(defn, FuncDef) and ref_type is not None and i == 0 - and not defn.is_static + and (not defn.is_static or defn.name == "__new__") and typ.arg_kinds[0] not in [nodes.ARG_STAR, nodes.ARG_STAR2] ): - isclass = defn.is_class or defn.name in ("__new__", "__init_subclass__") - if isclass: + if defn.is_class or defn.name == "__new__": ref_type = mypy.types.TypeType.make_normalized(ref_type) erased = get_proper_type(erase_to_bound(arg_type)) if not is_subtype(ref_type, erased, ignore_type_params=True): @@ -1190,9 +1232,10 @@ def check_func_def( elif isinstance(arg_type, TypeVarType): # Refuse covariant parameter type variables # TODO: check recursively for inner type variables - if arg_type.variance == COVARIANT and defn.name not in ( - "__init__", - "__new__", + if ( + arg_type.variance == COVARIANT + and defn.name not in ("__init__", "__new__", "__post_init__") + and not is_private(defn.name) # private methods are not inherited ): ctx: Context = arg_type if ctx.line < 0: @@ -1223,13 +1266,17 @@ def check_func_def( new_frame.types[key] = narrowed_type self.binder.declarations[key] = old_binder.declarations[key] with self.scope.push_function(defn): - # We suppress reachability warnings when we use TypeVars with value + # We suppress reachability warnings for empty generator functions + # (return; yield) which have a "yield" that's unreachable by definition + # since it's only there to promote the function into a generator function. + # + # We also suppress reachability warnings when we use TypeVars with value # restrictions: we only want to report a warning if a certain statement is # marked as being suppressed in *all* of the expansions, but we currently # have no good way of doing this. # # TODO: Find a way of working around this limitation - if len(expanded) >= 2: + if _is_empty_generator_function(item) or len(expanded) >= 2: self.binder.suppress_unreachable_warnings() self.accept(item.body) unreachable = self.binder.is_unreachable() @@ -1542,7 +1589,7 @@ def check_reverse_op_method( if opt_meta is not None: forward_inst = opt_meta - def has_readable_member(typ: Union[UnionType, Instance], name: str) -> bool: + def has_readable_member(typ: UnionType | Instance, name: str) -> bool: # TODO: Deal with attributes of TupleType etc. if isinstance(typ, Instance): return typ.type.has_readable_member(name) @@ -1679,7 +1726,9 @@ def is_unsafe_overlapping_op( first = forward_tweaked second = reverse_tweaked - return is_unsafe_overlapping_overload_signatures(first, second) + current_class = self.scope.active_class() + type_vars = current_class.defn.type_vars if current_class else [] + return is_unsafe_overlapping_overload_signatures(first, second, type_vars) def check_inplace_operator_method(self, defn: FuncBase) -> None: """Check an inplace operator method such as __iadd__. 
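# --- Illustration, not part of the patch ------------------------------------
# The check_overlapping_overloads / is_unsafe_overlapping_overload_signatures
# changes above now thread the enclosing class's type variables into the
# overlap check. As a reminder of what that machinery reports, here is a
# minimal, self-contained sketch (names invented for illustration) of an
# unsafely overlapping overload pair: a value of type B matches both variants,
# but the variants promise incompatible return types.
from typing import Union, overload

class A: ...
class B(A): ...

@overload
def describe(x: B) -> int: ...
@overload
def describe(x: A) -> str: ...
def describe(x: A) -> Union[int, str]:
    # mypy reports: Overloaded function signatures 1 and 2 overlap with
    # incompatible return types
    return 0 if isinstance(x, B) else "a"
# -----------------------------------------------------------------------------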
@@ -1809,23 +1858,41 @@ def expand_typevars( else: return [(defn, typ)] - def check_method_override(self, defn: FuncDef | OverloadedFuncDef | Decorator) -> bool | None: + def check_explicit_override_decorator( + self, + defn: FuncDef | OverloadedFuncDef, + found_method_base_classes: list[TypeInfo] | None, + context: Context | None = None, + ) -> None: + if ( + found_method_base_classes + and not defn.is_explicit_override + and defn.name not in ("__init__", "__new__") + ): + self.msg.explicit_override_decorator_missing( + defn.name, found_method_base_classes[0].fullname, context or defn + ) + + def check_method_override( + self, defn: FuncDef | OverloadedFuncDef | Decorator + ) -> list[TypeInfo] | None: """Check if function definition is compatible with base classes. This may defer the method if a signature is not available in at least one base class. Return ``None`` if that happens. - Return ``True`` if an attribute with the method name was found in the base class. + Return a list of base classes which contain an attribute with the method name. """ # Check against definitions in base classes. - found_base_method = False + found_method_base_classes: list[TypeInfo] = [] for base in defn.info.mro[1:]: result = self.check_method_or_accessor_override_for_base(defn, base) if result is None: # Node was deferred, we will have another attempt later. return None - found_base_method |= result - return found_base_method + if result: + found_method_base_classes.append(base) + return found_method_base_classes def check_method_or_accessor_override_for_base( self, defn: FuncDef | OverloadedFuncDef | Decorator, base: TypeInfo @@ -1851,7 +1918,7 @@ def check_method_or_accessor_override_for_base( found_base_method = True # Check the type of override. - if name not in ("__init__", "__new__", "__init_subclass__"): + if name not in ("__init__", "__new__", "__init_subclass__", "__post_init__"): # Check method override # (__init__, __new__, __init_subclass__ are special). if self.check_method_override_for_base_with_name(defn, name, base): @@ -2678,10 +2745,13 @@ def visit_block(self, b: Block) -> None: return for s in b.body: if self.binder.is_unreachable(): - if self.should_report_unreachable_issues() and not self.is_raising_or_empty(s): + if not self.should_report_unreachable_issues(): + break + if not self.is_noop_for_reachability(s): self.msg.unreachable_statement(s) - break - self.accept(s) + break + else: + self.accept(s) def should_report_unreachable_issues(self) -> bool: return ( @@ -2691,11 +2761,11 @@ def should_report_unreachable_issues(self) -> bool: and not self.binder.is_unreachable_warning_suppressed() ) - def is_raising_or_empty(self, s: Statement) -> bool: + def is_noop_for_reachability(self, s: Statement) -> bool: """Returns 'true' if the given statement either throws an error of some kind or is a no-op. - We use this function mostly while handling the '--warn-unreachable' flag. When + We use this function while handling the '--warn-unreachable' flag. When that flag is present, we normally report an error on any unreachable statement. But if that statement is just something like a 'pass' or a just-in-case 'assert False', reporting an error would be annoying. 
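# --- Illustration, not part of the patch ------------------------------------
# check_explicit_override_decorator above reports a method that overrides a
# base-class method without being explicitly marked as an override (PEP 698),
# skipping __init__ and __new__. A minimal sketch of the behaviour being wired
# up; how the check is enabled (e.g. an opt-in error code) is an assumption
# here, since the hunk only shows the checker-side plumbing.
from typing_extensions import override

class Base:
    def process(self) -> None: ...

class Marked(Base):
    @override
    def process(self) -> None: ...  # explicitly marked as an override: accepted

class Unmarked(Base):
    def process(self) -> None: ...  # overrides Base.process without @override;
                                    # reported via explicit_override_decorator_missing
# -----------------------------------------------------------------------------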
@@ -2812,6 +2882,9 @@ def check_assignment( if name == "__match_args__" and inferred is not None: typ = self.expr_checker.accept(rvalue) self.check_match_args(inferred, typ, lvalue) + if name == "__post_init__": + if dataclasses_plugin.is_processed_dataclass(self.scope.active_class()): + self.fail(message_registry.DATACLASS_POST_INIT_MUST_BE_A_FUNCTION, rvalue) # Defer PartialType's super type checking. if ( @@ -3871,11 +3944,12 @@ def is_valid_defaultdict_partial_value_type(self, t: ProperType) -> bool: return True if len(t.args) == 1: arg = get_proper_type(t.args[0]) - # TODO: This is too permissive -- we only allow TypeVarType since - # they leak in cases like defaultdict(list) due to a bug. - # This can result in incorrect types being inferred, but only - # in rare cases. - if isinstance(arg, (TypeVarType, UninhabitedType, NoneType)): + if self.options.new_type_inference: + allowed = isinstance(arg, (UninhabitedType, NoneType)) + else: + # Allow leaked TypeVars for legacy inference logic. + allowed = isinstance(arg, (UninhabitedType, NoneType, TypeVarType)) + if allowed: return True return False @@ -4228,12 +4302,14 @@ def check_return_stmt(self, s: ReturnStmt) -> None: return_type = self.return_types[-1] return_type = get_proper_type(return_type) + is_lambda = isinstance(self.scope.top_function(), LambdaExpr) if isinstance(return_type, UninhabitedType): - self.fail(message_registry.NO_RETURN_EXPECTED, s) - return + # Avoid extra error messages for failed inference in lambdas + if not is_lambda or not return_type.ambiguous: + self.fail(message_registry.NO_RETURN_EXPECTED, s) + return if s.expr: - is_lambda = isinstance(self.scope.top_function(), LambdaExpr) declared_none_return = isinstance(return_type, NoneType) declared_any_return = isinstance(return_type, AnyType) @@ -4270,6 +4346,7 @@ def check_return_stmt(self, s: ReturnStmt) -> None: isinstance(return_type, Instance) and return_type.type.fullname == "builtins.object" ) + and not is_lambda ): self.msg.incorrectly_returning_any(return_type, s) return @@ -4584,11 +4661,11 @@ def analyze_iterable_item_type(self, expr: Expression) -> tuple[Type, Type]: if int_type: return iterator, int_type - if isinstance(iterable, TupleType): - joined: Type = UninhabitedType() - for item in iterable.items: - joined = join_types(joined, item) - return iterator, joined + if ( + isinstance(iterable, TupleType) + and iterable.partial_fallback.type.fullname == "builtins.tuple" + ): + return iterator, tuple_fallback(iterable).args[0] else: # Non-tuple iterable. return iterator, echk.check_method_call_by_name("__next__", iterator, [], [], expr)[0] @@ -4693,17 +4770,20 @@ def visit_decorator(self, e: Decorator) -> None: e.var.type = AnyType(TypeOfAny.special_form) e.var.is_ready = True return + self.visit_decorator_inner(e) + def visit_decorator_inner(self, e: Decorator, allow_empty: bool = False) -> None: if self.recurse_into_functions: with self.tscope.function_scope(e.func): - self.check_func_item(e.func, name=e.func.name) + self.check_func_item(e.func, name=e.func.name, allow_empty=allow_empty) # Process decorators from the inside out to determine decorated signature, which # may be different from the declared signature. 
sig: Type = self.function_type(e.func) for d in reversed(e.decorators): if refers_to_fullname(d, OVERLOAD_NAMES): - self.fail(message_registry.MULTIPLE_OVERLOADS_REQUIRED, e) + if not allow_empty: + self.fail(message_registry.MULTIPLE_OVERLOADS_REQUIRED, e) continue dec = self.expr_checker.accept(d) temp = self.temp_node(sig, context=e) @@ -4730,10 +4810,17 @@ def visit_decorator(self, e: Decorator) -> None: self.msg.fail("Too many arguments for property", e) self.check_incompatible_property_override(e) # For overloaded functions we already checked override for overload as a whole. + if allow_empty: + return if e.func.info and not e.func.is_dynamic() and not e.is_overload: - found_base_method = self.check_method_override(e) - if e.func.is_explicit_override and found_base_method is False: + found_method_base_classes = self.check_method_override(e) + if ( + e.func.is_explicit_override + and not found_method_base_classes + and found_method_base_classes is not None + ): self.msg.no_overridable_method(e.func.name, e.func) + self.check_explicit_override_decorator(e.func, found_method_base_classes) if e.func.info and e.func.name in ("__init__", "__new__"): if e.type and not isinstance(get_proper_type(e.type), (FunctionLike, AnyType)): @@ -4883,7 +4970,7 @@ def visit_match_stmt(self, s: MatchStmt) -> None: self.push_type_map(pattern_map) self.push_type_map(pattern_type.captures) if g is not None: - with self.binder.frame_context(can_skip=True, fall_through=3): + with self.binder.frame_context(can_skip=False, fall_through=3): gt = get_proper_type(self.expr_checker.accept(g)) if isinstance(gt, DeletedType): @@ -4892,6 +4979,21 @@ def visit_match_stmt(self, s: MatchStmt) -> None: guard_map, guard_else_map = self.find_isinstance_check(g) else_map = or_conditional_maps(else_map, guard_else_map) + # If the guard narrowed the subject, copy the narrowed types over + if isinstance(p, AsPattern): + case_target = p.pattern or p.name + if isinstance(case_target, NameExpr): + for type_map in (guard_map, else_map): + if not type_map: + continue + for expr in list(type_map): + if not ( + isinstance(expr, NameExpr) + and expr.fullname == case_target.fullname + ): + continue + type_map[s.subject] = type_map[expr] + self.push_type_map(guard_map) self.accept(b) else: @@ -5604,13 +5706,13 @@ def has_no_custom_eq_checks(t: Type) -> bool: if left_index in narrowable_operand_index_to_hash: # We only try and narrow away 'None' for now - if is_optional(item_type): + if is_overlapping_none(item_type): collection_item_type = get_proper_type( builtin_item_type(iterable_type) ) if ( collection_item_type is not None - and not is_optional(collection_item_type) + and not is_overlapping_none(collection_item_type) and not ( isinstance(collection_item_type, Instance) and collection_item_type.type.fullname == "builtins.object" @@ -6017,7 +6119,7 @@ def refine_away_none_in_comparison( non_optional_types = [] for i in chain_indices: typ = operand_types[i] - if not is_optional(typ): + if not is_overlapping_none(typ): non_optional_types.append(typ) # Make sure we have a mixture of optional and non-optional types. 
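# --- Illustration, not part of the patch ------------------------------------
# The visit_match_stmt change above copies types narrowed by a case guard from
# a capture name back onto the match subject. A small sketch of the intended
# effect; the example names are invented and the exact narrowing shown is an
# interpretation of the hunk rather than taken from the patch itself.
from typing_extensions import reveal_type

def handle(value: int | str) -> None:
    match value:
        case v if isinstance(v, str):
            # The guard narrows `v` to str; with the change the narrowed type
            # is propagated to `value` as well inside this case body.
            reveal_type(value)  # expected: "builtins.str"
        case _:
            pass
# -----------------------------------------------------------------------------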
@@ -6027,7 +6129,7 @@ def refine_away_none_in_comparison( if_map = {} for i in narrowable_operand_indices: expr_type = operand_types[i] - if not is_optional(expr_type): + if not is_overlapping_none(expr_type): continue if any(is_overlapping_erased_types(expr_type, t) for t in non_optional_types): if_map[operands[i]] = remove_optional(expr_type) @@ -6793,6 +6895,9 @@ def has_valid_attribute(self, typ: Type, name: str) -> bool: ) return not watcher.has_new_errors() + def get_expression_type(self, node: Expression, type_context: Type | None = None) -> Type: + return self.expr_checker.accept(node, type_context=type_context) + class CollectArgTypeVarTypes(TypeTraverserVisitor): """Collects the non-nested argument types in a set.""" @@ -6917,6 +7022,22 @@ def is_literal_not_implemented(n: Expression) -> bool: return isinstance(n, NameExpr) and n.fullname == "builtins.NotImplemented" +def _is_empty_generator_function(func: FuncItem) -> bool: + """ + Checks whether a function's body is 'return; yield' (the yield being added only + to promote the function into a generator function). + """ + body = func.body.body + return ( + len(body) == 2 + and isinstance(ret_stmt := body[0], ReturnStmt) + and (ret_stmt.expr is None or is_literal_none(ret_stmt.expr)) + and isinstance(expr_stmt := body[1], ExpressionStmt) + and isinstance(yield_expr := expr_stmt.expr, YieldExpr) + and (yield_expr.expr is None or is_literal_none(yield_expr.expr)) + ) + + def builtin_item_type(tp: Type) -> Type | None: """Get the item type of a builtin container. @@ -7074,6 +7195,8 @@ def flatten_types(t: Type) -> list[Type]: t = get_proper_type(t) if isinstance(t, TupleType): return [b for a in t.items for b in flatten_types(a)] + elif is_named_instance(t, "builtins.tuple"): + return [t.args[0]] else: return [t] @@ -7102,7 +7225,7 @@ def are_argument_counts_overlapping(t: CallableType, s: CallableType) -> bool: def is_unsafe_overlapping_overload_signatures( - signature: CallableType, other: CallableType + signature: CallableType, other: CallableType, class_type_vars: list[TypeVarLikeType] ) -> bool: """Check if two overloaded signatures are unsafely overlapping or partially overlapping. @@ -7121,8 +7244,8 @@ def is_unsafe_overlapping_overload_signatures( # This lets us identify cases where the two signatures use completely # incompatible types -- e.g. see the testOverloadingInferUnionReturnWithMixedTypevars # test case. - signature = detach_callable(signature) - other = detach_callable(other) + signature = detach_callable(signature, class_type_vars) + other = detach_callable(other, class_type_vars) # Note: We repeat this check twice in both directions due to a slight # asymmetry in 'is_callable_compatible'. When checking for partial overlaps, @@ -7134,26 +7257,36 @@ def is_unsafe_overlapping_overload_signatures( # # This discrepancy is unfortunately difficult to get rid of, so we repeat the # checks twice in both directions for now. + # + # Note that we ignore possible overlap between type variables and None. This + # is technically unsafe, but unsafety is tiny and this prevents some common + # use cases like: + # @overload + # def foo(x: None) -> None: .. + # @overload + # def foo(x: T) -> Foo[T]: ... 
return is_callable_compatible( signature, other, - is_compat=is_overlapping_types_no_promote_no_uninhabited, + is_compat=is_overlapping_types_no_promote_no_uninhabited_no_none, is_compat_return=lambda l, r: not is_subtype_no_promote(l, r), ignore_return=False, check_args_covariantly=True, allow_partial_overlap=True, + no_unify_none=True, ) or is_callable_compatible( other, signature, - is_compat=is_overlapping_types_no_promote_no_uninhabited, + is_compat=is_overlapping_types_no_promote_no_uninhabited_no_none, is_compat_return=lambda l, r: not is_subtype_no_promote(r, l), ignore_return=False, check_args_covariantly=False, allow_partial_overlap=True, + no_unify_none=True, ) -def detach_callable(typ: CallableType) -> CallableType: +def detach_callable(typ: CallableType, class_type_vars: list[TypeVarLikeType]) -> CallableType: """Ensures that the callable's type variables are 'detached' and independent of the context. A callable normally keeps track of the type variables it uses within its 'variables' field. @@ -7163,42 +7296,17 @@ def detach_callable(typ: CallableType) -> CallableType: This function will traverse the callable and find all used type vars and add them to the variables field if it isn't already present. - The caller can then unify on all type variables whether or not the callable is originally - from a class or not.""" - type_list = typ.arg_types + [typ.ret_type] - - appear_map: dict[str, list[int]] = {} - for i, inner_type in enumerate(type_list): - typevars_available = get_type_vars(inner_type) - for var in typevars_available: - if var.fullname not in appear_map: - appear_map[var.fullname] = [] - appear_map[var.fullname].append(i) - - used_type_var_names = set() - for var_name, appearances in appear_map.items(): - used_type_var_names.add(var_name) - - all_type_vars = get_type_vars(typ) - new_variables = [] - for var in set(all_type_vars): - if var.fullname not in used_type_var_names: - continue - new_variables.append( - TypeVarType( - name=var.name, - fullname=var.fullname, - id=var.id, - values=var.values, - upper_bound=var.upper_bound, - default=var.default, - variance=var.variance, - ) - ) - out = typ.copy_modified( - variables=new_variables, arg_types=type_list[:-1], ret_type=type_list[-1] + The caller can then unify on all type variables whether the callable is originally from + the class or not.""" + if not class_type_vars: + # Fast path, nothing to update. + return typ + seen_type_vars = set() + for t in typ.arg_types + [typ.ret_type]: + seen_type_vars |= set(get_type_vars(t)) + return typ.copy_modified( + variables=list(typ.variables) + [tv for tv in class_type_vars if tv in seen_type_vars] ) - return out def overload_can_never_match(signature: CallableType, other: CallableType) -> bool: @@ -7309,6 +7417,11 @@ def visit_erased_type(self, t: ErasedType) -> bool: # This can happen inside a lambda. return True + def visit_type_var(self, t: TypeVarType) -> bool: + # This is needed to prevent leaking into partial types during + # multi-step type inference. 
+ return t.id.is_meta_var() + class SetNothingToAny(TypeTranslator): """Replace all ambiguous types with Any (to avoid spurious extra errors).""" @@ -7660,12 +7773,18 @@ def is_subtype_no_promote(left: Type, right: Type) -> bool: return is_subtype(left, right, ignore_promotions=True) -def is_overlapping_types_no_promote_no_uninhabited(left: Type, right: Type) -> bool: +def is_overlapping_types_no_promote_no_uninhabited_no_none(left: Type, right: Type) -> bool: # For the purpose of unsafe overload checks we consider list[] and list[int] # non-overlapping. This is consistent with how we treat list[int] and list[str] as # non-overlapping, despite [] belongs to both. Also this will prevent false positives # for failed type inference during unification. - return is_overlapping_types(left, right, ignore_promotions=True, ignore_uninhabited=True) + return is_overlapping_types( + left, + right, + ignore_promotions=True, + ignore_uninhabited=True, + prohibit_none_typevar_overlap=True, + ) def is_private(node_name: str) -> bool: diff --git a/mypy/checkexpr.py b/mypy/checkexpr.py index cd0ff1100183..218568007b9e 100644 --- a/mypy/checkexpr.py +++ b/mypy/checkexpr.py @@ -4,19 +4,25 @@ import itertools import time +from collections import defaultdict from contextlib import contextmanager -from typing import Callable, ClassVar, Iterator, List, Optional, Sequence, cast -from typing_extensions import Final, TypeAlias as _TypeAlias, overload +from typing import Callable, ClassVar, Final, Iterable, Iterator, List, Optional, Sequence, cast +from typing_extensions import TypeAlias as _TypeAlias, overload import mypy.checker import mypy.errorcodes as codes from mypy import applytype, erasetype, join, message_registry, nodes, operators, types from mypy.argmap import ArgTypeExpander, map_actuals_to_formals, map_formals_to_actuals -from mypy.checkmember import analyze_member_access, type_object_type +from mypy.checkmember import analyze_member_access, freeze_all_type_vars, type_object_type from mypy.checkstrformat import StringFormatterChecker from mypy.erasetype import erase_type, remove_instance_last_known_values, replace_meta_vars from mypy.errors import ErrorWatcher, report_internal_error -from mypy.expandtype import expand_type, expand_type_by_instance, freshen_function_type_vars +from mypy.expandtype import ( + expand_type, + expand_type_by_instance, + freshen_all_functions_type_vars, + freshen_function_type_vars, +) from mypy.infer import ArgumentInferContext, infer_function_type_arguments, infer_type_arguments from mypy.literals import literal from mypy.maptype import map_instance_to_supertype @@ -98,8 +104,15 @@ ) from mypy.semanal_enum import ENUM_BASES from mypy.state import state -from mypy.subtypes import is_equivalent, is_same_type, is_subtype, non_method_protocol_members +from mypy.subtypes import ( + find_member, + is_equivalent, + is_same_type, + is_subtype, + non_method_protocol_members, +) from mypy.traverser import has_await_expression +from mypy.type_visitor import TypeTranslator from mypy.typeanal import ( check_for_explicit_any, has_any_from_unimported_type, @@ -114,6 +127,8 @@ false_only, fixup_partial_type, function_type, + get_all_type_vars, + get_type_vars, is_literal_type_like, make_simplified_union, simple_literal_type, @@ -136,6 +151,7 @@ LiteralValue, NoneType, Overloaded, + Parameters, ParamSpecFlavor, ParamSpecType, PartialType, @@ -146,23 +162,29 @@ TypedDictType, TypeOfAny, TypeType, + TypeVarLikeType, TypeVarTupleType, TypeVarType, UninhabitedType, UnionType, UnpackType, - 
flatten_nested_tuples, + find_unpack_in_list, flatten_nested_unions, get_proper_type, get_proper_types, has_recursive_types, is_named_instance, + remove_dups, split_with_prefix_and_suffix, ) -from mypy.types_utils import is_generic_instance, is_optional, is_self_type_like, remove_optional +from mypy.types_utils import ( + is_generic_instance, + is_overlapping_none, + is_self_type_like, + remove_optional, +) from mypy.typestate import type_state from mypy.typevars import fill_typevars -from mypy.typevartuples import find_unpack_in_list from mypy.util import split_module_names from mypy.visitor import ExpressionVisitor @@ -300,6 +322,7 @@ def __init__( # on whether current expression is a callee, to give better error messages # related to type context. self.is_callee = False + type_state.infer_polymorphic = self.chk.options.new_type_inference def reset(self) -> None: self.resolved_type = {} @@ -329,12 +352,13 @@ def analyze_ref_expr(self, e: RefExpr, lvalue: bool = False) -> Type: elif isinstance(node, FuncDef): # Reference to a global function. result = function_type(node, self.named_type("builtins.function")) - elif isinstance(node, OverloadedFuncDef) and node.type is not None: - # node.type is None when there are multiple definitions of a function - # and it's decorated by something that is not typing.overload - # TODO: use a dummy Overloaded instead of AnyType in this case - # like we do in mypy.types.function_type()? - result = node.type + elif isinstance(node, OverloadedFuncDef): + if node.type is None: + if self.chk.in_checked_function() and node.items: + self.chk.handle_cannot_determine_type(node.name, e) + result = AnyType(TypeOfAny.from_error) + else: + result = node.type elif isinstance(node, TypeInfo): # Reference to a type object. if node.typeddict_type: @@ -513,13 +537,6 @@ def visit_call_expr_inner(self, e: CallExpr, allow_none_return: bool = False) -> callee_type = get_proper_type( self.accept(e.callee, type_context, always_allow_any=True, is_callee=True) ) - if ( - self.chk.options.disallow_untyped_calls - and self.chk.in_checked_function() - and isinstance(callee_type, CallableType) - and callee_type.implicit - ): - self.msg.untyped_function_call(callee_type, e) # Figure out the full name of the callee for plugin lookup. 
object_type = None @@ -545,6 +562,22 @@ def visit_call_expr_inner(self, e: CallExpr, allow_none_return: bool = False) -> ): member = e.callee.name object_type = self.chk.lookup_type(e.callee.expr) + + if ( + self.chk.options.disallow_untyped_calls + and self.chk.in_checked_function() + and isinstance(callee_type, CallableType) + and callee_type.implicit + ): + if fullname is None and member is not None: + assert object_type is not None + fullname = self.method_fullname(object_type, member) + if not fullname or not any( + fullname == p or fullname.startswith(f"{p}.") + for p in self.chk.options.untyped_calls_exclude + ): + self.msg.untyped_function_call(callee_type, e) + ret_type = self.check_call_expr_with_callee_type( callee_type, e, fullname, object_type, member ) @@ -685,74 +718,183 @@ def check_typeddict_call( context: Context, orig_callee: Type | None, ) -> Type: - if args and all([ak == ARG_NAMED for ak in arg_kinds]): - # ex: Point(x=42, y=1337) - assert all(arg_name is not None for arg_name in arg_names) - item_names = cast(List[str], arg_names) - item_args = args - return self.check_typeddict_call_with_kwargs( - callee, dict(zip(item_names, item_args)), context, orig_callee - ) + if args and all([ak in (ARG_NAMED, ARG_STAR2) for ak in arg_kinds]): + # ex: Point(x=42, y=1337, **extras) + # This is a bit ugly, but this is a price for supporting all possible syntax + # variants for TypedDict constructors. + kwargs = zip([StrExpr(n) if n is not None else None for n in arg_names], args) + result = self.validate_typeddict_kwargs(kwargs=kwargs, callee=callee) + if result is not None: + validated_kwargs, always_present_keys = result + return self.check_typeddict_call_with_kwargs( + callee, validated_kwargs, context, orig_callee, always_present_keys + ) + return AnyType(TypeOfAny.from_error) if len(args) == 1 and arg_kinds[0] == ARG_POS: unique_arg = args[0] if isinstance(unique_arg, DictExpr): - # ex: Point({'x': 42, 'y': 1337}) + # ex: Point({'x': 42, 'y': 1337, **extras}) return self.check_typeddict_call_with_dict( - callee, unique_arg, context, orig_callee + callee, unique_arg.items, context, orig_callee ) if isinstance(unique_arg, CallExpr) and isinstance(unique_arg.analyzed, DictExpr): - # ex: Point(dict(x=42, y=1337)) + # ex: Point(dict(x=42, y=1337, **extras)) return self.check_typeddict_call_with_dict( - callee, unique_arg.analyzed, context, orig_callee + callee, unique_arg.analyzed.items, context, orig_callee ) if not args: # ex: EmptyDict() - return self.check_typeddict_call_with_kwargs(callee, {}, context, orig_callee) + return self.check_typeddict_call_with_kwargs(callee, {}, context, orig_callee, set()) self.chk.fail(message_registry.INVALID_TYPEDDICT_ARGS, context) return AnyType(TypeOfAny.from_error) - def validate_typeddict_kwargs(self, kwargs: DictExpr) -> dict[str, Expression] | None: - item_args = [item[1] for item in kwargs.items] - - item_names = [] # List[str] - for item_name_expr, item_arg in kwargs.items: - literal_value = None + def validate_typeddict_kwargs( + self, kwargs: Iterable[tuple[Expression | None, Expression]], callee: TypedDictType + ) -> tuple[dict[str, list[Expression]], set[str]] | None: + # All (actual or mapped from ** unpacks) expressions that can match given key. + result = defaultdict(list) + # Keys that are guaranteed to be present no matter what (e.g. for all items of a union) + always_present_keys = set() + # Indicates latest encountered ** unpack among items. 
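Editor's note: the untyped-call check above is re-added with support for the new untyped_calls_exclude option: the callee's fully qualified name is matched against configured package prefixes. A standalone sketch of that matching rule (the helper name and sample values are hypothetical):

def is_excluded(fullname: str, untyped_calls_exclude: list[str]) -> bool:
    # Same test as in the hunk above: an exact match, or anything nested inside
    # a listed package/module, is exempt from --disallow-untyped-calls errors.
    return any(
        fullname == p or fullname.startswith(f"{p}.") for p in untyped_calls_exclude
    )

assert is_excluded("thirdparty.legacy.helper", ["thirdparty.legacy"])
assert is_excluded("thirdparty.legacy", ["thirdparty.legacy"])
assert not is_excluded("thirdparty.legacyish.helper", ["thirdparty.legacy"])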
+ last_star_found = None + + for item_name_expr, item_arg in kwargs: if item_name_expr: key_type = self.accept(item_name_expr) values = try_getting_str_literals(item_name_expr, key_type) + literal_value = None if values and len(values) == 1: literal_value = values[0] - if literal_value is None: - key_context = item_name_expr or item_arg - self.chk.fail( - message_registry.TYPEDDICT_KEY_MUST_BE_STRING_LITERAL, - key_context, - code=codes.LITERAL_REQ, - ) - return None + if literal_value is None: + key_context = item_name_expr or item_arg + self.chk.fail( + message_registry.TYPEDDICT_KEY_MUST_BE_STRING_LITERAL, + key_context, + code=codes.LITERAL_REQ, + ) + return None + else: + # A directly present key unconditionally shadows all previously found + # values from ** items. + # TODO: for duplicate keys, type-check all values. + result[literal_value] = [item_arg] + always_present_keys.add(literal_value) + else: + last_star_found = item_arg + if not self.validate_star_typeddict_item( + item_arg, callee, result, always_present_keys + ): + return None + if self.chk.options.extra_checks and last_star_found is not None: + absent_keys = [] + for key in callee.items: + if key not in callee.required_keys and key not in result: + absent_keys.append(key) + if absent_keys: + # Having an optional key not explicitly declared by a ** unpacked + # TypedDict is unsafe, it may be an (incompatible) subtype at runtime. + # TODO: catch the cases where a declared key is overridden by a subsequent + # ** item without it (and not again overriden with complete ** item). + self.msg.non_required_keys_absent_with_star(absent_keys, last_star_found) + return result, always_present_keys + + def validate_star_typeddict_item( + self, + item_arg: Expression, + callee: TypedDictType, + result: dict[str, list[Expression]], + always_present_keys: set[str], + ) -> bool: + """Update keys/expressions from a ** expression in TypedDict constructor. + + Note `result` and `always_present_keys` are updated in place. Return true if the + expression `item_arg` may valid in `callee` TypedDict context. + """ + with self.chk.local_type_map(), self.msg.filter_errors(): + inferred = get_proper_type(self.accept(item_arg, type_context=callee)) + possible_tds = [] + if isinstance(inferred, TypedDictType): + possible_tds = [inferred] + elif isinstance(inferred, UnionType): + for item in get_proper_types(inferred.relevant_items()): + if isinstance(item, TypedDictType): + possible_tds.append(item) + elif not self.valid_unpack_fallback_item(item): + self.msg.unsupported_target_for_star_typeddict(item, item_arg) + return False + elif not self.valid_unpack_fallback_item(inferred): + self.msg.unsupported_target_for_star_typeddict(inferred, item_arg) + return False + all_keys: set[str] = set() + for td in possible_tds: + all_keys |= td.items.keys() + for key in all_keys: + arg = TempNode( + UnionType.make_union([td.items[key] for td in possible_tds if key in td.items]) + ) + arg.set_line(item_arg) + if all(key in td.required_keys for td in possible_tds): + always_present_keys.add(key) + # Always present keys override previously found values. This is done + # to support use cases like `Config({**defaults, **overrides})`, where + # some `overrides` types are narrower that types in `defaults`, and + # former are too wide for `Config`. + if result[key]: + first = result[key][0] + if not isinstance(first, TempNode): + # We must always preserve any non-synthetic values, so that + # we will accept them even if they are shadowed. 
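Editor's note: the Config({**defaults, **overrides}) use case mentioned in the comments above, written out as a runnable illustration (the TypedDict names are hypothetical). Keys that are required in every possible unpacked item count as "always present", so the target's required keys are satisfied, and later items shadow earlier ones.

from typing import TypedDict

class Defaults(TypedDict):
    retries: int

class Config(TypedDict):
    retries: int
    timeout: float
    name: str

defaults: Defaults = {"retries": 3}

# Both constructor forms are now expanded key by key.
cfg = Config(**defaults, timeout=1.0, name="svc")
cfg2 = Config({**defaults, "timeout": 2.0, "name": "svc"})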
+ result[key] = [first, arg] + else: + result[key] = [arg] + else: + result[key] = [arg] else: - item_names.append(literal_value) - return dict(zip(item_names, item_args)) + # If this key is not required at least in some item of a union + # it may not shadow previous item, so we need to type check both. + result[key].append(arg) + return True + + def valid_unpack_fallback_item(self, typ: ProperType) -> bool: + if isinstance(typ, AnyType): + return True + if not isinstance(typ, Instance) or not typ.type.has_base("typing.Mapping"): + return False + mapped = map_instance_to_supertype(typ, self.chk.lookup_typeinfo("typing.Mapping")) + return all(isinstance(a, AnyType) for a in get_proper_types(mapped.args)) def match_typeddict_call_with_dict( - self, callee: TypedDictType, kwargs: DictExpr, context: Context + self, + callee: TypedDictType, + kwargs: list[tuple[Expression | None, Expression]], + context: Context, ) -> bool: - validated_kwargs = self.validate_typeddict_kwargs(kwargs=kwargs) - if validated_kwargs is not None: + result = self.validate_typeddict_kwargs(kwargs=kwargs, callee=callee) + if result is not None: + validated_kwargs, _ = result return callee.required_keys <= set(validated_kwargs.keys()) <= set(callee.items.keys()) else: return False def check_typeddict_call_with_dict( - self, callee: TypedDictType, kwargs: DictExpr, context: Context, orig_callee: Type | None + self, + callee: TypedDictType, + kwargs: list[tuple[Expression | None, Expression]], + context: Context, + orig_callee: Type | None, ) -> Type: - validated_kwargs = self.validate_typeddict_kwargs(kwargs=kwargs) - if validated_kwargs is not None: + result = self.validate_typeddict_kwargs(kwargs=kwargs, callee=callee) + if result is not None: + validated_kwargs, always_present_keys = result return self.check_typeddict_call_with_kwargs( - callee, kwargs=validated_kwargs, context=context, orig_callee=orig_callee + callee, + kwargs=validated_kwargs, + context=context, + orig_callee=orig_callee, + always_present_keys=always_present_keys, ) else: return AnyType(TypeOfAny.from_error) @@ -793,20 +935,37 @@ def typeddict_callable_from_context(self, callee: TypedDictType) -> CallableType def check_typeddict_call_with_kwargs( self, callee: TypedDictType, - kwargs: dict[str, Expression], + kwargs: dict[str, list[Expression]], context: Context, orig_callee: Type | None, + always_present_keys: set[str], ) -> Type: actual_keys = kwargs.keys() - if not (callee.required_keys <= actual_keys <= callee.items.keys()): - expected_keys = [ - key - for key in callee.items.keys() - if key in callee.required_keys or key in actual_keys - ] - self.msg.unexpected_typeddict_keys( - callee, expected_keys=expected_keys, actual_keys=list(actual_keys), context=context - ) + if not ( + callee.required_keys <= always_present_keys and actual_keys <= callee.items.keys() + ): + if not (actual_keys <= callee.items.keys()): + self.msg.unexpected_typeddict_keys( + callee, + expected_keys=[ + key + for key in callee.items.keys() + if key in callee.required_keys or key in actual_keys + ], + actual_keys=list(actual_keys), + context=context, + ) + if not (callee.required_keys <= always_present_keys): + self.msg.unexpected_typeddict_keys( + callee, + expected_keys=[ + key for key in callee.items.keys() if key in callee.required_keys + ], + actual_keys=[ + key for key in always_present_keys if key in callee.required_keys + ], + context=context, + ) if callee.required_keys > actual_keys: # found_set is a sub-set of the required_keys # This means we're missing some 
keys and as such, we can't @@ -829,7 +988,10 @@ def check_typeddict_call_with_kwargs( with self.msg.filter_errors(), self.chk.local_type_map(): orig_ret_type, _ = self.check_callable_call( infer_callee, - list(kwargs.values()), + # We use first expression for each key to infer type variables of a generic + # TypedDict. This is a bit arbitrary, but in most cases will work better than + # trying to infer a union or a join. + [args[0] for args in kwargs.values()], [ArgKind.ARG_NAMED] * len(kwargs), context, list(kwargs.keys()), @@ -846,17 +1008,18 @@ def check_typeddict_call_with_kwargs( for item_name, item_expected_type in ret_type.items.items(): if item_name in kwargs: - item_value = kwargs[item_name] - self.chk.check_simple_assignment( - lvalue_type=item_expected_type, - rvalue=item_value, - context=item_value, - msg=ErrorMessage( - message_registry.INCOMPATIBLE_TYPES.value, code=codes.TYPEDDICT_ITEM - ), - lvalue_name=f'TypedDict item "{item_name}"', - rvalue_name="expression", - ) + item_values = kwargs[item_name] + for item_value in item_values: + self.chk.check_simple_assignment( + lvalue_type=item_expected_type, + rvalue=item_value, + context=item_value, + msg=ErrorMessage( + message_registry.INCOMPATIBLE_TYPES.value, code=codes.TYPEDDICT_ITEM + ), + lvalue_name=f'TypedDict item "{item_name}"', + rvalue_name="expression", + ) return orig_ret_type @@ -1174,6 +1337,55 @@ def transform_callee_type( return callee + def is_generic_decorator_overload_call( + self, callee_type: CallableType, args: list[Expression] + ) -> Overloaded | None: + """Check if this looks like an application of a generic function to overload argument.""" + assert callee_type.variables + if len(callee_type.arg_types) != 1 or len(args) != 1: + # TODO: can we handle more general cases? + return None + if not isinstance(get_proper_type(callee_type.arg_types[0]), CallableType): + return None + if not isinstance(get_proper_type(callee_type.ret_type), CallableType): + return None + with self.chk.local_type_map(): + with self.msg.filter_errors(): + arg_type = get_proper_type(self.accept(args[0], type_context=None)) + if isinstance(arg_type, Overloaded): + return arg_type + return None + + def handle_decorator_overload_call( + self, callee_type: CallableType, overloaded: Overloaded, ctx: Context + ) -> tuple[Type, Type] | None: + """Type-check application of a generic callable to an overload. + + We check call on each individual overload item, and then combine results into a new + overload. This function should be only used if callee_type takes and returns a Callable. + """ + result = [] + inferred_args = [] + for item in overloaded.items: + arg = TempNode(typ=item) + with self.msg.filter_errors() as err: + item_result, inferred_arg = self.check_call(callee_type, [arg], [ARG_POS], ctx) + if err.has_new_errors(): + # This overload doesn't match. + continue + p_item_result = get_proper_type(item_result) + if not isinstance(p_item_result, CallableType): + continue + p_inferred_arg = get_proper_type(inferred_arg) + if not isinstance(p_inferred_arg, CallableType): + continue + inferred_args.append(p_inferred_arg) + result.append(p_item_result) + if not result or not inferred_args: + # None of the overload matched (or overload was initially malformed). 
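Editor's note: handle_decorator_overload_call() above special-cases applying a generic Callable-to-Callable function to an overloaded argument. A hedged example of the inline decorator application it targets (decorator and function names are hypothetical):

from __future__ import annotations

from typing import Callable, TypeVar, overload

A = TypeVar("A")
R = TypeVar("R")

def log_calls(func: Callable[[A], R]) -> Callable[[A], R]:
    # One Callable in, one Callable out, with type variables: the shape that
    # is_generic_decorator_overload_call() looks for.
    def wrapper(arg: A) -> R:
        print("calling", func)
        return func(arg)
    return wrapper

@overload
def describe(x: int) -> str: ...
@overload
def describe(x: None) -> None: ...
def describe(x: int | None) -> str | None:
    return None if x is None else str(x)

# The call is checked once per overload item and the per-item results are
# reassembled into a new Overloaded type, so both signatures survive.
decorated = log_calls(describe)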
+ return None + return Overloaded(result), Overloaded(inferred_args) + def check_call_expr_with_callee_type( self, callee_type: Type, @@ -1288,6 +1500,17 @@ def check_call( callee = get_proper_type(callee) if isinstance(callee, CallableType): + if callee.variables: + overloaded = self.is_generic_decorator_overload_call(callee, args) + if overloaded is not None: + # Special casing for inline application of generic callables to overloads. + # Supporting general case would be tricky, but this should cover 95% of cases. + overloaded_result = self.handle_decorator_overload_call( + callee, overloaded, context + ) + if overloaded_result is not None: + return overloaded_result + return self.check_callable_call( callee, args, @@ -1376,7 +1599,7 @@ def check_callable_call( See the docstring of check_call for more information. """ # Always unpack **kwargs before checking a call. - callee = callee.with_unpacked_kwargs() + callee = callee.with_unpacked_kwargs().with_normalized_var_args() if callable_name is None and callee.name: callable_name = callee.name ret_type = get_proper_type(callee.ret_type) @@ -1424,6 +1647,16 @@ def check_callable_call( lambda i: self.accept(args[i]), ) + # This is tricky: return type may contain its own type variables, like in + # def [S] (S) -> def [T] (T) -> tuple[S, T], so we need to update their ids + # to avoid possible id clashes if this call itself appears in a generic + # function body. + ret_type = get_proper_type(callee.ret_type) + if isinstance(ret_type, CallableType) and ret_type.variables: + fresh_ret_type = freshen_all_functions_type_vars(callee.ret_type) + freeze_all_type_vars(fresh_ret_type) + callee = callee.copy_modified(ret_type=fresh_ret_type) + if callee.is_generic(): need_refresh = any( isinstance(v, (ParamSpecType, TypeVarTupleType)) for v in callee.variables @@ -1442,7 +1675,7 @@ def check_callable_call( lambda i: self.accept(args[i]), ) callee = self.infer_function_type_arguments( - callee, args, arg_kinds, formal_to_actual, context + callee, args, arg_kinds, arg_names, formal_to_actual, need_refresh, context ) if need_refresh: formal_to_actual = map_actuals_to_formals( @@ -1668,7 +1901,7 @@ def infer_function_type_arguments_using_context( # valid results. erased_ctx = replace_meta_vars(ctx, ErasedType()) ret_type = callable.ret_type - if is_optional(ret_type) and is_optional(ctx): + if is_overlapping_none(ret_type) and is_overlapping_none(ctx): # If both the context and the return type are optional, unwrap the optional, # since in 99% cases this is what a user expects. In other words, we replace # Optional[T] <: Optional[int] @@ -1709,9 +1942,11 @@ def infer_function_type_arguments_using_context( # def identity(x: T) -> T: return x # # expects_literal(identity(3)) # Should type-check + # TODO: we may want to add similar exception if all arguments are lambdas, since + # in this case external context is almost everything we have. if not is_generic_instance(ctx) and not is_literal_type_like(ctx): return callable.copy_modified() - args = infer_type_arguments(callable.type_var_ids(), ret_type, erased_ctx) + args = infer_type_arguments(callable.variables, ret_type, erased_ctx) # Only substitute non-Uninhabited and non-erased types. 
new_args: list[Type | None] = [] for arg in args: @@ -1730,7 +1965,9 @@ def infer_function_type_arguments( callee_type: CallableType, args: list[Expression], arg_kinds: list[ArgKind], + arg_names: Sequence[str | None] | None, formal_to_actual: list[list[int]], + need_refresh: bool, context: Context, ) -> CallableType: """Infer the type arguments for a generic callee type. @@ -1750,7 +1987,7 @@ def infer_function_type_arguments( ) arg_pass_nums = self.get_arg_infer_passes( - callee_type.arg_types, formal_to_actual, len(args) + callee_type, args, arg_types, formal_to_actual, len(args) ) pass1_args: list[Type | None] = [] @@ -1760,10 +1997,11 @@ def infer_function_type_arguments( else: pass1_args.append(arg) - inferred_args = infer_function_type_arguments( + inferred_args, _ = infer_function_type_arguments( callee_type, pass1_args, arg_kinds, + arg_names, formal_to_actual, context=self.argument_infer_context(), strict=self.chk.in_checked_function(), @@ -1772,7 +2010,14 @@ def infer_function_type_arguments( if 2 in arg_pass_nums: # Second pass of type inference. (callee_type, inferred_args) = self.infer_function_type_arguments_pass2( - callee_type, args, arg_kinds, formal_to_actual, inferred_args, context + callee_type, + args, + arg_kinds, + arg_names, + formal_to_actual, + inferred_args, + need_refresh, + context, ) if ( @@ -1791,6 +2036,61 @@ def infer_function_type_arguments( inferred_args[0] = self.named_type("builtins.str") elif not first_arg or not is_subtype(self.named_type("builtins.str"), first_arg): self.chk.fail(message_registry.KEYWORD_ARGUMENT_REQUIRES_STR_KEY_TYPE, context) + + if self.chk.options.new_type_inference and any( + a is None + or isinstance(get_proper_type(a), UninhabitedType) + or set(get_type_vars(a)) & set(callee_type.variables) + for a in inferred_args + ): + if need_refresh: + # Technically we need to refresh formal_to_actual after *each* inference pass, + # since each pass can expand ParamSpec or TypeVarTuple. Although such situations + # are very rare, not doing this can cause crashes. + formal_to_actual = map_actuals_to_formals( + arg_kinds, + arg_names, + callee_type.arg_kinds, + callee_type.arg_names, + lambda a: self.accept(args[a]), + ) + # If the regular two-phase inference didn't work, try inferring type + # variables while allowing for polymorphic solutions, i.e. for solutions + # potentially involving free variables. + # TODO: support the similar inference for return type context. + poly_inferred_args, free_vars = infer_function_type_arguments( + callee_type, + arg_types, + arg_kinds, + arg_names, + formal_to_actual, + context=self.argument_infer_context(), + strict=self.chk.in_checked_function(), + allow_polymorphic=True, + ) + poly_callee_type = self.apply_generic_arguments( + callee_type, poly_inferred_args, context + ) + # Try applying inferred polymorphic type if possible, e.g. Callable[[T], T] can + # be interpreted as def [T] (T) -> T, but dict[T, T] cannot be expressed. + applied = apply_poly(poly_callee_type, free_vars) + if applied is not None and all( + a is not None and not isinstance(get_proper_type(a), UninhabitedType) + for a in poly_inferred_args + ): + freeze_all_type_vars(applied) + return applied + # If it didn't work, erase free variables as , to avoid confusing errors. 
+ unknown = UninhabitedType() + unknown.ambiguous = True + inferred_args = [ + expand_type( + a, {v.id: unknown for v in list(callee_type.variables) + free_vars} + ) + if a is not None + else None + for a in poly_inferred_args + ] else: # In dynamically typed functions use implicit 'Any' types for # type variables. @@ -1802,8 +2102,10 @@ def infer_function_type_arguments_pass2( callee_type: CallableType, args: list[Expression], arg_kinds: list[ArgKind], + arg_names: Sequence[str | None] | None, formal_to_actual: list[list[int]], old_inferred_args: Sequence[Type | None], + need_refresh: bool, context: Context, ) -> tuple[CallableType, list[Type | None]]: """Perform second pass of generic function type argument inference. @@ -1825,13 +2127,22 @@ def infer_function_type_arguments_pass2( if isinstance(arg, (NoneType, UninhabitedType)) or has_erased_component(arg): inferred_args[i] = None callee_type = self.apply_generic_arguments(callee_type, inferred_args, context) + if need_refresh: + formal_to_actual = map_actuals_to_formals( + arg_kinds, + arg_names, + callee_type.arg_kinds, + callee_type.arg_names, + lambda a: self.accept(args[a]), + ) arg_types = self.infer_arg_types_in_context(callee_type, args, arg_kinds, formal_to_actual) - inferred_args = infer_function_type_arguments( + inferred_args, _ = infer_function_type_arguments( callee_type, arg_types, arg_kinds, + arg_names, formal_to_actual, context=self.argument_infer_context(), ) @@ -1844,7 +2155,12 @@ def argument_infer_context(self) -> ArgumentInferContext: ) def get_arg_infer_passes( - self, arg_types: list[Type], formal_to_actual: list[list[int]], num_actuals: int + self, + callee: CallableType, + args: list[Expression], + arg_types: list[Type], + formal_to_actual: list[list[int]], + num_actuals: int, ) -> list[int]: """Return pass numbers for args for two-pass argument type inference. @@ -1855,8 +2171,32 @@ def get_arg_infer_passes( lambdas more effectively. """ res = [1] * num_actuals - for i, arg in enumerate(arg_types): - if arg.accept(ArgInferSecondPassQuery()): + for i, arg in enumerate(callee.arg_types): + skip_param_spec = False + p_formal = get_proper_type(callee.arg_types[i]) + if isinstance(p_formal, CallableType) and p_formal.param_spec(): + for j in formal_to_actual[i]: + p_actual = get_proper_type(arg_types[j]) + # This is an exception from the usual logic where we put generic Callable + # arguments in the second pass. If we have a non-generic actual, it is + # likely to infer good constraints, for example if we have: + # def run(Callable[P, None], *args: P.args, **kwargs: P.kwargs) -> None: ... + # def test(x: int, y: int) -> int: ... + # run(test, 1, 2) + # we will use `test` for inference, since it will allow to infer also + # argument *names* for P <: [x: int, y: int]. + if isinstance(p_actual, Instance): + call_method = find_member("__call__", p_actual, p_actual, is_operator=True) + if call_method is not None: + p_actual = get_proper_type(call_method) + if ( + isinstance(p_actual, CallableType) + and not p_actual.variables + and not isinstance(args[j], LambdaExpr) + ): + skip_param_spec = True + break + if not skip_param_spec and arg.accept(ArgInferSecondPassQuery()): for j in formal_to_actual[i]: res[j] = 2 return res @@ -2064,11 +2404,15 @@ def check_argument_types( ] actual_kinds = [nodes.ARG_STAR] + [nodes.ARG_POS] * (len(actuals) - 1) - assert isinstance(orig_callee_arg_type, TupleType) - assert orig_callee_arg_type.items - callee_arg_types = orig_callee_arg_type.items + # TODO: can we really assert this? 
What if formal is just plain Unpack[Ts]? + assert isinstance(orig_callee_arg_type, UnpackType) + assert isinstance(orig_callee_arg_type.type, ProperType) and isinstance( + orig_callee_arg_type.type, TupleType + ) + assert orig_callee_arg_type.type.items + callee_arg_types = orig_callee_arg_type.type.items callee_arg_kinds = [nodes.ARG_STAR] + [nodes.ARG_POS] * ( - len(orig_callee_arg_type.items) - 1 + len(orig_callee_arg_type.type.items) - 1 ) expanded_tuple = True @@ -2096,7 +2440,12 @@ def check_argument_types( + unpacked_type.items[inner_unpack_index + 1 :] ) callee_arg_kinds = [ARG_POS] * len(actuals) + elif isinstance(unpacked_type, TypeVarTupleType): + callee_arg_types = [orig_callee_arg_type] + callee_arg_kinds = [ARG_STAR] else: + # TODO: Any and can appear in Unpack (as a result of user error), + # fail gracefully here and elsewhere (and/or normalize them away). assert isinstance(unpacked_type, Instance) assert unpacked_type.type.fullname == "builtins.tuple" callee_arg_types = [unpacked_type.args[0]] * len(actuals) @@ -2166,15 +2515,7 @@ def check_arg( if isinstance(caller_type, DeletedType): self.msg.deleted_as_rvalue(caller_type, context) # Only non-abstract non-protocol class can be given where Type[...] is expected... - elif ( - isinstance(caller_type, CallableType) - and isinstance(callee_type, TypeType) - and caller_type.is_type_obj() - and (caller_type.type_object().is_abstract or caller_type.type_object().is_protocol) - and isinstance(callee_type.item, Instance) - and (callee_type.item.type.is_abstract or callee_type.item.type.is_protocol) - and not self.chk.allow_abstract_call - ): + elif self.has_abstract_type_part(caller_type, callee_type): self.msg.concrete_only_call(callee_type, context) elif not is_subtype(caller_type, callee_type, options=self.chk.options): code = self.msg.incompatible_argument( @@ -2219,6 +2560,11 @@ def check_overload_call( # typevar. See https://github.com/python/mypy/issues/4063 for related discussion. erased_targets: list[CallableType] | None = None unioned_result: tuple[Type, Type] | None = None + + # Determine whether we need to encourage union math. This should be generally safe, + # as union math infers better results in the vast majority of cases, but it is very + # computationally intensive. + none_type_var_overlap = self.possible_none_type_var_overlap(arg_types, plausible_targets) union_interrupted = False # did we try all union combinations? if any(self.real_union(arg) for arg in arg_types): try: @@ -2231,6 +2577,7 @@ def check_overload_call( arg_names, callable_name, object_type, + none_type_var_overlap, context, ) except TooManyUnions: @@ -2263,8 +2610,10 @@ def check_overload_call( # If any of checks succeed, stop early. if inferred_result is not None and unioned_result is not None: # Both unioned and direct checks succeeded, choose the more precise type. - if is_subtype(inferred_result[0], unioned_result[0]) and not isinstance( - get_proper_type(inferred_result[0]), AnyType + if ( + is_subtype(inferred_result[0], unioned_result[0]) + and not isinstance(get_proper_type(inferred_result[0]), AnyType) + and not none_type_var_overlap ): return inferred_result return unioned_result @@ -2314,7 +2663,8 @@ def check_overload_call( callable_name=callable_name, object_type=object_type, ) - if union_interrupted: + # Do not show the extra error if the union math was forced. 
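Editor's note: check_arg() above now delegates the "abstract class where Type[...] is expected" test to has_abstract_type_part(), which also recurses into tuples. A hedged illustration of the basic rule, with hypothetical class names:

from abc import ABC, abstractmethod
from typing import Type

class Job(ABC):
    @abstractmethod
    def run(self) -> None: ...

class PrintJob(Job):
    def run(self) -> None:
        print("running")

def schedule(job_cls: Type[Job]) -> Job:
    return job_cls()

schedule(PrintJob)  # fine: a concrete subclass
schedule(Job)       # flagged (and a TypeError at runtime): Job is abstract
# The tuple handling extends the same check to arguments like (Type[Job], int).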
+ if union_interrupted and not none_type_var_overlap: self.chk.fail(message_registry.TOO_MANY_UNION_COMBINATIONS, context) return result @@ -2469,6 +2819,44 @@ def overload_erased_call_targets( matches.append(typ) return matches + def possible_none_type_var_overlap( + self, arg_types: list[Type], plausible_targets: list[CallableType] + ) -> bool: + """Heuristic to determine whether we need to try forcing union math. + + This is needed to avoid greedy type variable match in situations like this: + @overload + def foo(x: None) -> None: ... + @overload + def foo(x: T) -> list[T]: ... + + x: int | None + foo(x) + we want this call to infer list[int] | None, not list[int | None]. + """ + if not plausible_targets or not arg_types: + return False + has_optional_arg = False + for arg_type in get_proper_types(arg_types): + if not isinstance(arg_type, UnionType): + continue + for item in get_proper_types(arg_type.items): + if isinstance(item, NoneType): + has_optional_arg = True + break + if not has_optional_arg: + return False + + min_prefix = min(len(c.arg_types) for c in plausible_targets) + for i in range(min_prefix): + if any( + isinstance(get_proper_type(c.arg_types[i]), NoneType) for c in plausible_targets + ) and any( + isinstance(get_proper_type(c.arg_types[i]), TypeVarType) for c in plausible_targets + ): + return True + return False + def union_overload_result( self, plausible_targets: list[CallableType], @@ -2478,6 +2866,7 @@ def union_overload_result( arg_names: Sequence[str | None] | None, callable_name: str | None, object_type: Type | None, + none_type_var_overlap: bool, context: Context, level: int = 0, ) -> list[tuple[Type, Type]] | None: @@ -2517,20 +2906,23 @@ def union_overload_result( # Step 3: Try a direct match before splitting to avoid unnecessary union splits # and save performance. - with self.type_overrides_set(args, arg_types): - direct = self.infer_overload_return_type( - plausible_targets, - args, - arg_types, - arg_kinds, - arg_names, - callable_name, - object_type, - context, - ) - if direct is not None and not isinstance(get_proper_type(direct[0]), (UnionType, AnyType)): - # We only return non-unions soon, to avoid greedy match. - return [direct] + if not none_type_var_overlap: + with self.type_overrides_set(args, arg_types): + direct = self.infer_overload_return_type( + plausible_targets, + args, + arg_types, + arg_kinds, + arg_names, + callable_name, + object_type, + context, + ) + if direct is not None and not isinstance( + get_proper_type(direct[0]), (UnionType, AnyType) + ): + # We only return non-unions soon, to avoid greedy match. + return [direct] # Step 4: Split the first remaining union type in arguments into items and # try to match each item individually (recursive). @@ -2548,6 +2940,7 @@ def union_overload_result( arg_names, callable_name, object_type, + none_type_var_overlap, context, level + 1, ) @@ -2894,7 +3287,7 @@ def visit_op_expr(self, e: OpExpr) -> Type: # Expressions of form [...] * e get special type inference. 
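Editor's note: possible_none_type_var_overlap() above sketches its motivating case in its docstring; the same case as a complete snippet, so the intended inference result is visible in context:

from __future__ import annotations

from typing import TypeVar, overload

T = TypeVar("T")

@overload
def foo(x: None) -> None: ...
@overload
def foo(x: T) -> list[T]: ...
def foo(x: T | None) -> list[T] | None:
    return None if x is None else [x]

x: int | None = 1
# Forcing union math makes this call infer `list[int] | None` rather than the
# greedy `list[int | None]`.
y = foo(x)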
return self.check_list_multiply(e) if e.op == "%": - if isinstance(e.left, BytesExpr) and self.chk.options.python_version >= (3, 5): + if isinstance(e.left, BytesExpr): return self.strfrm_checker.check_str_interpolation(e.left, e.right) if isinstance(e.left, StrExpr): return self.strfrm_checker.check_str_interpolation(e.left, e.right) @@ -3914,6 +4307,9 @@ def visit_assert_type_expr(self, expr: AssertTypeExpr) -> Type: allow_none_return=True, always_allow_any=True, ) + if self.chk.current_node_deferred: + return source_type + target_type = expr.type proper_source_type = get_proper_type(source_type) if ( @@ -4091,7 +4487,6 @@ class C(Generic[T, Unpack[Ts]]): ... prefix = next(i for (i, v) in enumerate(vars) if isinstance(v, TypeVarTupleType)) suffix = len(vars) - prefix - 1 - args = flatten_nested_tuples(args) if len(args) < len(vars) - 1: self.msg.incompatible_type_application(len(vars), len(args), ctx) return [AnyType(TypeOfAny.from_error)] * len(vars) @@ -4327,7 +4722,7 @@ def check_typeddict_literal_in_context( self, e: DictExpr, typeddict_context: TypedDictType ) -> Type: orig_ret_type = self.check_typeddict_call_with_dict( - callee=typeddict_context, kwargs=e, context=e, orig_callee=None + callee=typeddict_context, kwargs=e.items, context=e, orig_callee=None ) ret_type = get_proper_type(orig_ret_type) if isinstance(ret_type, TypedDictType): @@ -4427,7 +4822,9 @@ def find_typeddict_context( for item in context.items: item_contexts = self.find_typeddict_context(item, dict_expr) for item_context in item_contexts: - if self.match_typeddict_call_with_dict(item_context, dict_expr, dict_expr): + if self.match_typeddict_call_with_dict( + item_context, dict_expr.items, dict_expr + ): items.append(item_context) return items # No TypedDict type in context. @@ -4491,8 +4888,22 @@ def infer_lambda_type_using_context( # they must be considered as indeterminate. We use ErasedType since it # does not affect type inference results (it is for purposes like this # only). - callable_ctx = get_proper_type(replace_meta_vars(ctx, ErasedType())) - assert isinstance(callable_ctx, CallableType) + if self.chk.options.new_type_inference: + # With new type inference we can preserve argument types even if they + # are generic, since new inference algorithm can handle constraints + # like S <: T (we still erase return type since it's ultimately unknown). + extra_vars = [] + for arg in ctx.arg_types: + meta_vars = [tv for tv in get_all_type_vars(arg) if tv.id.is_meta_var()] + extra_vars.extend([tv for tv in meta_vars if tv not in extra_vars]) + callable_ctx = ctx.copy_modified( + ret_type=replace_meta_vars(ctx.ret_type, ErasedType()), + variables=list(ctx.variables) + extra_vars, + ) + else: + erased_ctx = replace_meta_vars(ctx, ErasedType()) + assert isinstance(erased_ctx, ProperType) and isinstance(erased_ctx, CallableType) + callable_ctx = erased_ctx # The callable_ctx may have a fallback of builtins.type if the context # is a constructor -- but this fallback doesn't make sense for lambdas. @@ -4524,7 +4935,9 @@ def infer_lambda_type_using_context( self.chk.fail(message_registry.CANNOT_INFER_LAMBDA_TYPE, e) return None, None - return callable_ctx, callable_ctx + # Type of lambda must have correct argument names, to prevent false + # negatives when lambdas appear in `ParamSpec` context. 
+ return callable_ctx.copy_modified(arg_names=e.arg_names), callable_ctx def visit_super_expr(self, e: SuperExpr) -> Type: """Type check a super expression (non-lvalue).""" @@ -5197,6 +5610,15 @@ def visit_temp_node(self, e: TempNode) -> Type: return e.type def visit_type_var_expr(self, e: TypeVarExpr) -> Type: + p_default = get_proper_type(e.default) + if not ( + isinstance(p_default, AnyType) + and p_default.type_of_any == TypeOfAny.from_omitted_generics + ): + if not is_subtype(p_default, e.upper_bound): + self.chk.fail("TypeVar default must be a subtype of the bound type", e) + if e.values and not any(p_default == value for value in e.values): + self.chk.fail("TypeVar default must be one of the constraint types", e) return AnyType(TypeOfAny.special_form) def visit_paramspec_expr(self, e: ParamSpecExpr) -> Type: @@ -5287,6 +5709,26 @@ def narrow_type_from_binder( return narrow_declared_type(known_type, restriction) return known_type + def has_abstract_type_part(self, caller_type: ProperType, callee_type: ProperType) -> bool: + # TODO: support other possible types here + if isinstance(caller_type, TupleType) and isinstance(callee_type, TupleType): + return any( + self.has_abstract_type(get_proper_type(caller), get_proper_type(callee)) + for caller, callee in zip(caller_type.items, callee_type.items) + ) + return self.has_abstract_type(caller_type, callee_type) + + def has_abstract_type(self, caller_type: ProperType, callee_type: ProperType) -> bool: + return ( + isinstance(caller_type, CallableType) + and isinstance(callee_type, TypeType) + and caller_type.is_type_obj() + and (caller_type.type_object().is_abstract or caller_type.type_object().is_protocol) + and isinstance(callee_type.item, Instance) + and (callee_type.item.type.is_abstract or callee_type.item.type.is_protocol) + and not self.chk.allow_abstract_call + ) + def has_any_type(t: Type, ignore_in_type_obj: bool = False) -> bool: """Whether t contains an Any type""" @@ -5384,6 +5826,123 @@ def replace_callable_return_type(c: CallableType, new_ret_type: Type) -> Callabl return c.copy_modified(ret_type=new_ret_type) +def apply_poly(tp: CallableType, poly_tvars: Sequence[TypeVarLikeType]) -> CallableType | None: + """Make free type variables generic in the type if possible. + + This will translate the type `tp` while trying to create valid bindings for + type variables `poly_tvars` while traversing the type. This follows the same rules + as we do during semantic analysis phase, examples: + * Callable[Callable[[T], T], T] -> def [T] (def (T) -> T) -> T + * Callable[[], Callable[[T], T]] -> def () -> def [T] (T -> T) + * List[T] -> None (not possible) + """ + try: + return tp.copy_modified( + arg_types=[t.accept(PolyTranslator(poly_tvars)) for t in tp.arg_types], + ret_type=tp.ret_type.accept(PolyTranslator(poly_tvars)), + variables=[], + ) + except PolyTranslationError: + return None + + +class PolyTranslationError(Exception): + pass + + +class PolyTranslator(TypeTranslator): + """Make free type variables generic in the type if possible. + + See docstring for apply_poly() for details. + """ + + def __init__(self, poly_tvars: Sequence[TypeVarLikeType]) -> None: + self.poly_tvars = set(poly_tvars) + # This is a simplified version of TypeVarScope used during semantic analysis. 
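Editor's note: visit_type_var_expr() above now validates PEP 696 defaults against the bound and the constraint values. A short illustration of the two new messages, assuming typing_extensions.TypeVar (which accepts the default= argument):

from typing_extensions import TypeVar

T_ok = TypeVar("T_ok", bound=int, default=bool)  # accepted: bool is a subtype of the bound
T_bad_bound = TypeVar("T_bad_bound", bound=int, default=str)
# error: TypeVar default must be a subtype of the bound type
T_bad_values = TypeVar("T_bad_values", int, str, default=bytes)
# error: TypeVar default must be one of the constraint types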
+ self.bound_tvars: set[TypeVarLikeType] = set() + self.seen_aliases: set[TypeInfo] = set() + + def collect_vars(self, t: CallableType | Parameters) -> list[TypeVarLikeType]: + found_vars = [] + for arg in t.arg_types: + for tv in get_all_type_vars(arg): + if isinstance(tv, ParamSpecType): + normalized: TypeVarLikeType = tv.copy_modified( + flavor=ParamSpecFlavor.BARE, prefix=Parameters([], [], []) + ) + else: + normalized = tv + if normalized in self.poly_tvars and normalized not in self.bound_tvars: + found_vars.append(normalized) + return remove_dups(found_vars) + + def visit_callable_type(self, t: CallableType) -> Type: + found_vars = self.collect_vars(t) + self.bound_tvars |= set(found_vars) + result = super().visit_callable_type(t) + self.bound_tvars -= set(found_vars) + + assert isinstance(result, ProperType) and isinstance(result, CallableType) + result.variables = list(result.variables) + found_vars + return result + + def visit_type_var(self, t: TypeVarType) -> Type: + if t in self.poly_tvars and t not in self.bound_tvars: + raise PolyTranslationError() + return super().visit_type_var(t) + + def visit_param_spec(self, t: ParamSpecType) -> Type: + if t in self.poly_tvars and t not in self.bound_tvars: + raise PolyTranslationError() + return super().visit_param_spec(t) + + def visit_type_var_tuple(self, t: TypeVarTupleType) -> Type: + if t in self.poly_tvars and t not in self.bound_tvars: + raise PolyTranslationError() + return super().visit_type_var_tuple(t) + + def visit_type_alias_type(self, t: TypeAliasType) -> Type: + if not t.args: + return t.copy_modified() + if not t.is_recursive: + return get_proper_type(t).accept(self) + # We can't handle polymorphic application for recursive generic aliases + # without risking an infinite recursion, just give up for now. + raise PolyTranslationError() + + def visit_instance(self, t: Instance) -> Type: + if t.type.has_param_spec_type: + # We need this special-casing to preserve the possibility to store a + # generic function in an instance type. Things like + # forall T . Foo[[x: T], T] + # are not really expressible in current type system, but this looks like + # a useful feature, so let's keep it. + param_spec_index = next( + i for (i, tv) in enumerate(t.type.defn.type_vars) if isinstance(tv, ParamSpecType) + ) + p = get_proper_type(t.args[param_spec_index]) + if isinstance(p, Parameters): + found_vars = self.collect_vars(p) + self.bound_tvars |= set(found_vars) + new_args = [a.accept(self) for a in t.args] + self.bound_tvars -= set(found_vars) + + repl = new_args[param_spec_index] + assert isinstance(repl, ProperType) and isinstance(repl, Parameters) + repl.variables = list(repl.variables) + list(found_vars) + return t.copy_modified(args=new_args) + # There is the same problem with callback protocols as with aliases + # (callback protocols are essentially more flexible aliases to callables). + if t.args and t.type.is_protocol and t.type.protocol_members == ["__call__"]: + if t.type in self.seen_aliases: + raise PolyTranslationError() + self.seen_aliases.add(t.type) + call = find_member("__call__", t, t, is_operator=True) + assert call is not None + return call.accept(self) + return super().visit_instance(t) + + class ArgInferSecondPassQuery(types.BoolTypeQuery): """Query whether an argument type should be inferred in the second pass. 
@@ -5396,6 +5955,7 @@ def __init__(self) -> None: super().__init__(types.ANY_STRATEGY) def visit_callable_type(self, t: CallableType) -> bool: + # TODO: we need to check only for type variables of original callable. return self.query_types(t.arg_types) or t.accept(HasTypeVarQuery()) @@ -5408,6 +5968,12 @@ def __init__(self) -> None: def visit_type_var(self, t: TypeVarType) -> bool: return True + def visit_param_spec(self, t: ParamSpecType) -> bool: + return True + + def visit_type_var_tuple(self, t: TypeVarTupleType) -> bool: + return True + def has_erased_component(t: Type | None) -> bool: return t is not None and t.accept(HasErasedComponentsQuery()) diff --git a/mypy/checkmember.py b/mypy/checkmember.py index c2c6b3555805..1bdc00a6eb59 100644 --- a/mypy/checkmember.py +++ b/mypy/checkmember.py @@ -317,13 +317,19 @@ def analyze_instance_member_access( return analyze_var(name, first_item.var, typ, info, mx) if mx.is_lvalue: mx.msg.cant_assign_to_method(mx.context) - signature = function_type(method, mx.named_type("builtins.function")) - signature = freshen_all_functions_type_vars(signature) - if name == "__new__" or method.is_static: - # __new__ is special and behaves like a static method -- don't strip - # the first argument. - pass + if not isinstance(method, OverloadedFuncDef): + signature = function_type(method, mx.named_type("builtins.function")) else: + if method.type is None: + # Overloads may be not ready if they are decorated. Handle this in same + # manner as we would handle a regular decorated function: defer if possible. + if not mx.no_deferral and method.items: + mx.not_ready_callback(method.name, mx.context) + return AnyType(TypeOfAny.special_form) + assert isinstance(method.type, Overloaded) + signature = method.type + signature = freshen_all_functions_type_vars(signature) + if not method.is_static: if name != "__call__": # TODO: use proper treatment of special methods on unions instead # of this hack here and below (i.e. mx.self_type). @@ -388,7 +394,7 @@ def analyze_type_callable_member_access(name: str, typ: FunctionLike, mx: Member # See https://github.com/python/mypy/pull/1787 for more info. # TODO: do not rely on same type variables being present in all constructor overloads. result = analyze_class_attribute_access( - ret_type, name, mx, original_vars=typ.items[0].variables + ret_type, name, mx, original_vars=typ.items[0].variables, mcs_fallback=typ.fallback ) if result: return result @@ -434,17 +440,21 @@ def analyze_type_type_member_access( if isinstance(typ.item.item, Instance): item = typ.item.item.type.metaclass_type ignore_messages = False + + if item is not None: + fallback = item.type.metaclass_type or fallback + if item and not mx.is_operator: # See comment above for why operators are skipped - result = analyze_class_attribute_access(item, name, mx, override_info) + result = analyze_class_attribute_access( + item, name, mx, mcs_fallback=fallback, override_info=override_info + ) if result: if not (isinstance(get_proper_type(result), AnyType) and item.type.fallback_to_any): return result else: # We don't want errors on metaclass lookup for classes with Any fallback ignore_messages = True - if item is not None: - fallback = item.type.metaclass_type or fallback with mx.msg.filter_errors(filter_errors=ignore_messages): return _analyze_member_access(name, fallback, mx, override_info) @@ -731,12 +741,12 @@ def analyze_var( """Analyze access to an attribute via a Var node. This is conceptually part of analyze_member_access and the arguments are similar. 
- - itype is the class object in which var is defined + itype is the instance type in which attribute should be looked up original_type is the type of E in the expression E.var if implicit is True, the original Var was created as an assignment to self """ # Found a member variable. + original_itype = itype itype = map_instance_to_supertype(itype, var.info) typ = var.type if typ: @@ -752,6 +762,16 @@ def analyze_var( get_proper_type(mx.original_type) ): t = expand_self_type(var, t, mx.original_type) + elif ( + mx.is_self + and original_itype.type != var.info + # If an attribute with Self-type was defined in a supertype, we need to + # rebind the Self type variable to Self type variable of current class... + and original_itype.type.self_type is not None + # ...unless `self` has an explicit non-trivial annotation. + and original_itype == mx.chk.scope.active_self_type() + ): + t = expand_self_type(var, t, original_itype.type.self_type) t = get_proper_type(expand_type_by_instance(t, itype)) freeze_all_type_vars(t) result: Type = t @@ -893,6 +913,8 @@ def analyze_class_attribute_access( itype: Instance, name: str, mx: MemberContext, + *, + mcs_fallback: Instance, override_info: TypeInfo | None = None, original_vars: Sequence[TypeVarLikeType] | None = None, ) -> Type | None: @@ -919,6 +941,22 @@ def analyze_class_attribute_access( return apply_class_attr_hook(mx, hook, AnyType(TypeOfAny.special_form)) return None + if ( + isinstance(node.node, Var) + and not node.node.is_classvar + and not hook + and mcs_fallback.type.get(name) + ): + # If the same attribute is declared on the metaclass and the class but with different types, + # and the attribute on the class is not a ClassVar, + # the type of the attribute on the metaclass should take priority + # over the type of the attribute on the class, + # when the attribute is being accessed from the class object itself. + # + # Return `None` here to signify that the name should be looked up + # on the class object itself rather than the instance. + return None + is_decorated = isinstance(node.node, Decorator) is_method = is_decorated or isinstance(node.node, FuncBase) if mx.is_lvalue: @@ -1160,12 +1198,12 @@ class B(A[str]): pass # (i.e. appear in the return type of the class object on which the method was accessed). 
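Editor's note: the comment added in analyze_class_attribute_access() above says a metaclass declaration now takes priority over a non-ClassVar declaration on the class when the attribute is accessed on the class object itself. A minimal illustration of that lookup (hypothetical names):

class Meta(type):
    flag: str = "from metaclass"

class C(metaclass=Meta):
    flag: int = 0

# Accessed on the class object, the declared type is now taken from the
# metaclass (str), because `flag` on C is not a ClassVar; instance access,
# C().flag, still resolves to the class-level int declaration.
x = C.flag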
if isinstance(t, CallableType): tvars = original_vars if original_vars is not None else [] + t = freshen_all_functions_type_vars(t) if is_classmethod: - t = freshen_all_functions_type_vars(t) t = bind_self(t, original_type, is_classmethod=True) assert isuper is not None t = expand_type_by_instance(t, isuper) - freeze_all_type_vars(t) + freeze_all_type_vars(t) return t.copy_modified(variables=list(tvars) + list(t.variables)) elif isinstance(t, Overloaded): return Overloaded( diff --git a/mypy/checkpattern.py b/mypy/checkpattern.py index e132a23ff55f..e432675b0b5a 100644 --- a/mypy/checkpattern.py +++ b/mypy/checkpattern.py @@ -3,8 +3,7 @@ from __future__ import annotations from collections import defaultdict -from typing import NamedTuple -from typing_extensions import Final +from typing import Final, NamedTuple import mypy.checker from mypy import message_registry @@ -468,7 +467,7 @@ def visit_class_pattern(self, o: ClassPattern) -> PatternType: name = type_info.type.str_with_options(self.options) else: name = type_info.name - self.msg.fail(message_registry.CLASS_PATTERN_TYPE_REQUIRED.format(name), o.class_ref) + self.msg.fail(message_registry.CLASS_PATTERN_TYPE_REQUIRED.format(name), o) return self.early_non_match() new_type, rest_type = self.chk.conditional_types_with_intersection( diff --git a/mypy/checkstrformat.py b/mypy/checkstrformat.py index 974985d8b4fc..eeb9e7633756 100644 --- a/mypy/checkstrformat.py +++ b/mypy/checkstrformat.py @@ -13,8 +13,8 @@ from __future__ import annotations import re -from typing import TYPE_CHECKING, Callable, Dict, Match, Pattern, Tuple, Union, cast -from typing_extensions import Final, TypeAlias as _TypeAlias +from typing import TYPE_CHECKING, Callable, Dict, Final, Match, Pattern, Tuple, Union, cast +from typing_extensions import TypeAlias as _TypeAlias import mypy.errorcodes as codes from mypy.errors import Errors @@ -682,14 +682,6 @@ def check_str_interpolation(self, expr: FormatStringExpr, replacements: Expressi self.exprchk.accept(expr) specifiers = parse_conversion_specifiers(expr.value) has_mapping_keys = self.analyze_conversion_specifiers(specifiers, expr) - if isinstance(expr, BytesExpr) and self.chk.options.python_version < (3, 5): - self.msg.fail( - "Bytes formatting is only supported in Python 3.5 and later", - replacements, - code=codes.STRING_FORMATTING, - ) - return AnyType(TypeOfAny.from_error) - if has_mapping_keys is None: pass # Error was reported elif has_mapping_keys: @@ -1023,13 +1015,6 @@ def conversion_type( NUMERIC_TYPES = NUMERIC_TYPES_NEW if format_call else NUMERIC_TYPES_OLD INT_TYPES = REQUIRE_INT_NEW if format_call else REQUIRE_INT_OLD if p == "b" and not format_call: - if self.chk.options.python_version < (3, 5): - self.msg.fail( - 'Format character "b" is only supported in Python 3.5 and later', - context, - code=codes.STRING_FORMATTING, - ) - return None if not isinstance(expr, BytesExpr): self.msg.fail( 'Format character "b" is only supported on bytes patterns', diff --git a/mypy/config_parser.py b/mypy/config_parser.py index 05af2ba6e21e..a84f3594a0d2 100644 --- a/mypy/config_parser.py +++ b/mypy/config_parser.py @@ -19,6 +19,7 @@ Any, Callable, Dict, + Final, Iterable, List, Mapping, @@ -28,7 +29,7 @@ Tuple, Union, ) -from typing_extensions import Final, TypeAlias as _TypeAlias +from typing_extensions import TypeAlias as _TypeAlias from mypy import defaults from mypy.options import PER_MODULE_OPTIONS, Options @@ -80,6 +81,20 @@ def validate_codes(codes: list[str]) -> list[str]: return codes +def 
validate_package_allow_list(allow_list: list[str]) -> list[str]: + for p in allow_list: + msg = f"Invalid allow list entry: {p}" + if "*" in p: + raise argparse.ArgumentTypeError( + f"{msg} (entries are already prefixes so must not contain *)" + ) + if "\\" in p or "/" in p: + raise argparse.ArgumentTypeError( + f"{msg} (entries must be packages like foo.bar not directories or files)" + ) + return allow_list + + def expand_path(path: str) -> str: """Expand the user home directory and any environment variables contained within the provided path. @@ -163,6 +178,9 @@ def split_commas(value: str) -> list[str]: "plugins": lambda s: [p.strip() for p in split_commas(s)], "always_true": lambda s: [p.strip() for p in split_commas(s)], "always_false": lambda s: [p.strip() for p in split_commas(s)], + "untyped_calls_exclude": lambda s: validate_package_allow_list( + [p.strip() for p in split_commas(s)] + ), "enable_incomplete_feature": lambda s: [p.strip() for p in split_commas(s)], "disable_error_code": lambda s: validate_codes([p.strip() for p in split_commas(s)]), "enable_error_code": lambda s: validate_codes([p.strip() for p in split_commas(s)]), @@ -186,6 +204,7 @@ def split_commas(value: str) -> list[str]: "plugins": try_split, "always_true": try_split, "always_false": try_split, + "untyped_calls_exclude": lambda s: validate_package_allow_list(try_split(s)), "enable_incomplete_feature": try_split, "disable_error_code": lambda s: validate_codes(try_split(s)), "enable_error_code": lambda s: validate_codes(try_split(s)), diff --git a/mypy/constant_fold.py b/mypy/constant_fold.py index a1011397eba8..4582b2a7396d 100644 --- a/mypy/constant_fold.py +++ b/mypy/constant_fold.py @@ -5,14 +5,23 @@ from __future__ import annotations -from typing import Union -from typing_extensions import Final - -from mypy.nodes import Expression, FloatExpr, IntExpr, NameExpr, OpExpr, StrExpr, UnaryExpr, Var +from typing import Final, Union + +from mypy.nodes import ( + ComplexExpr, + Expression, + FloatExpr, + IntExpr, + NameExpr, + OpExpr, + StrExpr, + UnaryExpr, + Var, +) # All possible result types of constant folding -ConstantValue = Union[int, bool, float, str] -CONST_TYPES: Final = (int, bool, float, str) +ConstantValue = Union[int, bool, float, complex, str] +CONST_TYPES: Final = (int, bool, float, complex, str) def constant_fold_expr(expr: Expression, cur_mod_id: str) -> ConstantValue | None: @@ -39,6 +48,8 @@ def constant_fold_expr(expr: Expression, cur_mod_id: str) -> ConstantValue | Non return expr.value if isinstance(expr, FloatExpr): return expr.value + if isinstance(expr, ComplexExpr): + return expr.value elif isinstance(expr, NameExpr): if expr.name == "True": return True @@ -56,26 +67,60 @@ def constant_fold_expr(expr: Expression, cur_mod_id: str) -> ConstantValue | Non elif isinstance(expr, OpExpr): left = constant_fold_expr(expr.left, cur_mod_id) right = constant_fold_expr(expr.right, cur_mod_id) - if isinstance(left, int) and isinstance(right, int): - return constant_fold_binary_int_op(expr.op, left, right) - elif isinstance(left, str) and isinstance(right, str): - return constant_fold_binary_str_op(expr.op, left, right) + if left is not None and right is not None: + return constant_fold_binary_op(expr.op, left, right) elif isinstance(expr, UnaryExpr): value = constant_fold_expr(expr.expr, cur_mod_id) - if isinstance(value, int): - return constant_fold_unary_int_op(expr.op, value) - if isinstance(value, float): - return constant_fold_unary_float_op(expr.op, value) + if value is not None: + return 
constant_fold_unary_op(expr.op, value) return None -def constant_fold_binary_int_op(op: str, left: int, right: int) -> int | None: +def constant_fold_binary_op( + op: str, left: ConstantValue, right: ConstantValue +) -> ConstantValue | None: + if isinstance(left, int) and isinstance(right, int): + return constant_fold_binary_int_op(op, left, right) + + # Float and mixed int/float arithmetic. + if isinstance(left, float) and isinstance(right, float): + return constant_fold_binary_float_op(op, left, right) + elif isinstance(left, float) and isinstance(right, int): + return constant_fold_binary_float_op(op, left, right) + elif isinstance(left, int) and isinstance(right, float): + return constant_fold_binary_float_op(op, left, right) + + # String concatenation and multiplication. + if op == "+" and isinstance(left, str) and isinstance(right, str): + return left + right + elif op == "*" and isinstance(left, str) and isinstance(right, int): + return left * right + elif op == "*" and isinstance(left, int) and isinstance(right, str): + return left * right + + # Complex construction. + if op == "+" and isinstance(left, (int, float)) and isinstance(right, complex): + return left + right + elif op == "+" and isinstance(left, complex) and isinstance(right, (int, float)): + return left + right + elif op == "-" and isinstance(left, (int, float)) and isinstance(right, complex): + return left - right + elif op == "-" and isinstance(left, complex) and isinstance(right, (int, float)): + return left - right + + return None + + +def constant_fold_binary_int_op(op: str, left: int, right: int) -> int | float | None: if op == "+": return left + right if op == "-": return left - right elif op == "*": return left * right + elif op == "/": + if right != 0: + return left / right elif op == "//": if right != 0: return left // right @@ -102,25 +147,41 @@ def constant_fold_binary_int_op(op: str, left: int, right: int) -> int | None: return None -def constant_fold_unary_int_op(op: str, value: int) -> int | None: - if op == "-": - return -value - elif op == "~": - return ~value - elif op == "+": - return value +def constant_fold_binary_float_op(op: str, left: int | float, right: int | float) -> float | None: + assert not (isinstance(left, int) and isinstance(right, int)), (op, left, right) + if op == "+": + return left + right + elif op == "-": + return left - right + elif op == "*": + return left * right + elif op == "/": + if right != 0: + return left / right + elif op == "//": + if right != 0: + return left // right + elif op == "%": + if right != 0: + return left % right + elif op == "**": + if (left < 0 and isinstance(right, int)) or left > 0: + try: + ret = left**right + except OverflowError: + return None + else: + assert isinstance(ret, float), ret + return ret + return None -def constant_fold_unary_float_op(op: str, value: float) -> float | None: - if op == "-": +def constant_fold_unary_op(op: str, value: ConstantValue) -> int | float | None: + if op == "-" and isinstance(value, (int, float)): return -value - elif op == "+": + elif op == "~" and isinstance(value, int): + return ~value + elif op == "+" and isinstance(value, (int, float)): return value return None - - -def constant_fold_binary_str_op(op: str, left: str, right: str) -> str | None: - if op == "+": - return left + right - return None diff --git a/mypy/constraints.py b/mypy/constraints.py index 33230871b505..0e59b5459fd4 100644 --- a/mypy/constraints.py +++ b/mypy/constraints.py @@ -2,15 +2,23 @@ from __future__ import annotations -from typing import 
TYPE_CHECKING, Iterable, List, Sequence, cast -from typing_extensions import Final +from typing import TYPE_CHECKING, Final, Iterable, List, Sequence, cast import mypy.subtypes import mypy.typeops from mypy.argmap import ArgTypeExpander from mypy.erasetype import erase_typevars from mypy.maptype import map_instance_to_supertype -from mypy.nodes import ARG_OPT, ARG_POS, CONTRAVARIANT, COVARIANT, ArgKind +from mypy.nodes import ( + ARG_OPT, + ARG_POS, + ARG_STAR, + ARG_STAR2, + CONTRAVARIANT, + COVARIANT, + ArgKind, + TypeInfo, +) from mypy.types import ( TUPLE_LIKE_INSTANCE_NAMES, AnyType, @@ -41,7 +49,7 @@ UninhabitedType, UnionType, UnpackType, - callable_with_ellipsis, + find_unpack_in_list, get_proper_type, has_recursive_types, has_type_vars, @@ -50,7 +58,7 @@ ) from mypy.types_utils import is_union_with_any from mypy.typestate import type_state -from mypy.typevartuples import extract_unpack, find_unpack_in_list, split_with_mapped_and_template +from mypy.typevartuples import extract_unpack, split_with_mapped_and_template if TYPE_CHECKING: from mypy.infer import ArgumentInferContext @@ -72,8 +80,14 @@ class Constraint: def __init__(self, type_var: TypeVarLikeType, op: int, target: Type) -> None: self.type_var = type_var.id self.op = op + # TODO: should we add "assert not isinstance(target, UnpackType)"? + # UnpackType is a synthetic type, and is never valid as a constraint target. self.target = target self.origin_type_var = type_var + # These are additional type variables that should be solved for together with type_var. + # TODO: A cleaner solution may be to modify the return type of infer_constraints() + # to include these instead, but this is a rather big refactoring. + self.extra_tvars: list[TypeVarLikeType] = [] def __repr__(self) -> str: op_str = "<:" @@ -94,6 +108,7 @@ def infer_constraints_for_callable( callee: CallableType, arg_types: Sequence[Type | None], arg_kinds: list[ArgKind], + arg_names: Sequence[str | None] | None, formal_to_actual: list[list[int]], context: ArgumentInferContext, ) -> list[Constraint]: @@ -104,6 +119,20 @@ def infer_constraints_for_callable( constraints: list[Constraint] = [] mapper = ArgTypeExpander(context) + param_spec = callee.param_spec() + param_spec_arg_types = [] + param_spec_arg_names = [] + param_spec_arg_kinds = [] + + incomplete_star_mapping = False + for i, actuals in enumerate(formal_to_actual): + for actual in actuals: + if actual is None and callee.arg_kinds[i] in (ARG_STAR, ARG_STAR2): + # We can't use arguments to infer ParamSpec constraint, if only some + # are present in the current inference pass. + incomplete_star_mapping = True + break + for i, actuals in enumerate(formal_to_actual): if isinstance(callee.arg_types[i], UnpackType): unpack_type = callee.arg_types[i] @@ -142,16 +171,33 @@ def infer_constraints_for_callable( # not to hold we can always handle the prefixes too. inner_unpack = unpacked_type.items[0] assert isinstance(inner_unpack, UnpackType) - inner_unpacked_type = inner_unpack.type - assert isinstance(inner_unpacked_type, TypeVarTupleType) + inner_unpacked_type = get_proper_type(inner_unpack.type) suffix_len = len(unpacked_type.items) - 1 - constraints.append( - Constraint( - inner_unpacked_type, - SUPERTYPE_OF, - TupleType(actual_types[:-suffix_len], inner_unpacked_type.tuple_fallback), + if isinstance(inner_unpacked_type, TypeVarTupleType): + # Variadic item can be either *Ts... 
+ constraints.append( + Constraint( + inner_unpacked_type, + SUPERTYPE_OF, + TupleType( + actual_types[:-suffix_len], inner_unpacked_type.tuple_fallback + ), + ) ) - ) + else: + # ...or it can be a homogeneous tuple. + assert ( + isinstance(inner_unpacked_type, Instance) + and inner_unpacked_type.type.fullname == "builtins.tuple" + ) + for at in actual_types[:-suffix_len]: + constraints.extend( + infer_constraints(inner_unpacked_type.args[0], at, SUPERTYPE_OF) + ) + # Now handle the suffix (if any). + if suffix_len: + for tt, at in zip(unpacked_type.items[1:], actual_types[-suffix_len:]): + constraints.extend(infer_constraints(tt, at, SUPERTYPE_OF)) else: assert False, "mypy bug: unhandled constraint inference case" else: @@ -163,13 +209,53 @@ def infer_constraints_for_callable( actual_type = mapper.expand_actual_type( actual_arg_type, arg_kinds[actual], callee.arg_names[i], callee.arg_kinds[i] ) - c = infer_constraints(callee.arg_types[i], actual_type, SUPERTYPE_OF) - constraints.extend(c) - + if ( + param_spec + and callee.arg_kinds[i] in (ARG_STAR, ARG_STAR2) + and not incomplete_star_mapping + ): + # If actual arguments are mapped to ParamSpec type, we can't infer individual + # constraints, instead store them and infer single constraint at the end. + # It is impossible to map actual kind to formal kind, so use some heuristic. + # This inference is used as a fallback, so relying on heuristic should be OK. + param_spec_arg_types.append( + mapper.expand_actual_type( + actual_arg_type, arg_kinds[actual], None, arg_kinds[actual] + ) + ) + actual_kind = arg_kinds[actual] + param_spec_arg_kinds.append( + ARG_POS if actual_kind not in (ARG_STAR, ARG_STAR2) else actual_kind + ) + param_spec_arg_names.append(arg_names[actual] if arg_names else None) + else: + c = infer_constraints(callee.arg_types[i], actual_type, SUPERTYPE_OF) + constraints.extend(c) + if ( + param_spec + and not any(c.type_var == param_spec.id for c in constraints) + and not incomplete_star_mapping + ): + # Use ParamSpec constraint from arguments only if there are no other constraints, + # since as explained above it is quite ad-hoc. + constraints.append( + Constraint( + param_spec, + SUPERTYPE_OF, + Parameters( + arg_types=param_spec_arg_types, + arg_kinds=param_spec_arg_kinds, + arg_names=param_spec_arg_names, + imprecise_arg_kinds=True, + ), + ) + ) return constraints -def infer_constraints(template: Type, actual: Type, direction: int) -> list[Constraint]: +def infer_constraints( + template: Type, actual: Type, direction: int, skip_neg_op: bool = False +) -> list[Constraint]: """Infer type constraints. Match a template type, which may contain type variable references, @@ -188,7 +274,9 @@ def infer_constraints(template: Type, actual: Type, direction: int) -> list[Cons ((T, S), (X, Y)) --> T :> X and S :> Y (X[T], Any) --> T <: Any and T :> Any - The constraints are represented as Constraint objects. + The constraints are represented as Constraint objects. If skip_neg_op == True, + then skip adding reverse (polymorphic) constraints (since this is already a call + to infer such constraints). """ if any( get_proper_type(template) == get_proper_type(t) @@ -203,13 +291,15 @@ def infer_constraints(template: Type, actual: Type, direction: int) -> list[Cons # Return early on an empty branch. 
return [] type_state.inferring.append((template, actual)) - res = _infer_constraints(template, actual, direction) + res = _infer_constraints(template, actual, direction, skip_neg_op) type_state.inferring.pop() return res - return _infer_constraints(template, actual, direction) + return _infer_constraints(template, actual, direction, skip_neg_op) -def _infer_constraints(template: Type, actual: Type, direction: int) -> list[Constraint]: +def _infer_constraints( + template: Type, actual: Type, direction: int, skip_neg_op: bool +) -> list[Constraint]: orig_template = template template = get_proper_type(template) actual = get_proper_type(actual) @@ -285,7 +375,7 @@ def _infer_constraints(template: Type, actual: Type, direction: int) -> list[Con return [] # Remaining cases are handled by ConstraintBuilderVisitor. - return template.accept(ConstraintBuilderVisitor(actual, direction)) + return template.accept(ConstraintBuilderVisitor(actual, direction, skip_neg_op)) def infer_constraints_if_possible( @@ -511,10 +601,14 @@ class ConstraintBuilderVisitor(TypeVisitor[List[Constraint]]): # TODO: The value may be None. Is that actually correct? actual: ProperType - def __init__(self, actual: ProperType, direction: int) -> None: + def __init__(self, actual: ProperType, direction: int, skip_neg_op: bool) -> None: # Direction must be SUBTYPE_OF or SUPERTYPE_OF. self.actual = actual self.direction = direction + # Whether to skip polymorphic inference (involves inference in opposite direction) + # this is used to prevent infinite recursion when both template and actual are + # generic callables. + self.skip_neg_op = skip_neg_op # Trivial leaf types @@ -564,10 +658,17 @@ def visit_unpack_type(self, template: UnpackType) -> list[Constraint]: raise RuntimeError("Mypy bug: unpack should be handled at a higher level.") def visit_parameters(self, template: Parameters) -> list[Constraint]: - # constraining Any against C[P] turns into infer_against_any([P], Any) - # ... which seems like the only case this can happen. Better to fail loudly. + # Constraining Any against C[P] turns into infer_against_any([P], Any) + # ... which seems like the only case this can happen. Better to fail loudly otherwise. if isinstance(self.actual, AnyType): return self.infer_against_any(template.arg_types, self.actual) + if type_state.infer_polymorphic and isinstance(self.actual, Parameters): + # For polymorphic inference we need to be able to infer secondary constraints + # in situations like [x: T] <: P <: [x: int]. Note we invert direction, since + # this function expects direction between callables. 
+ return infer_callable_arguments_constraints( + template, self.actual, neg_op(self.direction) + ) raise RuntimeError("Parameters cannot be constrained to") # Non-leaf types @@ -649,13 +750,13 @@ def visit_instance(self, template: Instance) -> list[Constraint]: assert mapped.type.type_var_tuple_prefix is not None assert mapped.type.type_var_tuple_suffix is not None - unpack_constraints, mapped_args, instance_args = build_constraints_for_unpack( - mapped.args, - mapped.type.type_var_tuple_prefix, - mapped.type.type_var_tuple_suffix, + unpack_constraints, instance_args, mapped_args = build_constraints_for_unpack( instance.args, instance.type.type_var_tuple_prefix, instance.type.type_var_tuple_suffix, + mapped.args, + mapped.type.type_var_tuple_prefix, + mapped.type.type_var_tuple_suffix, self.direction, ) res.extend(unpack_constraints) @@ -673,7 +774,6 @@ def visit_instance(self, template: Instance) -> list[Constraint]: # N.B: We use zip instead of indexing because the lengths might have # mismatches during daemon reprocessing. for tvar, mapped_arg, instance_arg in zip(tvars, mapped_args, instance_args): - # TODO(PEP612): More ParamSpec work (or is Parameters the only thing accepted) if isinstance(tvar, TypeVarType): # The constraints for generic type parameters depend on variance. # Include constraints from both directions if invariant. @@ -684,25 +784,27 @@ def visit_instance(self, template: Instance) -> list[Constraint]: infer_constraints(mapped_arg, instance_arg, neg_op(self.direction)) ) elif isinstance(tvar, ParamSpecType) and isinstance(mapped_arg, ParamSpecType): - suffix = get_proper_type(instance_arg) - - if isinstance(suffix, CallableType): - prefix = mapped_arg.prefix - from_concat = bool(prefix.arg_types) or suffix.from_concatenate - suffix = suffix.copy_modified(from_concatenate=from_concat) - - if isinstance(suffix, (Parameters, CallableType)): - # no such thing as variance for ParamSpecs - # TODO: is there a case I am missing? - # TODO: constraints between prefixes - prefix = mapped_arg.prefix - suffix = suffix.copy_modified( - suffix.arg_types[len(prefix.arg_types) :], - suffix.arg_kinds[len(prefix.arg_kinds) :], - suffix.arg_names[len(prefix.arg_names) :], + prefix = mapped_arg.prefix + if isinstance(instance_arg, Parameters): + # No such thing as variance for ParamSpecs, consider them invariant + # TODO: constraints between prefixes using + # infer_callable_arguments_constraints() + suffix: Type = instance_arg.copy_modified( + instance_arg.arg_types[len(prefix.arg_types) :], + instance_arg.arg_kinds[len(prefix.arg_kinds) :], + instance_arg.arg_names[len(prefix.arg_names) :], ) + res.append(Constraint(mapped_arg, SUBTYPE_OF, suffix)) res.append(Constraint(mapped_arg, SUPERTYPE_OF, suffix)) - elif isinstance(suffix, ParamSpecType): + elif isinstance(instance_arg, ParamSpecType): + suffix = instance_arg.copy_modified( + prefix=Parameters( + instance_arg.prefix.arg_types[len(prefix.arg_types) :], + instance_arg.prefix.arg_kinds[len(prefix.arg_kinds) :], + instance_arg.prefix.arg_names[len(prefix.arg_names) :], + ) + ) + res.append(Constraint(mapped_arg, SUBTYPE_OF, suffix)) res.append(Constraint(mapped_arg, SUPERTYPE_OF, suffix)) else: # This case should have been handled above. 
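For context, a minimal user-level sketch of the kind of code the ParamSpec constraint changes above are meant to handle (illustrative only, not part of the patch; invoke and greet are made-up names, and the comment paraphrases the fallback behaviour described in infer_constraints_for_callable() above):

from typing import Callable, TypeVar
from typing_extensions import ParamSpec

P = ParamSpec("P")
T = TypeVar("T")

def invoke(func: Callable[P, T], *args: P.args, **kwargs: P.kwargs) -> T:
    # At a call site the actual arguments are mapped onto *args/**kwargs.
    # Per the change above, when those actuals yield no other constraint for P,
    # mypy falls back to a single Parameters(...) constraint built from the
    # actual argument types and (heuristically chosen) kinds.
    return func(*args, **kwargs)

def greet(name: str, excited: bool = False) -> str:
    return name.upper() if excited else name

result: str = invoke(greet, "alice", excited=True)
print(result)
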
@@ -754,26 +856,27 @@ def visit_instance(self, template: Instance) -> list[Constraint]: elif isinstance(tvar, ParamSpecType) and isinstance( template_arg, ParamSpecType ): - suffix = get_proper_type(mapped_arg) - - if isinstance(suffix, CallableType): - prefix = template_arg.prefix - from_concat = bool(prefix.arg_types) or suffix.from_concatenate - suffix = suffix.copy_modified(from_concatenate=from_concat) - - if isinstance(suffix, (Parameters, CallableType)): - # no such thing as variance for ParamSpecs - # TODO: is there a case I am missing? - # TODO: constraints between prefixes - prefix = template_arg.prefix - - suffix = suffix.copy_modified( - suffix.arg_types[len(prefix.arg_types) :], - suffix.arg_kinds[len(prefix.arg_kinds) :], - suffix.arg_names[len(prefix.arg_names) :], + prefix = template_arg.prefix + if isinstance(mapped_arg, Parameters): + # No such thing as variance for ParamSpecs, consider them invariant + # TODO: constraints between prefixes using + # infer_callable_arguments_constraints() + suffix = mapped_arg.copy_modified( + mapped_arg.arg_types[len(prefix.arg_types) :], + mapped_arg.arg_kinds[len(prefix.arg_kinds) :], + mapped_arg.arg_names[len(prefix.arg_names) :], ) + res.append(Constraint(template_arg, SUBTYPE_OF, suffix)) res.append(Constraint(template_arg, SUPERTYPE_OF, suffix)) - elif isinstance(suffix, ParamSpecType): + elif isinstance(mapped_arg, ParamSpecType): + suffix = mapped_arg.copy_modified( + prefix=Parameters( + mapped_arg.prefix.arg_types[len(prefix.arg_types) :], + mapped_arg.prefix.arg_kinds[len(prefix.arg_kinds) :], + mapped_arg.prefix.arg_names[len(prefix.arg_names) :], + ) + ) + res.append(Constraint(template_arg, SUBTYPE_OF, suffix)) res.append(Constraint(template_arg, SUPERTYPE_OF, suffix)) else: # This case should have been handled above. @@ -829,13 +932,23 @@ def visit_instance(self, template: Instance) -> list[Constraint]: and self.direction == SUPERTYPE_OF ): for item in actual.items: + if isinstance(item, UnpackType): + unpacked = get_proper_type(item.type) + if isinstance(unpacked, TypeVarType): + # Cannot infer anything for T from [T, ...] <: *Ts + continue + assert ( + isinstance(unpacked, Instance) + and unpacked.type.fullname == "builtins.tuple" + ) + item = unpacked.args[0] cb = infer_constraints(template.args[0], item, SUPERTYPE_OF) res.extend(cb) return res elif isinstance(actual, TupleType) and self.direction == SUPERTYPE_OF: return infer_constraints(template, mypy.typeops.tuple_fallback(actual), self.direction) elif isinstance(actual, TypeVarType): - if not actual.values: + if not actual.values and not actual.id.is_meta_var(): return infer_constraints(template, actual.upper_bound, self.direction) return [] elif isinstance(actual, ParamSpecType): @@ -879,83 +992,128 @@ def visit_callable_type(self, template: CallableType) -> list[Constraint]: # Normalize callables before matching against each other. # Note that non-normalized callables can be created in annotations # using e.g. callback protocols. + # TODO: check that callables match? Ideally we should not infer constraints + # callables that can never be subtypes of one another in given direction. 
template = template.with_unpacked_kwargs() + extra_tvars = False if isinstance(self.actual, CallableType): res: list[Constraint] = [] cactual = self.actual.with_unpacked_kwargs() param_spec = template.param_spec() + + template_ret_type, cactual_ret_type = template.ret_type, cactual.ret_type + if template.type_guard is not None: + template_ret_type = template.type_guard + if cactual.type_guard is not None: + cactual_ret_type = cactual.type_guard + res.extend(infer_constraints(template_ret_type, cactual_ret_type, self.direction)) + if param_spec is None: - # FIX verify argument counts - # FIX what if one of the functions is generic + # TODO: Erase template variables if it is generic? + if ( + type_state.infer_polymorphic + and cactual.variables + and not self.skip_neg_op + # Technically, the correct inferred type for application of e.g. + # Callable[..., T] -> Callable[..., T] (with literal ellipsis), to a generic + # like U -> U, should be Callable[..., Any], but if U is a self-type, we can + # allow it to leak, to be later bound to self. A bunch of existing code + # depends on this old behaviour. + and not any(tv.id.raw_id == 0 for tv in cactual.variables) + ): + # If the actual callable is generic, infer constraints in the opposite + # direction, and indicate to the solver there are extra type variables + # to solve for (see more details in mypy/solve.py). + res.extend( + infer_constraints( + cactual, template, neg_op(self.direction), skip_neg_op=True + ) + ) + extra_tvars = True # We can't infer constraints from arguments if the template is Callable[..., T] # (with literal '...'). if not template.is_ellipsis_args: - if find_unpack_in_list(template.arg_types) is not None: - ( - unpack_constraints, - cactual_args_t, - template_args_t, - ) = find_and_build_constraints_for_unpack( - tuple(cactual.arg_types), tuple(template.arg_types), self.direction + unpack_present = find_unpack_in_list(template.arg_types) + if unpack_present is not None: + # We need to re-normalize args to the form they appear in tuples, + # for callables we always pack the suffix inside another tuple. + unpack = template.arg_types[unpack_present] + assert isinstance(unpack, UnpackType) + tuple_type = get_tuple_fallback_from_unpack(unpack) + template_types = repack_callable_args(template, tuple_type) + actual_types = repack_callable_args(cactual, tuple_type) + # Now we can use the same general helper as for tuple types. + unpack_constraints = build_constraints_for_simple_unpack( + template_types, actual_types, neg_op(self.direction) ) - template_args = list(template_args_t) - cactual_args = list(cactual_args_t) res.extend(unpack_constraints) - assert len(template_args) == len(cactual_args) else: - template_args = template.arg_types - cactual_args = cactual.arg_types - # The lengths should match, but don't crash (it will error elsewhere). - for t, a in zip(template_args, cactual_args): # Negate direction due to function argument type contravariance. - res.extend(infer_constraints(t, a, neg_op(self.direction))) + res.extend( + infer_callable_arguments_constraints(template, cactual, self.direction) + ) else: - # sometimes, it appears we try to get constraints between two paramspec callables? 
- - # TODO: Direction - # TODO: check the prefixes match prefix = param_spec.prefix prefix_len = len(prefix.arg_types) cactual_ps = cactual.param_spec() - if not cactual_ps: - max_prefix_len = len([k for k in cactual.arg_kinds if k in (ARG_POS, ARG_OPT)]) - prefix_len = min(prefix_len, max_prefix_len) - res.append( - Constraint( - param_spec, - SUBTYPE_OF, - cactual.copy_modified( - arg_types=cactual.arg_types[prefix_len:], - arg_kinds=cactual.arg_kinds[prefix_len:], - arg_names=cactual.arg_names[prefix_len:], - ret_type=UninhabitedType(), - ), + if type_state.infer_polymorphic and cactual.variables and not self.skip_neg_op: + # Similar logic to the branch above. + res.extend( + infer_constraints( + cactual, template, neg_op(self.direction), skip_neg_op=True ) ) - else: - res.append(Constraint(param_spec, SUBTYPE_OF, cactual_ps)) + extra_tvars = True - # compare prefixes + # Compare prefixes as well cactual_prefix = cactual.copy_modified( arg_types=cactual.arg_types[:prefix_len], arg_kinds=cactual.arg_kinds[:prefix_len], arg_names=cactual.arg_names[:prefix_len], ) + res.extend( + infer_callable_arguments_constraints(prefix, cactual_prefix, self.direction) + ) - # TODO: see above "FIX" comments for param_spec is None case - # TODO: this assumes positional arguments - for t, a in zip(prefix.arg_types, cactual_prefix.arg_types): - res.extend(infer_constraints(t, a, neg_op(self.direction))) - - template_ret_type, cactual_ret_type = template.ret_type, cactual.ret_type - if template.type_guard is not None: - template_ret_type = template.type_guard - if cactual.type_guard is not None: - cactual_ret_type = cactual.type_guard - - res.extend(infer_constraints(template_ret_type, cactual_ret_type, self.direction)) + param_spec_target: Type | None = None + skip_imprecise = ( + any(c.type_var == param_spec.id for c in res) and cactual.imprecise_arg_kinds + ) + if not cactual_ps: + max_prefix_len = len([k for k in cactual.arg_kinds if k in (ARG_POS, ARG_OPT)]) + prefix_len = min(prefix_len, max_prefix_len) + # This logic matches top-level callable constraint exception, if we managed + # to get other constraints for ParamSpec, don't infer one with imprecise kinds + if not skip_imprecise: + param_spec_target = Parameters( + arg_types=cactual.arg_types[prefix_len:], + arg_kinds=cactual.arg_kinds[prefix_len:], + arg_names=cactual.arg_names[prefix_len:], + variables=cactual.variables + if not type_state.infer_polymorphic + else [], + imprecise_arg_kinds=cactual.imprecise_arg_kinds, + ) + else: + if ( + len(param_spec.prefix.arg_types) <= len(cactual_ps.prefix.arg_types) + and not skip_imprecise + ): + param_spec_target = cactual_ps.copy_modified( + prefix=Parameters( + arg_types=cactual_ps.prefix.arg_types[prefix_len:], + arg_kinds=cactual_ps.prefix.arg_kinds[prefix_len:], + arg_names=cactual_ps.prefix.arg_names[prefix_len:], + imprecise_arg_kinds=cactual_ps.prefix.imprecise_arg_kinds, + ) + ) + if param_spec_target is not None: + res.append(Constraint(param_spec, neg_op(self.direction), param_spec_target)) + if extra_tvars: + for c in res: + c.extra_tvars += cactual.variables return res elif isinstance(self.actual, AnyType): param_spec = template.param_spec() @@ -968,7 +1126,7 @@ def visit_callable_type(self, template: CallableType) -> list[Constraint]: Constraint( param_spec, SUBTYPE_OF, - callable_with_ellipsis(any_type, any_type, template.fallback), + Parameters([any_type, any_type], [ARG_STAR, ARG_STAR2], [None, None]), ) ] res.extend(infer_constraints(template.ret_type, any_type, self.direction)) @@ 
-1019,13 +1177,11 @@ def visit_tuple_type(self, template: TupleType) -> list[Constraint]: return [Constraint(type_var=unpacked_type, op=self.direction, target=actual)] else: assert isinstance(actual, TupleType) - ( - unpack_constraints, - actual_items, - template_items, - ) = find_and_build_constraints_for_unpack( - tuple(actual.items), tuple(template.items), self.direction + unpack_constraints = build_constraints_for_simple_unpack( + template.items, actual.items, self.direction ) + actual_items: tuple[Type, ...] = () + template_items: tuple[Type, ...] = () res.extend(unpack_constraints) elif isinstance(actual, TupleType): actual_items = tuple(actual.items) @@ -1158,31 +1314,138 @@ def find_matching_overload_items( return res -def find_and_build_constraints_for_unpack( - mapped: tuple[Type, ...], template: tuple[Type, ...], direction: int -) -> tuple[list[Constraint], tuple[Type, ...], tuple[Type, ...]]: - mapped_prefix_len = find_unpack_in_list(mapped) - if mapped_prefix_len is not None: - mapped_suffix_len: int | None = len(mapped) - mapped_prefix_len - 1 +def get_tuple_fallback_from_unpack(unpack: UnpackType) -> TypeInfo | None: + """Get builtins.tuple type from available types to construct homogeneous tuples.""" + tp = get_proper_type(unpack.type) + if isinstance(tp, Instance) and tp.type.fullname == "builtins.tuple": + return tp.type + if isinstance(tp, TypeVarTupleType): + return tp.tuple_fallback.type + if isinstance(tp, TupleType): + for base in tp.partial_fallback.type.mro: + if base.fullname == "builtins.tuple": + return base + return None + + +def repack_callable_args(callable: CallableType, tuple_type: TypeInfo | None) -> list[Type]: + """Present callable with star unpack in a normalized form. + + Since positional arguments cannot follow star argument, they are packed in a suffix, + while prefix is represented as individual positional args. We want to put all in a single + list with unpack in the middle, and prefix/suffix on the sides (as they would appear + in e.g. a TupleType). + """ + if ARG_STAR not in callable.arg_kinds: + return callable.arg_types + star_index = callable.arg_kinds.index(ARG_STAR) + arg_types = callable.arg_types[:star_index] + star_type = callable.arg_types[star_index] + suffix_types = [] + if not isinstance(star_type, UnpackType): + if tuple_type is not None: + # Re-normalize *args: X -> *args: *tuple[X, ...] + star_type = UnpackType(Instance(tuple_type, [star_type])) + else: + # This is unfortunate, something like tuple[Any, ...] would be better. + star_type = UnpackType(AnyType(TypeOfAny.from_error)) else: - mapped_suffix_len = None + tp = get_proper_type(star_type.type) + if isinstance(tp, TupleType): + assert isinstance(tp.items[0], UnpackType) + star_type = tp.items[0] + suffix_types = tp.items[1:] + return arg_types + [star_type] + suffix_types - template_prefix_len = find_unpack_in_list(template) - assert template_prefix_len is not None - template_suffix_len = len(template) - template_prefix_len - 1 - return build_constraints_for_unpack( - mapped, - mapped_prefix_len, - mapped_suffix_len, - template, - template_prefix_len, - template_suffix_len, - direction, +def build_constraints_for_simple_unpack( + template_args: list[Type], actual_args: list[Type], direction: int +) -> list[Constraint]: + """Infer constraints between two lists of types with variadic items. + + This function is only supposed to be called when a variadic item is present in templates. 
+ If there is no variadic item the actuals, we simply use split_with_prefix_and_suffix() + and infer prefix <: prefix, suffix <: suffix, variadic <: middle. If there is a variadic + item in the actuals we need to be more careful, only common prefix/suffix can generate + constraints, also we can only infer constraints for variadic template item, if template + prefix/suffix are shorter that actual ones, otherwise there may be partial overlap + between variadic items, for example if template prefix is longer: + + templates: T1, T2, Ts, Ts, Ts, ... + actuals: A1, As, As, As, ... + + Note: this function can only be called for builtin variadic constructors: Tuple and Callable, + for Instances variance depends on position, and a much more complex function + build_constraints_for_unpack() should be used. + """ + template_unpack = find_unpack_in_list(template_args) + assert template_unpack is not None + template_prefix = template_unpack + template_suffix = len(template_args) - template_prefix - 1 + + t_unpack = None + res = [] + + actual_unpack = find_unpack_in_list(actual_args) + if actual_unpack is None: + t_unpack = template_args[template_unpack] + if template_prefix + template_suffix > len(actual_args): + # These can't be subtypes of each-other, return fast. + assert isinstance(t_unpack, UnpackType) + if isinstance(t_unpack.type, TypeVarTupleType): + # Set TypeVarTuple to empty to improve error messages. + return [ + Constraint( + t_unpack.type, direction, TupleType([], t_unpack.type.tuple_fallback) + ) + ] + else: + return [] + common_prefix = template_prefix + common_suffix = template_suffix + else: + actual_prefix = actual_unpack + actual_suffix = len(actual_args) - actual_prefix - 1 + common_prefix = min(template_prefix, actual_prefix) + common_suffix = min(template_suffix, actual_suffix) + if actual_prefix >= template_prefix and actual_suffix >= template_suffix: + # This is the only case where we can guarantee there will be no partial overlap. + t_unpack = template_args[template_unpack] + + # Handle constraints from prefixes/suffixes first. + start, middle, end = split_with_prefix_and_suffix( + tuple(actual_args), common_prefix, common_suffix ) + for t, a in zip(template_args[:common_prefix], start): + res.extend(infer_constraints(t, a, direction)) + if common_suffix: + for t, a in zip(template_args[-common_suffix:], end): + res.extend(infer_constraints(t, a, direction)) + + if t_unpack is not None: + # Add constraint(s) for variadic item when possible. + assert isinstance(t_unpack, UnpackType) + tp = get_proper_type(t_unpack.type) + if isinstance(tp, Instance) and tp.type.fullname == "builtins.tuple": + # Homogeneous case *tuple[T, ...] <: [X, Y, Z, ...]. + for a in middle: + # TODO: should we use union instead of join here? + if not isinstance(a, UnpackType): + res.extend(infer_constraints(tp.args[0], a, direction)) + else: + a_tp = get_proper_type(a.type) + # This is the case *tuple[T, ...] <: *tuple[A, ...]. + if isinstance(a_tp, Instance) and a_tp.type.fullname == "builtins.tuple": + res.extend(infer_constraints(tp.args[0], a_tp.args[0], direction)) + elif isinstance(tp, TypeVarTupleType): + res.append(Constraint(tp, direction, TupleType(list(middle), tp.tuple_fallback))) + return res def build_constraints_for_unpack( + # TODO: this naming is misleading, these should be "actual", not "mapped" + # both template and actual can be mapped before, depending on direction. + # Also the convention is to put template related args first. 
mapped: tuple[Type, ...], mapped_prefix_len: int | None, mapped_suffix_len: int | None, @@ -1191,6 +1454,10 @@ def build_constraints_for_unpack( template_suffix_len: int, direction: int, ) -> tuple[list[Constraint], tuple[Type, ...], tuple[Type, ...]]: + # TODO: this function looks broken: + # a) it should take into account variances, but it doesn't + # b) it looks like both call sites always pass identical values to args (2, 3) and (5, 6) + # because after map_instance_to_supertype() both template and actual have same TypeInfo. if mapped_prefix_len is None: mapped_prefix_len = template_prefix_len if mapped_suffix_len is None: @@ -1237,4 +1504,90 @@ def build_constraints_for_unpack( if len(template_unpack.items) == len(mapped_middle): for template_arg, item in zip(template_unpack.items, mapped_middle): res.extend(infer_constraints(template_arg, item, direction)) - return (res, mapped_prefix + mapped_suffix, template_prefix + template_suffix) + return res, mapped_prefix + mapped_suffix, template_prefix + template_suffix + + +def infer_directed_arg_constraints(left: Type, right: Type, direction: int) -> list[Constraint]: + """Infer constraints between two arguments using direction between original callables.""" + if isinstance(left, (ParamSpecType, UnpackType)) or isinstance( + right, (ParamSpecType, UnpackType) + ): + # This avoids bogus constraints like T <: P.args + # TODO: can we infer something useful for *T vs P? + return [] + if direction == SUBTYPE_OF: + # We invert direction to account for argument contravariance. + return infer_constraints(left, right, neg_op(direction)) + else: + return infer_constraints(right, left, neg_op(direction)) + + +def infer_callable_arguments_constraints( + template: CallableType | Parameters, actual: CallableType | Parameters, direction: int +) -> list[Constraint]: + """Infer constraints between argument types of two callables. + + This function essentially extracts four steps from are_parameters_compatible() in + subtypes.py that involve subtype checks between argument types. We keep the argument + matching logic, but ignore various strictness flags present there, and checks that + do not involve subtyping. Then in place of every subtype check we put an infer_constraints() + call for the same types. + """ + res = [] + if direction == SUBTYPE_OF: + left, right = template, actual + else: + left, right = actual, template + left_star = left.var_arg() + left_star2 = left.kw_arg() + right_star = right.var_arg() + right_star2 = right.kw_arg() + + # Numbering of steps below matches the one in are_parameters_compatible() for convenience. + # Phase 1a: compare star vs star arguments. + if left_star is not None and right_star is not None: + res.extend(infer_directed_arg_constraints(left_star.typ, right_star.typ, direction)) + if left_star2 is not None and right_star2 is not None: + res.extend(infer_directed_arg_constraints(left_star2.typ, right_star2.typ, direction)) + + # Phase 1b: compare left args with corresponding non-star right arguments. + for right_arg in right.formal_arguments(): + left_arg = mypy.typeops.callable_corresponding_argument(left, right_arg) + if left_arg is None: + continue + res.extend(infer_directed_arg_constraints(left_arg.typ, right_arg.typ, direction)) + + # Phase 1c: compare left args with right *args. 
+ if right_star is not None: + right_by_position = right.try_synthesizing_arg_from_vararg(None) + assert right_by_position is not None + i = right_star.pos + assert i is not None + while i < len(left.arg_kinds) and left.arg_kinds[i].is_positional(): + left_by_position = left.argument_by_position(i) + assert left_by_position is not None + res.extend( + infer_directed_arg_constraints( + left_by_position.typ, right_by_position.typ, direction + ) + ) + i += 1 + + # Phase 1d: compare left args with right **kwargs. + if right_star2 is not None: + right_names = {name for name in right.arg_names if name is not None} + left_only_names = set() + for name, kind in zip(left.arg_names, left.arg_kinds): + if name is None or kind.is_star() or name in right_names: + continue + left_only_names.add(name) + + right_by_name = right.try_synthesizing_arg_from_kwarg(None) + assert right_by_name is not None + for name in left_only_names: + left_by_name = left.argument_by_name(name) + assert left_by_name is not None + res.extend( + infer_directed_arg_constraints(left_by_name.typ, right_by_name.typ, direction) + ) + return res diff --git a/mypy/copytype.py b/mypy/copytype.py index 0b63c8e07ae8..4ca381c4a8c4 100644 --- a/mypy/copytype.py +++ b/mypy/copytype.py @@ -28,7 +28,7 @@ ) # type_visitor needs to be imported after types -from mypy.type_visitor import TypeVisitor # isort: skip +from mypy.type_visitor import TypeVisitor # ruff: isort: skip def copy_type(t: ProperType) -> ProperType: diff --git a/mypy/defaults.py b/mypy/defaults.py index d167997464f4..6a09a61a461e 100644 --- a/mypy/defaults.py +++ b/mypy/defaults.py @@ -1,18 +1,16 @@ from __future__ import annotations import os -from typing_extensions import Final - -PYTHON2_VERSION: Final = (2, 7) +from typing import Final # Earliest fully supported Python 3.x version. Used as the default Python # version in tests. Mypy wheels should be built starting with this version, # and CI tests should be run on this version (and later versions). -PYTHON3_VERSION: Final = (3, 7) +PYTHON3_VERSION: Final = (3, 8) # Earliest Python 3.x version supported via --python-version 3.x. To run # mypy, at least version PYTHON3_VERSION is needed. -PYTHON3_VERSION_MIN: Final = (3, 4) +PYTHON3_VERSION_MIN: Final = (3, 7) # Keep in sync with typeshed's python support CACHE_DIR: Final = ".mypy_cache" CONFIG_FILE: Final = ["mypy.ini", ".mypy.ini"] diff --git a/mypy/dmypy/client.py b/mypy/dmypy/client.py index 0e9120608509..c3a2308d1b44 100644 --- a/mypy/dmypy/client.py +++ b/mypy/dmypy/client.py @@ -562,6 +562,7 @@ def check_output( sys.stdout.write(out) sys.stdout.flush() sys.stderr.write(err) + sys.stderr.flush() if verbose: show_stats(response) if junit_xml: @@ -588,13 +589,14 @@ def check_output( def show_stats(response: Mapping[str, object]) -> None: for key, value in sorted(response.items()): - if key not in ("out", "err"): - print("%-24s: %10s" % (key, "%.3f" % value if isinstance(value, float) else value)) - else: + if key in ("out", "err", "stdout", "stderr"): + # Special case text output to display just 40 characters of text value = repr(value)[1:-1] if len(value) > 50: - value = value[:40] + " ..." + value = f"{value[:40]} ... {len(value)-40} more characters" print("%-24s: %s" % (key, value)) + continue + print("%-24s: %10s" % (key, "%.3f" % value if isinstance(value, float) else value)) @action(hang_parser) @@ -668,6 +670,8 @@ def request( # TODO: Other errors, e.g. 
ValueError, UnicodeError else: # Display debugging output written to stdout/stderr in the server process for convenience. + # This should not be confused with "out" and "err" fields in the response. + # Those fields hold the output of the "check" command, and are handled in check_output(). stdout = response.get("stdout") if stdout: sys.stdout.write(stdout) diff --git a/mypy/dmypy_server.py b/mypy/dmypy_server.py index c742f3116402..a50ebc5415ba 100644 --- a/mypy/dmypy_server.py +++ b/mypy/dmypy_server.py @@ -17,8 +17,8 @@ import time import traceback from contextlib import redirect_stderr, redirect_stdout -from typing import AbstractSet, Any, Callable, List, Sequence, Tuple -from typing_extensions import Final, TypeAlias as _TypeAlias +from typing import AbstractSet, Any, Callable, Final, List, Sequence, Tuple +from typing_extensions import TypeAlias as _TypeAlias import mypy.build import mypy.errors @@ -330,7 +330,7 @@ def cmd_run( header=argparse.SUPPRESS, ) # Signal that we need to restart if the options have changed - if self.options_snapshot != options.snapshot(): + if not options.compare_stable(self.options_snapshot): return {"restart": "configuration changed"} if __version__ != version: return {"restart": "mypy version changed"} @@ -857,6 +857,21 @@ def _find_changed( assert path removed.append((source.module, path)) + # Always add modules that were (re-)added, since they may be detected as not changed by + # fswatcher (if they were actually not changed), but they may still need to be checked + # in case they had errors before they were deleted from sources on previous runs. + previous_modules = {source.module for source in self.previous_sources} + changed_set = set(changed) + changed.extend( + [ + (source.module, source.path) + for source in sources + if source.path + and source.module not in previous_modules + and (source.module, source.path) not in changed_set + ] + ) + # Find anything that has had its module path change because of added or removed __init__s last = {s.path: s.module for s in self.previous_sources} for s in sources: @@ -881,8 +896,6 @@ def cmd_inspect( force_reload: bool = False, ) -> dict[str, object]: """Locate and inspect expression(s).""" - if sys.version_info < (3, 8): - return {"error": 'Python 3.8 required for "inspect" command'} if not self.fine_grained_manager: return { "error": 'Command "inspect" is only valid after a "check" command' diff --git a/mypy/dmypy_util.py b/mypy/dmypy_util.py index a1b419617f73..2aae41d998da 100644 --- a/mypy/dmypy_util.py +++ b/mypy/dmypy_util.py @@ -6,8 +6,7 @@ from __future__ import annotations import json -from typing import Any -from typing_extensions import Final +from typing import Any, Final from mypy.ipc import IPCBase diff --git a/mypy/erasetype.py b/mypy/erasetype.py index 6533d0c4e0f9..fbbb4f80b578 100644 --- a/mypy/erasetype.py +++ b/mypy/erasetype.py @@ -71,7 +71,7 @@ def visit_erased_type(self, t: ErasedType) -> ProperType: def visit_partial_type(self, t: PartialType) -> ProperType: # Should not get here. 
- raise RuntimeError() + raise RuntimeError("Cannot erase partial types") def visit_deleted_type(self, t: DeletedType) -> ProperType: return t diff --git a/mypy/errorcodes.py b/mypy/errorcodes.py index 50a82be9816d..3594458fa362 100644 --- a/mypy/errorcodes.py +++ b/mypy/errorcodes.py @@ -6,7 +6,7 @@ from __future__ import annotations from collections import defaultdict -from typing_extensions import Final +from typing import Final from mypy_extensions import mypyc_attr @@ -37,6 +37,14 @@ def __init__( def __str__(self) -> str: return f"" + def __eq__(self, other: object) -> bool: + if not isinstance(other, ErrorCode): + return False + return self.code == other.code + + def __hash__(self) -> int: + return hash((self.code,)) + ATTR_DEFINED: Final = ErrorCode("attr-defined", "Check that attribute exists", "General") NAME_DEFINED: Final = ErrorCode("name-defined", "Check that name is defined", "General") @@ -99,6 +107,12 @@ def __str__(self) -> str: IMPORT: Final = ErrorCode( "import", "Require that imported module can be found or has stubs", "General" ) +IMPORT_NOT_FOUND: Final = ErrorCode( + "import-not-found", "Require that imported module can be found", "General", sub_code_of=IMPORT +) +IMPORT_UNTYPED: Final = ErrorCode( + "import-untyped", "Require that imported module has stubs", "General", sub_code_of=IMPORT +) NO_REDEF: Final = ErrorCode("no-redef", "Check that each name is defined once", "General") FUNC_RETURNS_VALUE: Final = ErrorCode( "func-returns-value", "Check that called function returns a value in value context", "General" @@ -136,9 +150,11 @@ def __str__(self) -> str: "safe-super", "Warn about calls to abstract methods with empty/trivial bodies", "General" ) TOP_LEVEL_AWAIT: Final = ErrorCode( - "top-level-await", "Warn about top level await experessions", "General" + "top-level-await", "Warn about top level await expressions", "General" +) +AWAIT_NOT_ASYNC: Final = ErrorCode( + "await-not-async", 'Warn about "await" outside coroutine ("async def")', "General" ) - # These error codes aren't enabled by default. NO_UNTYPED_DEF: Final[ErrorCode] = ErrorCode( "no-untyped-def", "Check that every function has an annotation", "General" @@ -227,6 +243,12 @@ def __str__(self) -> str: UNUSED_IGNORE: Final = ErrorCode( "unused-ignore", "Ensure that all type ignores are used", "General", default_enabled=False ) +EXPLICIT_OVERRIDE_REQUIRED: Final = ErrorCode( + "explicit-override", + "Require @override decorator if method is overriding a base class method", + "General", + default_enabled=False, +) # Syntax errors are often blocking. diff --git a/mypy/errors.py b/mypy/errors.py index 9d29259e943c..4e62a48aeb27 100644 --- a/mypy/errors.py +++ b/mypy/errors.py @@ -4,11 +4,11 @@ import sys import traceback from collections import defaultdict -from typing import Callable, Iterable, NoReturn, Optional, TextIO, Tuple, TypeVar -from typing_extensions import Final, Literal, TypeAlias as _TypeAlias +from typing import Callable, Final, Iterable, NoReturn, Optional, TextIO, Tuple, TypeVar +from typing_extensions import Literal, TypeAlias as _TypeAlias from mypy import errorcodes as codes -from mypy.errorcodes import IMPORT, ErrorCode +from mypy.errorcodes import IMPORT, IMPORT_NOT_FOUND, IMPORT_UNTYPED, ErrorCode from mypy.message_registry import ErrorMessage from mypy.options import Options from mypy.scope import Scope @@ -20,8 +20,27 @@ # Show error codes for some note-level messages (these usually appear alone # and not as a comment for a previous error-level message). 
SHOW_NOTE_CODES: Final = {codes.ANNOTATION_UNCHECKED} + +# Do not add notes with links to error code docs to errors with these codes. +# We can tweak this set as we get more experience about what is helpful and what is not. +HIDE_LINK_CODES: Final = { + # This is a generic error code, so it has no useful docs + codes.MISC, + # These are trivial and have some custom notes (e.g. for list being invariant) + codes.ASSIGNMENT, + codes.ARG_TYPE, + codes.RETURN_VALUE, + # Undefined name/attribute errors are self-explanatory + codes.ATTR_DEFINED, + codes.NAME_DEFINED, + # Overrides have a custom link to docs + codes.OVERRIDE, +} + allowed_duplicates: Final = ["@overload", "Got:", "Expected:"] +BASE_RTD_URL: Final = "https://mypy.rtfd.io/en/stable/_refs.html#code" + # Keep track of the original error code when the error code of a message is changed. # This is used to give notes about out-of-date "type: ignore" comments. original_error_codes: Final = {codes.LITERAL_REQ: codes.MISC, codes.TYPE_ABSTRACT: codes.MISC} @@ -90,6 +109,7 @@ class ErrorInfo: def __init__( self, import_ctx: list[tuple[str, int]], + *, file: str, module: str | None, typ: str | None, @@ -106,6 +126,7 @@ def __init__( allow_dups: bool, origin: tuple[str, Iterable[int]] | None = None, target: str | None = None, + priority: int = 0, ) -> None: self.import_ctx = import_ctx self.file = file @@ -124,6 +145,7 @@ def __init__( self.allow_dups = allow_dups self.origin = origin or (file, [line]) self.target = target + self.priority = priority # Type used internally to represent errors: @@ -222,8 +244,9 @@ class Errors: # (path -> line -> error-codes) ignored_lines: dict[str, dict[int, list[str]]] - # Lines that are statically unreachable (e.g. due to platform/version check). - unreachable_lines: dict[str, set[int]] + # Lines that were skipped during semantic analysis e.g. due to ALWAYS_FALSE, MYPY_FALSE, + # or platform/version checks. Those lines would not be type-checked. + skipped_lines: dict[str, set[int]] # Lines on which an error was actually ignored. used_ignored_lines: dict[str, dict[int, list[str]]] @@ -280,7 +303,7 @@ def initialize(self) -> None: self.import_ctx = [] self.function_or_member = [None] self.ignored_lines = {} - self.unreachable_lines = {} + self.skipped_lines = {} self.used_ignored_lines = defaultdict(lambda: defaultdict(list)) self.ignored_files = set() self.only_once_messages = set() @@ -329,8 +352,8 @@ def set_file_ignored_lines( if ignore_all: self.ignored_files.add(file) - def set_unreachable_lines(self, file: str, unreachable_lines: set[int]) -> None: - self.unreachable_lines[file] = unreachable_lines + def set_skipped_lines(self, file: str, skipped_lines: set[int]) -> None: + self.skipped_lines[file] = skipped_lines def current_target(self) -> str | None: """Retrieves the current target from the associated scope. 
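For context, a small sketch of how the new import sub-codes defined above are meant to be used (illustrative only, not part of the patch; the module names are placeholders, and the snippet is intended to be type-checked rather than executed, with the TYPE_CHECKING guard keeping it importable at runtime):

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # A module that cannot be found at all is now reported with the narrower
    # "import-not-found" code (a sub-code of "import").
    import completely_missing_module  # type: ignore[import-not-found]

    # An installed package that ships neither inline types nor stubs is
    # reported as "import-untyped" instead, so it can be silenced (e.g. with
    # --disable-error-code=import-untyped) without hiding missing modules.
    import installed_but_untyped_package  # type: ignore[import-untyped]

    # Both new codes are declared with sub_code_of=IMPORT, so an existing
    # broad ignore keeps working unchanged.
    import some_other_missing_module  # type: ignore[import]
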
@@ -415,21 +438,21 @@ def report( code = code or (codes.MISC if not blocker else None) info = ErrorInfo( - self.import_context(), - file, - self.current_module(), - type, - function, - line, - column, - end_line, - end_column, - severity, - message, - code, - blocker, - only_once, - allow_dups, + import_ctx=self.import_context(), + file=file, + module=self.current_module(), + typ=type, + function_or_member=function, + line=line, + column=column, + end_line=end_line, + end_column=end_column, + severity=severity, + message=message, + code=code, + blocker=blocker, + only_once=only_once, + allow_dups=allow_dups, origin=(self.file, origin_span), target=self.current_target(), ) @@ -446,7 +469,7 @@ def _add_error_info(self, file: str, info: ErrorInfo) -> None: self.error_info_map[file].append(info) if info.blocker: self.has_blockers.add(file) - if info.code is IMPORT: + if info.code in (IMPORT, IMPORT_UNTYPED, IMPORT_NOT_FOUND): self.seen_import_error = True def _filter_error(self, file: str, info: ErrorInfo) -> bool: @@ -487,7 +510,11 @@ def add_error_info(self, info: ErrorInfo) -> None: if info.message in self.only_once_messages: return self.only_once_messages.add(info.message) - if self.seen_import_error and info.code is not IMPORT and self.has_many_errors(): + if ( + self.seen_import_error + and info.code not in (IMPORT, IMPORT_UNTYPED, IMPORT_NOT_FOUND) + and self.has_many_errors() + ): # Missing stubs can easily cause thousands of errors about # Any types, especially when upgrading to mypy 0.900, # which no longer bundles third-party library stubs. Avoid @@ -511,23 +538,52 @@ def add_error_info(self, info: ErrorInfo) -> None: + "may be out of date" ) note = ErrorInfo( - info.import_ctx, - info.file, - info.module, - info.type, - info.function_or_member, - info.line, - info.column, - info.end_line, - info.end_column, - "note", - msg, + import_ctx=info.import_ctx, + file=info.file, + module=info.module, + typ=info.type, + function_or_member=info.function_or_member, + line=info.line, + column=info.column, + end_line=info.end_line, + end_column=info.end_column, + severity="note", + message=msg, code=None, blocker=False, only_once=False, allow_dups=False, ) self._add_error_info(file, note) + if ( + self.options.show_error_code_links + and not self.options.hide_error_codes + and info.code is not None + and info.code not in HIDE_LINK_CODES + ): + message = f"See {BASE_RTD_URL}-{info.code.code} for more info" + if message in self.only_once_messages: + return + self.only_once_messages.add(message) + info = ErrorInfo( + import_ctx=info.import_ctx, + file=info.file, + module=info.module, + typ=info.type, + function_or_member=info.function_or_member, + line=info.line, + column=info.column, + end_line=info.end_line, + end_column=info.end_column, + severity="note", + message=message, + code=info.code, + blocker=False, + only_once=True, + allow_dups=False, + priority=20, + ) + self._add_error_info(file, info) def has_many_errors(self) -> bool: if self.options.many_errors_threshold < 0: @@ -630,7 +686,7 @@ def generate_unused_ignore_errors(self, file: str) -> None: ignored_lines = self.ignored_lines[file] used_ignored_lines = self.used_ignored_lines[file] for line, ignored_codes in ignored_lines.items(): - if line in self.unreachable_lines[file]: + if line in self.skipped_lines[file]: continue if codes.UNUSED_IGNORE.code in ignored_codes: continue @@ -653,21 +709,21 @@ def generate_unused_ignore_errors(self, file: str) -> None: message += f", use narrower [{', '.join(narrower)}] instead of [{unused}] code" 
# Don't use report since add_error_info will ignore the error! info = ErrorInfo( - self.import_context(), - file, - self.current_module(), - None, - None, - line, - -1, - line, - -1, - "error", - message, - codes.UNUSED_IGNORE, - False, - False, - False, + import_ctx=self.import_context(), + file=file, + module=self.current_module(), + typ=None, + function_or_member=None, + line=line, + column=-1, + end_line=line, + end_column=-1, + severity="error", + message=message, + code=codes.UNUSED_IGNORE, + blocker=False, + only_once=False, + allow_dups=False, ) self._add_error_info(file, info) @@ -705,21 +761,21 @@ def generate_ignore_without_code_errors( message = f'"type: ignore" comment without error code{codes_hint}' # Don't use report since add_error_info will ignore the error! info = ErrorInfo( - self.import_context(), - file, - self.current_module(), - None, - None, - line, - -1, - line, - -1, - "error", - message, - codes.IGNORE_WITHOUT_CODE, - False, - False, - False, + import_ctx=self.import_context(), + file=file, + module=self.current_module(), + typ=None, + function_or_member=None, + line=line, + column=-1, + end_line=line, + end_column=-1, + severity="error", + message=message, + code=codes.IGNORE_WITHOUT_CODE, + blocker=False, + only_once=False, + allow_dups=False, ) self._add_error_info(file, info) @@ -853,8 +909,7 @@ def file_messages(self, path: str) -> list[str]: return [] self.flushed_files.add(path) source_lines = None - if self.options.pretty: - assert self.read_source + if self.options.pretty and self.read_source: source_lines = self.read_source(path) return self.format_messages(self.error_info_map[path], source_lines) @@ -1039,6 +1094,34 @@ def sort_messages(self, errors: list[ErrorInfo]) -> list[ErrorInfo]: # Sort the errors specific to a file according to line number and column. a = sorted(errors[i0:i], key=lambda x: (x.line, x.column)) + a = self.sort_within_context(a) + result.extend(a) + return result + + def sort_within_context(self, errors: list[ErrorInfo]) -> list[ErrorInfo]: + """For the same location decide which messages to show first/last. + + Currently, we only compare within the same error code, to decide the + order of various additional notes. + """ + result = [] + i = 0 + while i < len(errors): + i0 = i + # Find neighbouring errors with the same position and error code. + while ( + i + 1 < len(errors) + and errors[i + 1].line == errors[i].line + and errors[i + 1].column == errors[i].column + and errors[i + 1].end_line == errors[i].end_line + and errors[i + 1].end_column == errors[i].end_column + and errors[i + 1].code == errors[i].code + ): + i += 1 + i += 1 + + # Sort the messages specific to a given error by priority. 
+ a = sorted(errors[i0:i], key=lambda x: x.priority) result.extend(a) return result diff --git a/mypy/evalexpr.py b/mypy/evalexpr.py index 2bc6966fa2fa..4b3abb1be3e2 100644 --- a/mypy/evalexpr.py +++ b/mypy/evalexpr.py @@ -7,7 +7,7 @@ """ import ast -from typing_extensions import Final +from typing import Final import mypy.nodes from mypy.visitor import ExpressionVisitor diff --git a/mypy/expandtype.py b/mypy/expandtype.py index 5bbdd5311da7..26353c043cb7 100644 --- a/mypy/expandtype.py +++ b/mypy/expandtype.py @@ -1,11 +1,9 @@ from __future__ import annotations -from typing import Iterable, Mapping, Sequence, TypeVar, cast, overload -from typing_extensions import Final +from typing import Final, Iterable, Mapping, Sequence, TypeVar, cast, overload -from mypy.nodes import ARG_POS, ARG_STAR, ArgKind, Var +from mypy.nodes import ARG_STAR, Var from mypy.state import state -from mypy.type_visitor import TypeTranslator from mypy.types import ( ANY_STRATEGY, AnyType, @@ -37,12 +35,14 @@ UninhabitedType, UnionType, UnpackType, - flatten_nested_tuples, flatten_nested_unions, get_proper_type, split_with_prefix_and_suffix, ) -from mypy.typevartuples import find_unpack_in_list, split_with_instance +from mypy.typevartuples import split_with_instance + +# Solving the import cycle: +import mypy.type_visitor # ruff: isort: skip # WARNING: these functions should never (directly or indirectly) depend on # is_subtype(), meet_types(), join_types() etc. @@ -167,7 +167,7 @@ def freshen_all_functions_type_vars(t: T) -> T: return result -class FreshenCallableVisitor(TypeTranslator): +class FreshenCallableVisitor(mypy.type_visitor.TypeTranslator): def visit_callable_type(self, t: CallableType) -> Type: result = super().visit_callable_type(t) assert isinstance(result, ProperType) and isinstance(result, CallableType) @@ -230,47 +230,35 @@ def visit_type_var(self, t: TypeVarType) -> Type: return repl def visit_param_spec(self, t: ParamSpecType) -> Type: - # set prefix to something empty so we don't duplicate it - repl = get_proper_type( - self.variables.get(t.id, t.copy_modified(prefix=Parameters([], [], []))) - ) - if isinstance(repl, Instance): - # TODO: what does prefix mean in this case? - # TODO: why does this case even happen? Instances aren't plural. - return repl - elif isinstance(repl, (ParamSpecType, Parameters, CallableType)): - if isinstance(repl, ParamSpecType): - return repl.copy_modified( - flavor=t.flavor, - prefix=t.prefix.copy_modified( - arg_types=t.prefix.arg_types + repl.prefix.arg_types, - arg_kinds=t.prefix.arg_kinds + repl.prefix.arg_kinds, - arg_names=t.prefix.arg_names + repl.prefix.arg_names, - ), - ) - else: - # if the paramspec is *P.args or **P.kwargs: - if t.flavor != ParamSpecFlavor.BARE: - assert isinstance(repl, CallableType), "Should not be able to get here." - # Is this always the right thing to do? - param_spec = repl.param_spec() - if param_spec: - return param_spec.with_flavor(t.flavor) - else: - return repl - else: - return Parameters( - t.prefix.arg_types + repl.arg_types, - t.prefix.arg_kinds + repl.arg_kinds, - t.prefix.arg_names + repl.arg_names, - variables=[*t.prefix.variables, *repl.variables], - ) - + # Set prefix to something empty, so we don't duplicate it below. 
+ repl = self.variables.get(t.id, t.copy_modified(prefix=Parameters([], [], []))) + if isinstance(repl, ParamSpecType): + return repl.copy_modified( + flavor=t.flavor, + prefix=t.prefix.copy_modified( + arg_types=self.expand_types(t.prefix.arg_types + repl.prefix.arg_types), + arg_kinds=t.prefix.arg_kinds + repl.prefix.arg_kinds, + arg_names=t.prefix.arg_names + repl.prefix.arg_names, + ), + ) + elif isinstance(repl, Parameters): + assert t.flavor == ParamSpecFlavor.BARE + return Parameters( + self.expand_types(t.prefix.arg_types + repl.arg_types), + t.prefix.arg_kinds + repl.arg_kinds, + t.prefix.arg_names + repl.arg_names, + variables=[*t.prefix.variables, *repl.variables], + ) else: - # TODO: should this branch be removed? better not to fail silently + # TODO: replace this with "assert False" return repl def visit_type_var_tuple(self, t: TypeVarTupleType) -> Type: + # Sometimes solver may need to expand a type variable with (a copy of) itself + # (usually together with other TypeVars, but it is hard to filter out TypeVarTuples). + repl = self.variables.get(t.id, t) + if isinstance(repl, TypeVarTupleType): + return repl raise NotImplementedError def visit_unpack_type(self, t: UnpackType) -> Type: @@ -280,108 +268,56 @@ def visit_unpack_type(self, t: UnpackType) -> Type: # Relevant sections that can call unpack should call expand_unpack() # instead. # However, if the item is a variadic tuple, we can simply carry it over. - # it is hard to assert this without getting proper type. + # In particular, if we expand A[*tuple[T, ...]] with substitutions {T: str}, + # it is hard to assert this without getting proper type. Another important + # example is non-normalized types when called from semanal.py. return UnpackType(t.type.accept(self)) - def expand_unpack(self, t: UnpackType) -> list[Type] | Instance | AnyType | None: - return expand_unpack_with_variables(t, self.variables) + def expand_unpack(self, t: UnpackType) -> list[Type]: + assert isinstance(t.type, TypeVarTupleType) + repl = get_proper_type(self.variables.get(t.type.id, t.type)) + if isinstance(repl, TupleType): + return repl.items + elif ( + isinstance(repl, Instance) + and repl.type.fullname == "builtins.tuple" + or isinstance(repl, TypeVarTupleType) + ): + return [UnpackType(typ=repl)] + elif isinstance(repl, (AnyType, UninhabitedType)): + # Replace *Ts = Any with *Ts = *tuple[Any, ...] and same for Uninhabited. + # These types may appear here as a result of user error or failed inference. + return [UnpackType(t.type.tuple_fallback.copy_modified(args=[repl]))] + else: + raise RuntimeError(f"Invalid type replacement to expand: {repl}") def visit_parameters(self, t: Parameters) -> Type: return t.copy_modified(arg_types=self.expand_types(t.arg_types)) - def interpolate_args_for_unpack( - self, t: CallableType, var_arg: UnpackType - ) -> tuple[list[str | None], list[ArgKind], list[Type]]: + def interpolate_args_for_unpack(self, t: CallableType, var_arg: UnpackType) -> list[Type]: star_index = t.arg_kinds.index(ARG_STAR) + prefix = self.expand_types(t.arg_types[:star_index]) + suffix = self.expand_types(t.arg_types[star_index + 1 :]) - # We have something like Unpack[Tuple[X1, X2, Unpack[Ts], Y1, Y2]] var_arg_type = get_proper_type(var_arg.type) + # We have something like Unpack[Tuple[Unpack[Ts], X1, X2]] if isinstance(var_arg_type, TupleType): expanded_tuple = var_arg_type.accept(self) - # TODO: handle the case that expanded_tuple is a variable length tuple.
assert isinstance(expanded_tuple, ProperType) and isinstance(expanded_tuple, TupleType) expanded_items = expanded_tuple.items + fallback = var_arg_type.partial_fallback else: - expanded_items_res = self.expand_unpack(var_arg) - if isinstance(expanded_items_res, list): - expanded_items = expanded_items_res - elif ( - isinstance(expanded_items_res, Instance) - and expanded_items_res.type.fullname == "builtins.tuple" - ): - # TODO: We shouldnt't simply treat this as a *arg because of suffix handling - # (there cannot be positional args after a *arg) - arg_types = ( - t.arg_types[:star_index] - + [expanded_items_res.args[0]] - + t.arg_types[star_index + 1 :] - ) - return (t.arg_names, t.arg_kinds, arg_types) - else: - return (t.arg_names, t.arg_kinds, t.arg_types) - - expanded_unpack_index = find_unpack_in_list(expanded_items) - # This is the case where we just have Unpack[Tuple[X1, X2, X3]] - # (for example if either the tuple had no unpacks, or the unpack in the - # tuple got fully expanded to something with fixed length) - if expanded_unpack_index is None: - arg_names = ( - t.arg_names[:star_index] - + [None] * len(expanded_items) - + t.arg_names[star_index + 1 :] - ) - arg_kinds = ( - t.arg_kinds[:star_index] - + [ARG_POS] * len(expanded_items) - + t.arg_kinds[star_index + 1 :] - ) - arg_types = ( - self.expand_types(t.arg_types[:star_index]) - + expanded_items - + self.expand_types(t.arg_types[star_index + 1 :]) - ) - else: - # If Unpack[Ts] simplest form still has an unpack or is a - # homogenous tuple, then only the prefix can be represented as - # positional arguments, and we pass Tuple[Unpack[Ts-1], Y1, Y2] - # as the star arg, for example. - expanded_unpack = expanded_items[expanded_unpack_index] - assert isinstance(expanded_unpack, UnpackType) - - # Extract the typevartuple so we can get a tuple fallback from it. - expanded_unpacked_tvt = expanded_unpack.type - if isinstance(expanded_unpacked_tvt, TypeVarTupleType): - fallback = expanded_unpacked_tvt.tuple_fallback - else: - # This can happen when tuple[Any, ...] is used to "patch" a variadic - # generic type without type arguments provided. - assert isinstance(expanded_unpacked_tvt, ProperType) - assert isinstance(expanded_unpacked_tvt, Instance) - assert expanded_unpacked_tvt.type.fullname == "builtins.tuple" - fallback = expanded_unpacked_tvt - - prefix_len = expanded_unpack_index - arg_names = t.arg_names[:star_index] + [None] * prefix_len + t.arg_names[star_index:] - arg_kinds = ( - t.arg_kinds[:star_index] + [ARG_POS] * prefix_len + t.arg_kinds[star_index:] - ) - arg_types = ( - self.expand_types(t.arg_types[:star_index]) - + expanded_items[:prefix_len] - # Constructing the Unpack containing the tuple without the prefix. 
- + [ - UnpackType(TupleType(expanded_items[prefix_len:], fallback)) - if len(expanded_items) - prefix_len > 1 - else expanded_items[0] - ] - + self.expand_types(t.arg_types[star_index + 1 :]) - ) - return (arg_names, arg_kinds, arg_types) + # We have plain Unpack[Ts] + assert isinstance(var_arg_type, TypeVarTupleType) + fallback = var_arg_type.tuple_fallback + expanded_items = self.expand_unpack(var_arg) + new_unpack = UnpackType(TupleType(expanded_items, fallback)) + return prefix + [new_unpack] + suffix def visit_callable_type(self, t: CallableType) -> CallableType: param_spec = t.param_spec() if param_spec is not None: - repl = get_proper_type(self.variables.get(param_spec.id)) + repl = self.variables.get(param_spec.id) # If a ParamSpec in a callable type is substituted with a # callable type, we can't use normal substitution logic, # since ParamSpec is actually split into two components @@ -389,52 +325,48 @@ def visit_callable_type(self, t: CallableType) -> CallableType: # must expand both of them with all the argument types, # kinds and names in the replacement. The return type in # the replacement is ignored. - if isinstance(repl, (CallableType, Parameters)): - # Substitute *args: P.args, **kwargs: P.kwargs - prefix = param_spec.prefix - # we need to expand the types in the prefix, so might as well - # not get them in the first place - t = t.expand_param_spec(repl, no_prefix=True) + if isinstance(repl, Parameters): + # We need to expand both the types in the prefix and the ParamSpec itself + t = t.expand_param_spec(repl) return t.copy_modified( - arg_types=self.expand_types(prefix.arg_types) + t.arg_types, - arg_kinds=prefix.arg_kinds + t.arg_kinds, - arg_names=prefix.arg_names + t.arg_names, + arg_types=self.expand_types(t.arg_types), ret_type=t.ret_type.accept(self), type_guard=(t.type_guard.accept(self) if t.type_guard is not None else None), + imprecise_arg_kinds=(t.imprecise_arg_kinds or repl.imprecise_arg_kinds), ) - # TODO: Conceptually, the "len(t.arg_types) == 2" should not be here. However, this - # errors without it. Either figure out how to eliminate this or place an - # explanation for why this is necessary. - elif isinstance(repl, ParamSpecType) and len(t.arg_types) == 2: - # We're substituting one paramspec for another; this can mean that the prefix - # changes. (e.g. sub Concatenate[int, P] for Q) + elif isinstance(repl, ParamSpecType): + # We're substituting one ParamSpec for another; this can mean that the prefix + # changes, e.g. substitute Concatenate[int, P] in place of Q. prefix = repl.prefix - old_prefix = param_spec.prefix - - # Check assumptions. 
I'm not sure what order to place new prefix vs old prefix: - assert not old_prefix.arg_types or not prefix.arg_types - - t = t.copy_modified( - arg_types=prefix.arg_types + old_prefix.arg_types + t.arg_types, - arg_kinds=prefix.arg_kinds + old_prefix.arg_kinds + t.arg_kinds, - arg_names=prefix.arg_names + old_prefix.arg_names + t.arg_names, + clean_repl = repl.copy_modified(prefix=Parameters([], [], [])) + return t.copy_modified( + arg_types=self.expand_types(t.arg_types[:-2] + prefix.arg_types) + + [ + clean_repl.with_flavor(ParamSpecFlavor.ARGS), + clean_repl.with_flavor(ParamSpecFlavor.KWARGS), + ], + arg_kinds=t.arg_kinds[:-2] + prefix.arg_kinds + t.arg_kinds[-2:], + arg_names=t.arg_names[:-2] + prefix.arg_names + t.arg_names[-2:], + ret_type=t.ret_type.accept(self), + from_concatenate=t.from_concatenate or bool(repl.prefix.arg_types), + imprecise_arg_kinds=(t.imprecise_arg_kinds or prefix.imprecise_arg_kinds), ) var_arg = t.var_arg() + needs_normalization = False if var_arg is not None and isinstance(var_arg.typ, UnpackType): - arg_names, arg_kinds, arg_types = self.interpolate_args_for_unpack(t, var_arg.typ) + needs_normalization = True + arg_types = self.interpolate_args_for_unpack(t, var_arg.typ) else: - arg_names = t.arg_names - arg_kinds = t.arg_kinds arg_types = self.expand_types(t.arg_types) - - return t.copy_modified( + expanded = t.copy_modified( arg_types=arg_types, - arg_names=arg_names, - arg_kinds=arg_kinds, ret_type=t.ret_type.accept(self), type_guard=(t.type_guard.accept(self) if t.type_guard is not None else None), ) + if needs_normalization: + return expanded.with_normalized_var_args() + return expanded def visit_overloaded(self, t: Overloaded) -> Type: items: list[CallableType] = [] @@ -447,40 +379,36 @@ def visit_overloaded(self, t: Overloaded) -> Type: def expand_types_with_unpack( self, typs: Sequence[Type] - ) -> list[Type] | AnyType | UninhabitedType | Instance: + ) -> list[Type] | AnyType | UninhabitedType: """Expands a list of types that has an unpack. In corner cases, this can return a type rather than a list, in which case this indicates use of Any or some error occurred earlier. In this case callers should simply propagate the resulting type. """ - # TODO: this will cause a crash on aliases like A = Tuple[int, Unpack[A]]. - # Although it is unlikely anyone will write this, we should fail gracefully. - typs = flatten_nested_tuples(typs) items: list[Type] = [] for item in typs: if isinstance(item, UnpackType) and isinstance(item.type, TypeVarTupleType): - unpacked_items = self.expand_unpack(item) - if unpacked_items is None: - # TODO: better error, something like tuple of unknown? - return UninhabitedType() - elif isinstance(unpacked_items, Instance): - if len(typs) == 1: - return unpacked_items - else: - assert False, "Invalid unpack of variable length tuple" - elif isinstance(unpacked_items, AnyType): - return unpacked_items - else: - items.extend(unpacked_items) + items.extend(self.expand_unpack(item)) else: - # Must preserve original aliases when possible. items.append(item.accept(self)) return items def visit_tuple_type(self, t: TupleType) -> Type: items = self.expand_types_with_unpack(t.items) if isinstance(items, list): + if len(items) == 1: + # Normalize Tuple[*Tuple[X, ...]] -> Tuple[X, ...] 
+ item = items[0] + if isinstance(item, UnpackType): + unpacked = get_proper_type(item.type) + if isinstance(unpacked, Instance): + assert unpacked.type.fullname == "builtins.tuple" + if t.partial_fallback.type.fullname != "builtins.tuple": + # If it is a subtype (like named tuple) we need to preserve it, + # this essentially mimics the logic in tuple_fallback(). + return t.partial_fallback.accept(self) + return unpacked fallback = t.partial_fallback.accept(self) assert isinstance(fallback, ProperType) and isinstance(fallback, Instance) return t.copy_modified(items=items, fallback=fallback) @@ -526,6 +454,7 @@ def visit_type_alias_type(self, t: TypeAliasType) -> Type: # alias itself), so we just expand the arguments. args = self.expand_types_with_unpack(t.args) if isinstance(args, list): + # TODO: normalize if target is Tuple, and args are [*tuple[X, ...]]? return t.copy_modified(args=args) else: return args @@ -537,34 +466,6 @@ def expand_types(self, types: Iterable[Type]) -> list[Type]: return a -def expand_unpack_with_variables( - t: UnpackType, variables: Mapping[TypeVarId, Type] -) -> list[Type] | Instance | AnyType | None: - """May return either a list of types to unpack to, any, or a single - variable length tuple. The latter may not be valid in all contexts. - """ - if isinstance(t.type, TypeVarTupleType): - repl = get_proper_type(variables.get(t.type.id, t)) - if isinstance(repl, TupleType): - return repl.items - elif isinstance(repl, Instance) and repl.type.fullname == "builtins.tuple": - return repl - elif isinstance(repl, AnyType): - # tuple[Any, ...] would be better, but we don't have - # the type info to construct that type here. - return repl - elif isinstance(repl, TypeVarTupleType): - return [UnpackType(typ=repl)] - elif isinstance(repl, UnpackType): - return [repl] - elif isinstance(repl, UninhabitedType): - return None - else: - raise NotImplementedError(f"Invalid type replacement to expand: {repl}") - else: - raise NotImplementedError(f"Invalid type to expand: {t.type}") - - @overload def expand_self_type(var: Var, typ: ProperType, replacement: ProperType) -> ProperType: ... diff --git a/mypy/exprtotype.py b/mypy/exprtotype.py index bbc284a5188a..b82d35607ef1 100644 --- a/mypy/exprtotype.py +++ b/mypy/exprtotype.py @@ -17,6 +17,7 @@ NameExpr, OpExpr, RefExpr, + StarExpr, StrExpr, TupleExpr, UnaryExpr, @@ -35,6 +36,7 @@ TypeOfAny, UnboundType, UnionType, + UnpackType, ) @@ -56,6 +58,7 @@ def expr_to_unanalyzed_type( options: Options | None = None, allow_new_syntax: bool = False, _parent: Expression | None = None, + allow_unpack: bool = False, ) -> ProperType: """Translate an expression to the corresponding type. 
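The exprtotype.py hunks here and below thread a new allow_unpack flag through expr_to_unanalyzed_type, so that a starred item inside a bracketed list is translated to UnpackType instead of raising TypeTranslationError. A minimal sketch of the PEP 646 style alias this is meant to accept, assuming Python 3.11+ for TypeVarTuple; the alias and function names are illustrative and not taken from the patch:

from __future__ import annotations

from typing import Callable, TypeVarTuple

Ts = TypeVarTuple("Ts")

# The alias right-hand side is an ordinary runtime expression, so it goes
# through expr_to_unanalyzed_type() during semantic analysis; the starred
# item *Ts in the list is a StarExpr and, with allow_unpack=True, becomes
# an UnpackType rather than an error.
IntPrefixed = Callable[[int, *Ts], None]

def call_with_zero(f: IntPrefixed[str, bytes], s: str, b: bytes) -> None:
    f(0, s, b)

The same star form inside Callable argument lists written as annotations is handled by the new TypeConverter.visit_Starred added further down in fastparse.py.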
@@ -163,7 +166,10 @@ def expr_to_unanalyzed_type( return CallableArgument(typ, name, arg_const, expr.line, expr.column) elif isinstance(expr, ListExpr): return TypeList( - [expr_to_unanalyzed_type(t, options, allow_new_syntax, expr) for t in expr.items], + [ + expr_to_unanalyzed_type(t, options, allow_new_syntax, expr, allow_unpack=True) + for t in expr.items + ], line=expr.line, column=expr.column, ) @@ -189,5 +195,7 @@ def expr_to_unanalyzed_type( return RawExpressionType(None, "builtins.complex", line=expr.line, column=expr.column) elif isinstance(expr, EllipsisExpr): return EllipsisType(expr.line) + elif allow_unpack and isinstance(expr, StarExpr): + return UnpackType(expr_to_unanalyzed_type(expr.expr, options, allow_new_syntax)) else: raise TypeTranslationError() diff --git a/mypy/fastparse.py b/mypy/fastparse.py index 902bde110421..a96e697d40bf 100644 --- a/mypy/fastparse.py +++ b/mypy/fastparse.py @@ -4,8 +4,8 @@ import re import sys import warnings -from typing import Any, Callable, List, Optional, Sequence, TypeVar, Union, cast -from typing_extensions import Final, Literal, overload +from typing import Any, Callable, Final, List, Optional, Sequence, TypeVar, Union, cast +from typing_extensions import Literal, overload from mypy import defaults, errorcodes as codes, message_registry from mypy.errors import Errors @@ -115,126 +115,66 @@ TypeOfAny, UnboundType, UnionType, + UnpackType, ) from mypy.util import bytes_to_human_readable_repr, unnamed_function -try: - # pull this into a final variable to make mypyc be quiet about the - # the default argument warning - PY_MINOR_VERSION: Final = sys.version_info[1] - - # Check if we can use the stdlib ast module instead of typed_ast. - if sys.version_info >= (3, 8): - import ast as ast3 - - assert ( - "kind" in ast3.Constant._fields - ), f"This 3.8.0 alpha ({sys.version.split()[0]}) is too old; 3.8.0a3 required" - # TODO: Num, Str, Bytes, NameConstant, Ellipsis are deprecated in 3.8. - # TODO: Index, ExtSlice are deprecated in 3.9. - from ast import ( - AST, - Attribute, - Bytes, - Call, - Ellipsis as ast3_Ellipsis, - Expression as ast3_Expression, - FunctionType, - Index, - Name, - NameConstant, - Num, - Starred, - Str, - UnaryOp, - USub, - ) +# pull this into a final variable to make mypyc be quiet about the +# the default argument warning +PY_MINOR_VERSION: Final = sys.version_info[1] - def ast3_parse( - source: str | bytes, filename: str, mode: str, feature_version: int = PY_MINOR_VERSION - ) -> AST: - return ast3.parse( - source, - filename, - mode, - type_comments=True, # This works the magic - feature_version=feature_version, - ) +import ast as ast3 - NamedExpr = ast3.NamedExpr - Constant = ast3.Constant - else: - from typed_ast import ast3 - from typed_ast.ast3 import ( - AST, - Attribute, - Bytes, - Call, - Ellipsis as ast3_Ellipsis, - Expression as ast3_Expression, - FunctionType, - Index, - Name, - NameConstant, - Num, - Starred, - Str, - UnaryOp, - USub, - ) +# TODO: Index, ExtSlice are deprecated in 3.9. 
+from ast import AST, Attribute, Call, FunctionType, Index, Name, Starred, UnaryOp, USub - def ast3_parse( - source: str | bytes, filename: str, mode: str, feature_version: int = PY_MINOR_VERSION - ) -> AST: - return ast3.parse(source, filename, mode, feature_version=feature_version) - - # These don't exist before 3.8 - NamedExpr = Any - Constant = Any - - if sys.version_info >= (3, 10): - Match = ast3.Match - MatchValue = ast3.MatchValue - MatchSingleton = ast3.MatchSingleton - MatchSequence = ast3.MatchSequence - MatchStar = ast3.MatchStar - MatchMapping = ast3.MatchMapping - MatchClass = ast3.MatchClass - MatchAs = ast3.MatchAs - MatchOr = ast3.MatchOr - AstNode = Union[ast3.expr, ast3.stmt, ast3.pattern, ast3.ExceptHandler] - else: - Match = Any - MatchValue = Any - MatchSingleton = Any - MatchSequence = Any - MatchStar = Any - MatchMapping = Any - MatchClass = Any - MatchAs = Any - MatchOr = Any - AstNode = Union[ast3.expr, ast3.stmt, ast3.ExceptHandler] - if sys.version_info >= (3, 11): - TryStar = ast3.TryStar - else: - TryStar = Any -except ImportError: - try: - from typed_ast import ast35 # type: ignore[attr-defined] # noqa: F401 - except ImportError: - print( - "The typed_ast package is not installed.\n" - "You can install it with `python3 -m pip install typed-ast`.", - file=sys.stderr, - ) - else: - print( - "You need a more recent version of the typed_ast package.\n" - "You can update to the latest version with " - "`python3 -m pip install -U typed-ast`.", - file=sys.stderr, - ) - sys.exit(1) + +def ast3_parse( + source: str | bytes, filename: str, mode: str, feature_version: int = PY_MINOR_VERSION +) -> AST: + return ast3.parse( + source, + filename, + mode, + type_comments=True, # This works the magic + feature_version=feature_version, + ) + + +NamedExpr = ast3.NamedExpr +Constant = ast3.Constant + +if sys.version_info >= (3, 12): + ast_TypeAlias = ast3.TypeAlias +else: + ast_TypeAlias = Any + +if sys.version_info >= (3, 10): + Match = ast3.Match + MatchValue = ast3.MatchValue + MatchSingleton = ast3.MatchSingleton + MatchSequence = ast3.MatchSequence + MatchStar = ast3.MatchStar + MatchMapping = ast3.MatchMapping + MatchClass = ast3.MatchClass + MatchAs = ast3.MatchAs + MatchOr = ast3.MatchOr + AstNode = Union[ast3.expr, ast3.stmt, ast3.pattern, ast3.ExceptHandler] +else: + Match = Any + MatchValue = Any + MatchSingleton = Any + MatchSequence = Any + MatchStar = Any + MatchMapping = Any + MatchClass = Any + MatchAs = Any + MatchOr = Any + AstNode = Union[ast3.expr, ast3.stmt, ast3.ExceptHandler] +if sys.version_info >= (3, 11): + TryStar = ast3.TryStar +else: + TryStar = Any N = TypeVar("N", bound=Node) @@ -370,7 +310,7 @@ def parse_type_comment( raise SyntaxError else: ignored = None - assert isinstance(typ, ast3_Expression) + assert isinstance(typ, ast3.Expression) converted = TypeConverter( errors, line=line, override_column=column, is_evaluated=False ).visit(typ.body) @@ -521,7 +461,14 @@ def translate_stmt_list( return [block] stack = self.class_and_function_stack - if self.strip_function_bodies and len(stack) == 1 and stack[0] == "F": + # Fast case for stripping function bodies + if ( + can_strip + and self.strip_function_bodies + and len(stack) == 1 + and stack[0] == "F" + and not is_coroutine + ): return [] res: list[Statement] = [] @@ -529,32 +476,33 @@ def translate_stmt_list( node = self.visit(stmt) res.append(node) - if ( - self.strip_function_bodies - and can_strip - and stack[-2:] == ["C", "F"] - and not is_possible_trivial_body(res) - ): - # We only strip 
method bodies if they don't assign to an attribute, as - # this may define an attribute which has an externally visible effect. - visitor = FindAttributeAssign() - for s in res: - s.accept(visitor) - if visitor.found: - break - else: - if is_coroutine: - # Yields inside an async function affect the return type and should not - # be stripped. - yield_visitor = FindYield() + # Slow case for stripping function bodies + if can_strip and self.strip_function_bodies: + if stack[-2:] == ["C", "F"]: + if is_possible_trivial_body(res): + can_strip = False + else: + # We only strip method bodies if they don't assign to an attribute, as + # this may define an attribute which has an externally visible effect. + visitor = FindAttributeAssign() for s in res: - s.accept(yield_visitor) - if yield_visitor.found: + s.accept(visitor) + if visitor.found: + can_strip = False break - else: - return [] - else: - return [] + + if can_strip and stack[-1] == "F" and is_coroutine: + # Yields inside an async function affect the return type and should not + # be stripped. + yield_visitor = FindYield() + for s in res: + s.accept(yield_visitor) + if yield_visitor.found: + can_strip = False + break + + if can_strip: + return [] return res def translate_type_comment( @@ -953,8 +901,10 @@ def do_func_def( func_type_ast = ast3_parse(n.type_comment, "", "func_type") assert isinstance(func_type_ast, FunctionType) # for ellipsis arg - if len(func_type_ast.argtypes) == 1 and isinstance( - func_type_ast.argtypes[0], ast3_Ellipsis + if ( + len(func_type_ast.argtypes) == 1 + and isinstance(func_type_ast.argtypes[0], Constant) + and func_type_ast.argtypes[0].value is Ellipsis ): if n.returns: # PEP 484 disallows both type annotations and type comments @@ -991,6 +941,14 @@ def do_func_def( arg_types = [AnyType(TypeOfAny.from_error)] * len(args) return_type = AnyType(TypeOfAny.from_error) else: + if sys.version_info >= (3, 12) and n.type_params: + self.fail( + ErrorMessage("PEP 695 generics are not yet supported", code=codes.VALID_TYPE), + n.type_params[0].lineno, + n.type_params[0].col_offset, + blocker=False, + ) + arg_types = [a.type_annotation for a in args] return_type = TypeConverter( self.errors, line=n.returns.lineno if n.returns else lineno @@ -1044,15 +1002,9 @@ def do_func_def( func_type.line = lineno if n.decorator_list: - if sys.version_info < (3, 8): - # Before 3.8, [typed_]ast the line number points to the first decorator. - # In 3.8, it points to the 'def' line, where we want it. 
- deco_line = lineno - lineno += len(n.decorator_list) # this is only approximately true - else: - # Set deco_line to the old pre-3.8 lineno, in order to keep - # existing "# type: ignore" comments working: - deco_line = n.decorator_list[0].lineno + # Set deco_line to the old pre-3.8 lineno, in order to keep + # existing "# type: ignore" comments working: + deco_line = n.decorator_list[0].lineno var = Var(func_def.name) var.is_ready = False @@ -1070,6 +1022,8 @@ def do_func_def( # FuncDef overrides set_line -- can't use self.set_line func_def.set_line(lineno, n.col_offset, end_line, end_column) retval = func_def + if self.options.include_docstrings: + func_def.docstring = ast3.get_docstring(n, clean=False) self.class_and_function_stack.pop() return retval @@ -1169,6 +1123,14 @@ def visit_ClassDef(self, n: ast3.ClassDef) -> ClassDef: self.class_and_function_stack.append("C") keywords = [(kw.arg, self.visit(kw.value)) for kw in n.keywords if kw.arg] + if sys.version_info >= (3, 12) and n.type_params: + self.fail( + ErrorMessage("PEP 695 generics are not yet supported", code=codes.VALID_TYPE), + n.type_params[0].lineno, + n.type_params[0].col_offset, + blocker=False, + ) + cdef = ClassDef( n.name, self.as_required_block(n.body), @@ -1180,12 +1142,11 @@ def visit_ClassDef(self, n: ast3.ClassDef) -> ClassDef: cdef.decorators = self.translate_expr_list(n.decorator_list) # Set lines to match the old mypy 0.700 lines, in order to keep # existing "# type: ignore" comments working: - if sys.version_info < (3, 8): - cdef.line = n.lineno + len(n.decorator_list) - cdef.deco_line = n.lineno - else: - cdef.line = n.lineno - cdef.deco_line = n.decorator_list[0].lineno if n.decorator_list else None + cdef.line = n.lineno + cdef.deco_line = n.decorator_list[0].lineno if n.decorator_list else None + + if self.options.include_docstrings: + cdef.docstring = ast3.get_docstring(n, clean=False) cdef.column = n.col_offset cdef.end_line = getattr(n, "end_lineno", None) cdef.end_column = getattr(n, "end_col_offset", None) @@ -1572,9 +1533,9 @@ def visit_Constant(self, n: Constant) -> Any: if val is None: e = NameExpr("None") elif isinstance(val, str): - e = StrExpr(n.s) + e = StrExpr(val) elif isinstance(val, bytes): - e = BytesExpr(bytes_to_human_readable_repr(n.s)) + e = BytesExpr(bytes_to_human_readable_repr(val)) elif isinstance(val, bool): # Must check before int! e = NameExpr(str(val)) elif isinstance(val, int): @@ -1589,28 +1550,6 @@ def visit_Constant(self, n: Constant) -> Any: raise RuntimeError("Constant not implemented for " + str(type(val))) return self.set_line(e, n) - # Num(object n) -- a number as a PyObject. - def visit_Num(self, n: ast3.Num) -> IntExpr | FloatExpr | ComplexExpr: - # The n field has the type complex, but complex isn't *really* - # a parent of int and float, and this causes isinstance below - # to think that the complex branch is always picked. Avoid - # this by throwing away the type. 
- val: object = n.n - if isinstance(val, int): - e: IntExpr | FloatExpr | ComplexExpr = IntExpr(val) - elif isinstance(val, float): - e = FloatExpr(val) - elif isinstance(val, complex): - e = ComplexExpr(val) - else: - raise RuntimeError("num not implemented for " + str(type(val))) - return self.set_line(e, n) - - # Str(string s) - def visit_Str(self, n: Str) -> StrExpr: - e = StrExpr(n.s) - return self.set_line(e, n) - # JoinedStr(expr* values) def visit_JoinedStr(self, n: ast3.JoinedStr) -> Expression: # Each of n.values is a str or FormattedValue; we just concatenate @@ -1622,6 +1561,12 @@ def visit_JoinedStr(self, n: ast3.JoinedStr) -> Expression: # Don't make unnecessary join call if there is only one str to join if len(strs_to_join.items) == 1: return self.set_line(strs_to_join.items[0], n) + elif len(strs_to_join.items) > 1: + last = strs_to_join.items[-1] + if isinstance(last, StrExpr) and last.value == "": + # 3.12 can add an empty literal at the end. Delete it for consistency + # between Python versions. + del strs_to_join.items[-1:] join_method = MemberExpr(empty_string, "join") join_method.set_line(empty_string) result_expression = CallExpr(join_method, [strs_to_join], [ARG_POS], [None]) @@ -1635,7 +1580,7 @@ def visit_FormattedValue(self, n: ast3.FormattedValue) -> Expression: # to allow mypyc to support f-strings with format specifiers and conversions. val_exp = self.visit(n.value) val_exp.set_line(n.lineno, n.col_offset) - conv_str = "" if n.conversion is None or n.conversion < 0 else "!" + chr(n.conversion) + conv_str = "" if n.conversion < 0 else "!" + chr(n.conversion) format_string = StrExpr("{" + conv_str + ":{}}") format_spec_exp = self.visit(n.format_spec) if n.format_spec is not None else StrExpr("") format_string.set_line(n.lineno, n.col_offset) @@ -1646,21 +1591,6 @@ def visit_FormattedValue(self, n: ast3.FormattedValue) -> Expression: ) return self.set_line(result_expression, n) - # Bytes(bytes s) - def visit_Bytes(self, n: ast3.Bytes) -> BytesExpr | StrExpr: - e = BytesExpr(bytes_to_human_readable_repr(n.s)) - return self.set_line(e, n) - - # NameConstant(singleton value) - def visit_NameConstant(self, n: NameConstant) -> NameExpr: - e = NameExpr(str(n.value)) - return self.set_line(e, n) - - # Ellipsis - def visit_Ellipsis(self, n: ast3_Ellipsis) -> EllipsisExpr: - e = EllipsisExpr() - return self.set_line(e, n) - # Attribute(expr value, identifier attr, expr_context ctx) def visit_Attribute(self, n: Attribute) -> MemberExpr | SuperExpr: value = n.value @@ -1808,6 +1738,16 @@ def visit_MatchOr(self, n: MatchOr) -> OrPattern: node = OrPattern([self.visit(pattern) for pattern in n.patterns]) return self.set_line(node, n) + def visit_TypeAlias(self, n: ast_TypeAlias) -> AssignmentStmt: + self.fail( + ErrorMessage("PEP 695 type aliases are not yet supported", code=codes.VALID_TYPE), + n.lineno, + n.col_offset, + blocker=False, + ) + node = AssignmentStmt([NameExpr(n.name.id)], self.visit(n.value)) + return self.set_line(node, n) + class TypeConverter: def __init__( @@ -1822,6 +1762,7 @@ def __init__( self.override_column = override_column self.node_stack: list[AST] = [] self.is_evaluated = is_evaluated + self.allow_unpack = False def convert_column(self, column: int) -> int: """Apply column override if defined; otherwise return column. 
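The fastparse.py hunks above turn Python 3.12-only syntax into ordinary, non-blocking errors instead of parse failures: def/class type parameter lists and the new type statement are reported as not yet supported, and visit_TypeAlias still lowers the statement to a plain assignment of its right-hand side so the rest of the file can be analyzed. A rough sketch of the constructs that trigger these diagnostics, assuming a Python 3.12 interpreter to parse them (all names are illustrative):

type Alias = list[int]               # "PEP 695 type aliases are not yet supported";
                                     # lowered to the assignment Alias = list[int]

class Box[T]:                        # "PEP 695 generics are not yet supported"
    def __init__(self, item: T) -> None:
        self.item = item

def first[T](items: list[T]) -> T:   # "PEP 695 generics are not yet supported"
    return items[0]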
@@ -1947,9 +1888,9 @@ def translate_argument_list(self, l: Sequence[ast3.expr]) -> TypeList: return TypeList([self.visit(e) for e in l], line=self.line) def _extract_argument_name(self, n: ast3.expr) -> str | None: - if isinstance(n, Str): - return n.s.strip() - elif isinstance(n, NameConstant) and str(n.value) == "None": + if isinstance(n, Constant) and isinstance(n.value, str): + return n.value.strip() + elif isinstance(n, Constant) and n.value is None: return None self.fail( message_registry.ARG_NAME_EXPECTED_STRING_LITERAL.format(type(n).__name__), @@ -1975,13 +1916,6 @@ def visit_BinOp(self, n: ast3.BinOp) -> Type: uses_pep604_syntax=True, ) - def visit_NameConstant(self, n: NameConstant) -> Type: - if isinstance(n.value, bool): - return RawExpressionType(n.value, "builtins.bool", line=self.line) - else: - return UnboundType(str(n.value), line=self.line, column=n.col_offset) - - # Only for 3.8 and newer def visit_Constant(self, n: Constant) -> Type: val = n.value if val is None: @@ -1989,7 +1923,7 @@ def visit_Constant(self, n: Constant) -> Type: return UnboundType("None", line=self.line) if isinstance(val, str): # Parse forward reference. - return parse_type_string(n.s, "builtins.str", self.line, n.col_offset) + return parse_type_string(val, "builtins.str", self.line, n.col_offset) if val is Ellipsis: # '...' is valid in some types. return EllipsisType(line=self.line) @@ -2033,23 +1967,6 @@ def numeric_type(self, value: object, n: AST) -> Type: numeric_value, type_name, line=self.line, column=getattr(n, "col_offset", -1) ) - # These next three methods are only used if we are on python < - # 3.8, using typed_ast. They are defined unconditionally because - # mypyc can't handle conditional method definitions. - - # Num(number n) - def visit_Num(self, n: Num) -> Type: - return self.numeric_type(n.n, n) - - # Str(string s) - def visit_Str(self, n: Str) -> Type: - return parse_type_string(n.s, "builtins.str", self.line, n.col_offset) - - # Bytes(bytes s) - def visit_Bytes(self, n: Bytes) -> Type: - contents = bytes_to_human_readable_repr(n.s) - return RawExpressionType(contents, "builtins.bytes", self.line, column=n.col_offset) - def visit_Index(self, n: ast3.Index) -> Type: # cast for mypyc's benefit on Python 3.9 value = self.visit(cast(Any, n).value) @@ -2078,10 +1995,10 @@ def visit_Subscript(self, n: ast3.Subscript) -> Type: for s in dims: if getattr(s, "col_offset", None) is None: if isinstance(s, ast3.Index): - s.col_offset = s.value.col_offset # type: ignore[attr-defined] + s.col_offset = s.value.col_offset elif isinstance(s, ast3.Slice): assert s.lower is not None - s.col_offset = s.lower.col_offset # type: ignore[attr-defined] + s.col_offset = s.lower.col_offset sliceval = ast3.Tuple(dims, n.ctx) empty_tuple_index = False @@ -2122,14 +2039,20 @@ def visit_Attribute(self, n: Attribute) -> Type: else: return self.invalid_type(n) - # Ellipsis - def visit_Ellipsis(self, n: ast3_Ellipsis) -> Type: - return EllipsisType(line=self.line) + # Used for Callable[[X *Ys, Z], R] + def visit_Starred(self, n: ast3.Starred) -> Type: + return UnpackType(self.visit(n.value)) # List(expr* elts, expr_context ctx) def visit_List(self, n: ast3.List) -> Type: assert isinstance(n.ctx, ast3.Load) - return self.translate_argument_list(n.elts) + old_allow_unpack = self.allow_unpack + # We specifically only allow starred expressions in a list to avoid + # confusing errors for top-level unpacks (e.g. in base classes). 
+ self.allow_unpack = True + result = self.translate_argument_list(n.elts) + self.allow_unpack = old_allow_unpack + return result def stringify_name(n: AST) -> str | None: diff --git a/mypy/find_sources.py b/mypy/find_sources.py index a3ef2d3db052..3565fc4609cd 100644 --- a/mypy/find_sources.py +++ b/mypy/find_sources.py @@ -4,8 +4,7 @@ import functools import os -from typing import Sequence -from typing_extensions import Final +from typing import Final, Sequence from mypy.fscache import FileSystemCache from mypy.modulefinder import PYTHON_EXTENSIONS, BuildSource, matches_exclude, mypy_path @@ -160,7 +159,7 @@ def crawl_up(self, path: str) -> tuple[str, str]: def crawl_up_dir(self, dir: str) -> tuple[str, str]: return self._crawl_up_helper(dir) or ("", dir) - @functools.lru_cache() # noqa: B019 + @functools.lru_cache # noqa: B019 def _crawl_up_helper(self, dir: str) -> tuple[str, str] | None: """Given a directory, maybe returns module and base directory. diff --git a/mypy/fixup.py b/mypy/fixup.py index 15f4c13c20fa..2b2e1210ee4e 100644 --- a/mypy/fixup.py +++ b/mypy/fixup.py @@ -2,8 +2,7 @@ from __future__ import annotations -from typing import Any -from typing_extensions import Final +from typing import Any, Final from mypy.lookup import lookup_fully_qualified from mypy.nodes import ( diff --git a/mypy/graph_utils.py b/mypy/graph_utils.py new file mode 100644 index 000000000000..399301a6b0fd --- /dev/null +++ b/mypy/graph_utils.py @@ -0,0 +1,112 @@ +"""Helpers for manipulations with graphs.""" + +from __future__ import annotations + +from typing import AbstractSet, Iterable, Iterator, TypeVar + +T = TypeVar("T") + + +def strongly_connected_components( + vertices: AbstractSet[T], edges: dict[T, list[T]] +) -> Iterator[set[T]]: + """Compute Strongly Connected Components of a directed graph. + + Args: + vertices: the labels for the vertices + edges: for each vertex, gives the target vertices of its outgoing edges + + Returns: + An iterator yielding strongly connected components, each + represented as a set of vertices. Each input vertex will occur + exactly once; vertices not part of a SCC are returned as + singleton sets. + + From https://code.activestate.com/recipes/578507/. + """ + identified: set[T] = set() + stack: list[T] = [] + index: dict[T, int] = {} + boundaries: list[int] = [] + + def dfs(v: T) -> Iterator[set[T]]: + index[v] = len(stack) + stack.append(v) + boundaries.append(index[v]) + + for w in edges[v]: + if w not in index: + yield from dfs(w) + elif w not in identified: + while index[w] < boundaries[-1]: + boundaries.pop() + + if boundaries[-1] == index[v]: + boundaries.pop() + scc = set(stack[index[v] :]) + del stack[index[v] :] + identified.update(scc) + yield scc + + for v in vertices: + if v not in index: + yield from dfs(v) + + +def prepare_sccs( + sccs: list[set[T]], edges: dict[T, list[T]] +) -> dict[AbstractSet[T], set[AbstractSet[T]]]: + """Use original edges to organize SCCs in a graph by dependencies between them.""" + sccsmap = {v: frozenset(scc) for scc in sccs for v in scc} + data: dict[AbstractSet[T], set[AbstractSet[T]]] = {} + for scc in sccs: + deps: set[AbstractSet[T]] = set() + for v in scc: + deps.update(sccsmap[x] for x in edges[v]) + data[frozenset(scc)] = deps + return data + + +def topsort(data: dict[T, set[T]]) -> Iterable[set[T]]: + """Topological sort. + + Args: + data: A map from vertices to all vertices that it has an edge + connecting it to. 
NOTE: This data structure + is modified in place -- for normalization purposes, + self-dependencies are removed and entries representing + orphans are added. + + Returns: + An iterator yielding sets of vertices that have an equivalent + ordering. + + Example: + Suppose the input has the following structure: + + {A: {B, C}, B: {D}, C: {D}} + + This is normalized to: + + {A: {B, C}, B: {D}, C: {D}, D: {}} + + The algorithm will yield the following values: + + {D} + {B, C} + {A} + + From https://code.activestate.com/recipes/577413/. + """ + # TODO: Use a faster algorithm? + for k, v in data.items(): + v.discard(k) # Ignore self dependencies. + for item in set.union(*data.values()) - set(data.keys()): + data[item] = set() + while True: + ready = {item for item, dep in data.items() if not dep} + if not ready: + break + yield ready + data = {item: (dep - ready) for item, dep in data.items() if item not in ready} + assert not data, f"A cyclic dependency exists amongst {data!r}" diff --git a/mypy/infer.py b/mypy/infer.py index fbec3d7c4278..ba4a1d2bc9b1 100644 --- a/mypy/infer.py +++ b/mypy/infer.py @@ -12,7 +12,7 @@ ) from mypy.nodes import ArgKind from mypy.solve import solve_constraints -from mypy.types import CallableType, Instance, Type, TypeVarId +from mypy.types import CallableType, Instance, Type, TypeVarLikeType class ArgumentInferContext(NamedTuple): @@ -33,10 +33,12 @@ def infer_function_type_arguments( callee_type: CallableType, arg_types: Sequence[Type | None], arg_kinds: list[ArgKind], + arg_names: Sequence[str | None] | None, formal_to_actual: list[list[int]], context: ArgumentInferContext, strict: bool = True, -) -> list[Type | None]: + allow_polymorphic: bool = False, +) -> tuple[list[Type | None], list[TypeVarLikeType]]: """Infer the type arguments of a generic function. Return an array of lower bound types for the type variables -1 (at @@ -52,18 +54,18 @@ def infer_function_type_arguments( """ # Infer constraints. constraints = infer_constraints_for_callable( - callee_type, arg_types, arg_kinds, formal_to_actual, context + callee_type, arg_types, arg_kinds, arg_names, formal_to_actual, context ) # Solve constraints. - type_vars = callee_type.type_var_ids() - return solve_constraints(type_vars, constraints, strict) + type_vars = callee_type.variables + return solve_constraints(type_vars, constraints, strict, allow_polymorphic) def infer_type_arguments( - type_var_ids: list[TypeVarId], template: Type, actual: Type, is_supertype: bool = False + type_vars: Sequence[TypeVarLikeType], template: Type, actual: Type, is_supertype: bool = False ) -> list[Type | None]: # Like infer_function_type_arguments, but only match a single type # against a generic type. 
constraints = infer_constraints(template, actual, SUPERTYPE_OF if is_supertype else SUBTYPE_OF) - return solve_constraints(type_var_ids, constraints) + return solve_constraints(type_vars, constraints)[0] diff --git a/mypy/ipc.py b/mypy/ipc.py index 21ef61918de5..d026f2429a0f 100644 --- a/mypy/ipc.py +++ b/mypy/ipc.py @@ -12,8 +12,7 @@ import sys import tempfile from types import TracebackType -from typing import Callable -from typing_extensions import Final +from typing import Callable, Final if sys.platform == "win32": # This may be private, but it is needed for IPC on Windows, and is basically stable diff --git a/mypy/join.py b/mypy/join.py index 62d256f4440f..806c644a680c 100644 --- a/mypy/join.py +++ b/mypy/join.py @@ -29,7 +29,6 @@ Parameters, ParamSpecType, PartialType, - PlaceholderType, ProperType, TupleType, Type, @@ -246,14 +245,6 @@ def join_types(s: Type, t: Type, instance_joiner: InstanceJoiner | None = None) if isinstance(s, UninhabitedType) and not isinstance(t, UninhabitedType): s, t = t, s - # We shouldn't run into PlaceholderTypes here, but in practice we can encounter them - # here in the presence of undefined names - if isinstance(t, PlaceholderType) and not isinstance(s, PlaceholderType): - # mypyc does not allow switching the values like above. - return s.accept(TypeJoinVisitor(t)) - elif isinstance(t, PlaceholderType): - return AnyType(TypeOfAny.from_error) - # Meets/joins require callable type normalization. s, t = normalize_callables(s, t) @@ -324,8 +315,14 @@ def visit_unpack_type(self, t: UnpackType) -> UnpackType: raise NotImplementedError def visit_parameters(self, t: Parameters) -> ProperType: - if self.s == t: - return t + if isinstance(self.s, Parameters): + if len(t.arg_types) != len(self.s.arg_types): + return self.default(self.s) + return t.copy_modified( + # Note that since during constraint inference we already treat whole ParamSpec as + # contravariant, we should join individual items, not meet them like for Callables + arg_types=[join_types(s_a, t_a) for s_a, t_a in zip(self.s.arg_types, t.arg_types)] + ) else: return self.default(self.s) diff --git a/mypy/literals.py b/mypy/literals.py index 53ba559c56bb..cba5712644be 100644 --- a/mypy/literals.py +++ b/mypy/literals.py @@ -1,7 +1,7 @@ from __future__ import annotations -from typing import Any, Iterable, Optional, Tuple -from typing_extensions import Final, TypeAlias as _TypeAlias +from typing import Any, Final, Iterable, Optional, Tuple +from typing_extensions import TypeAlias as _TypeAlias from mypy.nodes import ( LITERAL_NO, diff --git a/mypy/main.py b/mypy/main.py index 81a0a045745b..30f6cfe97455 100644 --- a/mypy/main.py +++ b/mypy/main.py @@ -8,11 +8,15 @@ import sys import time from gettext import gettext -from typing import IO, Any, NoReturn, Sequence, TextIO -from typing_extensions import Final +from typing import IO, Any, Final, NoReturn, Sequence, TextIO from mypy import build, defaults, state, util -from mypy.config_parser import get_config_module_names, parse_config_file, parse_version +from mypy.config_parser import ( + get_config_module_names, + parse_config_file, + parse_version, + validate_package_allow_list, +) from mypy.errorcodes import error_codes from mypy.errors import CompileError from mypy.find_sources import InvalidSourceList, create_source_list @@ -595,14 +599,6 @@ def add_invertible_flag( help="Type check code assuming it will be running on Python x.y", dest="special-opts:python_version", ) - platform_group.add_argument( - "-2", - "--py2", - 
dest="special-opts:python_version", - action="https://github.com/store_const", - const=defaults.PYTHON2_VERSION, - help="Use Python 2 mode (same as --python-version 2.7)", - ) platform_group.add_argument( "--platform", action="https://github.com/store", @@ -684,6 +680,14 @@ def add_invertible_flag( " from functions with type annotations", group=untyped_group, ) + untyped_group.add_argument( + "--untyped-calls-exclude", + metavar="MODULE", + action="https://github.com/append", + default=[], + help="Disable --disallow-untyped-calls for functions/methods coming" + " from specific package, module, or class", + ) add_invertible_flag( "--disallow-untyped-defs", default=False, @@ -826,10 +830,12 @@ def add_invertible_flag( ) add_invertible_flag( - "--strict-concatenate", + "--extra-checks", default=False, strict_flag=True, - help="Make arguments prepended via Concatenate be truly positional-only", + help="Enable additional checks that are technically correct but may be impractical " + "in real code. For example, this prohibits partial overlap in TypedDict updates, " + "and makes arguments prepended via Concatenate positional-only", group=strictness_group, ) @@ -885,6 +891,12 @@ def add_invertible_flag( help="Hide error codes in error messages", group=error_group, ) + add_invertible_flag( + "--show-error-code-links", + default=False, + help="Show links to error code documentation", + group=error_group, + ) add_invertible_flag( "--pretty", default=False, @@ -983,6 +995,11 @@ def add_invertible_flag( dest="custom_typing_module", help="Use a custom typing module", ) + internals_group.add_argument( + "--new-type-inference", + action="https://github.com/store_true", + help="Enable new experimental type inference algorithm", + ) internals_group.add_argument( "--disable-recursive-aliases", action="https://github.com/store_true", @@ -1150,6 +1167,8 @@ def add_invertible_flag( parser.add_argument( "--disable-memoryview-promotion", action="https://github.com/store_true", help=argparse.SUPPRESS ) + # This flag is deprecated, it has been moved to --extra-checks + parser.add_argument("--strict-concatenate", action="https://github.com/store_true", help=argparse.SUPPRESS) # options specifying code to check code_group = parser.add_argument_group( @@ -1221,8 +1240,11 @@ def add_invertible_flag( parser.error(f"Cannot find config file '{config_file}'") options = Options() + strict_option_set = False def set_strict_flags() -> None: + nonlocal strict_option_set + strict_option_set = True for dest, value in strict_flag_assignments: setattr(options, dest, value) @@ -1298,6 +1320,8 @@ def set_strict_flags() -> None: % ", ".join(sorted(overlap)) ) + validate_package_allow_list(options.untyped_calls_exclude) + # Process `--enable-error-code` and `--disable-error-code` flags disabled_codes = set(options.disable_error_code) enabled_codes = set(options.enable_error_code) @@ -1331,12 +1355,12 @@ def set_strict_flags() -> None: # Set build flags. 
if special_opts.find_occurrences: - state.find_occurrences = special_opts.find_occurrences.split(".") - assert state.find_occurrences is not None - if len(state.find_occurrences) < 2: + _find_occurrences = tuple(special_opts.find_occurrences.split(".")) + if len(_find_occurrences) < 2: parser.error("Can only find occurrences of class members.") - if len(state.find_occurrences) != 2: + if len(_find_occurrences) != 2: parser.error("Can only find occurrences of non-nested class members.") + state.find_occurrences = _find_occurrences # type: ignore[assignment] # Set reports. for flag, val in vars(special_opts).items(): @@ -1374,6 +1398,8 @@ def set_strict_flags() -> None: "Warning: --enable-recursive-aliases is deprecated;" " recursive types are enabled by default" ) + if options.strict_concatenate and not strict_option_set: + print("Warning: --strict-concatenate is deprecated; use --extra-checks instead") # Set target. if special_opts.modules + special_opts.packages: diff --git a/mypy/maptype.py b/mypy/maptype.py index cae904469fed..4951306573c2 100644 --- a/mypy/maptype.py +++ b/mypy/maptype.py @@ -113,6 +113,5 @@ def instance_to_type_environment(instance: Instance) -> dict[TypeVarId, Type]: required number of type arguments. So this environment consists of the class's type variables mapped to the Instance's actual arguments. The type variables are mapped by their `id`. - """ return {binder.id: arg for binder, arg in zip(instance.type.defn.type_vars, instance.args)} diff --git a/mypy/meet.py b/mypy/meet.py index 29c4d3663503..e3a22a226575 100644 --- a/mypy/meet.py +++ b/mypy/meet.py @@ -701,11 +701,12 @@ def visit_unpack_type(self, t: UnpackType) -> ProperType: raise NotImplementedError def visit_parameters(self, t: Parameters) -> ProperType: - # TODO: is this the right variance? - if isinstance(self.s, (Parameters, CallableType)): + if isinstance(self.s, Parameters): if len(t.arg_types) != len(self.s.arg_types): return self.default(self.s) return t.copy_modified( + # Note that since during constraint inference we already treat whole ParamSpec as + # contravariant, we should meet individual items, not join them like for Callables arg_types=[meet_types(s_a, t_a) for s_a, t_a in zip(self.s.arg_types, t.arg_types)] ) else: diff --git a/mypy/message_registry.py b/mypy/message_registry.py index c5164d48fd13..713ec2e3c759 100644 --- a/mypy/message_registry.py +++ b/mypy/message_registry.py @@ -8,8 +8,7 @@ from __future__ import annotations -from typing import NamedTuple -from typing_extensions import Final +from typing import Final, NamedTuple from mypy import errorcodes as codes @@ -172,7 +171,8 @@ def with_additional_msg(self, info: str) -> ErrorMessage: IMPLICIT_GENERIC_ANY_BUILTIN: Final = ( 'Implicit generic "Any". 
Use "{}" and specify generic parameters' ) -INVALID_UNPACK = "{} cannot be unpacked (must be tuple or TypeVarTuple)" +INVALID_UNPACK: Final = "{} cannot be unpacked (must be tuple or TypeVarTuple)" +INVALID_UNPACK_POSITION: Final = "Unpack is only valid in a variadic position" # TypeVar INCOMPATIBLE_TYPEVAR_VALUE: Final = 'Value of type variable "{}" of {} cannot be {}' @@ -277,6 +277,7 @@ def with_additional_msg(self, info: str) -> ErrorMessage: DATACLASS_FIELD_ALIAS_MUST_BE_LITERAL: Final = ( '"alias" argument to dataclass field must be a string literal' ) +DATACLASS_POST_INIT_MUST_BE_A_FUNCTION: Final = '"__post_init__" method must be an instance method' # fastparse FAILED_TO_MERGE_OVERLOADS: Final = ErrorMessage( diff --git a/mypy/messages.py b/mypy/messages.py index a732d612123c..aab30ee29108 100644 --- a/mypy/messages.py +++ b/mypy/messages.py @@ -16,8 +16,7 @@ import re from contextlib import contextmanager from textwrap import dedent -from typing import Any, Callable, Collection, Iterable, Iterator, List, Sequence, cast -from typing_extensions import Final +from typing import Any, Callable, Collection, Final, Iterable, Iterator, List, Sequence, cast import mypy.typeops from mypy import errorcodes as codes, message_registry @@ -235,6 +234,8 @@ def span_from_context(ctx: Context) -> Iterable[int]: Current logic is a bit tricky, to keep as much backwards compatibility as possible. We may reconsider this to always be a single line (or otherwise simplify it) when we drop Python 3.7. + + TODO: address this in follow up PR """ if isinstance(ctx, (ClassDef, FuncDef)): return range(ctx.deco_line or ctx.line, ctx.line + 1) @@ -1253,18 +1254,21 @@ def argument_incompatible_with_supertype( code=codes.OVERRIDE, secondary_context=secondary_context, ) - self.note( - "This violates the Liskov substitution principle", - context, - code=codes.OVERRIDE, - secondary_context=secondary_context, - ) - self.note( - "See https://mypy.readthedocs.io/en/stable/common_issues.html#incompatible-overrides", - context, - code=codes.OVERRIDE, - secondary_context=secondary_context, - ) + if name != "__post_init__": + # `__post_init__` is special, it can be incompatible by design. + # So, this note is misleading. 
+ self.note( + "This violates the Liskov substitution principle", + context, + code=codes.OVERRIDE, + secondary_context=secondary_context, + ) + self.note( + "See https://mypy.readthedocs.io/en/stable/common_issues.html#incompatible-overrides", + context, + code=codes.OVERRIDE, + secondary_context=secondary_context, + ) if name == "__eq__" and type_name: multiline_msg = self.comparison_method_example_msg(class_name=type_name) @@ -1521,6 +1525,16 @@ def no_overridable_method(self, name: str, context: Context) -> None: context, ) + def explicit_override_decorator_missing( + self, name: str, base_name: str, context: Context + ) -> None: + self.fail( + f'Method "{name}" is not using @override ' + f'but is overriding a method in class "{base_name}"', + context, + code=codes.EXPLICIT_OVERRIDE_REQUIRED, + ) + def final_cant_override_writable(self, name: str, ctx: Context) -> None: self.fail(f'Cannot override writable attribute "{name}" with a final one', ctx) @@ -1714,7 +1728,6 @@ def need_annotation_for_var( self, node: SymbolNode, context: Context, python_version: tuple[int, int] | None = None ) -> None: hint = "" - has_variable_annotations = not python_version or python_version >= (3, 6) pep604_supported = not python_version or python_version >= (3, 10) # type to recommend the user adds recommended_type = None @@ -1735,18 +1748,10 @@ def need_annotation_for_var( type_dec = f"{type_dec}, {type_dec}" recommended_type = f"{alias}[{type_dec}]" if recommended_type is not None: - if has_variable_annotations: - hint = f' (hint: "{node.name}: {recommended_type} = ...")' - else: - hint = f' (hint: "{node.name} = ... # type: {recommended_type}")' - - if has_variable_annotations: - needed = "annotation" - else: - needed = "comment" + hint = f' (hint: "{node.name}: {recommended_type} = ...")' self.fail( - f'Need type {needed} for "{unmangle(node.name)}"{hint}', + f'Need type annotation for "{unmangle(node.name)}"{hint}', context, code=codes.VAR_ANNOTATED, ) @@ -1754,6 +1759,24 @@ def need_annotation_for_var( def explicit_any(self, ctx: Context) -> None: self.fail('Explicit "Any" is not allowed', ctx) + def unsupported_target_for_star_typeddict(self, typ: Type, ctx: Context) -> None: + self.fail( + "Unsupported type {} for ** expansion in TypedDict".format( + format_type(typ, self.options) + ), + ctx, + code=codes.TYPEDDICT_ITEM, + ) + + def non_required_keys_absent_with_star(self, keys: list[str], ctx: Context) -> None: + self.fail( + "Non-required {} not explicitly found in any ** item".format( + format_key_list(keys, short=True) + ), + ctx, + code=codes.TYPEDDICT_ITEM, + ) + def unexpected_typeddict_keys( self, typ: TypedDictType, @@ -2093,9 +2116,11 @@ def report_protocol_problems( return # Report member type conflicts - conflict_types = get_conflict_protocol_types(subtype, supertype, class_obj=class_obj) + conflict_types = get_conflict_protocol_types( + subtype, supertype, class_obj=class_obj, options=self.options + ) if conflict_types and ( - not is_subtype(subtype, erase_type(supertype)) + not is_subtype(subtype, erase_type(supertype), options=self.options) or not subtype.type.defn.type_vars or not supertype.type.defn.type_vars ): @@ -2437,7 +2462,7 @@ def format_literal_value(typ: LiteralType) -> str: if isinstance(typ, Instance): itype = typ # Get the short name of the type. - if itype.type.fullname in ("types.ModuleType", "_importlib_modulespec.ModuleType"): + if itype.type.fullname == "types.ModuleType": # Make some common error messages simpler and tidier. 
base_str = "Module" if itype.extra_attrs and itype.extra_attrs.mod_name and module_names: @@ -2484,10 +2509,11 @@ def format_literal_value(typ: LiteralType) -> str: # Prefer the name of the fallback class (if not tuple), as it's more informative. if typ.partial_fallback.type.fullname != "builtins.tuple": return format(typ.partial_fallback) + type_items = format_list(typ.items) or "()" if options.use_lowercase_names(): - s = f"tuple[{format_list(typ.items)}]" + s = f"tuple[{type_items}]" else: - s = f"Tuple[{format_list(typ.items)}]" + s = f"Tuple[{type_items}]" return s elif isinstance(typ, TypedDictType): # If the TypedDictType is named, return the name @@ -2756,7 +2782,11 @@ def [T <: int] f(self, x: int, y: T) -> None slash = True # If we got a "special arg" (i.e: self, cls, etc...), prepend it to the arg list - if isinstance(tp.definition, FuncDef) and hasattr(tp.definition, "arguments"): + if ( + isinstance(tp.definition, FuncDef) + and hasattr(tp.definition, "arguments") + and not tp.from_concatenate + ): definition_arg_names = [arg.variable.name for arg in tp.definition.arguments] if ( len(definition_arg_names) > len(tp.arg_names) @@ -2833,7 +2863,7 @@ def get_missing_protocol_members(left: Instance, right: Instance, skip: list[str def get_conflict_protocol_types( - left: Instance, right: Instance, class_obj: bool = False + left: Instance, right: Instance, class_obj: bool = False, options: Options | None = None ) -> list[tuple[str, Type, Type]]: """Find members that are defined in 'left' but have incompatible types. Return them as a list of ('member', 'got', 'expected'). @@ -2848,9 +2878,9 @@ def get_conflict_protocol_types( subtype = mypy.typeops.get_protocol_member(left, member, class_obj) if not subtype: continue - is_compat = is_subtype(subtype, supertype, ignore_pos_arg_names=True) + is_compat = is_subtype(subtype, supertype, ignore_pos_arg_names=True, options=options) if IS_SETTABLE in get_member_flags(member, right): - is_compat = is_compat and is_subtype(supertype, subtype) + is_compat = is_compat and is_subtype(supertype, subtype, options=options) if not is_compat: conflicts.append((member, subtype, supertype)) return conflicts @@ -2989,8 +3019,9 @@ def _real_quick_ratio(a: str, b: str) -> float: def best_matches(current: str, options: Collection[str], n: int) -> list[str]: + if not current: + return [] # narrow down options cheaply - assert current options = [o for o in options if _real_quick_ratio(current, o) > 0.75] if len(options) >= 50: options = [o for o in options if abs(len(o) - len(current)) <= 1] diff --git a/mypy/mixedtraverser.py b/mypy/mixedtraverser.py index 771f87fc6bd6..dfde41859c67 100644 --- a/mypy/mixedtraverser.py +++ b/mypy/mixedtraverser.py @@ -49,7 +49,7 @@ def visit_class_def(self, o: ClassDef) -> None: def visit_type_alias_expr(self, o: TypeAliasExpr) -> None: super().visit_type_alias_expr(o) self.in_type_alias_expr = True - o.type.accept(self) + o.node.target.accept(self) self.in_type_alias_expr = False def visit_type_var_expr(self, o: TypeVarExpr) -> None: diff --git a/mypy/modulefinder.py b/mypy/modulefinder.py index e0406bffcc7b..c36a382848bf 100644 --- a/mypy/modulefinder.py +++ b/mypy/modulefinder.py @@ -21,8 +21,8 @@ else: import tomli as tomllib -from typing import Dict, List, NamedTuple, Optional, Tuple, Union -from typing_extensions import Final, TypeAlias as _TypeAlias +from typing import Dict, Final, List, NamedTuple, Optional, Tuple, Union +from typing_extensions import TypeAlias as _TypeAlias from mypy import pyinfo from mypy.fscache 
import FileSystemCache @@ -337,14 +337,9 @@ def _find_module_non_stub_helper( # If this is not a directory then we can't traverse further into it if not self.fscache.isdir(dir_path): break - if approved_stub_package_exists(components[0]): - if len(components) == 1 or ( - self.find_module(components[0]) - is ModuleNotFoundReason.APPROVED_STUBS_NOT_INSTALLED - ): + for i in range(len(components), 0, -1): + if approved_stub_package_exists(".".join(components[:i])): return ModuleNotFoundReason.APPROVED_STUBS_NOT_INSTALLED - if approved_stub_package_exists(".".join(components[:2])): - return ModuleNotFoundReason.APPROVED_STUBS_NOT_INSTALLED if plausible_match: return ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS else: diff --git a/mypy/nodes.py b/mypy/nodes.py index 8457e39b6aa1..9b4ba5e76667 100644 --- a/mypy/nodes.py +++ b/mypy/nodes.py @@ -11,6 +11,7 @@ Any, Callable, Dict, + Final, Iterator, List, Optional, @@ -20,7 +21,7 @@ Union, cast, ) -from typing_extensions import Final, TypeAlias as _TypeAlias, TypeGuard +from typing_extensions import TypeAlias as _TypeAlias, TypeGuard from mypy_extensions import trait @@ -202,7 +203,7 @@ def str_with_options(self, options: Options) -> str: return ans def accept(self, visitor: NodeVisitor[T]) -> T: - raise RuntimeError("Not implemented") + raise RuntimeError("Not implemented", type(self)) @trait @@ -212,7 +213,7 @@ class Statement(Node): __slots__ = () def accept(self, visitor: StatementVisitor[T]) -> T: - raise RuntimeError("Not implemented") + raise RuntimeError("Not implemented", type(self)) @trait @@ -222,7 +223,7 @@ class Expression(Node): __slots__ = () def accept(self, visitor: ExpressionVisitor[T]) -> T: - raise RuntimeError("Not implemented") + raise RuntimeError("Not implemented", type(self)) class FakeExpression(Expression): @@ -287,7 +288,7 @@ class MypyFile(SymbolNode): "names", "imports", "ignored_lines", - "unreachable_lines", + "skipped_lines", "is_stub", "is_cache_skeleton", "is_partial_stub_package", @@ -314,8 +315,9 @@ class MypyFile(SymbolNode): # If the value is empty, ignore all errors; otherwise, the list contains all # error codes to ignore. ignored_lines: dict[int, list[str]] - # Lines that are statically unreachable (e.g. due to platform/version check). - unreachable_lines: set[int] + # Lines that were skipped during semantic analysis e.g. due to ALWAYS_FALSE, MYPY_FALSE, + # or platform/version checks. Those lines would not be type-checked. + skipped_lines: set[int] # Is this file represented by a stub file (.pyi)? is_stub: bool # Is this loaded from the cache and thus missing the actual body of the file? @@ -348,7 +350,7 @@ def __init__( self.ignored_lines = ignored_lines else: self.ignored_lines = {} - self.unreachable_lines = set() + self.skipped_lines = set() self.path = "" self.is_stub = False @@ -510,7 +512,7 @@ class FuncBase(Node): "info", "is_property", "is_class", # Uses "@classmethod" (explicit or implicit) - "is_static", # Uses "@staticmethod" + "is_static", # Uses "@staticmethod" (explicit or implicit) "is_final", # Uses "@final" "is_explicit_override", # Uses "@override" "_fullname", @@ -749,6 +751,7 @@ class FuncDef(FuncItem, SymbolNode, Statement): "is_mypy_only", # Present only when a function is decorated with @typing.datasclass_transform or similar "dataclass_transform_spec", + "docstring", ) __match_args__ = ("name", "arguments", "type", "body") @@ -777,6 +780,7 @@ def __init__( # Definitions that appear in if TYPE_CHECKING are marked with this flag. 
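A minimal standalone sketch (hypothetical helper, not part of the diff) of the longest-prefix lookup that the _find_module_non_stub_helper hunk above now performs when checking for approved stub packages:

    from __future__ import annotations

    def first_approved_prefix(components: list[str], approved: set[str]) -> str | None:
        # Try "a.b.c", then "a.b", then "a"; return the first approved stub prefix.
        for i in range(len(components), 0, -1):
            prefix = ".".join(components[:i])
            if prefix in approved:
                return prefix
        return None

    assert first_approved_prefix(["google", "cloud", "storage"], {"google.cloud"}) == "google.cloud"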
self.is_mypy_only = False self.dataclass_transform_spec: DataclassTransformSpec | None = None + self.docstring: str | None = None @property def name(self) -> str: @@ -1001,7 +1005,7 @@ def __init__(self, name: str, type: mypy.types.Type | None = None) -> None: # If constant value is a simple literal, # store the literal value (unboxed) for the benefit of # tools like mypyc. - self.final_value: int | float | bool | str | None = None + self.final_value: int | float | complex | bool | str | None = None # Where the value was set (only for class attributes) self.final_unset_in_class = False self.final_set_in_init = False @@ -1079,6 +1083,7 @@ class ClassDef(Statement): "analyzed", "has_incompatible_baseclass", "deco_line", + "docstring", "removed_statements", ) @@ -1125,6 +1130,7 @@ def __init__( self.has_incompatible_baseclass = False # Used for error reporting (to keep backwad compatibility with pre-3.8) self.deco_line: int | None = None + self.docstring: str | None = None self.removed_statements = [] @property @@ -2493,7 +2499,7 @@ class TypeVarExpr(TypeVarLikeExpr): __slots__ = ("values",) - __match_args__ = ("name", "values", "upper_bound") + __match_args__ = ("name", "values", "upper_bound", "default") # Value restriction: only types in the list are valid as values. If the # list is empty, there is no restriction. @@ -2541,7 +2547,7 @@ def deserialize(cls, data: JsonDict) -> TypeVarExpr: class ParamSpecExpr(TypeVarLikeExpr): __slots__ = () - __match_args__ = ("name", "upper_bound") + __match_args__ = ("name", "upper_bound", "default") def accept(self, visitor: ExpressionVisitor[T]) -> T: return visitor.visit_paramspec_expr(self) @@ -2575,7 +2581,7 @@ class TypeVarTupleExpr(TypeVarLikeExpr): tuple_fallback: mypy.types.Instance - __match_args__ = ("name", "upper_bound") + __match_args__ = ("name", "upper_bound", "default") def __init__( self, @@ -2619,27 +2625,14 @@ def deserialize(cls, data: JsonDict) -> TypeVarTupleExpr: class TypeAliasExpr(Expression): """Type alias expression (rvalue).""" - __slots__ = ("type", "tvars", "no_args", "node") + __slots__ = ("node",) - __match_args__ = ("type", "tvars", "no_args", "node") + __match_args__ = ("node",) - # The target type. - type: mypy.types.Type - # Names of type variables used to define the alias - tvars: list[str] - # Whether this alias was defined in bare form. Used to distinguish - # between - # A = List - # and - # A = List[Any] - no_args: bool node: TypeAlias def __init__(self, node: TypeAlias) -> None: super().__init__() - self.type = node.target - self.tvars = [v.name for v in node.alias_tvars] - self.no_args = node.no_args self.node = node def accept(self, visitor: ExpressionVisitor[T]) -> T: @@ -2801,6 +2794,25 @@ def accept(self, visitor: ExpressionVisitor[T]) -> T: return visitor.visit_temp_node(self) +# Special attributes not collected as protocol members by Python 3.12 +# See typing._SPECIAL_NAMES +EXCLUDED_PROTOCOL_ATTRIBUTES: Final = frozenset( + { + "__abstractmethods__", + "__annotations__", + "__dict__", + "__doc__", + "__init__", + "__module__", + "__new__", + "__slots__", + "__subclasshook__", + "__weakref__", + "__class_getitem__", # Since Python 3.9 + } +) + + class TypeInfo(SymbolNode): """The type structure of a single class. @@ -3115,6 +3127,8 @@ def protocol_members(self) -> list[str]: if isinstance(node.node, (TypeAlias, TypeVarExpr, MypyFile)): # These are auxiliary definitions (and type aliases are prohibited). 
continue + if name in EXCLUDED_PROTOCOL_ATTRIBUTES: + continue members.add(name) return sorted(list(members)) @@ -3532,7 +3546,12 @@ def from_tuple_type(cls, info: TypeInfo) -> TypeAlias: assert info.tuple_type # TODO: is it possible to refactor this to set the correct type vars here? return TypeAlias( - info.tuple_type.copy_modified(fallback=mypy.types.Instance(info, info.defn.type_vars)), + info.tuple_type.copy_modified( + # Create an Instance similar to fill_typevars(). + fallback=mypy.types.Instance( + info, mypy.types.type_vars_as_args(info.defn.type_vars) + ) + ), info.fullname, info.line, info.column, @@ -3549,7 +3568,10 @@ def from_typeddict_type(cls, info: TypeInfo) -> TypeAlias: # TODO: is it possible to refactor this to set the correct type vars here? return TypeAlias( info.typeddict_type.copy_modified( - fallback=mypy.types.Instance(info, info.defn.type_vars) + # Create an Instance similar to fill_typevars(). + fallback=mypy.types.Instance( + info, mypy.types.type_vars_as_args(info.defn.type_vars) + ) ), info.fullname, info.line, diff --git a/mypy/operators.py b/mypy/operators.py index 2b383ef199bb..07ec5a24fa77 100644 --- a/mypy/operators.py +++ b/mypy/operators.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing_extensions import Final +from typing import Final # Map from binary operator id to related method name (in Python 3). op_methods: Final = { diff --git a/mypy/options.py b/mypy/options.py index 45591597ba69..007ae0a78aa1 100644 --- a/mypy/options.py +++ b/mypy/options.py @@ -4,8 +4,7 @@ import re import sys import sysconfig -from typing import Any, Callable, Dict, Mapping, Pattern -from typing_extensions import Final +from typing import Any, Callable, Final, Mapping, Pattern from mypy import defaults from mypy.errorcodes import ErrorCode, error_codes @@ -40,6 +39,7 @@ class BuildType: "disallow_untyped_defs", "enable_error_code", "enabled_error_codes", + "extra_checks", "follow_imports_for_stubs", "follow_imports", "ignore_errors", @@ -62,6 +62,7 @@ class BuildType: | { "platform", "bazel", + "new_type_inference", "plugins", "disable_bytearray_promotion", "disable_memoryview_promotion", @@ -136,6 +137,10 @@ def __init__(self) -> None: # Disallow calling untyped functions from typed ones self.disallow_untyped_calls = False + # Always allow untyped calls for function coming from modules/packages + # in this list (each item effectively acts as a prefix match) + self.untyped_calls_exclude: list[str] = [] + # Disallow defining untyped (or incompletely typed) functions self.disallow_untyped_defs = False @@ -200,9 +205,12 @@ def __init__(self) -> None: # This makes 1 == '1', 1 in ['1'], and 1 is '1' errors. self.strict_equality = False - # Make arguments prepended via Concatenate be truly positional-only. + # Deprecated, use extra_checks instead. self.strict_concatenate = False + # Enable additional checks that are technically correct but impractical. + self.extra_checks = False + # Report an error for any branches inferred to be unreachable as a result of # type analysis. self.warn_unreachable = False @@ -276,6 +284,12 @@ def __init__(self) -> None: # mypy. (Like mypyc.) self.preserve_asts = False + # If True, function and class docstrings will be extracted and retained. + # This isn't exposed as a command line option + # because it is intended for software integrating with + # mypy. (Like stubgen.) 
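A hedged sketch (not part of the diff; assumes mypy is importable, and the package names are hypothetical) of setting the new untyped_calls_exclude option above programmatically; each entry acts as a module/package prefix, as the comment says:

    from mypy.options import Options

    opts = Options()
    opts.disallow_untyped_calls = True
    # Calls into these modules/packages stay allowed even though they are untyped:
    opts.untyped_calls_exclude = ["thirdparty_pkg", "legacy.utils"]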
+ self.include_docstrings = False + # Paths of user plugins self.plugins: list[str] = [] @@ -309,6 +323,7 @@ def __init__(self) -> None: self.show_column_numbers: bool = False self.show_error_end: bool = False self.hide_error_codes = False + self.show_error_code_links = False # Use soft word wrap and show trimmed source snippets with error location markers. self.pretty = False self.dump_graph = False @@ -344,6 +359,8 @@ def __init__(self) -> None: # skip most errors after this many messages have been reported. # -1 means unlimited. self.many_errors_threshold = defaults.MANY_ERRORS_THRESHOLD + # Enable new experimental type inference algorithm. + self.new_type_inference = False # Disable recursive type aliases (currently experimental) self.disable_recursive_aliases = False # Deprecated reverse version of the above, do not use. @@ -373,7 +390,7 @@ def use_or_syntax(self) -> bool: def new_semantic_analyzer(self) -> bool: return True - def snapshot(self) -> object: + def snapshot(self) -> dict[str, object]: """Produce a comparable snapshot of this Option""" # Under mypyc, we don't have a __dict__, so we need to do worse things. d = dict(getattr(self, "__dict__", ())) @@ -388,6 +405,7 @@ def __repr__(self) -> str: return f"Options({pprint.pformat(self.snapshot())})" def apply_changes(self, changes: dict[str, object]) -> Options: + # Note: effects of this method *must* be idempotent. new_options = Options() # Under mypyc, we don't have a __dict__, so we need to do worse things. replace_object_state(new_options, self, copy_dict=True) @@ -413,6 +431,17 @@ def apply_changes(self, changes: dict[str, object]) -> Options: return new_options + def compare_stable(self, other_snapshot: dict[str, object]) -> bool: + """Compare options in a way that is stable for snapshot() -> apply_changes() roundtrip. + + This is needed because apply_changes() has non-trivial effects for some flags, so + Options().apply_changes(options.snapshot()) may result in a (slightly) different object. 
+ """ + return ( + Options().apply_changes(self.snapshot()).snapshot() + == Options().apply_changes(other_snapshot).snapshot() + ) + def build_per_module_cache(self) -> None: self._per_module_cache = {} @@ -510,7 +539,7 @@ def compile_glob(self, s: str) -> Pattern[str]: return re.compile(expr + "\\Z") def select_options_affecting_cache(self) -> Mapping[str, object]: - result: Dict[str, object] = {} + result: dict[str, object] = {} for opt in OPTIONS_AFFECTING_CACHE: val = getattr(self, opt) if opt in ("disabled_error_codes", "enabled_error_codes"): diff --git a/mypy/patterns.py b/mypy/patterns.py index 32c27d2a5b3c..839864ef5879 100644 --- a/mypy/patterns.py +++ b/mypy/patterns.py @@ -19,7 +19,7 @@ class Pattern(Node): __slots__ = () def accept(self, visitor: PatternVisitor[T]) -> T: - raise RuntimeError("Not implemented") + raise RuntimeError("Not implemented", type(self)) class AsPattern(Pattern): diff --git a/mypy/plugin.py b/mypy/plugin.py index cf124b45d04f..4d62c2bd184b 100644 --- a/mypy/plugin.py +++ b/mypy/plugin.py @@ -250,6 +250,11 @@ def named_generic_type(self, name: str, args: list[Type]) -> Instance: """Construct an instance of a builtin type with given type arguments.""" raise NotImplementedError + @abstractmethod + def get_expression_type(self, node: Expression, type_context: Type | None = None) -> Type: + """Checks the type of the given expression.""" + raise NotImplementedError + @trait class SemanticAnalyzerPluginInterface: diff --git a/mypy/plugins/attrs.py b/mypy/plugins/attrs.py index afd9423d6820..d444c18852dd 100644 --- a/mypy/plugins/attrs.py +++ b/mypy/plugins/attrs.py @@ -4,12 +4,11 @@ from collections import defaultdict from functools import reduce -from typing import Iterable, List, Mapping, cast -from typing_extensions import Final, Literal +from typing import Final, Iterable, List, Mapping, cast +from typing_extensions import Literal import mypy.plugin # To avoid circular imports. from mypy.applytype import apply_generic_arguments -from mypy.checker import TypeChecker from mypy.errorcodes import LITERAL_REQ from mypy.expandtype import expand_type, expand_type_by_instance from mypy.exprtotype import TypeTranslationError, expr_to_unanalyzed_type @@ -295,6 +294,7 @@ def attr_class_maker_callback( ctx: mypy.plugin.ClassDefContext, auto_attribs_default: bool | None = False, frozen_default: bool = False, + slots_default: bool = False, ) -> bool: """Add necessary dunder methods to classes decorated with attr.s. @@ -315,7 +315,7 @@ def attr_class_maker_callback( init = _get_decorator_bool_argument(ctx, "init", True) frozen = _get_frozen(ctx, frozen_default) order = _determine_eq_order(ctx) - slots = _get_decorator_bool_argument(ctx, "slots", False) + slots = _get_decorator_bool_argument(ctx, "slots", slots_default) auto_attribs = _get_decorator_optional_bool_argument(ctx, "auto_attribs", auto_attribs_default) kw_only = _get_decorator_bool_argument(ctx, "kw_only", False) @@ -803,7 +803,7 @@ def _make_frozen(ctx: mypy.plugin.ClassDefContext, attributes: list[Attribute]) else: # This variable belongs to a super class so create new Var so we # can modify it. - var = Var(attribute.name, ctx.cls.info[attribute.name].type) + var = Var(attribute.name, attribute.init_type) var.info = ctx.cls.info var._fullname = f"{ctx.cls.info.fullname}.{var.name}" ctx.cls.info.names[var.name] = SymbolTableNode(MDEF, var) @@ -896,6 +896,13 @@ def _add_slots(ctx: mypy.plugin.ClassDefContext, attributes: list[Attribute]) -> # Unlike `@dataclasses.dataclass`, `__slots__` is rewritten here. 
ctx.cls.info.slots = {attr.name for attr in attributes} + # Also, inject `__slots__` attribute to class namespace: + slots_type = TupleType( + [ctx.api.named_type("builtins.str") for _ in attributes], + fallback=ctx.api.named_type("builtins.tuple"), + ) + add_attribute_to_class(api=ctx.api, cls=ctx.cls, name="__slots__", typ=slots_type) + def _add_match_args(ctx: mypy.plugin.ClassDefContext, attributes: list[Attribute]) -> None: if ( @@ -1048,13 +1055,7 @@ def evolve_function_sig_callback(ctx: mypy.plugin.FunctionSigContext) -> Callabl return ctx.default_signature # leave it to the type checker to complain inst_arg = ctx.args[0][0] - - # - assert isinstance(ctx.api, TypeChecker) - inst_type = ctx.api.expr_checker.accept(inst_arg) - # - - inst_type = get_proper_type(inst_type) + inst_type = get_proper_type(ctx.api.get_expression_type(inst_arg)) inst_type_str = format_type_bare(inst_type, ctx.api.options) attr_types = _get_expanded_attr_types(ctx, inst_type, inst_type, inst_type) @@ -1074,14 +1075,10 @@ def evolve_function_sig_callback(ctx: mypy.plugin.FunctionSigContext) -> Callabl def fields_function_sig_callback(ctx: mypy.plugin.FunctionSigContext) -> CallableType: """Provide the signature for `attrs.fields`.""" - if not ctx.args or len(ctx.args) != 1 or not ctx.args[0] or not ctx.args[0][0]: + if len(ctx.args) != 1 or len(ctx.args[0]) != 1: return ctx.default_signature - # - assert isinstance(ctx.api, TypeChecker) - inst_type = ctx.api.expr_checker.accept(ctx.args[0][0]) - # - proper_type = get_proper_type(inst_type) + proper_type = get_proper_type(ctx.api.get_expression_type(ctx.args[0][0])) # fields(Any) -> Any, fields(type[Any]) -> Any if ( @@ -1098,7 +1095,7 @@ def fields_function_sig_callback(ctx: mypy.plugin.FunctionSigContext) -> Callabl inner = get_proper_type(proper_type.upper_bound) if isinstance(inner, Instance): # We need to work arg_types to compensate for the attrs stubs. 
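Illustrative user-level example (not part of the diff) of the attrs __slots__ handling above, together with the define-maker default changed later in mypy/plugins/default.py: define-style classes are assumed to use slots, and a __slots__ attribute typed as a tuple of str is injected into the class namespace:

    import attrs

    @attrs.define  # slots=True is now the plugin default for attrs.define
    class Point:
        x: int
        y: int

    # The injected __slots__ attribute is visible to the type checker:
    slot_names: tuple = Point.__slots__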
- arg_types = [inst_type] + arg_types = [proper_type] cls = inner.type elif isinstance(proper_type, CallableType): cls = proper_type.type_object() diff --git a/mypy/plugins/common.py b/mypy/plugins/common.py index f803387cde8b..55f2870cadb4 100644 --- a/mypy/plugins/common.py +++ b/mypy/plugins/common.py @@ -28,9 +28,7 @@ require_bool_literal_argument, set_callable_name, ) -from mypy.typeops import ( # noqa: F401 # Part of public API - try_getting_str_literals as try_getting_str_literals, -) +from mypy.typeops import try_getting_str_literals as try_getting_str_literals from mypy.types import ( AnyType, CallableType, @@ -45,7 +43,7 @@ deserialize_type, get_proper_type, ) -from mypy.types_utils import is_optional +from mypy.types_utils import is_overlapping_none from mypy.typevars import fill_typevars from mypy.util import get_unique_redefinition_name @@ -143,7 +141,7 @@ def find_shallow_matching_overload_item(overload: Overloaded, call: CallExpr) -> break elif ( arg_none - and not is_optional(arg_type) + and not is_overlapping_none(arg_type) and not ( isinstance(arg_type, Instance) and arg_type.type.fullname == "builtins.object" diff --git a/mypy/plugins/dataclasses.py b/mypy/plugins/dataclasses.py index 2fd903f2f8b9..d782acf50af5 100644 --- a/mypy/plugins/dataclasses.py +++ b/mypy/plugins/dataclasses.py @@ -2,11 +2,12 @@ from __future__ import annotations -from typing import Iterator, Optional -from typing_extensions import Final +from typing import TYPE_CHECKING, Final, Iterator, Literal from mypy import errorcodes, message_registry from mypy.expandtype import expand_type, expand_type_by_instance +from mypy.meet import meet_types +from mypy.messages import format_type_bare from mypy.nodes import ( ARG_NAMED, ARG_NAMED_OPT, @@ -24,6 +25,7 @@ DataclassTransformSpec, Expression, FuncDef, + FuncItem, IfStmt, JsonDict, NameExpr, @@ -38,7 +40,7 @@ TypeVarExpr, Var, ) -from mypy.plugin import ClassDefContext, SemanticAnalyzerPluginInterface +from mypy.plugin import ClassDefContext, FunctionSigContext, SemanticAnalyzerPluginInterface from mypy.plugins.common import ( _get_callee_type, _get_decorator_bool_argument, @@ -53,29 +55,38 @@ from mypy.types import ( AnyType, CallableType, + FunctionLike, Instance, LiteralType, NoneType, + ProperType, TupleType, Type, TypeOfAny, TypeVarType, + UninhabitedType, + UnionType, get_proper_type, ) from mypy.typevars import fill_typevars +if TYPE_CHECKING: + from mypy.checker import TypeChecker + # The set of decorators that generate dataclasses. 
dataclass_makers: Final = {"dataclass", "dataclasses.dataclass"} SELF_TVAR_NAME: Final = "_DT" -_TRANSFORM_SPEC_FOR_DATACLASSES = DataclassTransformSpec( +_TRANSFORM_SPEC_FOR_DATACLASSES: Final = DataclassTransformSpec( eq_default=True, order_default=False, kw_only_default=False, frozen_default=False, field_specifiers=("dataclasses.Field", "dataclasses.field"), ) +_INTERNAL_REPLACE_SYM_NAME: Final = "__mypy-replace" +_INTERNAL_POST_INIT_SYM_NAME: Final = "__mypy-post_init" class DataclassAttribute: @@ -92,6 +103,7 @@ def __init__( info: TypeInfo, kw_only: bool, is_neither_frozen_nor_nonfrozen: bool, + api: SemanticAnalyzerPluginInterface, ) -> None: self.name = name self.alias = alias @@ -104,15 +116,35 @@ def __init__( self.info = info self.kw_only = kw_only self.is_neither_frozen_nor_nonfrozen = is_neither_frozen_nor_nonfrozen + self._api = api - def to_argument(self, current_info: TypeInfo) -> Argument: - arg_kind = ARG_POS - if self.kw_only and self.has_default: - arg_kind = ARG_NAMED_OPT - elif self.kw_only and not self.has_default: - arg_kind = ARG_NAMED - elif not self.kw_only and self.has_default: - arg_kind = ARG_OPT + def to_argument( + self, current_info: TypeInfo, *, of: Literal["__init__", "replace", "__post_init__"] + ) -> Argument: + if of == "__init__": + arg_kind = ARG_POS + if self.kw_only and self.has_default: + arg_kind = ARG_NAMED_OPT + elif self.kw_only and not self.has_default: + arg_kind = ARG_NAMED + elif not self.kw_only and self.has_default: + arg_kind = ARG_OPT + elif of == "replace": + arg_kind = ARG_NAMED if self.is_init_var and not self.has_default else ARG_NAMED_OPT + elif of == "__post_init__": + # We always use `ARG_POS` without a default value, because it is practical. + # Consider this case: + # + # @dataclass + # class My: + # y: dataclasses.InitVar[str] = 'a' + # def __post_init__(self, y: str) -> None: ... + # + # We would be *required* to specify `y: str = ...` if default is added here. + # But, most people won't care about adding default values to `__post_init__`, + # because it is not designed to be called directly, and duplicating default values + # for the sake of type-checking is unpleasant. + arg_kind = ARG_POS return Argument( variable=self.to_var(current_info), type_annotation=self.expand_type(current_info), @@ -120,13 +152,16 @@ def to_argument(self, current_info: TypeInfo) -> Argument: kind=arg_kind, ) - def expand_type(self, current_info: TypeInfo) -> Optional[Type]: + def expand_type(self, current_info: TypeInfo) -> Type | None: if self.type is not None and self.info.self_type is not None: # In general, it is not safe to call `expand_type()` during semantic analyzis, # however this plugin is called very late, so all types should be fully ready. # Also, it is tricky to avoid eager expansion of Self types here (e.g. because # we serialize attributes). 
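Illustrative example (not part of the diff) of what the __post_init__ argument handling above, together with check_post_init further down in this file, enables: the __post_init__ signature is now checked against the dataclass's InitVar pseudo-fields:

    from dataclasses import InitVar, dataclass

    @dataclass
    class Config:
        path: str
        verbose: InitVar[bool] = False

        def __post_init__(self, verbose: bool) -> None:  # OK: matches the InitVar
            ...

    @dataclass
    class Broken:
        verbose: InitVar[bool] = False

        def __post_init__(self) -> None:  # now flagged: "verbose" parameter is missing
            ...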
- return expand_type(self.type, {self.info.self_type.id: fill_typevars(current_info)}) + with state.strict_optional_set(self._api.options.strict_optional): + return expand_type( + self.type, {self.info.self_type.id: fill_typevars(current_info)} + ) return self.type def to_var(self, current_info: TypeInfo) -> Var: @@ -152,16 +187,15 @@ def deserialize( cls, info: TypeInfo, data: JsonDict, api: SemanticAnalyzerPluginInterface ) -> DataclassAttribute: data = data.copy() - if data.get("kw_only") is None: - data["kw_only"] = False typ = deserialize_and_fixup_type(data.pop("type"), api) - return cls(type=typ, info=info, **data) + return cls(type=typ, info=info, **data, api=api) def expand_typevar_from_subtype(self, sub_type: TypeInfo) -> None: """Expands type vars in the context of a subtype when an attribute is inherited from a generic super type.""" if self.type is not None: - self.type = map_type_from_supertype(self.type, sub_type, self.info) + with state.strict_optional_set(self._api.options.strict_optional): + self.type = map_type_from_supertype(self.type, sub_type, self.info) class DataclassTransformer: @@ -220,12 +254,11 @@ def transform(self) -> bool: and ("__init__" not in info.names or info.names["__init__"].plugin_generated) and attributes ): - with state.strict_optional_set(self._api.options.strict_optional): - args = [ - attr.to_argument(info) - for attr in attributes - if attr.is_in_init and not self._is_kw_only_type(attr.type) - ] + args = [ + attr.to_argument(info, of="__init__") + for attr in attributes + if attr.is_in_init and not self._is_kw_only_type(attr.type) + ] if info.fallback_to_any: # Make positional args optional since we don't know their order. @@ -332,7 +365,6 @@ def transform(self) -> bool: and ( "__match_args__" not in info.names or info.names["__match_args__"].plugin_generated ) - and attributes and py_version >= (3, 10) ): str_type = self._api.named_type("builtins.str") @@ -344,6 +376,11 @@ def transform(self) -> bool: self._add_dataclass_fields_magic_attribute() + if self._spec is _TRANSFORM_SPEC_FOR_DATACLASSES: + self._add_internal_replace_method(attributes) + if "__post_init__" in info.names: + self._add_internal_post_init_method(attributes) + info.metadata["dataclass"] = { "attributes": [attr.serialize() for attr in attributes], "frozen": decorator_arguments["frozen"], @@ -351,6 +388,33 @@ def transform(self) -> bool: return True + def _add_internal_replace_method(self, attributes: list[DataclassAttribute]) -> None: + """ + Stashes the signature of 'dataclasses.replace(...)' for this specific dataclass + to be used later whenever 'dataclasses.replace' is called for this dataclass. 
+ """ + add_method_to_class( + self._api, + self._cls, + _INTERNAL_REPLACE_SYM_NAME, + args=[attr.to_argument(self._cls.info, of="replace") for attr in attributes], + return_type=NoneType(), + is_staticmethod=True, + ) + + def _add_internal_post_init_method(self, attributes: list[DataclassAttribute]) -> None: + add_method_to_class( + self._api, + self._cls, + _INTERNAL_POST_INIT_SYM_NAME, + args=[ + attr.to_argument(self._cls.info, of="__post_init__") + for attr in attributes + if attr.is_init_var + ], + return_type=NoneType(), + ) + def add_slots( self, info: TypeInfo, attributes: list[DataclassAttribute], *, correct_version: bool ) -> None: @@ -379,9 +443,15 @@ def add_slots( self._cls, ) return - info.slots = generated_slots + # Now, insert `.__slots__` attribute to class namespace: + slots_type = TupleType( + [self._api.named_type("builtins.str") for _ in generated_slots], + self._api.named_type("builtins.tuple"), + ) + add_attribute_to_class(self._api, self._cls, "__slots__", slots_type) + def reset_init_only_vars(self, info: TypeInfo, attributes: list[DataclassAttribute]) -> None: """Remove init-only vars from the class and reset init var declarations.""" for attr in attributes: @@ -459,8 +529,7 @@ def collect_attributes(self) -> list[DataclassAttribute] | None: # TODO: We shouldn't be performing type operations during the main # semantic analysis pass, since some TypeInfo attributes might # still be in flux. This should be performed in a later phase. - with state.strict_optional_set(self._api.options.strict_optional): - attr.expand_typevar_from_subtype(cls.info) + attr.expand_typevar_from_subtype(cls.info) found_attrs[name] = attr sym_node = cls.info.names.get(name) @@ -591,7 +660,8 @@ def collect_attributes(self) -> list[DataclassAttribute] | None: ) current_attr_names.add(lhs.name) - init_type = self._infer_dataclass_attr_init_type(sym, lhs.name, stmt) + with state.strict_optional_set(self._api.options.strict_optional): + init_type = self._infer_dataclass_attr_init_type(sym, lhs.name, stmt) found_attrs[lhs.name] = DataclassAttribute( name=lhs.name, alias=alias, @@ -606,6 +676,7 @@ def collect_attributes(self) -> list[DataclassAttribute] | None: is_neither_frozen_nor_nonfrozen=_has_direct_dataclass_transform_metaclass( cls.info ), + api=self._api, ) all_attrs = list(found_attrs.values()) @@ -893,3 +964,152 @@ def _has_direct_dataclass_transform_metaclass(info: TypeInfo) -> bool: info.declared_metaclass is not None and info.declared_metaclass.type.dataclass_transform_spec is not None ) + + +def _fail_not_dataclass(ctx: FunctionSigContext, t: Type, parent_t: Type) -> None: + t_name = format_type_bare(t, ctx.api.options) + if parent_t is t: + msg = ( + f'Argument 1 to "replace" has a variable type "{t_name}" not bound to a dataclass' + if isinstance(t, TypeVarType) + else f'Argument 1 to "replace" has incompatible type "{t_name}"; expected a dataclass' + ) + else: + pt_name = format_type_bare(parent_t, ctx.api.options) + msg = ( + f'Argument 1 to "replace" has type "{pt_name}" whose item "{t_name}" is not bound to a dataclass' + if isinstance(t, TypeVarType) + else f'Argument 1 to "replace" has incompatible type "{pt_name}" whose item "{t_name}" is not a dataclass' + ) + + ctx.api.fail(msg, ctx.context) + + +def _get_expanded_dataclasses_fields( + ctx: FunctionSigContext, typ: ProperType, display_typ: ProperType, parent_typ: ProperType +) -> list[CallableType] | None: + """ + For a given type, determine what dataclasses it can be: for each class, return the field types. 
+ For generic classes, the field types are expanded. + If the type contains Any or a non-dataclass, returns None; in the latter case, also reports an error. + """ + if isinstance(typ, AnyType): + return None + elif isinstance(typ, UnionType): + ret: list[CallableType] | None = [] + for item in typ.relevant_items(): + item = get_proper_type(item) + item_types = _get_expanded_dataclasses_fields(ctx, item, item, parent_typ) + if ret is not None and item_types is not None: + ret += item_types + else: + ret = None # but keep iterating to emit all errors + return ret + elif isinstance(typ, TypeVarType): + return _get_expanded_dataclasses_fields( + ctx, get_proper_type(typ.upper_bound), display_typ, parent_typ + ) + elif isinstance(typ, Instance): + replace_sym = typ.type.get_method(_INTERNAL_REPLACE_SYM_NAME) + if replace_sym is None: + _fail_not_dataclass(ctx, display_typ, parent_typ) + return None + replace_sig = replace_sym.type + assert isinstance(replace_sig, ProperType) + assert isinstance(replace_sig, CallableType) + return [expand_type_by_instance(replace_sig, typ)] + else: + _fail_not_dataclass(ctx, display_typ, parent_typ) + return None + + +# TODO: we can potentially get the function signature hook to allow returning a union +# and leave this to the regular machinery of resolving a union of callables +# (https://github.com/python/mypy/issues/15457) +def _meet_replace_sigs(sigs: list[CallableType]) -> CallableType: + """ + Produces the lowest bound of the 'replace' signatures of multiple dataclasses. + """ + args = { + name: (typ, kind) + for name, typ, kind in zip(sigs[0].arg_names, sigs[0].arg_types, sigs[0].arg_kinds) + } + + for sig in sigs[1:]: + sig_args = { + name: (typ, kind) + for name, typ, kind in zip(sig.arg_names, sig.arg_types, sig.arg_kinds) + } + for name in (*args.keys(), *sig_args.keys()): + sig_typ, sig_kind = args.get(name, (UninhabitedType(), ARG_NAMED_OPT)) + sig2_typ, sig2_kind = sig_args.get(name, (UninhabitedType(), ARG_NAMED_OPT)) + args[name] = ( + meet_types(sig_typ, sig2_typ), + ARG_NAMED_OPT if sig_kind == sig2_kind == ARG_NAMED_OPT else ARG_NAMED, + ) + + return sigs[0].copy_modified( + arg_names=list(args.keys()), + arg_types=[typ for typ, _ in args.values()], + arg_kinds=[kind for _, kind in args.values()], + ) + + +def replace_function_sig_callback(ctx: FunctionSigContext) -> CallableType: + """ + Returns a signature for the 'dataclasses.replace' function that's dependent on the type + of the first positional argument. + """ + if len(ctx.args) != 2: + # Ideally the name and context should be callee's, but we don't have it in FunctionSigContext. 
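Illustrative user-level example (not part of the diff) of the dataclasses.replace() support these hunks add: the call is now checked against the fields of the specific dataclass instead of an untyped **changes signature:

    import dataclasses

    @dataclasses.dataclass
    class User:
        name: str
        age: int

    u = User("Ada", 36)
    dataclasses.replace(u, age=37)          # OK
    dataclasses.replace(u, age="37")        # rejected: wrong type for "age"
    dataclasses.replace(u, email="a@b.c")   # rejected: unexpected keyword argument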
+ ctx.api.fail(f'"{ctx.default_signature.name}" has unexpected type annotation', ctx.context) + return ctx.default_signature + + if len(ctx.args[0]) != 1: + return ctx.default_signature # leave it to the type checker to complain + + obj_arg = ctx.args[0][0] + obj_type = get_proper_type(ctx.api.get_expression_type(obj_arg)) + inst_type_str = format_type_bare(obj_type, ctx.api.options) + + replace_sigs = _get_expanded_dataclasses_fields(ctx, obj_type, obj_type, obj_type) + if replace_sigs is None: + return ctx.default_signature + replace_sig = _meet_replace_sigs(replace_sigs) + + return replace_sig.copy_modified( + arg_names=[None, *replace_sig.arg_names], + arg_kinds=[ARG_POS, *replace_sig.arg_kinds], + arg_types=[obj_type, *replace_sig.arg_types], + ret_type=obj_type, + fallback=ctx.default_signature.fallback, + name=f"{ctx.default_signature.name} of {inst_type_str}", + ) + + +def is_processed_dataclass(info: TypeInfo | None) -> bool: + return info is not None and "dataclass" in info.metadata + + +def check_post_init(api: TypeChecker, defn: FuncItem, info: TypeInfo) -> None: + if defn.type is None: + return + assert isinstance(defn.type, FunctionLike) + + ideal_sig_method = info.get_method(_INTERNAL_POST_INIT_SYM_NAME) + assert ideal_sig_method is not None and ideal_sig_method.type is not None + ideal_sig = ideal_sig_method.type + assert isinstance(ideal_sig, ProperType) # we set it ourselves + assert isinstance(ideal_sig, CallableType) + ideal_sig = ideal_sig.copy_modified(name="__post_init__") + + api.check_override( + override=defn.type, + original=ideal_sig, + name="__post_init__", + name_in_super="__post_init__", + supertype="dataclass", + original_class_or_static=False, + override_class_or_static=False, + node=defn, + ) diff --git a/mypy/plugins/default.py b/mypy/plugins/default.py index 500eef76a9d9..b60fc3873c04 100644 --- a/mypy/plugins/default.py +++ b/mypy/plugins/default.py @@ -31,7 +31,9 @@ TypedDictType, TypeOfAny, TypeVarType, + UnionType, get_proper_type, + get_proper_types, ) @@ -51,12 +53,14 @@ def get_function_hook(self, fullname: str) -> Callable[[FunctionContext], Type] def get_function_signature_hook( self, fullname: str ) -> Callable[[FunctionSigContext], FunctionLike] | None: - from mypy.plugins import attrs + from mypy.plugins import attrs, dataclasses if fullname in ("attr.evolve", "attrs.evolve", "attr.assoc", "attrs.assoc"): return attrs.evolve_function_sig_callback elif fullname in ("attr.fields", "attrs.fields"): return attrs.fields_function_sig_callback + elif fullname == "dataclasses.replace": + return dataclasses.replace_function_sig_callback return None def get_method_signature_hook( @@ -155,7 +159,9 @@ def get_class_decorator_hook_2( attrs.attr_class_maker_callback, auto_attribs_default=None, frozen_default=True ) elif fullname in attrs.attr_define_makers: - return partial(attrs.attr_class_maker_callback, auto_attribs_default=None) + return partial( + attrs.attr_class_maker_callback, auto_attribs_default=None, slots_default=True + ) return None @@ -402,6 +408,31 @@ def typed_dict_update_signature_callback(ctx: MethodSigContext) -> CallableType: assert isinstance(arg_type, TypedDictType) arg_type = arg_type.as_anonymous() arg_type = arg_type.copy_modified(required_keys=set()) + if ctx.args and ctx.args[0]: + with ctx.api.msg.filter_errors(): + inferred = get_proper_type( + ctx.api.get_expression_type(ctx.args[0][0], type_context=arg_type) + ) + possible_tds = [] + if isinstance(inferred, TypedDictType): + possible_tds = [inferred] + elif 
isinstance(inferred, UnionType): + possible_tds = [ + t + for t in get_proper_types(inferred.relevant_items()) + if isinstance(t, TypedDictType) + ] + items = [] + for td in possible_tds: + item = arg_type.copy_modified( + required_keys=(arg_type.required_keys | td.required_keys) + & arg_type.items.keys() + ) + if not ctx.api.options.extra_checks: + item = item.copy_modified(item_names=list(td.items)) + items.append(item) + if items: + arg_type = make_simplified_union(items) return signature.copy_modified(arg_types=[arg_type]) return signature diff --git a/mypy/plugins/enums.py b/mypy/plugins/enums.py index 1acf42d11ee6..7869a8b5cdfa 100644 --- a/mypy/plugins/enums.py +++ b/mypy/plugins/enums.py @@ -12,8 +12,7 @@ """ from __future__ import annotations -from typing import Iterable, Sequence, TypeVar, cast -from typing_extensions import Final +from typing import Final, Iterable, Sequence, TypeVar, cast import mypy.plugin # To avoid circular imports. from mypy.nodes import TypeInfo diff --git a/mypy/plugins/functools.py b/mypy/plugins/functools.py index eba4d77f2343..0aa2824c9b51 100644 --- a/mypy/plugins/functools.py +++ b/mypy/plugins/functools.py @@ -1,8 +1,7 @@ """Plugin for supporting the functools standard library module.""" from __future__ import annotations -from typing import NamedTuple -from typing_extensions import Final +from typing import Final, NamedTuple import mypy.plugin from mypy.nodes import ARG_POS, ARG_STAR2, Argument, FuncItem, Var diff --git a/mypy/plugins/singledispatch.py b/mypy/plugins/singledispatch.py index a44493f900b1..c5ce20233a0a 100644 --- a/mypy/plugins/singledispatch.py +++ b/mypy/plugins/singledispatch.py @@ -1,7 +1,7 @@ from __future__ import annotations -from typing import NamedTuple, Sequence, TypeVar, Union -from typing_extensions import Final, TypeAlias as _TypeAlias +from typing import Final, NamedTuple, Sequence, TypeVar, Union +from typing_extensions import TypeAlias as _TypeAlias from mypy.messages import format_type from mypy.nodes import ARG_POS, Argument, Block, ClassDef, Context, SymbolTable, TypeInfo, Var diff --git a/mypy/pyinfo.py b/mypy/pyinfo.py index 778b0b163ce6..f262ac8b2132 100644 --- a/mypy/pyinfo.py +++ b/mypy/pyinfo.py @@ -2,10 +2,10 @@ """Utilities to find the site and prefix information of a Python executable. -This file MUST remain compatible with all Python 3.7+ versions. Since we cannot make any assumptions about the -Python being executed, this module should not use *any* dependencies outside of the standard -library found in Python 3.7. This file is run each mypy run, so it should be kept as fast as -possible. +This file MUST remain compatible with all Python 3.8+ versions. Since we cannot make any +assumptions about the Python being executed, this module should not use *any* dependencies outside +of the standard library found in Python 3.8. This file is run each mypy run, so it should be kept +as fast as possible. 
""" import sys diff --git a/mypy/reachability.py b/mypy/reachability.py index 8602fc645e2b..a25b9dff4581 100644 --- a/mypy/reachability.py +++ b/mypy/reachability.py @@ -2,8 +2,7 @@ from __future__ import annotations -from typing import Tuple, TypeVar -from typing_extensions import Final +from typing import Final, Tuple, TypeVar from mypy.literals import literal from mypy.nodes import ( diff --git a/mypy/renaming.py b/mypy/renaming.py index 2fa3ef168a66..c960eb4b1ce8 100644 --- a/mypy/renaming.py +++ b/mypy/renaming.py @@ -1,8 +1,7 @@ from __future__ import annotations from contextlib import contextmanager -from typing import Iterator -from typing_extensions import Final +from typing import Final, Iterator from mypy.nodes import ( AssignmentStmt, diff --git a/mypy/report.py b/mypy/report.py index 81d49baf50da..d5f16464c0fb 100644 --- a/mypy/report.py +++ b/mypy/report.py @@ -12,8 +12,8 @@ import tokenize from abc import ABCMeta, abstractmethod from operator import attrgetter -from typing import Any, Callable, Dict, Iterator, Tuple -from typing_extensions import Final, TypeAlias as _TypeAlias +from typing import Any, Callable, Dict, Final, Iterator, Tuple +from typing_extensions import TypeAlias as _TypeAlias from urllib.request import pathname2url from mypy import stats @@ -25,7 +25,7 @@ from mypy.version import __version__ try: - from lxml import etree # type: ignore[import] + from lxml import etree # type: ignore[import-untyped] LXML_INSTALLED = True except ImportError: diff --git a/mypy/semanal.py b/mypy/semanal.py index c5a6989f4f61..be7e733a0816 100644 --- a/mypy/semanal.py +++ b/mypy/semanal.py @@ -51,8 +51,8 @@ from __future__ import annotations from contextlib import contextmanager -from typing import Any, Callable, Collection, Iterable, Iterator, List, TypeVar, cast -from typing_extensions import Final, TypeAlias as _TypeAlias +from typing import Any, Callable, Collection, Final, Iterable, Iterator, List, TypeVar, cast +from typing_extensions import TypeAlias as _TypeAlias from mypy import errorcodes as codes, message_registry from mypy.constant_fold import constant_fold_expr @@ -234,7 +234,6 @@ fix_instance_types, has_any_from_unimported_type, no_subscript_builtin_alias, - remove_dups, type_constructors, ) from mypy.typeops import function_type, get_type_vars, try_getting_str_literals_from_type @@ -277,6 +276,8 @@ get_proper_type, get_proper_types, is_named_instance, + remove_dups, + type_vars_as_args, ) from mypy.types_utils import is_invalid_recursive_alias, store_argument_type from mypy.typevars import fill_typevars @@ -375,7 +376,7 @@ class SemanticAnalyzer( missing_names: list[set[str]] # Callbacks that will be called after semantic analysis to tweak things. patches: list[tuple[int, Callable[[], None]]] - loop_depth = 0 # Depth of breakable loops + loop_depth: list[int] # Depth of breakable loops cur_mod_id = "" # Current module id (or None) (phase 2) _is_stub_file = False # Are we analyzing a stub file? _is_typeshed_stub_file = False # Are we analyzing a typeshed stub file? 
@@ -428,7 +429,7 @@ def __init__( self.tvar_scope = TypeVarLikeScope() self.function_stack = [] self.block_depth = [0] - self.loop_depth = 0 + self.loop_depth = [0] self.errors = errors self.modules = modules self.msg = MessageBuilder(errors, modules) @@ -959,9 +960,11 @@ def remove_unpack_kwargs(self, defn: FuncDef, typ: CallableType) -> CallableType def prepare_method_signature(self, func: FuncDef, info: TypeInfo, has_self_type: bool) -> None: """Check basic signature validity and tweak annotation of self/cls argument.""" - # Only non-static methods are special. + # Only non-static methods are special, as well as __new__. functype = func.type - if not func.is_static: + if func.name == "__new__": + func.is_static = True + if not func.is_static or func.name == "__new__": if func.name in ["__init_subclass__", "__class_getitem__"]: func.is_class = True if not func.arguments: @@ -1008,7 +1011,21 @@ def is_expected_self_type(self, typ: Type, is_classmethod: bool) -> bool: return self.is_expected_self_type(typ.item, is_classmethod=False) if isinstance(typ, UnboundType): sym = self.lookup_qualified(typ.name, typ, suppress_errors=True) - if sym is not None and sym.fullname == "typing.Type" and typ.args: + if ( + sym is not None + and ( + sym.fullname == "typing.Type" + or ( + sym.fullname == "builtins.type" + and ( + self.is_stub_file + or self.is_future_flag_set("annotations") + or self.options.python_version >= (3, 9) + ) + ) + ) + and typ.args + ): return self.is_expected_self_type(typ.args[0], is_classmethod=False) return False if isinstance(typ, TypeVarType): @@ -1137,7 +1154,16 @@ def analyze_overloaded_func_def(self, defn: OverloadedFuncDef) -> None: elif not non_overload_indexes: self.handle_missing_overload_implementation(defn) - if types: + if types and not any( + # If some overload items are decorated with other decorators, then + # the overload type will be determined during type checking. + isinstance(it, Decorator) and len(it.decorators) > 1 + for it in defn.items + ): + # TODO: should we enforce decorated overloads consistency somehow? + # Some existing code uses both styles: + # * Put decorator only on implementation, use "effective" types in overloads + # * Put decorator everywhere, use "bare" types in overloads. defn.type = Overloaded(types) defn.type.line = defn.line @@ -1337,6 +1363,8 @@ def analyze_property_with_multi_part_definition(self, defn: OverloadedFuncDef) - first_item.var.is_settable_property = True # Get abstractness from the original definition. item.func.abstract_status = first_item.func.abstract_status + if node.name == "deleter": + item.func.abstract_status = first_item.func.abstract_status else: self.fail( f"Only supported top decorator is @{first_item.func.name}.setter", item @@ -1381,7 +1409,7 @@ def analyze_function_body(self, defn: FuncItem) -> None: # The first argument of a non-static, non-class method is like 'self' # (though the name could be different), having the enclosing class's # instance type. 
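Illustrative example (not part of the diff) of the prepare_method_signature change above: __new__ is now treated as implicitly static, so its first argument is understood as the class itself even without @staticmethod:

    class Box:
        def __new__(cls, value: int) -> "Box":
            # Even without @staticmethod, 'cls' is understood as the class,
            # matching how Python treats __new__ at runtime.
            return object.__new__(cls)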
- if is_method and not defn.is_static and defn.arguments: + if is_method and (not defn.is_static or defn.name == "__new__") and defn.arguments: if not defn.is_class: defn.arguments[0].variable.is_self = True else: @@ -1675,12 +1703,17 @@ def setup_type_vars(self, defn: ClassDef, tvar_defs: list[TypeVarLikeType]) -> N def setup_alias_type_vars(self, defn: ClassDef) -> None: assert defn.info.special_alias is not None defn.info.special_alias.alias_tvars = list(defn.type_vars) + # It is a bit unfortunate that we need to inline some logic from TypeAlias constructor, + # but it is required, since type variables may change during semantic analyzer passes. + for i, t in enumerate(defn.type_vars): + if isinstance(t, TypeVarTupleType): + defn.info.special_alias.tvar_tuple_index = i target = defn.info.special_alias.target assert isinstance(target, ProperType) if isinstance(target, TypedDictType): - target.fallback.args = tuple(defn.type_vars) + target.fallback.args = type_vars_as_args(defn.type_vars) elif isinstance(target, TupleType): - target.partial_fallback.args = tuple(defn.type_vars) + target.partial_fallback.args = type_vars_as_args(defn.type_vars) else: assert False, f"Unexpected special alias type: {type(target)}" @@ -1690,6 +1723,8 @@ def is_core_builtin_class(self, defn: ClassDef) -> bool: def analyze_class_body_common(self, defn: ClassDef) -> None: """Parts of class body analysis that are common to all kinds of class defs.""" self.enter_class(defn.info) + if any(b.self_type is not None for b in defn.info.mro): + self.setup_self_type() defn.defs.accept(self) self.apply_class_plugin_hooks(defn) self.leave_class() @@ -1742,6 +1777,10 @@ def analyze_namedtuple_classdef( self.setup_type_vars(defn, tvar_defs) self.setup_alias_type_vars(defn) with self.scope.class_scope(defn.info): + for deco in defn.decorators: + deco.accept(self) + if isinstance(deco, RefExpr) and deco.fullname in FINAL_DECORATOR_NAMES: + info.is_final = True with self.named_tuple_analyzer.save_namedtuple_body(info): self.analyze_class_body_common(defn) return True @@ -1808,12 +1847,14 @@ def enter_class(self, info: TypeInfo) -> None: self.locals.append(None) # Add class scope self.is_comprehension_stack.append(False) self.block_depth.append(-1) # The class body increments this to 0 + self.loop_depth.append(0) self._type = info self.missing_names.append(set()) def leave_class(self) -> None: """Restore analyzer state.""" self.block_depth.pop() + self.loop_depth.pop() self.locals.pop() self.is_comprehension_stack.pop() self._type = self.type_stack.pop() @@ -2495,12 +2536,7 @@ def visit_import_from(self, imp: ImportFrom) -> None: elif fullname in self.missing_modules: missing_submodule = True # If it is still not resolved, check for a module level __getattr__ - if ( - module - and not node - and (module.is_stub or self.options.python_version >= (3, 7)) - and "__getattr__" in module.names - ): + if module and not node and "__getattr__" in module.names: # We store the fullname of the original definition so that we can # detect whether two imported names refer to the same thing. fullname = module_id + "." 
+ id @@ -3065,6 +3101,12 @@ def analyze_namedtuple_assign(self, s: AssignmentStmt) -> bool: if len(s.lvalues) != 1 or not isinstance(s.lvalues[0], (NameExpr, MemberExpr)): return False lvalue = s.lvalues[0] + if isinstance(lvalue, MemberExpr): + if isinstance(s.rvalue, CallExpr) and isinstance(s.rvalue.callee, RefExpr): + fullname = s.rvalue.callee.fullname + if fullname == "collections.namedtuple" or fullname in TYPED_NAMEDTUPLE_NAMES: + self.fail("NamedTuple type as an attribute is not supported", lvalue) + return False name = lvalue.name namespace = self.qualified_name(name) with self.tvar_scope_frame(self.tvar_scope.class_frame(namespace)): @@ -3073,9 +3115,6 @@ def analyze_namedtuple_assign(self, s: AssignmentStmt) -> bool: ) if internal_name is None: return False - if isinstance(lvalue, MemberExpr): - self.fail("NamedTuple type as an attribute is not supported", lvalue) - return False if internal_name != name: self.fail( 'First argument to namedtuple() should be "{}", not "{}"'.format( @@ -3219,7 +3258,7 @@ def unwrap_final(self, s: AssignmentStmt) -> bool: if lval.is_new_def: lval.is_inferred_def = s.type is None - if self.loop_depth > 0: + if self.loop_depth[-1] > 0: self.fail("Cannot use Final inside a loop", s) if self.type and self.type.is_protocol: self.msg.protocol_members_cant_be_final(s) @@ -3387,7 +3426,7 @@ def analyze_simple_literal_type(self, rvalue: Expression, is_final: bool) -> Typ return None value = constant_fold_expr(rvalue, self.cur_mod_id) - if value is None: + if value is None or isinstance(value, complex): return None if isinstance(value, bool): @@ -3647,7 +3686,10 @@ def disable_invalid_recursive_aliases( """Prohibit and fix recursive type aliases that are invalid/unsupported.""" messages = [] if is_invalid_recursive_alias({current_node}, current_node.target): - messages.append("Invalid recursive alias: a union item of itself") + target = ( + "tuple" if isinstance(get_proper_type(current_node.target), TupleType) else "union" + ) + messages.append(f"Invalid recursive alias: a {target} item of itself") if detect_diverging_alias( current_node, current_node.target, self.lookup_qualified, self.tvar_scope ): @@ -4180,6 +4222,7 @@ def get_typevarlike_argument( *, allow_unbound_tvars: bool = False, allow_param_spec_literals: bool = False, + allow_unpack: bool = False, report_invalid_typevar_arg: bool = True, ) -> ProperType | None: try: @@ -4191,6 +4234,7 @@ def get_typevarlike_argument( report_invalid_types=False, allow_unbound_tvars=allow_unbound_tvars, allow_param_spec_literals=allow_param_spec_literals, + allow_unpack=allow_unpack, ) if analyzed is None: # Type variables are special: we need to place them in the symbol table @@ -4342,6 +4386,7 @@ def process_typevartuple_declaration(self, s: AssignmentStmt) -> bool: s, allow_unbound_tvars=True, report_invalid_typevar_arg=False, + allow_unpack=True, ) default = tv_arg or AnyType(TypeOfAny.from_error) if not isinstance(default, UnpackType): @@ -4698,9 +4743,9 @@ def visit_operator_assignment_stmt(self, s: OperatorAssignmentStmt) -> None: def visit_while_stmt(self, s: WhileStmt) -> None: self.statement = s s.expr.accept(self) - self.loop_depth += 1 + self.loop_depth[-1] += 1 s.body.accept(self) - self.loop_depth -= 1 + self.loop_depth[-1] -= 1 self.visit_block_maybe(s.else_body) def visit_for_stmt(self, s: ForStmt) -> None: @@ -4722,20 +4767,20 @@ def visit_for_stmt(self, s: ForStmt) -> None: self.store_declared_types(s.index, analyzed) s.index_type = analyzed - self.loop_depth += 1 + self.loop_depth[-1] += 1 
self.visit_block(s.body) - self.loop_depth -= 1 + self.loop_depth[-1] -= 1 self.visit_block_maybe(s.else_body) def visit_break_stmt(self, s: BreakStmt) -> None: self.statement = s - if self.loop_depth == 0: + if self.loop_depth[-1] == 0: self.fail('"break" outside loop', s, serious=True, blocker=True) def visit_continue_stmt(self, s: ContinueStmt) -> None: self.statement = s - if self.loop_depth == 0: + if self.loop_depth[-1] == 0: self.fail('"continue" outside loop', s, serious=True, blocker=True) def visit_if_stmt(self, s: IfStmt) -> None: @@ -5077,14 +5122,14 @@ def translate_dict_call(self, call: CallExpr) -> DictExpr | None: For other variants of dict(...), return None. """ - if not all(kind == ARG_NAMED for kind in call.arg_kinds): + if not all(kind in (ARG_NAMED, ARG_STAR2) for kind in call.arg_kinds): # Must still accept those args. for a in call.args: a.accept(self) return None expr = DictExpr( [ - (StrExpr(cast(str, key)), value) # since they are all ARG_NAMED + (StrExpr(key) if key is not None else None, value) for key, value in zip(call.arg_names, call.args) ] ) @@ -5252,23 +5297,24 @@ def analyze_type_application_args(self, expr: IndexExpr) -> list[Type] | None: else: items = [index] - # whether param spec literals be allowed here - # TODO: should this be computed once and passed in? - # or is there a better way to do this? + # TODO: this needs a clean-up. + # Probably always allow Parameters literals, and validate in semanal_typeargs.py base = expr.base if isinstance(base, RefExpr) and isinstance(base.node, TypeAlias): + allow_unpack = base.node.tvar_tuple_index is not None alias = base.node - target = get_proper_type(alias.target) - if isinstance(target, Instance): - has_param_spec = target.type.has_param_spec_type - num_args = len(target.type.type_vars) + if any(isinstance(t, ParamSpecType) for t in alias.alias_tvars): + has_param_spec = True + num_args = len(alias.alias_tvars) else: has_param_spec = False num_args = -1 - elif isinstance(base, NameExpr) and isinstance(base.node, TypeInfo): + elif isinstance(base, RefExpr) and isinstance(base.node, TypeInfo): + allow_unpack = base.node.has_type_var_tuple_type has_param_spec = base.node.has_param_spec_type num_args = len(base.node.type_vars) else: + allow_unpack = False has_param_spec = False num_args = -1 @@ -5286,6 +5332,7 @@ def analyze_type_application_args(self, expr: IndexExpr) -> list[Type] | None: allow_unbound_tvars=self.allow_unbound_tvars, allow_placeholder=True, allow_param_spec_literals=has_param_spec, + allow_unpack=allow_unpack, ) if analyzed is None: return None @@ -5417,11 +5464,8 @@ def visit_yield_expr(self, e: YieldExpr) -> None: blocker=True, ) elif self.function_stack[-1].is_coroutine: - if self.options.python_version < (3, 6): - self.fail('"yield" in async function', e, serious=True, blocker=True) - else: - self.function_stack[-1].is_generator = True - self.function_stack[-1].is_async_generator = True + self.function_stack[-1].is_generator = True + self.function_stack[-1].is_async_generator = True else: self.function_stack[-1].is_generator = True if e.expr: @@ -5434,7 +5478,12 @@ def visit_await_expr(self, expr: AwaitExpr) -> None: # support top level awaits. 
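Illustrative example (not part of the diff) of the translate_dict_call change above: dict(...) calls mixing keyword arguments with ** unpacking are now translated to a dict display and type-checked the same way:

    from __future__ import annotations

    def build(extra: dict[str, int]) -> dict[str, int]:
        base = {"a": 1}
        # Analyzed like the display {"x": 2, **base, **extra}.
        return dict(x=2, **base, **extra)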
self.fail('"await" outside function', expr, serious=True, code=codes.TOP_LEVEL_AWAIT) elif not self.function_stack[-1].is_coroutine: - self.fail('"await" outside coroutine ("async def")', expr, serious=True, blocker=True) + self.fail( + '"await" outside coroutine ("async def")', + expr, + serious=True, + code=codes.AWAIT_NOT_ASYNC, + ) expr.expr.accept(self) # @@ -5692,9 +5741,7 @@ def get_module_symbol(self, node: MypyFile, name: str) -> SymbolTableNode | None sym = SymbolTableNode(GDEF, self.modules[fullname]) elif self.is_incomplete_namespace(module): self.record_incomplete_ref() - elif "__getattr__" in names and ( - node.is_stub or self.options.python_version >= (3, 7) - ): + elif "__getattr__" in names: gvar = self.create_getattr_var(names["__getattr__"], name, fullname) if gvar: sym = SymbolTableNode(GDEF, gvar) @@ -6230,6 +6277,7 @@ def enter( self.nonlocal_decls.append(set()) # -1 since entering block will increment this to 0. self.block_depth.append(-1) + self.loop_depth.append(0) self.missing_names.append(set()) try: yield @@ -6239,6 +6287,7 @@ def enter( self.global_decls.pop() self.nonlocal_decls.pop() self.block_depth.pop() + self.loop_depth.pop() self.missing_names.pop() def is_func_scope(self) -> bool: @@ -6453,6 +6502,7 @@ def expr_to_analyzed_type( allow_type_any: bool = False, allow_unbound_tvars: bool = False, allow_param_spec_literals: bool = False, + allow_unpack: bool = False, ) -> Type | None: if isinstance(expr, CallExpr): # This is a legacy syntax intended mostly for Python 2, we keep it for @@ -6483,6 +6533,7 @@ def expr_to_analyzed_type( allow_type_any=allow_type_any, allow_unbound_tvars=allow_unbound_tvars, allow_param_spec_literals=allow_param_spec_literals, + allow_unpack=allow_unpack, ) def analyze_type_expr(self, expr: Expression) -> None: @@ -6504,6 +6555,7 @@ def type_analyzer( allow_placeholder: bool = False, allow_required: bool = False, allow_param_spec_literals: bool = False, + allow_unpack: bool = False, report_invalid_types: bool = True, prohibit_self_type: str | None = None, allow_type_any: bool = False, @@ -6522,6 +6574,7 @@ def type_analyzer( allow_placeholder=allow_placeholder, allow_required=allow_required, allow_param_spec_literals=allow_param_spec_literals, + allow_unpack=allow_unpack, prohibit_self_type=prohibit_self_type, allow_type_any=allow_type_any, ) @@ -6542,6 +6595,7 @@ def anal_type( allow_placeholder: bool = False, allow_required: bool = False, allow_param_spec_literals: bool = False, + allow_unpack: bool = False, report_invalid_types: bool = True, prohibit_self_type: str | None = None, allow_type_any: bool = False, @@ -6579,6 +6633,7 @@ def anal_type( allow_placeholder=allow_placeholder, allow_required=allow_required, allow_param_spec_literals=allow_param_spec_literals, + allow_unpack=allow_unpack, report_invalid_types=report_invalid_types, prohibit_self_type=prohibit_self_type, allow_type_any=allow_type_any, diff --git a/mypy/semanal_classprop.py b/mypy/semanal_classprop.py index 3f5bc9c4c2de..dfd4e5b6f122 100644 --- a/mypy/semanal_classprop.py +++ b/mypy/semanal_classprop.py @@ -5,7 +5,7 @@ from __future__ import annotations -from typing_extensions import Final +from typing import Final from mypy.errors import Errors from mypy.nodes import ( diff --git a/mypy/semanal_enum.py b/mypy/semanal_enum.py index f8d321ffada9..cd11204c3bcc 100644 --- a/mypy/semanal_enum.py +++ b/mypy/semanal_enum.py @@ -5,8 +5,7 @@ from __future__ import annotations -from typing import cast -from typing_extensions import Final +from typing import Final, 
cast from mypy.nodes import ( ARG_NAMED, diff --git a/mypy/semanal_main.py b/mypy/semanal_main.py index 8e8c455dd686..ec09deb0952f 100644 --- a/mypy/semanal_main.py +++ b/mypy/semanal_main.py @@ -27,8 +27,8 @@ from __future__ import annotations from contextlib import nullcontext -from typing import TYPE_CHECKING, Callable, List, Optional, Tuple, Union -from typing_extensions import Final, TypeAlias as _TypeAlias +from typing import TYPE_CHECKING, Callable, Final, List, Optional, Tuple, Union +from typing_extensions import TypeAlias as _TypeAlias import mypy.build import mypy.state @@ -75,7 +75,6 @@ "abc", "collections", "collections.abc", - "typing_extensions", ] @@ -382,6 +381,7 @@ def check_type_arguments(graph: Graph, scc: list[str], errors: Errors) -> None: errors, state.options, is_typeshed_file(state.options.abs_custom_typeshed_dir, state.path or ""), + state.manager.semantic_analyzer.named_type, ) with state.wrap_context(): with mypy.state.state.strict_optional_set(state.options.strict_optional): @@ -400,6 +400,7 @@ def check_type_arguments_in_targets( errors, state.options, is_typeshed_file(state.options.abs_custom_typeshed_dir, state.path or ""), + state.manager.semantic_analyzer.named_type, ) with state.wrap_context(): with mypy.state.state.strict_optional_set(state.options.strict_optional): diff --git a/mypy/semanal_namedtuple.py b/mypy/semanal_namedtuple.py index c690d4ec6d20..51ea90e07f3d 100644 --- a/mypy/semanal_namedtuple.py +++ b/mypy/semanal_namedtuple.py @@ -6,8 +6,7 @@ from __future__ import annotations from contextlib import contextmanager -from typing import Iterator, List, Mapping, cast -from typing_extensions import Final +from typing import Final, Iterator, List, Mapping, cast from mypy.exprtotype import TypeTranslationError, expr_to_unanalyzed_type from mypy.nodes import ( @@ -143,9 +142,6 @@ def check_namedtuple_classdef( * valid statements or None, if any of the types are not ready. """ - if self.options.python_version < (3, 6) and not is_stub_file: - self.fail("NamedTuple class syntax is only supported in Python 3.6", defn) - return [], [], {}, [] if len(defn.base_type_exprs) > 1: self.fail("NamedTuple should be a single base", defn) items: list[str] = [] @@ -605,16 +601,11 @@ def make_init_arg(var: Var) -> Argument: add_method("__new__", ret=selftype, args=[make_init_arg(var) for var in vars], is_new=True) add_method("_asdict", args=[], ret=ordereddictype) - special_form_any = AnyType(TypeOfAny.special_form) add_method( "_make", ret=selftype, is_classmethod=True, - args=[ - Argument(Var("iterable", iterable_type), iterable_type, None, ARG_POS), - Argument(Var("new"), special_form_any, EllipsisExpr(), ARG_NAMED_OPT), - Argument(Var("len"), special_form_any, EllipsisExpr(), ARG_NAMED_OPT), - ], + args=[Argument(Var("iterable", iterable_type), iterable_type, None, ARG_POS)], ) self_tvar_expr = TypeVarExpr( diff --git a/mypy/semanal_pass1.py b/mypy/semanal_pass1.py index 659f33e65ead..aaa01969217a 100644 --- a/mypy/semanal_pass1.py +++ b/mypy/semanal_pass1.py @@ -45,10 +45,9 @@ class SemanticAnalyzerPreAnalysis(TraverserVisitor): import sys - def do_stuff(): - # type: () -> None: - if sys.python_version < (3,): - import xyz # Only available in Python 2 + def do_stuff() -> None: + if sys.version_info >= (3, 10): + import xyz # Only available in Python 3.10+ xyz.whatever() ... 
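For context on the `_make` signature change in mypy/semanal_namedtuple.py above: the documented runtime signature of `somenamedtuple._make()` takes a single iterable, so the synthesized method no longer models extra `new`/`len` keyword arguments. A minimal runtime sketch (ordinary Python, not mypy internals) of the call shape the generated signature now covers:

from typing import NamedTuple

class Point(NamedTuple):
    x: int
    y: int

# _make builds an instance from any iterable of field values; this single
# positional argument is all the synthesized signature needs to describe.
p = Point._make([1, 2])
assert p == Point(1, 2)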
@@ -62,7 +61,7 @@ def visit_file(self, file: MypyFile, fnam: str, mod_id: str, options: Options) - self.cur_mod_node = file self.options = options self.is_global_scope = True - self.unreachable_lines: set[int] = set() + self.skipped_lines: set[int] = set() for i, defn in enumerate(file.defs): defn.accept(self) @@ -74,10 +73,10 @@ def visit_file(self, file: MypyFile, fnam: str, mod_id: str, options: Options) - next_def, last = file.defs[i + 1], file.defs[-1] if last.end_line is not None: # We are on a Python version recent enough to support end lines. - self.unreachable_lines |= set(range(next_def.line, last.end_line + 1)) + self.skipped_lines |= set(range(next_def.line, last.end_line + 1)) del file.defs[i + 1 :] break - file.unreachable_lines = self.unreachable_lines + file.skipped_lines = self.skipped_lines def visit_func_def(self, node: FuncDef) -> None: old_global_scope = self.is_global_scope @@ -127,7 +126,7 @@ def visit_block(self, b: Block) -> None: if b.is_unreachable: if b.end_line is not None: # We are on a Python version recent enough to support end lines. - self.unreachable_lines |= set(range(b.line, b.end_line + 1)) + self.skipped_lines |= set(range(b.line, b.end_line + 1)) return super().visit_block(b) diff --git a/mypy/semanal_shared.py b/mypy/semanal_shared.py index d097e1fb08dc..425e5906926a 100644 --- a/mypy/semanal_shared.py +++ b/mypy/semanal_shared.py @@ -3,8 +3,8 @@ from __future__ import annotations from abc import abstractmethod -from typing import Callable, overload -from typing_extensions import Final, Literal, Protocol +from typing import Callable, Final, overload +from typing_extensions import Literal, Protocol from mypy_extensions import trait diff --git a/mypy/semanal_typeargs.py b/mypy/semanal_typeargs.py index e188955dabbb..749b02391e06 100644 --- a/mypy/semanal_typeargs.py +++ b/mypy/semanal_typeargs.py @@ -7,20 +7,21 @@ from __future__ import annotations -from typing import Sequence +from typing import Callable, Sequence from mypy import errorcodes as codes, message_registry from mypy.errorcodes import ErrorCode from mypy.errors import Errors from mypy.messages import format_type from mypy.mixedtraverser import MixedTraverserVisitor -from mypy.nodes import Block, ClassDef, Context, FakeInfo, FuncItem, MypyFile +from mypy.nodes import ARG_STAR, Block, ClassDef, Context, FakeInfo, FuncItem, MypyFile from mypy.options import Options from mypy.scope import Scope from mypy.subtypes import is_same_type, is_subtype -from mypy.typeanal import set_any_tvars +from mypy.typeanal import fix_type_var_tuple_argument, set_any_tvars from mypy.types import ( AnyType, + CallableType, Instance, Parameters, ParamSpecType, @@ -41,11 +42,18 @@ class TypeArgumentAnalyzer(MixedTraverserVisitor): - def __init__(self, errors: Errors, options: Options, is_typeshed_file: bool) -> None: + def __init__( + self, + errors: Errors, + options: Options, + is_typeshed_file: bool, + named_type: Callable[[str, list[Type]], Instance], + ) -> None: super().__init__() self.errors = errors self.options = options self.is_typeshed_file = is_typeshed_file + self.named_type = named_type self.scope = Scope() # Should we also analyze function definitions, or only module top-levels? self.recurse_into_functions = True @@ -85,51 +93,89 @@ def visit_type_alias_type(self, t: TypeAliasType) -> None: # correct aliases. Also, variadic aliases are better to check when fully analyzed, # so we do this here. 
assert t.alias is not None, f"Unfixed type alias {t.type_ref}" - args = flatten_nested_tuples(t.args) + # TODO: consider moving this validation to typeanal.py, expanding invalid aliases + # during semantic analysis may cause crashes. if t.alias.tvar_tuple_index is not None: - correct = len(args) >= len(t.alias.alias_tvars) - 1 + correct = len(t.args) >= len(t.alias.alias_tvars) - 1 if any( isinstance(a, UnpackType) and isinstance(get_proper_type(a.type), Instance) - for a in args + for a in t.args ): correct = True else: - correct = len(args) == len(t.alias.alias_tvars) + correct = len(t.args) == len(t.alias.alias_tvars) if not correct: if t.alias.tvar_tuple_index is not None: exp_len = f"at least {len(t.alias.alias_tvars) - 1}" else: exp_len = f"{len(t.alias.alias_tvars)}" self.fail( - f"Bad number of arguments for type alias, expected: {exp_len}, given: {len(args)}", + "Bad number of arguments for type alias," + f" expected: {exp_len}, given: {len(t.args)}", t, code=codes.TYPE_ARG, ) t.args = set_any_tvars( t.alias, t.line, t.column, self.options, from_error=True, fail=self.fail ).args - else: - t.args = args is_error = self.validate_args(t.alias.name, t.args, t.alias.alias_tvars, t) if not is_error: # If there was already an error for the alias itself, there is no point in checking # the expansion, most likely it will result in the same kind of error. get_proper_type(t).accept(self) + def visit_tuple_type(self, t: TupleType) -> None: + t.items = flatten_nested_tuples(t.items) + # We could also normalize Tuple[*tuple[X, ...]] -> tuple[X, ...] like in + # expand_type() but we can't do this here since it is not a translator visitor, + # and we need to return an Instance instead of TupleType. + super().visit_tuple_type(t) + + def visit_callable_type(self, t: CallableType) -> None: + super().visit_callable_type(t) + # Normalize trivial unpack in var args as *args: *tuple[X, ...] -> *args: X + if t.is_var_arg: + star_index = t.arg_kinds.index(ARG_STAR) + star_type = t.arg_types[star_index] + if isinstance(star_type, UnpackType): + p_type = get_proper_type(star_type.type) + if isinstance(p_type, Instance): + assert p_type.type.fullname == "builtins.tuple" + t.arg_types[star_index] = p_type.args[0] + def visit_instance(self, t: Instance) -> None: # Type argument counts were checked in the main semantic analyzer pass. We assume # that the counts are correct here. info = t.type if isinstance(info, FakeInfo): return # https://github.com/python/mypy/issues/11079 + t.args = tuple(flatten_nested_tuples(t.args)) + if t.type.has_type_var_tuple_type: + # Regular Instances are already validated in typeanal.py. + # TODO: do something with partial overlap (probably just reject). + # also in other places where split_with_prefix_and_suffix() is used. 
+ correct = len(t.args) >= len(t.type.type_vars) - 1 + if any( + isinstance(a, UnpackType) and isinstance(get_proper_type(a.type), Instance) + for a in t.args + ): + correct = True + if not correct: + exp_len = f"at least {len(t.type.type_vars) - 1}" + self.fail( + f"Bad number of arguments, expected: {exp_len}, given: {len(t.args)}", + t, + code=codes.TYPE_ARG, + ) + any_type = AnyType(TypeOfAny.from_error) + t.args = (any_type,) * len(t.type.type_vars) + fix_type_var_tuple_argument(any_type, t) self.validate_args(info.name, t.args, info.defn.type_vars, t) super().visit_instance(t) def validate_args( self, name: str, args: Sequence[Type], type_vars: list[TypeVarLikeType], ctx: Context ) -> bool: - # TODO: we need to do flatten_nested_tuples and validate arg count for instances - # similar to how do we do this for type aliases above, but this may have perf penalty. if any(isinstance(v, TypeVarTupleType) for v in type_vars): prefix = next(i for (i, v) in enumerate(type_vars) if isinstance(v, TypeVarTupleType)) tvt = type_vars[prefix] @@ -198,25 +244,22 @@ def validate_args( return is_error def visit_unpack_type(self, typ: UnpackType) -> None: + super().visit_unpack_type(typ) proper_type = get_proper_type(typ.type) if isinstance(proper_type, TupleType): return if isinstance(proper_type, TypeVarTupleType): return + # TODO: this should probably be .has_base("builtins.tuple"), also elsewhere. if isinstance(proper_type, Instance) and proper_type.type.fullname == "builtins.tuple": return - if ( - isinstance(proper_type, UnboundType) - or isinstance(proper_type, AnyType) - and proper_type.type_of_any == TypeOfAny.from_error - ): - return - - # TODO: Infer something when it can't be unpacked to allow rest of - # typechecking to work. - self.fail( - message_registry.INVALID_UNPACK.format(format_type(proper_type, self.options)), typ - ) + if not isinstance(proper_type, (UnboundType, AnyType)): + # Avoid extra errors if there were some errors already. Also interpret plain Any + # as tuple[Any, ...] (this is better for the code in type checker). + self.fail( + message_registry.INVALID_UNPACK.format(format_type(proper_type, self.options)), typ + ) + typ.type = self.named_type("builtins.tuple", [AnyType(TypeOfAny.from_error)]) def check_type_var_values( self, name: str, actuals: list[Type], arg_name: str, valids: list[Type], context: Context diff --git a/mypy/semanal_typeddict.py b/mypy/semanal_typeddict.py index 47a3f558aa13..fb3fa713e3fb 100644 --- a/mypy/semanal_typeddict.py +++ b/mypy/semanal_typeddict.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing_extensions import Final +from typing import Final from mypy import errorcodes as codes, message_registry from mypy.errorcodes import ErrorCode @@ -252,6 +252,7 @@ def map_items_to_base( if not tvars: mapped_items[key] = type_in_base continue + # TODO: simple zip can't be used for variadic types. 
mapped_items[key] = expand_type( type_in_base, {t.id: a for (t, a) in zip(tvars, base_args)} ) diff --git a/mypy/server/astmerge.py b/mypy/server/astmerge.py index 5e3759227c7b..862c3898a383 100644 --- a/mypy/server/astmerge.py +++ b/mypy/server/astmerge.py @@ -73,7 +73,6 @@ SymbolNode, SymbolTable, TypeAlias, - TypeAliasExpr, TypedDictExpr, TypeInfo, Var, @@ -326,10 +325,6 @@ def visit_enum_call_expr(self, node: EnumCallExpr) -> None: self.process_synthetic_type_info(node.info) super().visit_enum_call_expr(node) - def visit_type_alias_expr(self, node: TypeAliasExpr) -> None: - self.fixup_type(node.type) - super().visit_type_alias_expr(node) - # Others def visit_var(self, node: Var) -> None: @@ -467,13 +462,13 @@ def visit_overloaded(self, t: Overloaded) -> None: def visit_erased_type(self, t: ErasedType) -> None: # This type should exist only temporarily during type inference - raise RuntimeError + raise RuntimeError("Cannot handle erased type") def visit_deleted_type(self, typ: DeletedType) -> None: pass def visit_partial_type(self, typ: PartialType) -> None: - raise RuntimeError + raise RuntimeError("Cannot handle partial type") def visit_tuple_type(self, typ: TupleType) -> None: for item in typ.items: diff --git a/mypy/server/deps.py b/mypy/server/deps.py index ed85b74f2206..9ed2d4549629 100644 --- a/mypy/server/deps.py +++ b/mypy/server/deps.py @@ -472,7 +472,7 @@ def visit_assignment_stmt(self, o: AssignmentStmt) -> None: self.add_dependency(make_trigger(class_name + ".__init__")) self.add_dependency(make_trigger(class_name + ".__new__")) if isinstance(rvalue, IndexExpr) and isinstance(rvalue.analyzed, TypeAliasExpr): - self.add_type_dependencies(rvalue.analyzed.type) + self.add_type_dependencies(rvalue.analyzed.node.target) elif typ: self.add_type_dependencies(typ) else: diff --git a/mypy/server/mergecheck.py b/mypy/server/mergecheck.py index ef6f5b86c8f3..6f044a5ea8b9 100644 --- a/mypy/server/mergecheck.py +++ b/mypy/server/mergecheck.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing_extensions import Final +from typing import Final from mypy.nodes import Decorator, FakeInfo, FuncDef, SymbolNode, Var from mypy.server.objgraph import get_path, get_reachable_graph diff --git a/mypy/server/objgraph.py b/mypy/server/objgraph.py index 89a086b8a0ab..a13fd8412934 100644 --- a/mypy/server/objgraph.py +++ b/mypy/server/objgraph.py @@ -5,8 +5,7 @@ import types import weakref from collections.abc import Iterable -from typing import Iterator, Mapping -from typing_extensions import Final +from typing import Final, Iterator, Mapping method_descriptor_type: Final = type(object.__dir__) method_wrapper_type: Final = type(object().__ne__) @@ -46,7 +45,7 @@ def get_edge_candidates(o: object) -> Iterator[tuple[object, object]]: try: if attr not in ATTR_BLACKLIST and hasattr(o, attr) and not isproperty(o, attr): e = getattr(o, attr) - if not type(e) in ATOMIC_TYPE_BLACKLIST: + if type(e) not in ATOMIC_TYPE_BLACKLIST: yield attr, e except AssertionError: pass @@ -70,7 +69,7 @@ def get_edges(o: object) -> Iterator[tuple[object, object]]: if se is not o and se is not type(o) and hasattr(s, "__self__"): yield s.__self__, se else: - if not type(e) in TYPE_BLACKLIST: + if type(e) not in TYPE_BLACKLIST: yield s, e diff --git a/mypy/server/trigger.py b/mypy/server/trigger.py index 5f2115739d38..97b5f89cd3ba 100644 --- a/mypy/server/trigger.py +++ b/mypy/server/trigger.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing_extensions import Final +from typing import Final # Used as 
a suffix for triggers to handle "from m import *" dependencies (see also # make_wildcard_trigger) diff --git a/mypy/server/update.py b/mypy/server/update.py index 7b439eb0ab9f..0cc7a2229514 100644 --- a/mypy/server/update.py +++ b/mypy/server/update.py @@ -118,8 +118,8 @@ import re import sys import time -from typing import Callable, NamedTuple, Sequence, Union -from typing_extensions import Final, TypeAlias as _TypeAlias +from typing import Callable, Final, NamedTuple, Sequence, Union +from typing_extensions import TypeAlias as _TypeAlias from mypy.build import ( DEBUG_FINE_GRAINED, @@ -986,7 +986,7 @@ def key(node: FineGrainedDeferredNode) -> int: manager.errors.set_file_ignored_lines( file_node.path, file_node.ignored_lines, options.ignore_errors or state.ignore_all ) - manager.errors.set_unreachable_lines(file_node.path, file_node.unreachable_lines) + manager.errors.set_skipped_lines(file_node.path, file_node.skipped_lines) targets = set() for node in nodes: diff --git a/mypy/sharedparse.py b/mypy/sharedparse.py index 6f864ccce816..ef2e4f720664 100644 --- a/mypy/sharedparse.py +++ b/mypy/sharedparse.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing_extensions import Final +from typing import Final """Shared logic between our three mypy parser files.""" diff --git a/mypy/solve.py b/mypy/solve.py index b8304d29c1ce..5945d97ed85a 100644 --- a/mypy/solve.py +++ b/mypy/solve.py @@ -3,92 +3,469 @@ from __future__ import annotations from collections import defaultdict +from typing import Iterable, Sequence, Tuple +from typing_extensions import TypeAlias as _TypeAlias -from mypy.constraints import SUPERTYPE_OF, Constraint +from mypy.constraints import SUBTYPE_OF, SUPERTYPE_OF, Constraint, infer_constraints +from mypy.expandtype import expand_type +from mypy.graph_utils import prepare_sccs, strongly_connected_components, topsort from mypy.join import join_types -from mypy.meet import meet_types +from mypy.meet import meet_type_list, meet_types from mypy.subtypes import is_subtype +from mypy.typeops import get_all_type_vars from mypy.types import ( AnyType, + Instance, + NoneType, + ParamSpecType, ProperType, + TupleType, Type, TypeOfAny, TypeVarId, + TypeVarLikeType, + TypeVarTupleType, + TypeVarType, UninhabitedType, UnionType, + UnpackType, get_proper_type, ) from mypy.typestate import type_state +Bounds: _TypeAlias = "dict[TypeVarId, set[Type]]" +Graph: _TypeAlias = "set[tuple[TypeVarId, TypeVarId]]" +Solutions: _TypeAlias = "dict[TypeVarId, Type | None]" + def solve_constraints( - vars: list[TypeVarId], constraints: list[Constraint], strict: bool = True -) -> list[Type | None]: + original_vars: Sequence[TypeVarLikeType], + constraints: list[Constraint], + strict: bool = True, + allow_polymorphic: bool = False, +) -> tuple[list[Type | None], list[TypeVarLikeType]]: """Solve type constraints. - Return the best type(s) for type variables; each type can be None if the value of the variable - could not be solved. + Return the best type(s) for type variables; each type can be None if the value of + the variable could not be solved. If a variable has no constraints, if strict=True then arbitrarily - pick NoneType as the value of the type variable. If strict=False, - pick AnyType. + pick UninhabitedType as the value of the type variable. If strict=False, pick AnyType. + If allow_polymorphic=True, then use the full algorithm that can potentially return + free type variables in solutions (these require special care when applying). 
Otherwise, + use a simplified algorithm that just solves each type variable individually if possible. """ + vars = [tv.id for tv in original_vars] + if not vars: + return [], [] + + originals = {tv.id: tv for tv in original_vars} + extra_vars: list[TypeVarId] = [] + # Get additional type variables from generic actuals. + for c in constraints: + extra_vars.extend([v.id for v in c.extra_tvars if v.id not in vars + extra_vars]) + originals.update({v.id: v for v in c.extra_tvars if v.id not in originals}) + # Collect a list of constraints for each type variable. - cmap: dict[TypeVarId, list[Constraint]] = defaultdict(list) + cmap: dict[TypeVarId, list[Constraint]] = {tv: [] for tv in vars + extra_vars} for con in constraints: - cmap[con.type_var].append(con) + if con.type_var in vars + extra_vars: + cmap[con.type_var].append(con) - res: list[Type | None] = [] + if allow_polymorphic: + if constraints: + solutions, free_vars = solve_with_dependent( + vars + extra_vars, constraints, vars, originals + ) + else: + solutions = {} + free_vars = [] + else: + solutions = {} + free_vars = [] + for tv, cs in cmap.items(): + if not cs: + continue + lowers = [c.target for c in cs if c.op == SUPERTYPE_OF] + uppers = [c.target for c in cs if c.op == SUBTYPE_OF] + solution = solve_one(lowers, uppers) - # Solve each type variable separately. - for tvar in vars: - bottom: Type | None = None - top: Type | None = None - candidate: Type | None = None - - # Process each constraint separately, and calculate the lower and upper - # bounds based on constraints. Note that we assume that the constraint - # targets do not have constraint references. - for c in cmap.get(tvar, []): - if c.op == SUPERTYPE_OF: - if bottom is None: - bottom = c.target - else: - if type_state.infer_unions: - # This deviates from the general mypy semantics because - # recursive types are union-heavy in 95% of cases. - bottom = UnionType.make_union([bottom, c.target]) - else: - bottom = join_types(bottom, c.target) + # Do not leak type variables in non-polymorphic solutions. + if solution is None or not get_vars( + solution, [tv for tv in extra_vars if tv not in vars] + ): + solutions[tv] = solution + + res: list[Type | None] = [] + for v in vars: + if v in solutions: + res.append(solutions[v]) + else: + # No constraints for type variable -- 'UninhabitedType' is the most specific type. + candidate: Type + if strict: + candidate = UninhabitedType() + candidate.ambiguous = True else: - if top is None: - top = c.target - else: - top = meet_types(top, c.target) - - p_top = get_proper_type(top) - p_bottom = get_proper_type(bottom) - if isinstance(p_top, AnyType) or isinstance(p_bottom, AnyType): - source_any = top if isinstance(p_top, AnyType) else bottom - assert isinstance(source_any, ProperType) and isinstance(source_any, AnyType) - res.append(AnyType(TypeOfAny.from_another_any, source_any=source_any)) + candidate = AnyType(TypeOfAny.special_form) + res.append(candidate) + return res, free_vars + + +def solve_with_dependent( + vars: list[TypeVarId], + constraints: list[Constraint], + original_vars: list[TypeVarId], + originals: dict[TypeVarId, TypeVarLikeType], +) -> tuple[Solutions, list[TypeVarLikeType]]: + """Solve set of constraints that may depend on each other, like T <: List[S]. 
+ + The whole algorithm consists of five steps: + * Propagate via linear constraints and use secondary constraints to get transitive closure + * Find dependencies between type variables, group them in SCCs, and sort topologically + * Check that all SCCs are intrinsically linear; we can't solve (express) T <: List[T] + * Variables in leaf SCCs that don't have constant bounds are free (choose one per SCC) + * Solve constraints iteratively starting from leaves, updating bounds after each step. + """ + graph, lowers, uppers = transitive_closure(vars, constraints) + + dmap = compute_dependencies(vars, graph, lowers, uppers) + sccs = list(strongly_connected_components(set(vars), dmap)) + if not all(check_linear(scc, lowers, uppers) for scc in sccs): + return {}, [] + raw_batches = list(topsort(prepare_sccs(sccs, dmap))) + + free_vars = [] + free_solutions = {} + for scc in raw_batches[0]: + # If there are no bounds on this SCC, then the only meaningful solution we can + # express is that each variable is equal to a new free variable. For example, + # if we have T <: S, S <: U, we deduce: T = S = U, all equal to one fresh free variable. + if all(not lowers[tv] and not uppers[tv] for tv in scc): + best_free = choose_free([originals[tv] for tv in scc], original_vars) + if best_free: + free_vars.append(best_free.id) + free_solutions[best_free.id] = best_free + + # Update lowers/uppers with free vars, so these can now be used + # as valid solutions. + for l, u in graph: + if l in free_vars: + lowers[u].add(free_solutions[l]) + if u in free_vars: + uppers[l].add(free_solutions[u]) + + # Flatten the SCCs that are independent; we can solve them together, + # since we don't need to update any targets in between. + batches = [] + for batch in raw_batches: + next_bc = [] + for scc in batch: + next_bc.extend(list(scc)) + batches.append(next_bc) + + solutions: dict[TypeVarId, Type | None] = {} + for flat_batch in batches: + res = solve_iteratively(flat_batch, graph, lowers, uppers) + solutions.update(res) + return solutions, [free_solutions[tv] for tv in free_vars] + + +def solve_iteratively( + batch: list[TypeVarId], graph: Graph, lowers: Bounds, uppers: Bounds +) -> Solutions: + """Solve transitive closure sequentially, updating upper/lower bounds after each step. + + Transitive closure is represented as a linear graph plus lower/upper bounds for each + type variable; see the transitive_closure() docstring for details. + + We solve for type variables that appear in `batch`. If a bound is not constant (i.e. it + looks like T :> F[S, ...]), we substitute solutions found so far in the target F[S, ...] + after solving the batch. + + Importantly, after solving each variable in a batch, we move it from the linear graph to + upper/lower bounds; this way we can guarantee consistency of solutions (see the comment below + for an example of when this is important). + """ + solutions = {} + s_batch = set(batch) + while s_batch: + for tv in sorted(s_batch, key=lambda x: x.raw_id): + if lowers[tv] or uppers[tv]: + solvable_tv = tv + break + else: + break + # Solve each solvable type variable separately. + s_batch.remove(solvable_tv) + result = solve_one(lowers[solvable_tv], uppers[solvable_tv]) + solutions[solvable_tv] = result + if result is None: + # TODO: support backtracking lower/upper bound choices and order within SCCs + # (will require switching this function from iterative to recursive). continue - elif bottom is None: - if top: - candidate = top + + # Update the (transitive) bounds from graph if there is a solution.
+ # This is needed to guarantee solutions will never contradict the initial + # constraints. For example, consider {T <: S, T <: A, S :> B} with A :> B. + # If we did not update the uppers/lowers from the graph, we would infer T = A, S = B, + # which is not correct. + for l, u in graph.copy(): + if l == u: + continue + if l == solvable_tv: + lowers[u].add(result) + graph.remove((l, u)) + if u == solvable_tv: + uppers[l].add(result) + graph.remove((l, u)) + + # We can update uppers/lowers only once after solving the whole SCC, + # since uppers/lowers can't depend on type variables in the SCC + # (and we would reject such an SCC as non-linear and therefore not solvable). + subs = {tv: s for (tv, s) in solutions.items() if s is not None} + for tv in lowers: + lowers[tv] = {expand_type(lt, subs) for lt in lowers[tv]} + for tv in uppers: + uppers[tv] = {expand_type(ut, subs) for ut in uppers[tv]} + return solutions + + +def solve_one(lowers: Iterable[Type], uppers: Iterable[Type]) -> Type | None: + """Solve constraints by using meets of upper bounds and joins of lower bounds.""" + bottom: Type | None = None + top: Type | None = None + candidate: Type | None = None + + # Process each bound separately, and calculate the lower and upper + # bounds based on constraints. Note that we assume that the constraint + # targets do not have constraint references. + for target in lowers: + if bottom is None: + bottom = target + else: + if type_state.infer_unions: + # This deviates from the general mypy semantics because + # recursive types are union-heavy in 95% of cases. + bottom = UnionType.make_union([bottom, target]) + else: + bottom = join_types(bottom, target) + + for target in uppers: + if top is None: + top = target + else: + top = meet_types(top, target) + + p_top = get_proper_type(top) + p_bottom = get_proper_type(bottom) + if isinstance(p_top, AnyType) or isinstance(p_bottom, AnyType): + source_any = top if isinstance(p_top, AnyType) else bottom + assert isinstance(source_any, ProperType) and isinstance(source_any, AnyType) + return AnyType(TypeOfAny.from_another_any, source_any=source_any) + elif bottom is None: + if top: + candidate = top + else: + # No constraints for type variable + return None + elif top is None: + candidate = bottom + elif is_subtype(bottom, top): + candidate = bottom + else: + candidate = None + return candidate + + +def choose_free( + scc: list[TypeVarLikeType], original_vars: list[TypeVarId] +) -> TypeVarLikeType | None: + """Choose the best solution for an SCC containing only type variables. + + This is needed to preserve e.g. the upper bound in a situation like this: + def dec(f: Callable[[T], S]) -> Callable[[T], S]: ... + + @dec + def test(x: U) -> U: ... + + where U <: A. + """ + + if len(scc) == 1: + # Fast path: the choice is trivial. + return scc[0] + + common_upper_bound = meet_type_list([t.upper_bound for t in scc]) + common_upper_bound_p = get_proper_type(common_upper_bound) + # We include None for when strict-optional is disabled. + if isinstance(common_upper_bound_p, (UninhabitedType, NoneType)): + # This will cause the uninhabited type to be inferred, which is better than a free + # TypeVar that has an uninhabited upper bound. + return None + + values: list[Type] = [] + for tv in scc: + if isinstance(tv, TypeVarType) and tv.values: + if values: + # It is too tricky to support multiple TypeVars with values + # within the same SCC.
+ return None + values = tv.values.copy() + + if values and not is_trivial_bound(common_upper_bound_p): + # If there are both values and upper bound present, we give up, + # since type variables having both are not supported. + return None + + # For convenience with current type application machinery, we use a stable + # choice that prefers the original type variables (not polymorphic ones) in SCC. + best = sorted(scc, key=lambda x: (x.id not in original_vars, x.id.raw_id))[0] + if isinstance(best, TypeVarType): + return best.copy_modified(values=values, upper_bound=common_upper_bound) + if is_trivial_bound(common_upper_bound_p): + # TODO: support more cases for ParamSpecs/TypeVarTuples + return best + return None + + +def is_trivial_bound(tp: ProperType) -> bool: + return isinstance(tp, Instance) and tp.type.fullname == "builtins.object" + + +def find_linear(c: Constraint) -> Tuple[bool, TypeVarId | None]: + """Find out if this constraint represent a linear relationship, return target id if yes.""" + if isinstance(c.origin_type_var, TypeVarType): + if isinstance(c.target, TypeVarType): + return True, c.target.id + if isinstance(c.origin_type_var, ParamSpecType): + if isinstance(c.target, ParamSpecType) and not c.target.prefix.arg_types: + return True, c.target.id + if isinstance(c.origin_type_var, TypeVarTupleType): + target = get_proper_type(c.target) + if isinstance(target, TupleType) and len(target.items) == 1: + item = target.items[0] + if isinstance(item, UnpackType) and isinstance(item.type, TypeVarTupleType): + return True, item.type.id + return False, None + + +def transitive_closure( + tvars: list[TypeVarId], constraints: list[Constraint] +) -> tuple[Graph, Bounds, Bounds]: + """Find transitive closure for given constraints on type variables. + + Transitive closure gives maximal set of lower/upper bounds for each type variable, + such that we cannot deduce any further bounds by chaining other existing bounds. + + The transitive closure is represented by: + * A set of lower and upper bounds for each type variable, where only constant and + non-linear terms are included in the bounds. + * A graph of linear constraints between type variables (represented as a set of pairs) + Such separation simplifies reasoning, and allows an efficient and simple incremental + transitive closure algorithm that we use here. + + For example if we have initial constraints [T <: S, S <: U, U <: int], the transitive + closure is given by: + * {} <: T <: {int} + * {} <: S <: {int} + * {} <: U <: {int} + * {T <: S, S <: U, T <: U} + """ + uppers: Bounds = defaultdict(set) + lowers: Bounds = defaultdict(set) + graph: Graph = {(tv, tv) for tv in tvars} + + remaining = set(constraints) + while remaining: + c = remaining.pop() + # Note that ParamSpec constraint P <: Q may be considered linear only if Q has no prefix, + # for cases like P <: Concatenate[T, Q] we should consider this non-linear and put {P} and + # {T, Q} into separate SCCs. Similarly, Ts <: Tuple[*Us] considered linear, while + # Ts <: Tuple[*Us, U] is non-linear. + is_linear, target_id = find_linear(c) + if is_linear and target_id in tvars: + assert target_id is not None + if c.op == SUBTYPE_OF: + lower, upper = c.type_var, target_id else: - # No constraints for type variable -- 'UninhabitedType' is the most specific type. 
- if strict: - candidate = UninhabitedType() - candidate.ambiguous = True - else: - candidate = AnyType(TypeOfAny.special_form) - elif top is None: - candidate = bottom - elif is_subtype(bottom, top): - candidate = bottom + lower, upper = target_id, c.type_var + if (lower, upper) in graph: + continue + graph |= { + (l, u) for l in tvars for u in tvars if (l, lower) in graph and (upper, u) in graph + } + for u in tvars: + if (upper, u) in graph: + lowers[u] |= lowers[lower] + for l in tvars: + if (l, lower) in graph: + uppers[l] |= uppers[upper] + for lt in lowers[lower]: + for ut in uppers[upper]: + # TODO: what if secondary constraints result in inference + # against polymorphic actual (also in below branches)? + remaining |= set(infer_constraints(lt, ut, SUBTYPE_OF)) + remaining |= set(infer_constraints(ut, lt, SUPERTYPE_OF)) + elif c.op == SUBTYPE_OF: + if c.target in uppers[c.type_var]: + continue + for l in tvars: + if (l, c.type_var) in graph: + uppers[l].add(c.target) + for lt in lowers[c.type_var]: + remaining |= set(infer_constraints(lt, c.target, SUBTYPE_OF)) + remaining |= set(infer_constraints(c.target, lt, SUPERTYPE_OF)) else: - candidate = None - res.append(candidate) + assert c.op == SUPERTYPE_OF + if c.target in lowers[c.type_var]: + continue + for u in tvars: + if (c.type_var, u) in graph: + lowers[u].add(c.target) + for ut in uppers[c.type_var]: + remaining |= set(infer_constraints(ut, c.target, SUPERTYPE_OF)) + remaining |= set(infer_constraints(c.target, ut, SUBTYPE_OF)) + return graph, lowers, uppers + +def compute_dependencies( + tvars: list[TypeVarId], graph: Graph, lowers: Bounds, uppers: Bounds +) -> dict[TypeVarId, list[TypeVarId]]: + """Compute dependencies between type variables induced by constraints. + + If we have a constraint like T <: List[S], we say that T depends on S, since + we will need to solve for S first before we can solve for T. + """ + res = {} + for tv in tvars: + deps = set() + for lt in lowers[tv]: + deps |= get_vars(lt, tvars) + for ut in uppers[tv]: + deps |= get_vars(ut, tvars) + for other in tvars: + if other == tv: + continue + if (tv, other) in graph or (other, tv) in graph: + deps.add(other) + res[tv] = list(deps) return res + + +def check_linear(scc: set[TypeVarId], lowers: Bounds, uppers: Bounds) -> bool: + """Check there are only linear constraints between type variables in SCC. + + Linear are constraints like T <: S (while T <: F[S] are non-linear). + """ + for tv in scc: + if any(get_vars(lt, list(scc)) for lt in lowers[tv]): + return False + if any(get_vars(ut, list(scc)) for ut in uppers[tv]): + return False + return True + + +def get_vars(target: Type, vars: list[TypeVarId]) -> set[TypeVarId]: + """Find type variables for which we are solving in a target type.""" + return {tv.id for tv in get_all_type_vars(target)} & set(vars) diff --git a/mypy/state.py b/mypy/state.py index 2e44a936f819..cd3a360dd15f 100644 --- a/mypy/state.py +++ b/mypy/state.py @@ -1,8 +1,7 @@ from __future__ import annotations from contextlib import contextmanager -from typing import Iterator -from typing_extensions import Final +from typing import Final, Iterator # These are global mutable state. Don't add anything here unless there's a very # good reason. 
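The transitive_closure() docstring above walks through the constraints [T <: S, S <: U, U <: int]. The following standalone sketch illustrates that closure idea with plain strings instead of mypy's TypeVarId/Type objects, assuming the constraints are already split into linear pairs and constant upper bounds; it is an illustration of the concept, not the patched implementation.

from collections import defaultdict

def toy_transitive_closure(tvars, linear, upper_consts):
    # linear: (lower, upper) pairs between type variables, e.g. ("T", "S") for T <: S
    # upper_consts: constant upper bounds per variable, e.g. {"U": {"int"}}
    graph = {(tv, tv) for tv in tvars}
    for lower, upper in linear:
        # Closure rule: connect everything below `lower` to everything above `upper`.
        graph |= {
            (l, u)
            for l in tvars
            for u in tvars
            if (l, lower) in graph and (upper, u) in graph
        }
    # Propagate constant bounds: if l <: u and u <: c, then l <: c. A single pass
    # suffices because `graph` is already transitively closed at this point.
    uppers = defaultdict(set, {k: set(v) for k, v in upper_consts.items()})
    for l, u in graph:
        uppers[l] |= uppers[u]
    return graph, uppers

graph, uppers = toy_transitive_closure(
    ["T", "S", "U"], [("T", "S"), ("S", "U")], {"U": {"int"}}
)
assert ("T", "U") in graph     # T <: U follows from T <: S and S <: U
assert uppers["T"] == {"int"}  # ...and therefore int is an upper bound of T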
diff --git a/mypy/stats.py b/mypy/stats.py index 5f4b9d4d201f..b8803e03b9d2 100644 --- a/mypy/stats.py +++ b/mypy/stats.py @@ -5,8 +5,7 @@ import os from collections import Counter from contextlib import contextmanager -from typing import Iterator -from typing_extensions import Final +from typing import Final, Iterator from mypy import nodes from mypy.argmap import map_formals_to_actuals diff --git a/mypy/strconv.py b/mypy/strconv.py index c428addd43aa..42a07c7f62fa 100644 --- a/mypy/strconv.py +++ b/mypy/strconv.py @@ -511,7 +511,7 @@ def visit_type_var_tuple_expr(self, o: mypy.nodes.TypeVarTupleExpr) -> str: return self.dump(a, o) def visit_type_alias_expr(self, o: mypy.nodes.TypeAliasExpr) -> str: - return f"TypeAliasExpr({self.stringify_type(o.type)})" + return f"TypeAliasExpr({self.stringify_type(o.node.target)})" def visit_namedtuple_expr(self, o: mypy.nodes.NamedTupleExpr) -> str: return f"NamedTupleExpr:{o.line}({o.info.name}, {self.stringify_type(o.info.tuple_type) if o.info.tuple_type is not None else None})" diff --git a/mypy/stubdoc.py b/mypy/stubdoc.py index 7c8751bbd6ed..145f57fd7751 100644 --- a/mypy/stubdoc.py +++ b/mypy/stubdoc.py @@ -10,8 +10,8 @@ import io import re import tokenize -from typing import Any, MutableMapping, MutableSequence, NamedTuple, Sequence, Tuple -from typing_extensions import Final, TypeAlias as _TypeAlias +from typing import Any, Final, MutableMapping, MutableSequence, NamedTuple, Sequence, Tuple +from typing_extensions import TypeAlias as _TypeAlias # Type alias for signatures strings in format ('func_name', '(arg, opt_arg=False)'). Sig: _TypeAlias = Tuple[str, str] @@ -254,7 +254,7 @@ def infer_sig_from_docstring(docstr: str | None, name: str) -> list[FunctionSig] * docstr: docstring * name: name of function for which signatures are to be found """ - if not docstr: + if not (isinstance(docstr, str) and docstr): return None state = DocStringParser(name) diff --git a/mypy/stubgen.py b/mypy/stubgen.py index ba71456af4a4..aca836c52ce8 100755 --- a/mypy/stubgen.py +++ b/mypy/stubgen.py @@ -49,8 +49,7 @@ import sys import traceback from collections import defaultdict -from typing import Iterable, Mapping -from typing_extensions import Final +from typing import Final, Iterable, Mapping import mypy.build import mypy.mixedtraverser @@ -81,6 +80,7 @@ CallExpr, ClassDef, ComparisonExpr, + ComplexExpr, Decorator, DictExpr, EllipsisExpr, @@ -102,6 +102,7 @@ OverloadedFuncDef, Statement, StrExpr, + TempNode, TupleExpr, TypeInfo, UnaryExpr, @@ -242,6 +243,7 @@ def __init__( verbose: bool, quiet: bool, export_less: bool, + include_docstrings: bool, ) -> None: # See parse_options for descriptions of the flags. self.pyversion = pyversion @@ -260,6 +262,7 @@ def __init__( self.verbose = verbose self.quiet = quiet self.export_less = export_less + self.include_docstrings = include_docstrings class StubSource: @@ -623,6 +626,7 @@ def __init__( include_private: bool = False, analyzed: bool = False, export_less: bool = False, + include_docstrings: bool = False, ) -> None: # Best known value of __all__. self._all_ = _all_ @@ -637,6 +641,8 @@ def __init__( self._state = EMPTY self._toplevel_names: list[str] = [] self._include_private = include_private + self._include_docstrings = include_docstrings + self._current_class: ClassDef | None = None self.import_tracker = ImportTracker() # Was the tree semantically analysed before? 
self.analyzed = analyzed @@ -661,7 +667,7 @@ def visit_mypy_file(self, o: MypyFile) -> None: "_typeshed": ["Incomplete"], "typing": ["Any", "TypeVar", "NamedTuple"], "collections.abc": ["Generator"], - "typing_extensions": ["TypedDict"], + "typing_extensions": ["TypedDict", "ParamSpec", "TypeVarTuple"], } for pkg, imports in known_imports.items(): for t in imports: @@ -780,25 +786,20 @@ def visit_func_def(self, o: FuncDef) -> None: elif o.name in KNOWN_MAGIC_METHODS_RETURN_TYPES: retname = KNOWN_MAGIC_METHODS_RETURN_TYPES[o.name] elif has_yield_expression(o) or has_yield_from_expression(o): - self.add_typing_import("Generator") + generator_name = self.add_typing_import("Generator") yield_name = "None" send_name = "None" return_name = "None" if has_yield_from_expression(o): - self.add_typing_import("Incomplete") - yield_name = send_name = self.typing_name("Incomplete") + yield_name = send_name = self.add_typing_import("Incomplete") else: for expr, in_assignment in all_yield_expressions(o): if expr.expr is not None and not self.is_none_expr(expr.expr): - self.add_typing_import("Incomplete") - yield_name = self.typing_name("Incomplete") + yield_name = self.add_typing_import("Incomplete") if in_assignment: - self.add_typing_import("Incomplete") - send_name = self.typing_name("Incomplete") + send_name = self.add_typing_import("Incomplete") if has_return_statement(o): - self.add_typing_import("Incomplete") - return_name = self.typing_name("Incomplete") - generator_name = self.typing_name("Generator") + return_name = self.add_typing_import("Incomplete") retname = f"{generator_name}[{yield_name}, {send_name}, {return_name}]" elif not has_return_statement(o) and o.abstract_status == NOT_ABSTRACT: retname = "None" @@ -807,7 +808,13 @@ def visit_func_def(self, o: FuncDef) -> None: retfield = " -> " + retname self.add(", ".join(args)) - self.add(f"){retfield}: ...\n") + self.add(f"){retfield}:") + if self._include_docstrings and o.docstring: + docstring = mypy.util.quote_docstring(o.docstring) + self.add(f"\n{self._indent} {docstring}\n") + else: + self.add(" ...\n") + self._state = FUNC def is_none_expr(self, expr: Expression) -> bool: @@ -886,6 +893,7 @@ def get_fullname(self, expr: Expression) -> str: return resolved_name def visit_class_def(self, o: ClassDef) -> None: + self._current_class = o self.method_names = find_method_names(o.defs.body) sep: int | None = None if not self._indent and self._state != EMPTY: @@ -907,8 +915,11 @@ def visit_class_def(self, o: ClassDef) -> None: if base_types: self.add(f"({', '.join(base_types)})") self.add(":\n") - n = len(self._output) self._indent += " " + if self._include_docstrings and o.docstring: + docstring = mypy.util.quote_docstring(o.docstring) + self.add(f"{self._indent}{docstring}\n") + n = len(self._output) self._vars.append([]) super().visit_class_def(o) self._indent = self._indent[:-4] @@ -917,11 +928,13 @@ def visit_class_def(self, o: ClassDef) -> None: if len(self._output) == n: if self._state == EMPTY_CLASS and sep is not None: self._output[sep] = "" - self._output[-1] = self._output[-1][:-1] + " ...\n" + if not (self._include_docstrings and o.docstring): + self._output[-1] = self._output[-1][:-1] + " ...\n" self._state = EMPTY_CLASS else: self._state = CLASS self.method_names = set() + self._current_class = None def get_base_types(self, cdef: ClassDef) -> list[str]: """Get list of base classes for a class.""" @@ -947,21 +960,19 @@ def get_base_types(self, cdef: ClassDef) -> list[str]: nt_fields = self._get_namedtuple_fields(base) assert 
isinstance(base.args[0], StrExpr) typename = base.args[0].value - if nt_fields is not None: - fields_str = ", ".join(f"({f!r}, {t})" for f, t in nt_fields) - namedtuple_name = self.typing_name("NamedTuple") - base_types.append(f"{namedtuple_name}({typename!r}, [{fields_str}])") - self.add_typing_import("NamedTuple") - else: + if nt_fields is None: # Invalid namedtuple() call, cannot determine fields - base_types.append(self.typing_name("Incomplete")) + base_types.append(self.add_typing_import("Incomplete")) + continue + fields_str = ", ".join(f"({f!r}, {t})" for f, t in nt_fields) + namedtuple_name = self.add_typing_import("NamedTuple") + base_types.append(f"{namedtuple_name}({typename!r}, [{fields_str}])") elif self.is_typed_namedtuple(base): base_types.append(base.accept(p)) else: # At this point, we don't know what the base class is, so we # just use Incomplete as the base class. - base_types.append(self.typing_name("Incomplete")) - self.add_typing_import("Incomplete") + base_types.append(self.add_typing_import("Incomplete")) for name, value in cdef.keywords.items(): if name == "metaclass": continue # handled separately @@ -1041,9 +1052,9 @@ def _get_namedtuple_fields(self, call: CallExpr) -> list[tuple[str, str]] | None field_names.append(field.value) else: return None # Invalid namedtuple fields type - if field_names: - self.add_typing_import("Incomplete") - incomplete = self.typing_name("Incomplete") + if not field_names: + return [] + incomplete = self.add_typing_import("Incomplete") return [(field_name, incomplete) for field_name in field_names] elif self.is_typed_namedtuple(call): fields_arg = call.args[1] @@ -1074,8 +1085,7 @@ def process_namedtuple(self, lvalue: NameExpr, rvalue: CallExpr) -> None: if fields is None: self.annotate_as_incomplete(lvalue) return - self.add_typing_import("NamedTuple") - bases = self.typing_name("NamedTuple") + bases = self.add_typing_import("NamedTuple") # TODO: Add support for generic NamedTuples. Requires `Generic` as base class. class_def = f"{self._indent}class {lvalue.name}({bases}):" if len(fields) == 0: @@ -1125,14 +1135,13 @@ def process_typeddict(self, lvalue: NameExpr, rvalue: CallExpr) -> None: total = arg else: items.append((arg_name, arg)) - self.add_typing_import("TypedDict") + bases = self.add_typing_import("TypedDict") p = AliasPrinter(self) if any(not key.isidentifier() or keyword.iskeyword(key) for key, _ in items): # Keep the call syntax if there are non-identifier or reserved keyword keys. self.add(f"{self._indent}{lvalue.name} = {rvalue.accept(p)}\n") self._state = VAR else: - bases = self.typing_name("TypedDict") # TODO: Add support for generic TypedDicts. Requires `Generic` as base class. if total is not None: bases += f", total={total.accept(p)}" @@ -1149,8 +1158,7 @@ def process_typeddict(self, lvalue: NameExpr, rvalue: CallExpr) -> None: self._state = CLASS def annotate_as_incomplete(self, lvalue: NameExpr) -> None: - self.add_typing_import("Incomplete") - self.add(f"{self._indent}{lvalue.name}: {self.typing_name('Incomplete')}\n") + self.add(f"{self._indent}{lvalue.name}: {self.add_typing_import('Incomplete')}\n") self._state = VAR def is_alias_expression(self, expr: Expression, top_level: bool = True) -> bool: @@ -1159,10 +1167,14 @@ def is_alias_expression(self, expr: Expression, top_level: bool = True) -> bool: Used to know if assignments look like type aliases, function alias, or module alias. """ - # Assignment of TypeVar(...) are passed through + # Assignment of TypeVar(...) 
and other typevar-likes are passed through if isinstance(expr, CallExpr) and self.get_fullname(expr.callee) in ( "typing.TypeVar", "typing_extensions.TypeVar", + "typing.ParamSpec", + "typing_extensions.ParamSpec", + "typing.TypeVarTuple", + "typing_extensions.TypeVarTuple", ): return True elif isinstance(expr, EllipsisExpr): @@ -1326,7 +1338,20 @@ def get_init( typename += f"[{final_arg}]" else: typename = self.get_str_type_of_node(rvalue) - return f"{self._indent}{lvalue}: {typename}\n" + initializer = self.get_assign_initializer(rvalue) + return f"{self._indent}{lvalue}: {typename}{initializer}\n" + + def get_assign_initializer(self, rvalue: Expression) -> str: + """Does this rvalue need some special initializer value?""" + if self._current_class and self._current_class.info: + # Current rules + # 1. Return `...` if we are dealing with `NamedTuple` and it has an existing default value + if self._current_class.info.is_named_tuple and not isinstance(rvalue, TempNode): + return " = ..." + # TODO: support other possible cases, where initializer is important + + # By default, no initializer is required: + return "" def add(self, string: str) -> None: """Add text to generated stub.""" @@ -1349,13 +1374,14 @@ def typing_name(self, name: str) -> str: else: return name - def add_typing_import(self, name: str) -> None: + def add_typing_import(self, name: str) -> str: """Add a name to be imported for typing, unless it's imported already. The import will be internal to the stub. """ name = self.typing_name(name) self.import_tracker.require_name(name) + return name def add_import_line(self, line: str) -> None: """Add a line of text to the import section, unless it's already there.""" @@ -1393,6 +1419,8 @@ def is_private_member(self, fullname: str) -> bool: def get_str_type_of_node( self, rvalue: Expression, can_infer_optional: bool = False, can_be_any: bool = True ) -> str: + rvalue = self.maybe_unwrap_unary_expr(rvalue) + if isinstance(rvalue, IntExpr): return "int" if isinstance(rvalue, StrExpr): @@ -1401,19 +1429,56 @@ def get_str_type_of_node( return "bytes" if isinstance(rvalue, FloatExpr): return "float" - if isinstance(rvalue, UnaryExpr) and isinstance(rvalue.expr, IntExpr): - return "int" + if isinstance(rvalue, ComplexExpr): # 1j + return "complex" + if isinstance(rvalue, OpExpr) and rvalue.op in ("-", "+"): # -1j + 1 + if isinstance(self.maybe_unwrap_unary_expr(rvalue.left), ComplexExpr) or isinstance( + self.maybe_unwrap_unary_expr(rvalue.right), ComplexExpr + ): + return "complex" if isinstance(rvalue, NameExpr) and rvalue.name in ("True", "False"): return "bool" if can_infer_optional and isinstance(rvalue, NameExpr) and rvalue.name == "None": - self.add_typing_import("Incomplete") - return f"{self.typing_name('Incomplete')} | None" + return f"{self.add_typing_import('Incomplete')} | None" if can_be_any: - self.add_typing_import("Incomplete") - return self.typing_name("Incomplete") + return self.add_typing_import("Incomplete") else: return "" + def maybe_unwrap_unary_expr(self, expr: Expression) -> Expression: + """Unwrap (possibly nested) unary expressions. + + But, some unary expressions can change the type of expression. + While we want to preserve it. For example, `~True` is `int`. + So, we only allow a subset of unary expressions to be unwrapped. 
+ """ + if not isinstance(expr, UnaryExpr): + return expr + + # First, try to unwrap `[+-]+ (int|float|complex)` expr: + math_ops = ("+", "-") + if expr.op in math_ops: + while isinstance(expr, UnaryExpr): + if expr.op not in math_ops or not isinstance( + expr.expr, (IntExpr, FloatExpr, ComplexExpr, UnaryExpr) + ): + break + expr = expr.expr + return expr + + # Next, try `not bool` expr: + if expr.op == "not": + while isinstance(expr, UnaryExpr): + if expr.op != "not" or not isinstance(expr.expr, (NameExpr, UnaryExpr)): + break + if isinstance(expr.expr, NameExpr) and expr.expr.name not in ("True", "False"): + break + expr = expr.expr + return expr + + # This is some other unary expr, we cannot do anything with it (yet?). + return expr + def print_annotation(self, t: Type) -> str: printer = AnnotationPrinter(self) return t.accept(printer) @@ -1648,6 +1713,7 @@ def mypy_options(stubgen_options: Options) -> MypyOptions: options.show_traceback = True options.transform_source = remove_misplaced_type_comments options.preserve_asts = True + options.include_docstrings = stubgen_options.include_docstrings # Override cache_dir if provided in the environment environ_cache_dir = os.getenv("MYPY_CACHE_DIR", "") @@ -1711,6 +1777,7 @@ def generate_stub_from_ast( parse_only: bool = False, include_private: bool = False, export_less: bool = False, + include_docstrings: bool = False, ) -> None: """Use analysed (or just parsed) AST to generate type stub for single file. @@ -1722,6 +1789,7 @@ def generate_stub_from_ast( include_private=include_private, analyzed=not parse_only, export_less=export_less, + include_docstrings=include_docstrings, ) assert mod.ast is not None, "This function must be used only with analyzed modules" mod.ast.accept(gen) @@ -1783,7 +1851,12 @@ def generate_stubs(options: Options) -> None: files.append(target) with generate_guarded(mod.module, target, options.ignore_errors, options.verbose): generate_stub_from_ast( - mod, target, options.parse_only, options.include_private, options.export_less + mod, + target, + options.parse_only, + options.include_private, + options.export_less, + include_docstrings=options.include_docstrings, ) # Separately analyse C modules using different logic. 
@@ -1797,7 +1870,11 @@ def generate_stubs(options: Options) -> None: files.append(target) with generate_guarded(mod.module, target, options.ignore_errors, options.verbose): generate_stub_for_c_module( - mod.module, target, known_modules=all_modules, sig_generators=sig_generators + mod.module, + target, + known_modules=all_modules, + sig_generators=sig_generators, + include_docstrings=options.include_docstrings, ) num_modules = len(py_modules) + len(c_modules) if not options.quiet and num_modules > 0: @@ -1851,6 +1928,11 @@ def parse_options(args: list[str]) -> Options: action="https://github.com/store_true", help="don't implicitly export all names imported from other modules in the same package", ) + parser.add_argument( + "--include-docstrings", + action="https://github.com/store_true", + help="include existing docstrings with the stubs", + ) parser.add_argument("-v", "--verbose", action="https://github.com/store_true", help="show more verbose messages") parser.add_argument("-q", "--quiet", action="https://github.com/store_true", help="show fewer messages") parser.add_argument( @@ -1931,6 +2013,7 @@ def parse_options(args: list[str]) -> Options: verbose=ns.verbose, quiet=ns.quiet, export_less=ns.export_less, + include_docstrings=ns.include_docstrings, ) diff --git a/mypy/stubgenc.py b/mypy/stubgenc.py index 4fc9f8c6fdfa..31487f9d0dcf 100755 --- a/mypy/stubgenc.py +++ b/mypy/stubgenc.py @@ -12,9 +12,9 @@ import re from abc import abstractmethod from types import ModuleType -from typing import Any, Iterable, Mapping -from typing_extensions import Final +from typing import Any, Final, Iterable, Mapping +import mypy.util from mypy.moduleinspect import is_c_module from mypy.stubdoc import ( ArgSig, @@ -170,6 +170,7 @@ def generate_stub_for_c_module( target: str, known_modules: list[str], sig_generators: Iterable[SignatureGenerator], + include_docstrings: bool = False, ) -> None: """Generate stub for C module. @@ -202,6 +203,7 @@ def generate_stub_for_c_module( known_modules=known_modules, imports=imports, sig_generators=sig_generators, + include_docstrings=include_docstrings, ) done.add(name) types: list[str] = [] @@ -217,6 +219,7 @@ def generate_stub_for_c_module( known_modules=known_modules, imports=imports, sig_generators=sig_generators, + include_docstrings=include_docstrings, ) done.add(name) variables = [] @@ -320,15 +323,17 @@ def generate_c_function_stub( self_var: str | None = None, cls: type | None = None, class_name: str | None = None, + include_docstrings: bool = False, ) -> None: """Generate stub for a single function or method. - The result (always a single line) will be appended to 'output'. + The result will be appended to 'output'. If necessary, any required names will be added to 'imports'. The 'class_name' is used to find signature of __init__ or __new__ in 'class_sigs'. 
""" inferred: list[FunctionSig] | None = None + docstr: str | None = None if class_name: # method: assert cls is not None, "cls should be provided for methods" @@ -380,13 +385,19 @@ def generate_c_function_stub( # a sig generator indicates @classmethod by specifying the cls arg if class_name and signature.args and signature.args[0].name == "cls": output.append("@classmethod") - output.append( - "def {function}({args}) -> {ret}: ...".format( - function=name, - args=", ".join(args), - ret=strip_or_import(signature.ret_type, module, known_modules, imports), - ) + output_signature = "def {function}({args}) -> {ret}:".format( + function=name, + args=", ".join(args), + ret=strip_or_import(signature.ret_type, module, known_modules, imports), ) + if include_docstrings and docstr: + docstr_quoted = mypy.util.quote_docstring(docstr.strip()) + docstr_indented = "\n ".join(docstr_quoted.split("\n")) + output.append(output_signature) + output.extend(f" {docstr_indented}".split("\n")) + else: + output_signature += " ..." + output.append(output_signature) def strip_or_import( @@ -494,6 +505,7 @@ def generate_c_type_stub( known_modules: list[str], imports: list[str], sig_generators: Iterable[SignatureGenerator], + include_docstrings: bool = False, ) -> None: """Generate stub for a single class using runtime introspection. @@ -502,7 +514,7 @@ def generate_c_type_stub( """ raw_lookup = getattr(obj, "__dict__") # noqa: B009 items = sorted(get_members(obj), key=lambda x: method_name_sort_key(x[0])) - names = set(x[0] for x in items) + names = {x[0] for x in items} methods: list[str] = [] types: list[str] = [] static_properties: list[str] = [] @@ -536,6 +548,7 @@ def generate_c_type_stub( cls=obj, class_name=class_name, sig_generators=sig_generators, + include_docstrings=include_docstrings, ) elif is_c_property(raw_value): generate_c_property_stub( @@ -558,6 +571,7 @@ def generate_c_type_stub( imports=imports, known_modules=known_modules, sig_generators=sig_generators, + include_docstrings=include_docstrings, ) else: attrs.append((attr, value)) diff --git a/mypy/stubinfo.py b/mypy/stubinfo.py index e6e549ad280f..0d76a6215238 100644 --- a/mypy/stubinfo.py +++ b/mypy/stubinfo.py @@ -9,15 +9,13 @@ def approved_stub_package_exists(prefix: str) -> bool: return is_legacy_bundled_package(prefix) or prefix in non_bundled_packages -def stub_package_name(prefix: str) -> str: +def stub_distribution_name(prefix: str) -> str: return legacy_bundled_packages.get(prefix) or non_bundled_packages[prefix] # Stubs for these third-party packages used to be shipped with mypy. # # Map package name to PyPI stub distribution name. -# -# Package name can have one or two components ('a' or 'a.b'). 
legacy_bundled_packages = { "aiofiles": "types-aiofiles", "bleach": "types-bleach", @@ -116,7 +114,7 @@ def stub_package_name(prefix: str) -> str: "flask_sqlalchemy": "types-Flask-SQLAlchemy", "fpdf": "types-fpdf2", "gdb": "types-gdb", - "google.cloud": "types-google-cloud-ndb", + "google.cloud.ndb": "types-google-cloud-ndb", "hdbcli": "types-hdbcli", "html5lib": "types-html5lib", "httplib2": "types-httplib2", diff --git a/mypy/stubtest.py b/mypy/stubtest.py index 4e038cfd75d1..a804835a632b 100644 --- a/mypy/stubtest.py +++ b/mypy/stubtest.py @@ -11,6 +11,7 @@ import copy import enum import importlib +import importlib.machinery import inspect import os import pkgutil @@ -22,10 +23,11 @@ import typing import typing_extensions import warnings +from collections import defaultdict from contextlib import redirect_stderr, redirect_stdout from functools import singledispatch from pathlib import Path -from typing import Any, Generic, Iterator, TypeVar, Union +from typing import AbstractSet, Any, Generic, Iterator, TypeVar, Union from typing_extensions import get_origin, is_typeddict import mypy.build @@ -209,7 +211,12 @@ def test_module(module_name: str) -> Iterator[Error]: except KeyboardInterrupt: raise except BaseException as e: - yield Error([module_name], f"failed to import, {type(e).__name__}: {e}", stub, MISSING) + note = "" + if isinstance(e, ModuleNotFoundError): + note = " Maybe install the runtime package or alter PYTHONPATH?" + yield Error( + [module_name], f"failed to import.{note} {type(e).__name__}: {e}", stub, MISSING + ) return with warnings.catch_warnings(): @@ -491,7 +498,11 @@ def verify_typeinfo( ) # Check everything already defined on the stub class itself (i.e. not inherited) - to_check = set(stub.names) + # + # Filter out non-identifier names, as these are (hopefully always?) whacky/fictional things + # (like __mypy-replace or __mypy-post_init, etc.) that don't exist at runtime, + # and exist purely for internal mypy reasons + to_check = {name for name in stub.names if name.isidentifier()} # Check all public things on the runtime class to_check.update( m for m in vars(runtime) if not is_probably_private(m) and m not in IGNORABLE_CLASS_DUNDERS @@ -668,7 +679,7 @@ def _verify_arg_default_value( def maybe_strip_cls(name: str, args: list[nodes.Argument]) -> list[nodes.Argument]: - if name in ("__init_subclass__", "__class_getitem__"): + if args and name in ("__init_subclass__", "__class_getitem__"): # These are implicitly classmethods. 
If the stub chooses not to have @classmethod, we # should remove the cls argument if args[0].variable.name == "cls": @@ -1542,10 +1553,10 @@ def anytype() -> mypy.types.AnyType: fallback = mypy.types.Instance(type_info, [anytype() for _ in type_info.type_vars]) value: bool | int | str - if isinstance(runtime, bytes): - value = bytes_to_human_readable_repr(runtime) - elif isinstance(runtime, enum.Enum): + if isinstance(runtime, enum.Enum) and isinstance(runtime.name, str): value = runtime.name + elif isinstance(runtime, bytes): + value = bytes_to_human_readable_repr(runtime) elif isinstance(runtime, (bool, int, str)): value = runtime else: @@ -1630,7 +1641,7 @@ def get_stub(module: str) -> nodes.MypyFile | None: def get_typeshed_stdlib_modules( custom_typeshed_dir: str | None, version_info: tuple[int, int] | None = None -) -> list[str]: +) -> set[str]: """Returns a list of stdlib modules in typeshed (for current Python version).""" stdlib_py_versions = mypy.modulefinder.load_stdlib_py_versions(custom_typeshed_dir) if version_info is None: @@ -1652,14 +1663,93 @@ def exists_in_version(module: str) -> bool: typeshed_dir = Path(mypy.build.default_data_dir()) / "typeshed" stdlib_dir = typeshed_dir / "stdlib" - modules = [] + modules: set[str] = set() for path in stdlib_dir.rglob("*.pyi"): if path.stem == "__init__": path = path.parent module = ".".join(path.relative_to(stdlib_dir).parts[:-1] + (path.stem,)) if exists_in_version(module): - modules.append(module) - return sorted(modules) + modules.add(module) + return modules + + +def get_importable_stdlib_modules() -> set[str]: + """Return all importable stdlib modules at runtime.""" + all_stdlib_modules: AbstractSet[str] + if sys.version_info >= (3, 10): + all_stdlib_modules = sys.stdlib_module_names + else: + all_stdlib_modules = set(sys.builtin_module_names) + modules_by_finder: defaultdict[importlib.machinery.FileFinder, set[str]] = defaultdict(set) + for m in pkgutil.iter_modules(): + if isinstance(m.module_finder, importlib.machinery.FileFinder): + modules_by_finder[m.module_finder].add(m.name) + for finder, module_group in modules_by_finder.items(): + if ( + "site-packages" not in Path(finder.path).parents + # if "_queue" is present, it's most likely the module finder + # for stdlib extension modules; + # if "queue" is present, it's most likely the module finder + # for pure-Python stdlib modules. + # In either case, we'll want to add all the modules that the finder has to offer us. + # This is a bit hacky, but seems to work well in a cross-platform way. + and {"_queue", "queue"} & module_group + ): + all_stdlib_modules.update(module_group) + + importable_stdlib_modules: set[str] = set() + for module_name in all_stdlib_modules: + if module_name in ANNOYING_STDLIB_MODULES: + continue + + try: + runtime = silent_import_module(module_name) + except ImportError: + continue + else: + importable_stdlib_modules.add(module_name) + + try: + # some stdlib modules (e.g. `nt`) don't have __path__ set... + runtime_path = runtime.__path__ + runtime_name = runtime.__name__ + except AttributeError: + continue + + for submodule in pkgutil.walk_packages(runtime_path, runtime_name + "."): + submodule_name = submodule.name + + # There are many annoying *.__main__ stdlib modules, + # and including stubs for them isn't really that useful anyway: + # tkinter.__main__ opens a tkinter windows; unittest.__main__ raises SystemExit; etc. 
+ # + # The idlelib.* submodules are similarly annoying in opening random tkinter windows, + # and we're unlikely to ever add stubs for idlelib in typeshed + # (see discussion in https://github.com/python/typeshed/pull/9193) + # + # test.* modules do weird things like raising exceptions in __del__ methods, + # leading to unraisable exceptions being logged to the terminal + # as a warning at the end of the stubtest run + if ( + submodule_name.endswith(".__main__") + or submodule_name.startswith("idlelib.") + or submodule_name.startswith("test.") + ): + continue + + try: + silent_import_module(submodule_name) + except KeyboardInterrupt: + raise + # importing multiprocessing.popen_forkserver on Windows raises AttributeError... + # some submodules also appear to raise SystemExit as well on some Python versions + # (not sure exactly which) + except BaseException: + continue + else: + importable_stdlib_modules.add(submodule_name) + + return importable_stdlib_modules def get_allowlist_entries(allowlist_file: str) -> Iterator[str]: @@ -1690,6 +1780,10 @@ class _Arguments: version: str +# typeshed added a stub for __main__, but that causes stubtest to check itself +ANNOYING_STDLIB_MODULES: typing_extensions.Final = frozenset({"antigravity", "this", "__main__"}) + + def test_stubs(args: _Arguments, use_builtins_fixtures: bool = False) -> int: """This is stubtest! It's time to test the stubs!""" # Load the allowlist. This is a series of strings corresponding to Error.object_desc @@ -1712,10 +1806,9 @@ def test_stubs(args: _Arguments, use_builtins_fixtures: bool = False) -> int: "cannot pass both --check-typeshed and a list of modules", ) return 1 - modules = get_typeshed_stdlib_modules(args.custom_typeshed_dir) - # typeshed added a stub for __main__, but that causes stubtest to check itself - annoying_modules = {"antigravity", "this", "__main__"} - modules = [m for m in modules if m not in annoying_modules] + typeshed_modules = get_typeshed_stdlib_modules(args.custom_typeshed_dir) + runtime_modules = get_importable_stdlib_modules() + modules = sorted((typeshed_modules | runtime_modules) - ANNOYING_STDLIB_MODULES) if not modules: print(_style("error:", color="red", bold=True), "no modules to check") diff --git a/mypy/subtypes.py b/mypy/subtypes.py index a3b28a3e24de..58ae4efdf582 100644 --- a/mypy/subtypes.py +++ b/mypy/subtypes.py @@ -1,8 +1,8 @@ from __future__ import annotations from contextlib import contextmanager -from typing import Any, Callable, Iterator, List, TypeVar, cast -from typing_extensions import Final, TypeAlias as _TypeAlias +from typing import Any, Callable, Final, Iterator, List, TypeVar, cast +from typing_extensions import TypeAlias as _TypeAlias import mypy.applytype import mypy.constraints @@ -590,6 +590,7 @@ def check_mixed( ): nominal = False else: + # TODO: everywhere else ParamSpecs are handled as invariant. 
if not check_type_parameter( lefta, righta, COVARIANT, self.proper_subtype, self.subtype_context ): @@ -600,7 +601,7 @@ def check_mixed( type_state.record_negative_subtype_cache_entry(self._subtype_kind, left, right) return nominal if right.type.is_protocol and is_protocol_implementation( - left, right, proper_subtype=self.proper_subtype + left, right, proper_subtype=self.proper_subtype, options=self.options ): return True # We record negative cache entry here, and not in the protocol check like we do for @@ -647,7 +648,7 @@ def visit_param_spec(self, left: ParamSpecType) -> bool: and right.id == left.id and right.flavor == left.flavor ): - return True + return self._is_subtype(left.prefix, right.prefix) if isinstance(right, Parameters) and are_trivial_parameters(right): return True return self._is_subtype(left.upper_bound, self.right) @@ -659,6 +660,8 @@ def visit_type_var_tuple(self, left: TypeVarTupleType) -> bool: return self._is_subtype(left.upper_bound, self.right) def visit_unpack_type(self, left: UnpackType) -> bool: + # TODO: Ideally we should not need this (since it is not a real type). + # Instead callers (upper level types) should handle it when it appears in type list. if isinstance(self.right, UnpackType): return self._is_subtype(left.type, self.right.type) if isinstance(self.right, Instance) and self.right.type.fullname == "builtins.object": @@ -666,13 +669,12 @@ def visit_unpack_type(self, left: UnpackType) -> bool: return False def visit_parameters(self, left: Parameters) -> bool: - if isinstance(self.right, (Parameters, CallableType)): - right = self.right - if isinstance(right, CallableType): - right = right.with_unpacked_kwargs() + if isinstance(self.right, Parameters): + # TODO: direction here should be opposite, this function expects + # order of callables, while parameters are contravariant. return are_parameters_compatible( left, - right, + self.right, is_compat=self._is_subtype, ignore_pos_arg_names=self.subtype_context.ignore_pos_arg_names, ) @@ -694,7 +696,9 @@ def visit_callable_type(self, left: CallableType) -> bool: right, is_compat=self._is_subtype, ignore_pos_arg_names=self.subtype_context.ignore_pos_arg_names, - strict_concatenate=self.options.strict_concatenate if self.options else True, + strict_concatenate=(self.options.extra_checks or self.options.strict_concatenate) + if self.options + else False, ) elif isinstance(right, Overloaded): return all(self._is_subtype(left, item) for item in right.items) @@ -721,14 +725,6 @@ def visit_callable_type(self, left: CallableType) -> bool: elif isinstance(right, TypeType): # This is unsound, we don't check the __init__ signature. return left.is_type_obj() and self._is_subtype(left.ret_type, right.item) - elif isinstance(right, Parameters): - # this doesn't check return types.... but is needed for is_equivalent - return are_parameters_compatible( - left.with_unpacked_kwargs(), - right, - is_compat=self._is_subtype, - ignore_pos_arg_names=self.subtype_context.ignore_pos_arg_names, - ) else: return False @@ -750,7 +746,15 @@ def visit_tuple_type(self, left: TupleType) -> bool: # TODO: We shouldn't need this special case. This is currently needed # for isinstance(x, tuple), though it's unclear why. 
return True - return all(self._is_subtype(li, iter_type) for li in left.items) + for li in left.items: + if isinstance(li, UnpackType): + unpack = get_proper_type(li.type) + if isinstance(unpack, Instance): + assert unpack.type.fullname == "builtins.tuple" + li = unpack.args[0] + if not self._is_subtype(li, iter_type): + return False + return True elif self._is_subtype(left.partial_fallback, right) and self._is_subtype( mypy.typeops.tuple_fallback(left), right ): @@ -758,6 +762,7 @@ def visit_tuple_type(self, left: TupleType) -> bool: return False elif isinstance(right, TupleType): if len(left.items) != len(right.items): + # TODO: handle tuple with variadic items better. return False if any(not self._is_subtype(l, r) for l, r in zip(left.items, right.items)): return False @@ -858,7 +863,11 @@ def visit_overloaded(self, left: Overloaded) -> bool: else: # If this one overlaps with the supertype in any way, but it wasn't # an exact match, then it's a potential error. - strict_concat = self.options.strict_concatenate if self.options else True + strict_concat = ( + (self.options.extra_checks or self.options.strict_concatenate) + if self.options + else False + ) if left_index not in matched_overloads and ( is_callable_compatible( left_item, @@ -997,6 +1006,7 @@ def is_protocol_implementation( proper_subtype: bool = False, class_obj: bool = False, skip: list[str] | None = None, + options: Options | None = None, ) -> bool: """Check whether 'left' implements the protocol 'right'. @@ -1062,7 +1072,9 @@ def f(self) -> A: ... # Nominal check currently ignores arg names # NOTE: If we ever change this, be sure to also change the call to # SubtypeVisitor.build_subtype_kind(...) down below. - is_compat = is_subtype(subtype, supertype, ignore_pos_arg_names=ignore_names) + is_compat = is_subtype( + subtype, supertype, ignore_pos_arg_names=ignore_names, options=options + ) else: is_compat = is_proper_subtype(subtype, supertype) if not is_compat: @@ -1074,7 +1086,7 @@ def f(self) -> A: ... superflags = get_member_flags(member, right) if IS_SETTABLE in superflags: # Check opposite direction for settable attributes. - if not is_subtype(supertype, subtype): + if not is_subtype(supertype, subtype, options=options): return False if not class_obj: if IS_SETTABLE not in superflags: @@ -1293,6 +1305,7 @@ def is_callable_compatible( check_args_covariantly: bool = False, allow_partial_overlap: bool = False, strict_concatenate: bool = False, + no_unify_none: bool = False, ) -> bool: """Is the left compatible with the right, using the provided compatibility check? @@ -1383,8 +1396,8 @@ def g(x: int) -> int: ... whether or not we check the args covariantly. """ # Normalize both types before comparing them. - left = left.with_unpacked_kwargs() - right = right.with_unpacked_kwargs() + left = left.with_unpacked_kwargs().with_normalized_var_args() + right = right.with_unpacked_kwargs().with_normalized_var_args() if is_compat_return is None: is_compat_return = is_compat @@ -1409,7 +1422,9 @@ def g(x: int) -> int: ... # (below) treats type variables on the two sides as independent. if left.variables: # Apply generic type variables away in left via type inference. - unified = unify_generic_callable(left, right, ignore_return=ignore_return) + unified = unify_generic_callable( + left, right, ignore_return=ignore_return, no_unify_none=no_unify_none + ) if unified is None: return False left = unified @@ -1421,7 +1436,9 @@ def g(x: int) -> int: ... # So, we repeat the above checks in the opposite direction. 
This also # lets us preserve the 'symmetry' property of allow_partial_overlap. if allow_partial_overlap and right.variables: - unified = unify_generic_callable(right, left, ignore_return=ignore_return) + unified = unify_generic_callable( + right, left, ignore_return=ignore_return, no_unify_none=no_unify_none + ) if unified is not None: right = unified @@ -1442,7 +1459,6 @@ def g(x: int) -> int: ... right, is_compat=is_compat, ignore_pos_arg_names=ignore_pos_arg_names, - check_args_covariantly=check_args_covariantly, allow_partial_overlap=allow_partial_overlap, strict_concatenate_check=strict_concatenate_check, ) @@ -1466,9 +1482,8 @@ def are_parameters_compatible( *, is_compat: Callable[[Type, Type], bool], ignore_pos_arg_names: bool = False, - check_args_covariantly: bool = False, allow_partial_overlap: bool = False, - strict_concatenate_check: bool = True, + strict_concatenate_check: bool = False, ) -> bool: """Helper function for is_callable_compatible, used for Parameter compatibility""" if right.is_ellipsis_args: @@ -1520,7 +1535,7 @@ def _incompatible(left_arg: FormalArgument | None, right_arg: FormalArgument | N # Phase 1b: Check non-star args: for every arg right can accept, left must # also accept. The only exception is if we are allowing partial - # partial overlaps: in that case, we ignore optional args on the right. + # overlaps: in that case, we ignore optional args on the right. for right_arg in right.formal_arguments(): left_arg = mypy.typeops.callable_corresponding_argument(left, right_arg) if left_arg is None: @@ -1534,7 +1549,8 @@ def _incompatible(left_arg: FormalArgument | None, right_arg: FormalArgument | N # Phase 1c: Check var args. Right has an infinite series of optional positional # arguments. Get all further positional args of left, and make sure - # they're more general then the corresponding member in right. + # they're more general than the corresponding member in right. + # TODO: are we handling UnpackType correctly here? if right_star is not None: # Synthesize an anonymous formal argument for the right right_by_position = right.try_synthesizing_arg_from_vararg(None) @@ -1561,7 +1577,7 @@ def _incompatible(left_arg: FormalArgument | None, right_arg: FormalArgument | N # Phase 1d: Check kw args. Right has an infinite series of optional named # arguments. Get all further named args of left, and make sure - # they're more general then the corresponding member in right. + # they're more general than the corresponding member in right. if right_star2 is not None: right_names = {name for name in right.arg_names if name is not None} left_only_names = set() @@ -1629,6 +1645,10 @@ def are_args_compatible( allow_partial_overlap: bool, is_compat: Callable[[Type, Type], bool], ) -> bool: + if left.required and right.required: + # If both arguments are required allow_partial_overlap has no effect. + allow_partial_overlap = False + def is_different(left_item: object | None, right_item: object | None) -> bool: """Checks if the left and right items are different. @@ -1656,7 +1676,7 @@ def is_different(left_item: object | None, right_item: object | None) -> bool: # If right's argument is optional, left's must also be # (unless we're relaxing the checks to allow potential - # rather then definite compatibility). + # rather than definite compatibility). 
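# A concrete instance of this rule, using plain function types as an example:
# `def f(x: int) -> None` with a required x cannot stand in for
# `def g(x: int = ...) -> None` with an optional x, because code written against
# g may omit x entirely; the reverse direction is accepted.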
if not allow_partial_overlap and not right.required and left.required: return False @@ -1681,6 +1701,8 @@ def unify_generic_callable( target: NormalizedCallableType, ignore_return: bool, return_constraint_direction: int | None = None, + *, + no_unify_none: bool = False, ) -> NormalizedCallableType | None: """Try to unify a generic callable type with another callable type. @@ -1692,18 +1714,25 @@ def unify_generic_callable( return_constraint_direction = mypy.constraints.SUBTYPE_OF constraints: list[mypy.constraints.Constraint] = [] - for arg_type, target_arg_type in zip(type.arg_types, target.arg_types): - c = mypy.constraints.infer_constraints( - arg_type, target_arg_type, mypy.constraints.SUPERTYPE_OF - ) - constraints.extend(c) + # There is some special logic for inference in callables, so better use them + # as wholes instead of picking separate arguments. + cs = mypy.constraints.infer_constraints( + type.copy_modified(ret_type=UninhabitedType()), + target.copy_modified(ret_type=UninhabitedType()), + mypy.constraints.SUBTYPE_OF, + skip_neg_op=True, + ) + constraints.extend(cs) if not ignore_return: c = mypy.constraints.infer_constraints( type.ret_type, target.ret_type, return_constraint_direction ) constraints.extend(c) - type_var_ids = [tvar.id for tvar in type.variables] - inferred_vars = mypy.solve.solve_constraints(type_var_ids, constraints) + if no_unify_none: + constraints = [ + c for c in constraints if not isinstance(get_proper_type(c.target), NoneType) + ] + inferred_vars, _ = mypy.solve.solve_constraints(type.variables, constraints) if None in inferred_vars: return None non_none_inferred_vars = cast(List[Type], inferred_vars) diff --git a/mypy/suggestions.py b/mypy/suggestions.py index 8e1225f00a2f..268f3032fc9b 100644 --- a/mypy/suggestions.py +++ b/mypy/suggestions.py @@ -79,7 +79,7 @@ UnionType, get_proper_type, ) -from mypy.types_utils import is_optional, remove_optional +from mypy.types_utils import is_overlapping_none, remove_optional from mypy.util import split_target @@ -752,7 +752,7 @@ def score_type(self, t: Type, arg_pos: bool) -> int: return 20 if any(has_any_type(x) for x in t.items): return 15 - if not is_optional(t): + if not is_overlapping_none(t): return 10 if isinstance(t, CallableType) and (has_any_type(t) or is_tricky_callable(t)): return 10 @@ -868,7 +868,7 @@ def visit_typeddict_type(self, t: TypedDictType) -> str: return t.fallback.accept(self) def visit_union_type(self, t: UnionType) -> str: - if len(t.items) == 2 and is_optional(t): + if len(t.items) == 2 and is_overlapping_none(t): return f"Optional[{remove_optional(t).accept(self)}]" else: return super().visit_union_type(t) diff --git a/mypy/test/data.py b/mypy/test/data.py index daf815dbdbdc..de0267daf918 100644 --- a/mypy/test/data.py +++ b/mypy/test/data.py @@ -12,11 +12,12 @@ from abc import abstractmethod from dataclasses import dataclass from pathlib import Path -from typing import Any, Iterator, NamedTuple, Pattern, Union -from typing_extensions import Final, TypeAlias as _TypeAlias +from typing import Any, Final, Iterator, NamedTuple, NoReturn, Pattern, Union +from typing_extensions import TypeAlias as _TypeAlias import pytest +from mypy import defaults from mypy.test.config import PREFIX, test_data_prefix, test_temp_dir root_dir = os.path.normpath(PREFIX) @@ -64,7 +65,6 @@ def parse_test_case(case: DataDrivenTestCase) -> None: join = posixpath.join out_section_missing = case.suite.required_out_section - normalize_output = True files: list[tuple[str, str]] = [] # path and contents 
output_files: list[tuple[str, str | Pattern[str]]] = [] # output path and contents @@ -77,11 +77,19 @@ def parse_test_case(case: DataDrivenTestCase) -> None: targets: dict[int, list[str]] = {} # Fine-grained targets (per fine-grained update) test_modules: list[str] = [] # Modules which are deemed "test" (vs "fixture") + def _case_fail(msg: str) -> NoReturn: + pytest.fail(f"{case.file}:{case.line}: {msg}", pytrace=False) + # Process the parsed items. Each item has a header of form [id args], # optionally followed by lines of text. item = first_item = test_items[0] test_modules.append("__main__") for item in test_items[1:]: + + def _item_fail(msg: str) -> NoReturn: + item_abs_line = case.line + item.line - 2 + pytest.fail(f"{case.file}:{item_abs_line}: {msg}", pytrace=False) + if item.id in {"file", "fixture", "outfile", "outfile-re"}: # Record an extra file needed for the test case. assert item.arg is not None @@ -132,9 +140,11 @@ def parse_test_case(case: DataDrivenTestCase) -> None: # File/directory to delete during a multi-step test case assert item.arg is not None m = re.match(r"(.*)\.([0-9]+)$", item.arg) - assert m, f"Invalid delete section: {item.arg}" + if m is None: + _item_fail(f"Invalid delete section {item.arg!r}") num = int(m.group(2)) - assert num >= 2, f"Can't delete during step {num}" + if num < 2: + _item_fail(f"Can't delete during step {num}") full = join(base_path, m.group(1)) deleted_paths.setdefault(num, set()).add(full) elif re.match(r"out[0-9]*$", item.id): @@ -145,39 +155,34 @@ def parse_test_case(case: DataDrivenTestCase) -> None: version_check = True for arg in args: - if arg == "skip-path-normalization": - normalize_output = False if arg.startswith("version"): compare_op = arg[7:9] if compare_op not in {">=", "=="}: - raise ValueError( - "{}, line {}: Only >= and == version checks are currently supported".format( - case.file, item.line - ) - ) + _item_fail("Only >= and == version checks are currently supported") version_str = arg[9:] try: version = tuple(int(x) for x in version_str.split(".")) except ValueError: - raise ValueError( - '{}, line {}: "{}" is not a valid python version'.format( - case.file, item.line, version_str - ) - ) + _item_fail(f"{version_str!r} is not a valid python version") if compare_op == ">=": + if version <= defaults.PYTHON3_VERSION: + _item_fail( + f"{arg} always true since minimum runtime version is {defaults.PYTHON3_VERSION}" + ) version_check = sys.version_info >= version elif compare_op == "==": + if version < defaults.PYTHON3_VERSION: + _item_fail( + f"{arg} always false since minimum runtime version is {defaults.PYTHON3_VERSION}" + ) if not 1 < len(version) < 4: - raise ValueError( - "{}, line {}: Only minor or patch version checks " - 'are currently supported with "==": "{}"'.format( - case.file, item.line, version_str - ) + _item_fail( + f'Only minor or patch version checks are currently supported with "==": {version_str!r}' ) version_check = sys.version_info[: len(version)] == version if version_check: tmp_output = [expand_variables(line) for line in item.data] - if os.path.sep == "\\" and normalize_output: + if os.path.sep == "\\" and case.normalize_output: tmp_output = [fix_win_path(line) for line in tmp_output] if item.id == "out" or item.id == "out1": output = tmp_output @@ -189,10 +194,11 @@ def parse_test_case(case: DataDrivenTestCase) -> None: elif item.id == "triggered" and item.arg is None: triggered = item.data else: - raise ValueError(f"Invalid section header {item.id} in {case.file}:{item.line}") + section_str = 
item.id + (f" {item.arg}" if item.arg else "") + _item_fail(f"Invalid section header [{section_str}] in case {case.name!r}") if out_section_missing: - raise ValueError(f"{case.file}, line {first_item.line}: Required output section not found") + _case_fail(f"Required output section not found in case {case.name!r}") for passnum in stale_modules.keys(): if passnum not in rechecked_modules: @@ -204,11 +210,7 @@ def parse_test_case(case: DataDrivenTestCase) -> None: and passnum in rechecked_modules and not stale_modules[passnum].issubset(rechecked_modules[passnum]) ): - raise ValueError( - ( - "Stale modules after pass {} must be a subset of rechecked modules ({}:{})" - ).format(passnum, case.file, first_item.line) - ) + _case_fail(f"Stale modules after pass {passnum} must be a subset of rechecked modules") output_inline_start = len(output) input = first_item.data @@ -219,10 +221,7 @@ def parse_test_case(case: DataDrivenTestCase) -> None: seen_files = set() for file, _ in files: if file in seen_files: - raise ValueError( - f"{case.file}, line {first_item.line}: Duplicated filename {file}. Did you include" - " it multiple times?" - ) + _case_fail(f"Duplicated filename {file}. Did you include it multiple times?") seen_files.add(file) @@ -237,7 +236,6 @@ def parse_test_case(case: DataDrivenTestCase) -> None: case.expected_rechecked_modules = rechecked_modules case.deleted_paths = deleted_paths case.triggered = triggered or [] - case.normalize_output = normalize_output case.expected_fine_grained_targets = targets case.test_modules = test_modules @@ -267,7 +265,7 @@ class DataDrivenTestCase(pytest.Item): # Whether or not we should normalize the output to standardize things like # forward vs backward slashes in file paths for Windows vs Linux. - normalize_output = True + normalize_output: bool # Extra attributes used by some tests. last_line: int @@ -279,10 +277,12 @@ def __init__( self, parent: DataSuiteCollector, suite: DataSuite, + *, file: str, name: str, writescache: bool, only_when: str, + normalize_output: bool, platform: str | None, skip: bool, xfail: bool, @@ -294,6 +294,7 @@ def __init__( self.file = file self.writescache = writescache self.only_when = only_when + self.normalize_output = normalize_output if (platform == "windows" and sys.platform != "win32") or ( platform == "posix" and sys.platform == "win32" ): @@ -367,19 +368,23 @@ def setup(self) -> None: self.steps = [steps.get(num, []) for num in range(2, max_step + 1)] def teardown(self) -> None: - assert self.old_cwd is not None and self.tmpdir is not None, "test was not properly set up" - os.chdir(self.old_cwd) - try: - self.tmpdir.cleanup() - except OSError: - pass + if self.old_cwd is not None: + os.chdir(self.old_cwd) + if self.tmpdir is not None: + try: + self.tmpdir.cleanup() + except OSError: + pass self.old_cwd = None self.tmpdir = None def reportinfo(self) -> tuple[str, int, str]: return self.file, self.line, self.name - def repr_failure(self, excinfo: Any, style: Any | None = None) -> str: + def repr_failure( + self, excinfo: pytest.ExceptionInfo[BaseException], style: Any | None = None + ) -> str: + excrepr: object if isinstance(excinfo.value, SystemExit): # We assume that before doing exit() (which raises SystemExit) we've printed # enough context about what happened so that a stack trace is not useful. 
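# With the _case_fail/_item_fail helpers above, malformed .test files now surface
# as regular pytest failures that point at the offending line; the messages look
# roughly like this (file name and line numbers here are only illustrative):
#
#     check-foo.test:12: Invalid section header [unknownsection] in case 'abc'
#     check-foo.test:7: version==3.7 always false since minimum runtime version is (3, 8)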
@@ -389,7 +394,7 @@ def repr_failure(self, excinfo: Any, style: Any | None = None) -> str: elif isinstance(excinfo.value, pytest.fail.Exception) and not excinfo.value.pytrace: excrepr = excinfo.exconly() else: - self.parent._prunetraceback(excinfo) + excinfo.traceback = self.parent._traceback_filter(excinfo) excrepr = excinfo.getrepr(style="short") return f"data: {self.file}:{self.line}:\n{excrepr}" @@ -615,6 +620,13 @@ def pytest_addoption(parser: Any) -> None: ) +def pytest_configure(config: pytest.Config) -> None: + if config.getoption("--update-data") and config.getoption("--numprocesses", default=1) > 1: + raise pytest.UsageError( + "--update-data incompatible with parallelized tests; re-run with -n 1" + ) + + # This function name is special to pytest. See # https://doc.pytest.org/en/latest/how-to/writing_plugins.html#collection-hooks def pytest_pycollect_makeitem(collector: Any, name: str, obj: object) -> Any | None: @@ -634,6 +646,17 @@ def pytest_pycollect_makeitem(collector: Any, name: str, obj: object) -> Any | N return None +_case_name_pattern = re.compile( + r"(?P[a-zA-Z_0-9]+)" + r"(?P-writescache)?" + r"(?P-only_when_cache|-only_when_nocache)?" + r"(?P-skip_path_normalization)?" + r"(-(?Pposix|windows))?" + r"(?P-skip)?" + r"(?P-xfail)?" +) + + def split_test_cases( parent: DataFileCollector, suite: DataSuite, file: str ) -> Iterator[DataDrivenTestCase]: @@ -644,40 +667,34 @@ def split_test_cases( """ with open(file, encoding="utf-8") as f: data = f.read() - # number of groups in the below regex - NUM_GROUPS = 7 - cases = re.split( - r"^\[case ([a-zA-Z_0-9]+)" - r"(-writescache)?" - r"(-only_when_cache|-only_when_nocache)?" - r"(-posix|-windows)?" - r"(-skip)?" - r"(-xfail)?" - r"\][ \t]*$\n", - data, - flags=re.DOTALL | re.MULTILINE, - ) - line_no = cases[0].count("\n") + 1 + cases = re.split(r"^\[case ([^]+)]+)\][ \t]*$\n", data, flags=re.DOTALL | re.MULTILINE) + cases_iter = iter(cases) + line_no = next(cases_iter).count("\n") + 1 test_names = set() - for i in range(1, len(cases), NUM_GROUPS): - name, writescache, only_when, platform_flag, skip, xfail, data = cases[i : i + NUM_GROUPS] + for case_id in cases_iter: + data = next(cases_iter) + + m = _case_name_pattern.fullmatch(case_id) + if not m: + raise RuntimeError(f"Invalid testcase id {case_id!r}") + name = m.group("name") if name in test_names: raise RuntimeError( 'Found a duplicate test name "{}" in {} on line {}'.format( name, parent.name, line_no ) ) - platform = platform_flag[1:] if platform_flag else None yield DataDrivenTestCase.from_parent( parent=parent, suite=suite, file=file, name=add_test_name_suffix(name, suite.test_name_suffix), - writescache=bool(writescache), - only_when=only_when, - platform=platform, - skip=bool(skip), - xfail=bool(xfail), + writescache=bool(m.group("writescache")), + only_when=m.group("only_when"), + platform=m.group("platform"), + skip=bool(m.group("skip")), + xfail=bool(m.group("xfail")), + normalize_output=not m.group("skip_path_normalization"), data=data, line=line_no, ) diff --git a/mypy/test/helpers.py b/mypy/test/helpers.py index 849ccdc376bd..7447391593d5 100644 --- a/mypy/test/helpers.py +++ b/mypy/test/helpers.py @@ -46,15 +46,9 @@ def run_mypy(args: list[str]) -> None: def assert_string_arrays_equal(expected: list[str], actual: list[str], msg: str) -> None: """Assert that two string arrays are equal. - We consider "can't" and "cannot" equivalent, by replacing the - former with the latter before comparing. - Display any differences in a human-readable form. 
""" actual = clean_up(actual) - actual = [line.replace("can't", "cannot") for line in actual] - expected = [line.replace("can't", "cannot") for line in expected] - if actual != expected: num_skip_start = num_skipped_prefix_lines(expected, actual) num_skip_end = num_skipped_suffix_lines(expected, actual) @@ -247,7 +241,9 @@ def num_skipped_suffix_lines(a1: list[str], a2: list[str]) -> int: def testfile_pyversion(path: str) -> tuple[int, int]: - if path.endswith("python311.test"): + if path.endswith("python312.test"): + return 3, 12 + elif path.endswith("python311.test"): return 3, 11 elif path.endswith("python310.test"): return 3, 10 @@ -345,15 +341,13 @@ def parse_options( else: flag_list = [] options = Options() - # TODO: Enable strict optional in test cases by default (requires *many* test case changes) - options.strict_optional = False options.error_summary = False options.hide_error_codes = True options.force_uppercase_builtins = True options.force_union_syntax = True # Allow custom python version to override testfile_pyversion. - if all(flag.split("=")[0] not in ["--python-version", "-2", "--py2"] for flag in flag_list): + if all(flag.split("=")[0] != "--python-version" for flag in flag_list): options.python_version = testfile_pyversion(testcase.file) if testcase.config.getoption("--mypy-verbose"): diff --git a/mypy/test/meta/__init__.py b/mypy/test/meta/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/mypy/test/meta/test_parse_data.py b/mypy/test/meta/test_parse_data.py new file mode 100644 index 000000000000..6593dbc45704 --- /dev/null +++ b/mypy/test/meta/test_parse_data.py @@ -0,0 +1,103 @@ +""" +A "meta test" which tests the parsing of .test files. This is not meant to become exhaustive +but to ensure we maintain a basic level of ergonomics for mypy contributors. 
+""" +import subprocess +import sys +import textwrap +import uuid +from pathlib import Path + +from mypy.test.config import test_data_prefix +from mypy.test.helpers import Suite + + +class ParseTestDataSuite(Suite): + def _dedent(self, s: str) -> str: + return textwrap.dedent(s).lstrip() + + def _run_pytest(self, data_suite: str) -> str: + p_test_data = Path(test_data_prefix) + p_root = p_test_data.parent.parent + p = p_test_data / f"check-meta-{uuid.uuid4()}.test" + assert not p.exists() + try: + p.write_text(data_suite) + test_nodeid = f"mypy/test/testcheck.py::TypeCheckSuite::{p.name}" + args = [sys.executable, "-m", "pytest", "-n", "0", "-s", test_nodeid] + proc = subprocess.run(args, cwd=p_root, capture_output=True, check=False) + return proc.stdout.decode() + finally: + p.unlink() + + def test_parse_invalid_case(self) -> None: + # Arrange + data = self._dedent( + """ + [case abc] + s: str + [case foo-XFAIL] + s: str + """ + ) + + # Act + actual = self._run_pytest(data) + + # Assert + assert "Invalid testcase id 'foo-XFAIL'" in actual + + def test_parse_invalid_section(self) -> None: + # Arrange + data = self._dedent( + """ + [case abc] + s: str + [unknownsection] + abc + """ + ) + + # Act + actual = self._run_pytest(data) + + # Assert + expected_lineno = data.splitlines().index("[unknownsection]") + 1 + expected = ( + f".test:{expected_lineno}: Invalid section header [unknownsection] in case 'abc'" + ) + assert expected in actual + + def test_bad_ge_version_check(self) -> None: + # Arrange + data = self._dedent( + """ + [case abc] + s: str + [out version>=3.8] + abc + """ + ) + + # Act + actual = self._run_pytest(data) + + # Assert + assert "version>=3.8 always true since minimum runtime version is (3, 8)" in actual + + def test_bad_eq_version_check(self) -> None: + # Arrange + data = self._dedent( + """ + [case abc] + s: str + [out version==3.7] + abc + """ + ) + + # Act + actual = self._run_pytest(data) + + # Assert + assert "version==3.7 always false since minimum runtime version is (3, 8)" in actual diff --git a/mypy/test/testupdatedata.py b/mypy/test/meta/test_update_data.py similarity index 93% rename from mypy/test/testupdatedata.py rename to mypy/test/meta/test_update_data.py index 54e9622a5e91..4e4bdd193dbf 100644 --- a/mypy/test/testupdatedata.py +++ b/mypy/test/meta/test_update_data.py @@ -1,7 +1,13 @@ +""" +A "meta test" which tests the `--update-data` feature for updating .test files. +Updating the expected output, especially when it's in the form of inline (comment) assertions, +can be brittle, which is why we're "meta-testing" here. 
+""" import shlex import subprocess import sys import textwrap +import uuid from pathlib import Path from mypy.test.config import test_data_prefix @@ -16,17 +22,14 @@ def _run_pytest_update_data(self, data_suite: str, *, max_attempts: int) -> str: """ p_test_data = Path(test_data_prefix) p_root = p_test_data.parent.parent - p = p_test_data / "check-update-data.test" + p = p_test_data / f"check-meta-{uuid.uuid4()}.test" assert not p.exists() try: p.write_text(textwrap.dedent(data_suite).lstrip()) test_nodeid = f"mypy/test/testcheck.py::TypeCheckSuite::{p.name}" args = [sys.executable, "-m", "pytest", "-n", "0", "-s", "--update-data", test_nodeid] - if sys.version_info >= (3, 8): - cmd = shlex.join(args) - else: - cmd = " ".join(args) + cmd = shlex.join(args) for i in range(max_attempts - 1, -1, -1): res = subprocess.run(args, cwd=p_root) if res.returncode == 0: diff --git a/mypy/test/testcheck.py b/mypy/test/testcheck.py index 58c0ee803359..85fbe5dc2990 100644 --- a/mypy/test/testcheck.py +++ b/mypy/test/testcheck.py @@ -26,7 +26,7 @@ from mypy.test.update_data import update_testcase_output try: - import lxml # type: ignore[import] + import lxml # type: ignore[import-untyped] except ImportError: lxml = None @@ -36,15 +36,15 @@ # Includes all check-* files with the .test extension in the test-data/unit directory typecheck_files = find_test_files(pattern="check-*.test") -# Tests that use Python 3.8-only AST features (like expression-scoped ignores): -if sys.version_info < (3, 8): - typecheck_files.remove("check-python38.test") +# Tests that use Python version specific features: if sys.version_info < (3, 9): typecheck_files.remove("check-python39.test") if sys.version_info < (3, 10): typecheck_files.remove("check-python310.test") if sys.version_info < (3, 11): typecheck_files.remove("check-python311.test") +if sys.version_info < (3, 12): + typecheck_files.remove("check-python312.test") # Special tests for platforms with case-insensitive filesystems. if sys.platform not in ("darwin", "win32"): @@ -130,8 +130,6 @@ def run_case_once( options.show_traceback = True # Enable some options automatically based on test file name. 
- if "optional" in testcase.file: - options.strict_optional = True if "columns" in testcase.file: options.show_column_numbers = True if "errorcodes" in testcase.file: diff --git a/mypy/test/testcmdline.py b/mypy/test/testcmdline.py index 30ecef07a821..9bc02d319964 100644 --- a/mypy/test/testcmdline.py +++ b/mypy/test/testcmdline.py @@ -20,7 +20,7 @@ ) try: - import lxml # type: ignore[import] + import lxml # type: ignore[import-untyped] except ImportError: lxml = None diff --git a/mypy/test/testconstraints.py b/mypy/test/testconstraints.py index b46f31327150..f40996145cba 100644 --- a/mypy/test/testconstraints.py +++ b/mypy/test/testconstraints.py @@ -1,7 +1,5 @@ from __future__ import annotations -import pytest - from mypy.constraints import SUBTYPE_OF, SUPERTYPE_OF, Constraint, infer_constraints from mypy.test.helpers import Suite from mypy.test.typefixture import TypeFixture @@ -22,7 +20,6 @@ def test_basic_type_variable(self) -> None: Constraint(type_var=fx.t, op=direction, target=fx.a) ] - @pytest.mark.xfail def test_basic_type_var_tuple_subtype(self) -> None: fx = self.fx assert infer_constraints( diff --git a/mypy/test/testdaemon.py b/mypy/test/testdaemon.py index e3cdf44d89f2..7115e682e60d 100644 --- a/mypy/test/testdaemon.py +++ b/mypy/test/testdaemon.py @@ -13,8 +13,6 @@ import tempfile import unittest -import pytest - from mypy.dmypy_server import filter_out_missing_top_level_packages from mypy.fscache import FileSystemCache from mypy.modulefinder import SearchPaths @@ -30,8 +28,6 @@ class DaemonSuite(DataSuite): files = daemon_files def run_case(self, testcase: DataDrivenTestCase) -> None: - if testcase.name.endswith("_python38") and sys.version_info < (3, 8): - pytest.skip("Not supported on this version of Python") try: test_daemon(testcase) finally: diff --git a/mypy/test/testfinegrained.py b/mypy/test/testfinegrained.py index 5b4c816b5c38..ba0526d32558 100644 --- a/mypy/test/testfinegrained.py +++ b/mypy/test/testfinegrained.py @@ -16,7 +16,6 @@ import os import re -import sys import unittest from typing import Any @@ -70,9 +69,6 @@ def should_skip(self, testcase: DataDrivenTestCase) -> bool: else: if testcase.only_when == "-only_when_cache": return True - - if "Inspect" in testcase.name and sys.version_info < (3, 8): - return True return False def run_case(self, testcase: DataDrivenTestCase) -> None: @@ -321,6 +317,7 @@ def maybe_suggest(self, step: int, server: Server, src: str, tmp_dir: str) -> li # JSON contains already escaped \ on Windows, so requires a bit of care. 
val = val.replace("\\\\", "\\") val = val.replace(os.path.realpath(tmp_dir) + os.path.sep, "") + val = val.replace(os.path.abspath(tmp_dir) + os.path.sep, "") output.extend(val.strip().split("\n")) return normalize_messages(output) diff --git a/mypy/test/testgraph.py b/mypy/test/testgraph.py index ce7697142ff2..b0d148d5ae9c 100644 --- a/mypy/test/testgraph.py +++ b/mypy/test/testgraph.py @@ -5,17 +5,10 @@ import sys from typing import AbstractSet -from mypy.build import ( - BuildManager, - BuildSourceSet, - State, - order_ascc, - sorted_components, - strongly_connected_components, - topsort, -) +from mypy.build import BuildManager, BuildSourceSet, State, order_ascc, sorted_components from mypy.errors import Errors from mypy.fscache import FileSystemCache +from mypy.graph_utils import strongly_connected_components, topsort from mypy.modulefinder import SearchPaths from mypy.options import Options from mypy.plugin import Plugin diff --git a/mypy/test/testpep561.py b/mypy/test/testpep561.py index ed8674e8d5bb..48d0658cd1e9 100644 --- a/mypy/test/testpep561.py +++ b/mypy/test/testpep561.py @@ -46,23 +46,39 @@ def virtualenv(python_executable: str = sys.executable) -> Iterator[tuple[str, s yield venv_dir, os.path.abspath(os.path.join(venv_dir, "bin", "python")) +def upgrade_pip(python_executable: str) -> None: + """Install pip>=21.3.1. Required for editable installs with PEP 660.""" + if ( + sys.version_info >= (3, 11) + or (3, 10, 3) <= sys.version_info < (3, 11) + or (3, 9, 11) <= sys.version_info < (3, 10) + or (3, 8, 13) <= sys.version_info < (3, 9) + ): + # Skip for more recent Python releases which come with pip>=21.3.1 + # out of the box - for performance reasons. + return + + install_cmd = [python_executable, "-m", "pip", "install", "pip>=21.3.1"] + try: + with filelock.FileLock(pip_lock, timeout=pip_timeout): + proc = subprocess.run(install_cmd, capture_output=True, env=os.environ) + except filelock.Timeout as err: + raise Exception(f"Failed to acquire {pip_lock}") from err + if proc.returncode != 0: + raise Exception(proc.stdout.decode("utf-8") + proc.stderr.decode("utf-8")) + + def install_package( - pkg: str, python_executable: str = sys.executable, use_pip: bool = True, editable: bool = False + pkg: str, python_executable: str = sys.executable, editable: bool = False ) -> None: """Install a package from test-data/packages/pkg/""" working_dir = os.path.join(package_path, pkg) with tempfile.TemporaryDirectory() as dir: - if use_pip: - install_cmd = [python_executable, "-m", "pip", "install"] - if editable: - install_cmd.append("-e") - install_cmd.append(".") - else: - install_cmd = [python_executable, "setup.py"] - if editable: - install_cmd.append("develop") - else: - install_cmd.append("install") + install_cmd = [python_executable, "-m", "pip", "install"] + if editable: + install_cmd.append("-e") + install_cmd.append(".") + # Note that newer versions of pip (21.3+) don't # follow this env variable, but this is for compatibility env = {"PIP_BUILD": dir} @@ -85,18 +101,20 @@ def test_pep561(testcase: DataDrivenTestCase) -> None: assert python is not None, "Should be impossible" pkgs, pip_args = parse_pkgs(testcase.input[0]) mypy_args = parse_mypy_args(testcase.input[1]) - use_pip = True editable = False for arg in pip_args: - if arg == "no-pip": - use_pip = False - elif arg == "editable": + if arg == "editable": editable = True + else: + raise ValueError(f"Unknown pip argument: {arg}") assert pkgs, "No packages to install for PEP 561 test?" 
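# Editable installs exercise PEP 660, which needs pip >= 21.3.1; on older CPython
# bugfix releases the bundled pip predates that, which is why upgrade_pip() is
# invoked inside the freshly created virtualenv below.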
with virtualenv(python) as venv: venv_dir, python_executable = venv + if editable: + # Editable installs with PEP 660 require pip>=21.3 + upgrade_pip(python_executable) for pkg in pkgs: - install_package(pkg, python_executable, use_pip, editable) + install_package(pkg, python_executable, editable) cmd_line = list(mypy_args) has_program = not ("-p" in cmd_line or "--package" in cmd_line) diff --git a/mypy/test/testpythoneval.py b/mypy/test/testpythoneval.py index 1fd342452102..17baec96cfbc 100644 --- a/mypy/test/testpythoneval.py +++ b/mypy/test/testpythoneval.py @@ -46,10 +46,8 @@ def test_python_evaluation(testcase: DataDrivenTestCase, cache_dir: str) -> None """ assert testcase.old_cwd is not None, "test was not properly set up" # We must enable site packages to get access to installed stubs. - # TODO: Enable strict optional for these tests mypy_cmdline = [ "--show-traceback", - "--no-strict-optional", "--no-silence-site-packages", "--no-error-summary", "--hide-error-codes", diff --git a/mypy/test/testreports.py b/mypy/test/testreports.py index a422b4bb2a7b..5ff315f83ba8 100644 --- a/mypy/test/testreports.py +++ b/mypy/test/testreports.py @@ -7,7 +7,7 @@ from mypy.test.helpers import Suite, assert_equal try: - import lxml # type: ignore[import] + import lxml # type: ignore[import-untyped] except ImportError: lxml = None @@ -22,7 +22,7 @@ def test_get_line_rate(self) -> None: @pytest.mark.skipif(lxml is None, reason="Cannot import lxml. Is it installed?") def test_as_xml(self) -> None: - import lxml.etree as etree # type: ignore[import] + import lxml.etree as etree # type: ignore[import-untyped] cobertura_package = CoberturaPackage("foobar") cobertura_package.covered_lines = 21 diff --git a/mypy/test/testsolve.py b/mypy/test/testsolve.py index d6c585ef4aaa..6566b03ef5e9 100644 --- a/mypy/test/testsolve.py +++ b/mypy/test/testsolve.py @@ -3,10 +3,10 @@ from __future__ import annotations from mypy.constraints import SUBTYPE_OF, SUPERTYPE_OF, Constraint -from mypy.solve import solve_constraints +from mypy.solve import Bounds, Graph, solve_constraints, transitive_closure from mypy.test.helpers import Suite, assert_equal from mypy.test.typefixture import TypeFixture -from mypy.types import Type, TypeVarId, TypeVarType +from mypy.types import Type, TypeVarId, TypeVarLikeType, TypeVarType class SolveSuite(Suite): @@ -17,125 +17,266 @@ def test_empty_input(self) -> None: self.assert_solve([], [], []) def test_simple_supertype_constraints(self) -> None: + self.assert_solve([self.fx.t], [self.supc(self.fx.t, self.fx.a)], [self.fx.a]) self.assert_solve( - [self.fx.t.id], [self.supc(self.fx.t, self.fx.a)], [(self.fx.a, self.fx.o)] - ) - self.assert_solve( - [self.fx.t.id], + [self.fx.t], [self.supc(self.fx.t, self.fx.a), self.supc(self.fx.t, self.fx.b)], - [(self.fx.a, self.fx.o)], + [self.fx.a], ) def test_simple_subtype_constraints(self) -> None: - self.assert_solve([self.fx.t.id], [self.subc(self.fx.t, self.fx.a)], [self.fx.a]) + self.assert_solve([self.fx.t], [self.subc(self.fx.t, self.fx.a)], [self.fx.a]) self.assert_solve( - [self.fx.t.id], + [self.fx.t], [self.subc(self.fx.t, self.fx.a), self.subc(self.fx.t, self.fx.b)], [self.fx.b], ) def test_both_kinds_of_constraints(self) -> None: self.assert_solve( - [self.fx.t.id], + [self.fx.t], [self.supc(self.fx.t, self.fx.b), self.subc(self.fx.t, self.fx.a)], - [(self.fx.b, self.fx.a)], + [self.fx.b], ) def test_unsatisfiable_constraints(self) -> None: # The constraints are impossible to satisfy. 
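# Concretely: supc(t, a) demands A <: T while subc(t, b) demands T <: B, and in
# this fixture B is a proper subclass of A, so no T satisfies A <: T <: B and the
# expected answer is [None].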
self.assert_solve( - [self.fx.t.id], - [self.supc(self.fx.t, self.fx.a), self.subc(self.fx.t, self.fx.b)], - [None], + [self.fx.t], [self.supc(self.fx.t, self.fx.a), self.subc(self.fx.t, self.fx.b)], [None] ) def test_exactly_specified_result(self) -> None: self.assert_solve( - [self.fx.t.id], + [self.fx.t], [self.supc(self.fx.t, self.fx.b), self.subc(self.fx.t, self.fx.b)], - [(self.fx.b, self.fx.b)], + [self.fx.b], ) def test_multiple_variables(self) -> None: self.assert_solve( - [self.fx.t.id, self.fx.s.id], + [self.fx.t, self.fx.s], [ self.supc(self.fx.t, self.fx.b), self.supc(self.fx.s, self.fx.c), self.subc(self.fx.t, self.fx.a), ], - [(self.fx.b, self.fx.a), (self.fx.c, self.fx.o)], + [self.fx.b, self.fx.c], ) def test_no_constraints_for_var(self) -> None: - self.assert_solve([self.fx.t.id], [], [self.fx.uninhabited]) + self.assert_solve([self.fx.t], [], [self.fx.uninhabited]) + self.assert_solve([self.fx.t, self.fx.s], [], [self.fx.uninhabited, self.fx.uninhabited]) self.assert_solve( - [self.fx.t.id, self.fx.s.id], [], [self.fx.uninhabited, self.fx.uninhabited] - ) - self.assert_solve( - [self.fx.t.id, self.fx.s.id], + [self.fx.t, self.fx.s], [self.supc(self.fx.s, self.fx.a)], - [self.fx.uninhabited, (self.fx.a, self.fx.o)], + [self.fx.uninhabited, self.fx.a], ) def test_simple_constraints_with_dynamic_type(self) -> None: + self.assert_solve([self.fx.t], [self.supc(self.fx.t, self.fx.anyt)], [self.fx.anyt]) self.assert_solve( - [self.fx.t.id], [self.supc(self.fx.t, self.fx.anyt)], [(self.fx.anyt, self.fx.anyt)] - ) - self.assert_solve( - [self.fx.t.id], + [self.fx.t], [self.supc(self.fx.t, self.fx.anyt), self.supc(self.fx.t, self.fx.anyt)], - [(self.fx.anyt, self.fx.anyt)], + [self.fx.anyt], ) self.assert_solve( - [self.fx.t.id], + [self.fx.t], [self.supc(self.fx.t, self.fx.anyt), self.supc(self.fx.t, self.fx.a)], - [(self.fx.anyt, self.fx.anyt)], + [self.fx.anyt], ) + self.assert_solve([self.fx.t], [self.subc(self.fx.t, self.fx.anyt)], [self.fx.anyt]) self.assert_solve( - [self.fx.t.id], [self.subc(self.fx.t, self.fx.anyt)], [(self.fx.anyt, self.fx.anyt)] - ) - self.assert_solve( - [self.fx.t.id], + [self.fx.t], [self.subc(self.fx.t, self.fx.anyt), self.subc(self.fx.t, self.fx.anyt)], - [(self.fx.anyt, self.fx.anyt)], + [self.fx.anyt], ) - # self.assert_solve([self.fx.t.id], + # self.assert_solve([self.fx.t], # [self.subc(self.fx.t, self.fx.anyt), # self.subc(self.fx.t, self.fx.a)], - # [(self.fx.anyt, self.fx.anyt)]) + # [self.fx.anyt]) # TODO: figure out what this should be after changes to meet(any, X) def test_both_normal_and_any_types_in_results(self) -> None: # If one of the bounds is any, we promote the other bound to # any as well, since otherwise the type range does not make sense. 
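# Concretely: a lower bound of A combined with an upper bound of Any (or the
# other way around) solves to Any rather than A, as the two cases below assert.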
self.assert_solve( - [self.fx.t.id], + [self.fx.t], [self.supc(self.fx.t, self.fx.a), self.subc(self.fx.t, self.fx.anyt)], - [(self.fx.anyt, self.fx.anyt)], + [self.fx.anyt], ) self.assert_solve( - [self.fx.t.id], + [self.fx.t], [self.supc(self.fx.t, self.fx.anyt), self.subc(self.fx.t, self.fx.a)], - [(self.fx.anyt, self.fx.anyt)], + [self.fx.anyt], + ) + + def test_poly_no_constraints(self) -> None: + self.assert_solve( + [self.fx.t, self.fx.u], + [], + [self.fx.uninhabited, self.fx.uninhabited], + allow_polymorphic=True, + ) + + def test_poly_trivial_free(self) -> None: + self.assert_solve( + [self.fx.t, self.fx.u], + [self.subc(self.fx.t, self.fx.a)], + [self.fx.a, self.fx.u], + [self.fx.u], + allow_polymorphic=True, + ) + + def test_poly_free_pair(self) -> None: + self.assert_solve( + [self.fx.t, self.fx.u], + [self.subc(self.fx.t, self.fx.u)], + [self.fx.t, self.fx.t], + [self.fx.t], + allow_polymorphic=True, + ) + + def test_poly_free_pair_with_bounds(self) -> None: + t_prime = self.fx.t.copy_modified(upper_bound=self.fx.b) + self.assert_solve( + [self.fx.t, self.fx.ub], + [self.subc(self.fx.t, self.fx.ub)], + [t_prime, t_prime], + [t_prime], + allow_polymorphic=True, + ) + + def test_poly_free_pair_with_bounds_uninhabited(self) -> None: + self.assert_solve( + [self.fx.ub, self.fx.uc], + [self.subc(self.fx.ub, self.fx.uc)], + [self.fx.uninhabited, self.fx.uninhabited], + [], + allow_polymorphic=True, + ) + + def test_poly_bounded_chain(self) -> None: + # B <: T <: U <: S <: A + self.assert_solve( + [self.fx.t, self.fx.u, self.fx.s], + [ + self.supc(self.fx.t, self.fx.b), + self.subc(self.fx.t, self.fx.u), + self.subc(self.fx.u, self.fx.s), + self.subc(self.fx.s, self.fx.a), + ], + [self.fx.b, self.fx.b, self.fx.b], + allow_polymorphic=True, + ) + + def test_poly_reverse_overlapping_chain(self) -> None: + # A :> T <: S :> B + self.assert_solve( + [self.fx.t, self.fx.s], + [ + self.subc(self.fx.t, self.fx.s), + self.subc(self.fx.t, self.fx.a), + self.supc(self.fx.s, self.fx.b), + ], + [self.fx.a, self.fx.a], + allow_polymorphic=True, + ) + + def test_poly_reverse_split_chain(self) -> None: + # B :> T <: S :> A + self.assert_solve( + [self.fx.t, self.fx.s], + [ + self.subc(self.fx.t, self.fx.s), + self.subc(self.fx.t, self.fx.b), + self.supc(self.fx.s, self.fx.a), + ], + [self.fx.b, self.fx.a], + allow_polymorphic=True, + ) + + def test_poly_unsolvable_chain(self) -> None: + # A <: T <: U <: S <: B + self.assert_solve( + [self.fx.t, self.fx.u, self.fx.s], + [ + self.supc(self.fx.t, self.fx.a), + self.subc(self.fx.t, self.fx.u), + self.subc(self.fx.u, self.fx.s), + self.subc(self.fx.s, self.fx.b), + ], + [None, None, None], + allow_polymorphic=True, + ) + + def test_simple_chain_closure(self) -> None: + self.assert_transitive_closure( + [self.fx.t.id, self.fx.s.id], + [ + self.supc(self.fx.t, self.fx.b), + self.subc(self.fx.t, self.fx.s), + self.subc(self.fx.s, self.fx.a), + ], + {(self.fx.t.id, self.fx.s.id)}, + {self.fx.t.id: {self.fx.b}, self.fx.s.id: {self.fx.b}}, + {self.fx.t.id: {self.fx.a}, self.fx.s.id: {self.fx.a}}, + ) + + def test_reverse_chain_closure(self) -> None: + self.assert_transitive_closure( + [self.fx.t.id, self.fx.s.id], + [ + self.subc(self.fx.t, self.fx.s), + self.subc(self.fx.t, self.fx.a), + self.supc(self.fx.s, self.fx.b), + ], + {(self.fx.t.id, self.fx.s.id)}, + {self.fx.t.id: set(), self.fx.s.id: {self.fx.b}}, + {self.fx.t.id: {self.fx.a}, self.fx.s.id: set()}, + ) + + def test_secondary_constraint_closure(self) -> None: + self.assert_transitive_closure( + 
[self.fx.t.id, self.fx.s.id], + [self.supc(self.fx.s, self.fx.gt), self.subc(self.fx.s, self.fx.ga)], + set(), + {self.fx.t.id: set(), self.fx.s.id: {self.fx.gt}}, + {self.fx.t.id: {self.fx.a}, self.fx.s.id: {self.fx.ga}}, ) def assert_solve( + self, + vars: list[TypeVarLikeType], + constraints: list[Constraint], + results: list[None | Type], + free_vars: list[TypeVarLikeType] | None = None, + allow_polymorphic: bool = False, + ) -> None: + if free_vars is None: + free_vars = [] + actual, actual_free = solve_constraints( + vars, constraints, allow_polymorphic=allow_polymorphic + ) + assert_equal(actual, results) + assert_equal(actual_free, free_vars) + + def assert_transitive_closure( self, vars: list[TypeVarId], constraints: list[Constraint], - results: list[None | Type | tuple[Type, Type]], + graph: Graph, + lowers: Bounds, + uppers: Bounds, ) -> None: - res: list[Type | None] = [] - for r in results: - if isinstance(r, tuple): - res.append(r[0]) - else: - res.append(r) - actual = solve_constraints(vars, constraints) - assert_equal(str(actual), str(res)) + actual_graph, actual_lowers, actual_uppers = transitive_closure(vars, constraints) + # Add trivial elements. + for v in vars: + graph.add((v, v)) + assert_equal(actual_graph, graph) + assert_equal(dict(actual_lowers), lowers) + assert_equal(dict(actual_uppers), uppers) def supc(self, type_var: TypeVarType, bound: Type) -> Constraint: return Constraint(type_var, SUPERTYPE_OF, bound) diff --git a/mypy/test/teststubgen.py b/mypy/test/teststubgen.py index b21e06c0896a..79d380785a39 100644 --- a/mypy/test/teststubgen.py +++ b/mypy/test/teststubgen.py @@ -677,6 +677,7 @@ class StubgenPythonSuite(DataSuite): base_path = "." files = ["stubgen.test"] + @unittest.skipIf(sys.platform == "win32", "clean up fails on Windows") def run_case(self, testcase: DataDrivenTestCase) -> None: with local_sys_path_set(): self.run_case_inner(testcase) @@ -1212,6 +1213,27 @@ def test(arg0: str) -> None: assert_equal(output, ["def test(arg0: foo.bar.Action) -> other.Thing: ..."]) assert_equal(set(imports), {"import foo", "import other"}) + def test_generate_c_function_no_crash_for_non_str_docstring(self) -> None: + def test(arg0: str) -> None: + ... + + test.__doc__ = property(lambda self: "test(arg0: str) -> None") # type: ignore[assignment] + + output: list[str] = [] + imports: list[str] = [] + mod = ModuleType(self.__module__, "") + generate_c_function_stub( + mod, + "test", + test, + output=output, + imports=imports, + known_modules=[mod.__name__], + sig_generators=get_sig_generators(parse_options([])), + ) + assert_equal(output, ["def test(*args, **kwargs) -> Any: ..."]) + assert_equal(imports, []) + def test_generate_c_property_with_pybind11(self) -> None: """Signatures included by PyBind11 inside property.fget are read.""" diff --git a/mypy/test/teststubtest.py b/mypy/test/teststubtest.py index 275b09c3a240..a52d9ef5de31 100644 --- a/mypy/test/teststubtest.py +++ b/mypy/test/teststubtest.py @@ -64,10 +64,12 @@ def __init__(self, name: str) -> None: ... class Coroutine(Generic[_T_co, _S, _R]): ... class Iterable(Generic[_T_co]): ... +class Iterator(Iterable[_T_co]): ... class Mapping(Generic[_K, _V]): ... class Match(Generic[AnyStr]): ... class Sequence(Iterable[_T_co]): ... class Tuple(Sequence[_T_co]): ... +class NamedTuple(tuple[Any, ...]): ... def overload(func: _T) -> _T: ... """ @@ -82,9 +84,12 @@ def overload(func: _T) -> _T: ... class object: __module__: str def __init__(self) -> None: pass + def __repr__(self) -> str: pass class type: ... 
-class tuple(Sequence[T_co], Generic[T_co]): ... +class tuple(Sequence[T_co], Generic[T_co]): + def __ge__(self, __other: tuple[T_co, ...]) -> bool: pass + class dict(Mapping[KT, VT]): ... class function: pass @@ -103,6 +108,39 @@ def classmethod(f: T) -> T: ... def staticmethod(f: T) -> T: ... """ +stubtest_enum_stub = """ +import sys +from typing import Any, TypeVar, Iterator + +_T = TypeVar('_T') + +class EnumMeta(type): + def __len__(self) -> int: pass + def __iter__(self: type[_T]) -> Iterator[_T]: pass + def __reversed__(self: type[_T]) -> Iterator[_T]: pass + def __getitem__(self: type[_T], name: str) -> _T: pass + +class Enum(metaclass=EnumMeta): + def __new__(cls: type[_T], value: object) -> _T: pass + def __repr__(self) -> str: pass + def __str__(self) -> str: pass + def __format__(self, format_spec: str) -> str: pass + def __hash__(self) -> Any: pass + def __reduce_ex__(self, proto: Any) -> Any: pass + name: str + value: Any + +class Flag(Enum): + def __or__(self: _T, other: _T) -> _T: pass + def __and__(self: _T, other: _T) -> _T: pass + def __xor__(self: _T, other: _T) -> _T: pass + def __invert__(self: _T) -> _T: pass + if sys.version_info >= (3, 11): + __ror__ = __or__ + __rand__ = __and__ + __rxor__ = __xor__ +""" + def run_stubtest( stub: str, runtime: str, options: list[str], config_file: str | None = None @@ -112,6 +150,8 @@ def run_stubtest( f.write(stubtest_builtins_stub) with open("typing.pyi", "w") as f: f.write(stubtest_typing_stub) + with open("enum.pyi", "w") as f: + f.write(stubtest_enum_stub) with open(f"{TEST_MODULE_NAME}.pyi", "w") as f: f.write(stub) with open(f"{TEST_MODULE_NAME}.py", "w") as f: @@ -232,17 +272,16 @@ def test_arg_name(self) -> Iterator[Case]: runtime="def bad(num, text) -> None: pass", error="bad", ) - if sys.version_info >= (3, 8): - yield Case( - stub="def good_posonly(__number: int, text: str) -> None: ...", - runtime="def good_posonly(num, /, text): pass", - error=None, - ) - yield Case( - stub="def bad_posonly(__number: int, text: str) -> None: ...", - runtime="def bad_posonly(flag, /, text): pass", - error="bad_posonly", - ) + yield Case( + stub="def good_posonly(__number: int, text: str) -> None: ...", + runtime="def good_posonly(num, /, text): pass", + error=None, + ) + yield Case( + stub="def bad_posonly(__number: int, text: str) -> None: ...", + runtime="def bad_posonly(flag, /, text): pass", + error="bad_posonly", + ) yield Case( stub=""" class BadMethod: @@ -283,22 +322,21 @@ def test_arg_kind(self) -> Iterator[Case]: runtime="def stub_posonly(number, text): pass", error="stub_posonly", ) - if sys.version_info >= (3, 8): - yield Case( - stub="def good_posonly(__number: int, text: str) -> None: ...", - runtime="def good_posonly(number, /, text): pass", - error=None, - ) - yield Case( - stub="def runtime_posonly(number: int, text: str) -> None: ...", - runtime="def runtime_posonly(number, /, text): pass", - error="runtime_posonly", - ) - yield Case( - stub="def stub_posonly_570(number: int, /, text: str) -> None: ...", - runtime="def stub_posonly_570(number, text): pass", - error="stub_posonly_570", - ) + yield Case( + stub="def good_posonly(__number: int, text: str) -> None: ...", + runtime="def good_posonly(number, /, text): pass", + error=None, + ) + yield Case( + stub="def runtime_posonly(number: int, text: str) -> None: ...", + runtime="def runtime_posonly(number, /, text): pass", + error="runtime_posonly", + ) + yield Case( + stub="def stub_posonly_570(number: int, /, text: str) -> None: ...", + runtime="def 
stub_posonly_570(number, text): pass", + error="stub_posonly_570", + ) @collect_cases def test_default_presence(self) -> Iterator[Case]: @@ -582,17 +620,16 @@ def f4(a: str, *args, b: int, **kwargs) -> str: ... runtime="def f4(a, *args, b, **kwargs): pass", error=None, ) - if sys.version_info >= (3, 8): - yield Case( - stub=""" - @overload - def f5(__a: int) -> int: ... - @overload - def f5(__b: str) -> str: ... - """, - runtime="def f5(x, /): pass", - error=None, - ) + yield Case( + stub=""" + @overload + def f5(__a: int) -> int: ... + @overload + def f5(__b: str) -> str: ... + """, + runtime="def f5(x, /): pass", + error=None, + ) @collect_cases def test_property(self) -> Iterator[Case]: @@ -955,16 +992,15 @@ def fizz(self): pass @collect_cases def test_enum(self) -> Iterator[Case]: + yield Case(stub="import enum", runtime="import enum", error=None) yield Case( stub=""" - import enum class X(enum.Enum): a: int b: str c: str """, runtime=""" - import enum class X(enum.Enum): a = 1 b = "asdf" @@ -972,6 +1008,86 @@ class X(enum.Enum): """, error="X.c", ) + yield Case( + stub=""" + class Flags1(enum.Flag): + a: int + b: int + def foo(x: Flags1 = ...) -> None: ... + """, + runtime=""" + class Flags1(enum.Flag): + a = 1 + b = 2 + def foo(x=Flags1.a|Flags1.b): pass + """, + error=None, + ) + yield Case( + stub=""" + class Flags2(enum.Flag): + a: int + b: int + def bar(x: Flags2 | None = None) -> None: ... + """, + runtime=""" + class Flags2(enum.Flag): + a = 1 + b = 2 + def bar(x=Flags2.a|Flags2.b): pass + """, + error="bar", + ) + yield Case( + stub=""" + class Flags3(enum.Flag): + a: int + b: int + def baz(x: Flags3 | None = ...) -> None: ... + """, + runtime=""" + class Flags3(enum.Flag): + a = 1 + b = 2 + def baz(x=Flags3(0)): pass + """, + error=None, + ) + yield Case( + stub=""" + class Flags4(enum.Flag): + a: int + b: int + def spam(x: Flags4 | None = None) -> None: ... + """, + runtime=""" + class Flags4(enum.Flag): + a = 1 + b = 2 + def spam(x=Flags4(0)): pass + """, + error="spam", + ) + yield Case( + stub=""" + from typing_extensions import Final, Literal + class BytesEnum(bytes, enum.Enum): + a: bytes + FOO: Literal[BytesEnum.a] + BAR: Final = BytesEnum.a + BAZ: BytesEnum + EGGS: bytes + """, + runtime=""" + class BytesEnum(bytes, enum.Enum): + a = b'foo' + FOO = BytesEnum.a + BAR = BytesEnum.a + BAZ = BytesEnum.a + EGGS = BytesEnum.a + """, + error=None, + ) @collect_cases def test_decorator(self) -> Iterator[Case]: @@ -1602,6 +1718,72 @@ class Y(TypedDict): error=None, ) + @collect_cases + def test_named_tuple(self) -> Iterator[Case]: + yield Case( + stub="from typing import NamedTuple", + runtime="from typing import NamedTuple", + error=None, + ) + yield Case( + stub=""" + class X1(NamedTuple): + bar: int + foo: str = ... + """, + runtime=""" + class X1(NamedTuple): + bar: int + foo: str = 'a' + """, + error=None, + ) + yield Case( + stub=""" + class X2(NamedTuple): + bar: int + foo: str + """, + runtime=""" + class X2(NamedTuple): + bar: int + foo: str = 'a' + """, + # `__new__` will miss a default value for a `foo` parameter, + # but we don't generate special errors for `foo` missing `...` part. + error="X2.__new__", + ) + + @collect_cases + def test_named_tuple_typing_and_collections(self) -> Iterator[Case]: + yield Case( + stub="from typing import NamedTuple", + runtime="from collections import namedtuple", + error=None, + ) + yield Case( + stub=""" + class X1(NamedTuple): + bar: int + foo: str = ... 
+ """, + runtime=""" + X1 = namedtuple('X1', ['bar', 'foo'], defaults=['a']) + """, + error=None, + ) + yield Case( + stub=""" + class X2(NamedTuple): + bar: int + foo: str + """, + runtime=""" + X2 = namedtuple('X1', ['bar', 'foo'], defaults=['a']) + """, + error="X2.__new__", + ) + @collect_cases def test_type_var(self) -> Iterator[Case]: yield Case( diff --git a/mypy/test/testsubtypes.py b/mypy/test/testsubtypes.py index c76a34ff00d7..464f64d2b846 100644 --- a/mypy/test/testsubtypes.py +++ b/mypy/test/testsubtypes.py @@ -2,7 +2,7 @@ from mypy.nodes import CONTRAVARIANT, COVARIANT, INVARIANT from mypy.subtypes import is_subtype -from mypy.test.helpers import Suite, skip +from mypy.test.helpers import Suite from mypy.test.typefixture import InterfaceTypeFixture, TypeFixture from mypy.types import Instance, TupleType, Type, UnpackType @@ -69,7 +69,6 @@ def test_interface_subtyping(self) -> None: self.assert_equivalent(self.fx.f, self.fx.f) self.assert_not_subtype(self.fx.a, self.fx.f) - @skip def test_generic_interface_subtyping(self) -> None: # TODO make this work fx2 = InterfaceTypeFixture() diff --git a/mypy/test/testtypes.py b/mypy/test/testtypes.py index 5f6943de3199..12e7b207b00a 100644 --- a/mypy/test/testtypes.py +++ b/mypy/test/testtypes.py @@ -5,9 +5,7 @@ import re from unittest import TestCase, skipUnless -import mypy.expandtype from mypy.erasetype import erase_type, remove_instance_last_known_values -from mypy.expandtype import expand_type from mypy.indirection import TypeIndirectionVisitor from mypy.join import join_simple, join_types from mypy.meet import meet_types, narrow_declared_type @@ -53,6 +51,9 @@ has_recursive_types, ) +# Solving the import cycle: +import mypy.expandtype # ruff: isort: skip + class TypesSuite(Suite): def setUp(self) -> None: @@ -128,10 +129,10 @@ def test_callable_type_with_var_args(self) -> None: ) assert_equal(str(c3), "def (X? =, *Y?) -> Any") - def test_tuple_type(self) -> None: + def test_tuple_type_upper(self) -> None: options = Options() options.force_uppercase_builtins = True - assert_equal(TupleType([], self.fx.std_tuple).str_with_options(options), "Tuple[]") + assert_equal(TupleType([], self.fx.std_tuple).str_with_options(options), "Tuple[()]") assert_equal(TupleType([self.x], self.fx.std_tuple).str_with_options(options), "Tuple[X?]") assert_equal( TupleType( @@ -268,7 +269,7 @@ def assert_expand( for id, t in map_items: lower_bounds[id] = t - exp = expand_type(orig, lower_bounds) + exp = mypy.expandtype.expand_type(orig, lower_bounds) # Remove erased tags (asterisks). 
assert_equal(str(exp).replace("*", ""), str(result)) @@ -899,13 +900,11 @@ def ov(*items: CallableType) -> Overloaded: self.assert_join(ov(c(fx.a, fx.a), c(fx.b, fx.b)), c(any, fx.b), c(any, fx.b)) self.assert_join(ov(c(fx.a, fx.a), c(any, fx.b)), c(fx.b, fx.b), c(any, fx.b)) - @skip def test_join_interface_types(self) -> None: self.assert_join(self.fx.f, self.fx.f, self.fx.f) self.assert_join(self.fx.f, self.fx.f2, self.fx.o) self.assert_join(self.fx.f, self.fx.f3, self.fx.f) - @skip def test_join_interface_and_class_types(self) -> None: self.assert_join(self.fx.o, self.fx.f, self.fx.o) self.assert_join(self.fx.a, self.fx.f, self.fx.o) @@ -1180,7 +1179,6 @@ def test_meet_class_types_with_shared_interfaces(self) -> None: self.assert_meet(self.fx.e, self.fx.e2, self.fx.nonet) self.assert_meet(self.fx.e2, self.fx.e3, self.fx.nonet) - @skip def test_meet_with_generic_interfaces(self) -> None: fx = InterfaceTypeFixture() self.assert_meet(fx.gfa, fx.m1, fx.m1) @@ -1466,7 +1464,7 @@ def make_call(*items: tuple[str, str | None]) -> CallExpr: class TestExpandTypeLimitGetProperType(TestCase): # WARNING: do not increase this number unless absolutely necessary, # and you understand what you are doing. - ALLOWED_GET_PROPER_TYPES = 8 + ALLOWED_GET_PROPER_TYPES = 7 @skipUnless(mypy.expandtype.__file__.endswith(".py"), "Skip for compiled mypy") def test_count_get_proper_type(self) -> None: diff --git a/mypy/test/typefixture.py b/mypy/test/typefixture.py index bf1500a3cdec..81af765f8585 100644 --- a/mypy/test/typefixture.py +++ b/mypy/test/typefixture.py @@ -219,6 +219,10 @@ def make_type_var( self._add_bool_dunder(self.bool_type_info) self._add_bool_dunder(self.ai) + # TypeVars with non-trivial bounds + self.ub = make_type_var("UB", 5, [], self.b, variance) # UB`5 (type variable) + self.uc = make_type_var("UC", 6, [], self.c, variance) # UC`6 (type variable) + def make_type_var_tuple(name: str, id: int, upper_bound: Type) -> TypeVarTupleType: return TypeVarTupleType( name, diff --git a/mypy/type_visitor.py b/mypy/type_visitor.py index 2efae49e9e10..1860a43eb14f 100644 --- a/mypy/type_visitor.py +++ b/mypy/type_visitor.py @@ -14,8 +14,7 @@ from __future__ import annotations from abc import abstractmethod -from typing import Any, Callable, Generic, Iterable, Sequence, TypeVar, cast -from typing_extensions import Final +from typing import Any, Callable, Final, Generic, Iterable, Sequence, TypeVar, cast from mypy_extensions import mypyc_attr, trait @@ -349,7 +348,7 @@ def visit_type_var(self, t: TypeVarType) -> T: return self.query_types([t.upper_bound, t.default] + t.values) def visit_param_spec(self, t: ParamSpecType) -> T: - return self.query_types([t.upper_bound, t.default]) + return self.query_types([t.upper_bound, t.default, t.prefix]) def visit_type_var_tuple(self, t: TypeVarTupleType) -> T: return self.query_types([t.upper_bound, t.default]) diff --git a/mypy/typeanal.py b/mypy/typeanal.py index d1e6e315b9e3..ed1a8073887b 100644 --- a/mypy/typeanal.py +++ b/mypy/typeanal.py @@ -4,8 +4,8 @@ import itertools from contextlib import contextmanager -from typing import Callable, Iterable, Iterator, List, Sequence, Tuple, TypeVar -from typing_extensions import Final, Protocol +from typing import Callable, Final, Iterable, Iterator, List, Sequence, Tuple, TypeVar +from typing_extensions import Protocol from mypy import errorcodes as codes, message_registry, nodes from mypy.errorcodes import ErrorCode @@ -82,6 +82,7 @@ UnionType, UnpackType, callable_with_ellipsis, + flatten_nested_tuples, 
flatten_nested_unions, get_proper_type, has_type_vars, @@ -195,6 +196,7 @@ def __init__( allow_placeholder: bool = False, allow_required: bool = False, allow_param_spec_literals: bool = False, + allow_unpack: bool = False, report_invalid_types: bool = True, prohibit_self_type: str | None = None, allowed_alias_tvars: list[TypeVarLikeType] | None = None, @@ -226,6 +228,8 @@ def __init__( self.allow_required = allow_required # Are we in a context where ParamSpec literals are allowed? self.allow_param_spec_literals = allow_param_spec_literals + # Are we in context where literal "..." specifically is allowed? + self.allow_ellipsis = False # Should we report an error whenever we encounter a RawExpressionType outside # of a Literal context: e.g. whenever we encounter an invalid type? Normally, # we want to report an error, but the caller may want to do more specialized @@ -239,6 +243,8 @@ def __init__( self.prohibit_self_type = prohibit_self_type # Allow variables typed as Type[Any] and type (useful for base classes). self.allow_type_any = allow_type_any + self.allow_type_var_tuple = False + self.allow_unpack = allow_unpack def lookup_qualified( self, name: str, ctx: Context, suppress_errors: bool = False @@ -275,7 +281,10 @@ def visit_unbound_type_nonoptional(self, t: UnboundType, defining_literal: bool) return PlaceholderType( node.fullname, self.anal_array( - t.args, allow_param_spec=True, allow_param_spec_literals=True + t.args, + allow_param_spec=True, + allow_param_spec_literals=True, + allow_unpack=True, ), t.line, ) @@ -363,6 +372,13 @@ def visit_unbound_type_nonoptional(self, t: UnboundType, defining_literal: bool) self.fail(f'TypeVarTuple "{t.name}" is unbound', t, code=codes.VALID_TYPE) return AnyType(TypeOfAny.from_error) assert isinstance(tvar_def, TypeVarTupleType) + if not self.allow_type_var_tuple: + self.fail( + f'TypeVarTuple "{t.name}" is only valid with an unpack', + t, + code=codes.VALID_TYPE, + ) + return AnyType(TypeOfAny.from_error) if len(t.args) > 0: self.fail( f'Type variable "{t.name}" used with arguments', t, code=codes.VALID_TYPE @@ -388,6 +404,7 @@ def visit_unbound_type_nonoptional(self, t: UnboundType, defining_literal: bool) t.args, allow_param_spec=True, allow_param_spec_literals=node.has_param_spec_type, + allow_unpack=node.tvar_tuple_index is not None, ) if node.has_param_spec_type and len(node.alias_tvars) == 1: an_args = self.pack_paramspec_args(an_args) @@ -442,11 +459,30 @@ def pack_paramspec_args(self, an_args: Sequence[Type]) -> list[Type]: # These do not support mypy_extensions VarArgs, etc. as they were already analyzed # TODO: should these be re-analyzed to get rid of this inconsistency? count = len(an_args) - if count > 0: - first_arg = get_proper_type(an_args[0]) - if not (count == 1 and isinstance(first_arg, (Parameters, ParamSpecType, AnyType))): - return [Parameters(an_args, [ARG_POS] * count, [None] * count)] - return list(an_args) + if count == 0: + return [] + if count == 1 and isinstance(get_proper_type(an_args[0]), AnyType): + # Single Any is interpreted as ..., rather that a single argument with Any type. + # I didn't find this in the PEP, but it sounds reasonable. 
+ return list(an_args) + if any(isinstance(a, (Parameters, ParamSpecType)) for a in an_args): + if len(an_args) > 1: + first_wrong = next( + arg for arg in an_args if isinstance(arg, (Parameters, ParamSpecType)) + ) + self.fail( + "Nested parameter specifications are not allowed", + first_wrong, + code=codes.VALID_TYPE, + ) + return [AnyType(TypeOfAny.from_error)] + return list(an_args) + first = an_args[0] + return [ + Parameters( + an_args, [ARG_POS] * count, [None] * count, line=first.line, column=first.column + ) + ] def cannot_resolve_type(self, t: UnboundType) -> None: # TODO: Move error message generation to messages.py. We'd first @@ -461,9 +497,9 @@ def apply_concatenate_operator(self, t: UnboundType) -> Type: self.api.fail("Concatenate needs type arguments", t, code=codes.VALID_TYPE) return AnyType(TypeOfAny.from_error) - # last argument has to be ParamSpec - ps = self.anal_type(t.args[-1], allow_param_spec=True) - if not isinstance(ps, ParamSpecType): + # Last argument has to be ParamSpec or Ellipsis. + ps = self.anal_type(t.args[-1], allow_param_spec=True, allow_ellipsis=True) + if not isinstance(ps, (ParamSpecType, Parameters)): if isinstance(ps, UnboundType) and self.allow_unbound_tvars: sym = self.lookup_qualified(ps.name, t) if sym is not None and isinstance(sym.node, ParamSpecExpr): @@ -477,19 +513,23 @@ def apply_concatenate_operator(self, t: UnboundType) -> Type: # TODO: this may not work well with aliases, if those worked. # Those should be special-cased. - elif ps.prefix.arg_types: + elif isinstance(ps, ParamSpecType) and ps.prefix.arg_types: self.api.fail("Nested Concatenates are invalid", t, code=codes.VALID_TYPE) args = self.anal_array(t.args[:-1]) - pre = ps.prefix + pre = ps.prefix if isinstance(ps, ParamSpecType) else ps # mypy can't infer this :( names: list[str | None] = [None] * len(args) pre = Parameters( - args + pre.arg_types, [ARG_POS] * len(args) + pre.arg_kinds, names + pre.arg_names + args + pre.arg_types, + [ARG_POS] * len(args) + pre.arg_kinds, + names + pre.arg_names, + line=t.line, + column=t.column, ) - return ps.copy_modified(prefix=pre) + return ps.copy_modified(prefix=pre) if isinstance(ps, ParamSpecType) else pre def try_analyze_special_unbound_type(self, t: UnboundType, fullname: str) -> Type | None: """Bind special type that is recognized through magic name such as 'typing.Any'. @@ -529,7 +569,9 @@ def try_analyze_special_unbound_type(self, t: UnboundType, fullname: str) -> Typ instance = self.named_type("builtins.tuple", [self.anal_type(t.args[0])]) instance.line = t.line return instance - return self.tuple_type(self.anal_array(t.args)) + return self.tuple_type( + self.anal_array(t.args, allow_unpack=True), line=t.line, column=t.column + ) elif fullname == "typing.Union": items = self.anal_array(t.args) return UnionType.make_union(items) @@ -629,7 +671,13 @@ def try_analyze_special_unbound_type(self, t: UnboundType, fullname: str) -> Typ if len(t.args) != 1: self.fail("Unpack[...] 
requires exactly one type argument", t) return AnyType(TypeOfAny.from_error) - return UnpackType(self.anal_type(t.args[0]), line=t.line, column=t.column) + if not self.allow_unpack: + self.fail(message_registry.INVALID_UNPACK_POSITION, t, code=codes.VALID_TYPE) + return AnyType(TypeOfAny.from_error) + self.allow_type_var_tuple = True + result = UnpackType(self.anal_type(t.args[0]), line=t.line, column=t.column) + self.allow_type_var_tuple = False + return result elif fullname in SELF_TYPE_NAMES: if t.args: self.fail("Self type cannot have type arguments", t) @@ -664,7 +712,7 @@ def analyze_type_with_type_info( if len(args) > 0 and info.fullname == "builtins.tuple": fallback = Instance(info, [AnyType(TypeOfAny.special_form)], ctx.line) - return TupleType(self.anal_array(args), fallback, ctx.line) + return TupleType(self.anal_array(args, allow_unpack=True), fallback, ctx.line) # Analyze arguments and (usually) construct Instance type. The # number of type arguments and their values are @@ -677,7 +725,10 @@ def analyze_type_with_type_info( instance = Instance( info, self.anal_array( - args, allow_param_spec=True, allow_param_spec_literals=info.has_param_spec_type + args, + allow_param_spec=True, + allow_param_spec_literals=info.has_param_spec_type, + allow_unpack=info.has_type_var_tuple_type, ), ctx.line, ctx.column, @@ -714,14 +765,16 @@ def analyze_type_with_type_info( return instantiate_type_alias( info.special_alias, # TODO: should we allow NamedTuples generic in ParamSpec? - self.anal_array(args), + self.anal_array(args, allow_unpack=True), self.fail, False, ctx, self.options, use_standard_error=True, ) - return tup.copy_modified(items=self.anal_array(tup.items), fallback=instance) + return tup.copy_modified( + items=self.anal_array(tup.items, allow_unpack=True), fallback=instance + ) td = info.typeddict_type if td is not None: # The class has a TypedDict[...] base class so it will be @@ -730,7 +783,7 @@ def analyze_type_with_type_info( return instantiate_type_alias( info.special_alias, # TODO: should we allow TypedDicts generic in ParamSpec? 
- self.anal_array(args), + self.anal_array(args, allow_unpack=True), self.fail, False, ctx, @@ -880,20 +933,21 @@ def visit_deleted_type(self, t: DeletedType) -> Type: return t def visit_type_list(self, t: TypeList) -> Type: - # paramspec literal (Z[[int, str, Whatever]]) + # Parameters literal (Z[[int, str, Whatever]]) if self.allow_param_spec_literals: params = self.analyze_callable_args(t) if params: ts, kinds, names = params # bind these types - return Parameters(self.anal_array(ts), kinds, names) + return Parameters(self.anal_array(ts), kinds, names, line=t.line, column=t.column) else: return AnyType(TypeOfAny.from_error) else: self.fail( 'Bracketed expression "[...]" is not valid as a type', t, code=codes.VALID_TYPE ) - self.note('Did you mean "List[...]"?', t) + if len(t.items) == 1: + self.note('Did you mean "List[...]"?', t) return AnyType(TypeOfAny.from_error) def visit_callable_argument(self, t: CallableArgument) -> Type: @@ -917,7 +971,10 @@ def visit_type_var_tuple(self, t: TypeVarTupleType) -> Type: return t def visit_unpack_type(self, t: UnpackType) -> Type: - raise NotImplementedError + if not self.allow_unpack: + self.fail(message_registry.INVALID_UNPACK_POSITION, t.type, code=codes.VALID_TYPE) + return AnyType(TypeOfAny.from_error) + return UnpackType(self.anal_type(t.type)) def visit_parameters(self, t: Parameters) -> Type: raise NotImplementedError("ParamSpec literals cannot have unbound TypeVars") @@ -937,7 +994,23 @@ def visit_callable_type(self, t: CallableType, nested: bool = True) -> Type: self.anal_star_arg_type(t.arg_types[-1], ARG_STAR2, nested=nested), ] else: - arg_types = self.anal_array(t.arg_types, nested=nested) + arg_types = self.anal_array(t.arg_types, nested=nested, allow_unpack=True) + star_index = None + if ARG_STAR in arg_kinds: + star_index = arg_kinds.index(ARG_STAR) + star2_index = None + if ARG_STAR2 in arg_kinds: + star2_index = arg_kinds.index(ARG_STAR2) + validated_args: list[Type] = [] + for i, at in enumerate(arg_types): + if isinstance(at, UnpackType) and i not in (star_index, star2_index): + self.fail( + message_registry.INVALID_UNPACK_POSITION, at, code=codes.VALID_TYPE + ) + validated_args.append(AnyType(TypeOfAny.from_error)) + else: + validated_args.append(at) + arg_types = validated_args # If there were multiple (invalid) unpacks, the arg types list will become shorter, # we need to trim the kinds/names as well to avoid crashes. 
arg_kinds = t.arg_kinds[: len(arg_types)] @@ -1009,7 +1082,7 @@ def anal_star_arg_type(self, t: Type, kind: ArgKind, nested: bool) -> Type: line=t.line, column=t.column, ) - return self.anal_type(t, nested=nested) + return self.anal_type(t, nested=nested, allow_unpack=True) def visit_overloaded(self, t: Overloaded) -> Type: # Overloaded types are manually constructed in semanal.py by analyzing the @@ -1048,7 +1121,7 @@ def visit_tuple_type(self, t: TupleType) -> Type: if t.partial_fallback.type else self.named_type("builtins.tuple", [any_type]) ) - return TupleType(self.anal_array(t.items), fallback, t.line) + return TupleType(self.anal_array(t.items, allow_unpack=True), fallback, t.line) def visit_typeddict_type(self, t: TypedDictType) -> Type: items = { @@ -1106,7 +1179,7 @@ def visit_partial_type(self, t: PartialType) -> Type: assert False, "Internal error: Unexpected partial type" def visit_ellipsis_type(self, t: EllipsisType) -> Type: - if self.allow_param_spec_literals: + if self.allow_ellipsis or self.allow_param_spec_literals: any_type = AnyType(TypeOfAny.explicit) return Parameters( [any_type, any_type], [ARG_STAR, ARG_STAR2], [None, None], is_ellipsis_args=True @@ -1174,7 +1247,7 @@ def analyze_callable_args_for_paramspec( def analyze_callable_args_for_concatenate( self, callable_args: Type, ret_type: Type, fallback: Instance - ) -> CallableType | None: + ) -> CallableType | AnyType | None: """Construct a 'Callable[C, RET]', where C is Concatenate[..., P], returning None if we cannot. """ @@ -1189,7 +1262,7 @@ def analyze_callable_args_for_concatenate( return None tvar_def = self.anal_type(callable_args, allow_param_spec=True) - if not isinstance(tvar_def, ParamSpecType): + if not isinstance(tvar_def, (ParamSpecType, Parameters)): if self.allow_unbound_tvars and isinstance(tvar_def, UnboundType): sym = self.lookup_qualified(tvar_def.name, callable_args) if sym is not None and isinstance(sym.node, ParamSpecExpr): @@ -1198,7 +1271,18 @@ def analyze_callable_args_for_concatenate( return callable_with_ellipsis( AnyType(TypeOfAny.explicit), ret_type=ret_type, fallback=fallback ) - return None + # Error was already given, so prevent further errors. + return AnyType(TypeOfAny.from_error) + if isinstance(tvar_def, Parameters): + # This comes from Concatenate[int, ...] + return CallableType( + arg_types=tvar_def.arg_types, + arg_names=tvar_def.arg_names, + arg_kinds=tvar_def.arg_kinds, + ret_type=ret_type, + fallback=fallback, + from_concatenate=True, + ) # ick, CallableType should take ParamSpecType prefix = tvar_def.prefix @@ -1244,9 +1328,23 @@ def analyze_callable_type(self, t: UnboundType) -> Type: ) else: # Callable[P, RET] (where P is ParamSpec) - maybe_ret = self.analyze_callable_args_for_paramspec( - callable_args, ret_type, fallback - ) or self.analyze_callable_args_for_concatenate(callable_args, ret_type, fallback) + with self.tvar_scope_frame(): + # Temporarily bind ParamSpecs to allow code like this: + # my_fun: Callable[Q, Foo[Q]] + # We usually do this later in visit_callable_type(), but the analysis + # below happens at very early stage. 
+ variables = [] + for name, tvar_expr in self.find_type_var_likes(callable_args): + variables.append(self.tvar_scope.bind_new(name, tvar_expr)) + maybe_ret = self.analyze_callable_args_for_paramspec( + callable_args, ret_type, fallback + ) or self.analyze_callable_args_for_concatenate( + callable_args, ret_type, fallback + ) + if isinstance(maybe_ret, CallableType): + maybe_ret = maybe_ret.copy_modified( + ret_type=ret_type.accept(self), variables=variables + ) if maybe_ret is None: # Callable[?, RET] (where ? is something invalid) self.fail( @@ -1256,10 +1354,12 @@ def analyze_callable_type(self, t: UnboundType) -> Type: code=codes.VALID_TYPE, ) self.note( - "See https://mypy.readthedocs.io/en/stable/kinds_of_types.html#callable-types-and-lambdas", # noqa: E501 + "See https://mypy.readthedocs.io/en/stable/kinds_of_types.html#callable-types-and-lambdas", t, ) return AnyType(TypeOfAny.from_error) + elif isinstance(maybe_ret, AnyType): + return maybe_ret ret = maybe_ret else: if self.options.disallow_any_generics: @@ -1270,12 +1370,22 @@ def analyze_callable_type(self, t: UnboundType) -> Type: assert isinstance(ret, CallableType) return ret.accept(self) + def refers_to_full_names(self, arg: UnboundType, names: Sequence[str]) -> bool: + sym = self.lookup_qualified(arg.name, arg) + if sym is not None: + if sym.fullname in names: + return True + return False + def analyze_callable_args( self, arglist: TypeList ) -> tuple[list[Type], list[ArgKind], list[str | None]] | None: args: list[Type] = [] kinds: list[ArgKind] = [] names: list[str | None] = [] + seen_unpack = False + unpack_types: list[Type] = [] + invalid_unpacks = [] for arg in arglist.items: if isinstance(arg, CallableArgument): args.append(arg.typ) @@ -1296,20 +1406,42 @@ def analyze_callable_args( if arg.name is not None and kind.is_star(): self.fail(f"{arg.constructor} arguments should not have names", arg) return None - elif isinstance(arg, UnboundType): - kind = ARG_POS - # Potentially a unpack. - sym = self.lookup_qualified(arg.name, arg) - if sym is not None: - if sym.fullname in ("typing_extensions.Unpack", "typing.Unpack"): - kind = ARG_STAR - args.append(arg) - kinds.append(kind) - names.append(None) + elif ( + isinstance(arg, UnboundType) + and self.refers_to_full_names(arg, ("typing_extensions.Unpack", "typing.Unpack")) + or isinstance(arg, UnpackType) + ): + if seen_unpack: + # Multiple unpacks, preserve them, so we can give an error later. + invalid_unpacks.append(arg) + continue + seen_unpack = True + unpack_types.append(arg) else: - args.append(arg) - kinds.append(ARG_POS) - names.append(None) + if seen_unpack: + unpack_types.append(arg) + else: + args.append(arg) + kinds.append(ARG_POS) + names.append(None) + if seen_unpack: + if len(unpack_types) == 1: + args.append(unpack_types[0]) + else: + first = unpack_types[0] + if isinstance(first, UnpackType): + # UnpackType doesn't have its own line/column numbers, + # so use the unpacked type for error messages. + first = first.type + args.append( + UnpackType(self.tuple_type(unpack_types, line=first.line, column=first.column)) + ) + kinds.append(ARG_STAR) + names.append(None) + for arg in invalid_unpacks: + args.append(arg) + kinds.append(ARG_STAR) + names.append(None) # Note that arglist below is only used for error context. 
check_arg_names(names, [arglist] * len(args), self.fail, "Callable") check_arg_kinds(kinds, [arglist] * len(args), self.fail) @@ -1504,26 +1636,45 @@ def anal_array( *, allow_param_spec: bool = False, allow_param_spec_literals: bool = False, + allow_unpack: bool = False, ) -> list[Type]: old_allow_param_spec_literals = self.allow_param_spec_literals self.allow_param_spec_literals = allow_param_spec_literals res: list[Type] = [] for t in a: - res.append(self.anal_type(t, nested, allow_param_spec=allow_param_spec)) + res.append( + self.anal_type( + t, nested, allow_param_spec=allow_param_spec, allow_unpack=allow_unpack + ) + ) self.allow_param_spec_literals = old_allow_param_spec_literals return self.check_unpacks_in_list(res) - def anal_type(self, t: Type, nested: bool = True, *, allow_param_spec: bool = False) -> Type: + def anal_type( + self, + t: Type, + nested: bool = True, + *, + allow_param_spec: bool = False, + allow_unpack: bool = False, + allow_ellipsis: bool = False, + ) -> Type: if nested: self.nesting_level += 1 old_allow_required = self.allow_required self.allow_required = False + old_allow_ellipsis = self.allow_ellipsis + self.allow_ellipsis = allow_ellipsis + old_allow_unpack = self.allow_unpack + self.allow_unpack = allow_unpack try: analyzed = t.accept(self) finally: if nested: self.nesting_level -= 1 self.allow_required = old_allow_required + self.allow_ellipsis = old_allow_ellipsis + self.allow_unpack = old_allow_unpack if ( not allow_param_spec and isinstance(analyzed, ParamSpecType) @@ -1532,6 +1683,7 @@ def anal_type(self, t: Type, nested: bool = True, *, allow_param_spec: bool = Fa if analyzed.prefix.arg_types: self.fail("Invalid location for Concatenate", t, code=codes.VALID_TYPE) self.note("You can use Concatenate as the first argument to Callable", t) + analyzed = AnyType(TypeOfAny.from_error) else: self.fail( f'Invalid location for ParamSpec "{analyzed.name}"', t, code=codes.VALID_TYPE @@ -1541,6 +1693,7 @@ def anal_type(self, t: Type, nested: bool = True, *, allow_param_spec: bool = Fa "'Callable[{}, int]'".format(analyzed.name), t, ) + analyzed = AnyType(TypeOfAny.from_error) return analyzed def anal_var_def(self, var_def: TypeVarLikeType) -> TypeVarLikeType: @@ -1598,9 +1751,11 @@ def check_unpacks_in_list(self, items: list[Type]) -> list[Type]: self.fail("More than one Unpack in a type is not allowed", final_unpack) return new_items - def tuple_type(self, items: list[Type]) -> TupleType: + def tuple_type(self, items: list[Type], line: int, column: int) -> TupleType: any_type = AnyType(TypeOfAny.special_form) - return TupleType(items, fallback=self.named_type("builtins.tuple", [any_type])) + return TupleType( + items, fallback=self.named_type("builtins.tuple", [any_type]), line=line, column=column + ) TypeVarLikeList = List[Tuple[str, TypeVarLikeExpr]] @@ -1703,6 +1858,13 @@ def fix_instance( fix_type_var_tuple_argument(any_type, t) return + + if t.type.has_type_var_tuple_type: + # This can be only correctly analyzed when all arguments are fully + # analyzed, because there may be a variadic item among them, so we + # do this in semanal_typeargs.py. + return + # Invalid number of type parameters. fail( wrong_type_arg_count(len(t.type.type_vars), str(len(t.args)), t.type.name), @@ -1713,7 +1875,6 @@ def fix_instance( # otherwise the type checker may crash as it expects # things to be right. 
t.args = tuple(AnyType(TypeOfAny.from_error) for _ in t.type.type_vars) - fix_type_var_tuple_argument(AnyType(TypeOfAny.from_error), t) t.invalid = True @@ -1788,7 +1949,10 @@ def instantiate_type_alias( # TODO: we need to check args validity w.r.t alias.alias_tvars. # Otherwise invalid instantiations will be allowed in runtime context. # Note: in type context, these will be still caught by semanal_typeargs. - typ = TypeAliasType(node, args, ctx.line, ctx.column) + # Type aliases are special, since they can be expanded during semantic analysis, + # so we need to normalize them as soon as possible. + # TODO: can this cause an infinite recursion? + typ = TypeAliasType(node, flatten_nested_tuples(args), ctx.line, ctx.column) assert typ.alias is not None # HACK: Implement FlexibleAlias[T, typ] by expanding it to typ here. if ( @@ -1843,19 +2007,6 @@ def set_any_tvars( return TypeAliasType(node, args, newline, newcolumn) -def remove_dups(tvars: list[T]) -> list[T]: - if len(tvars) <= 1: - return tvars - # Get unique elements in order of appearance - all_tvars: set[T] = set() - new_tvars: list[T] = [] - for t in tvars: - if t not in all_tvars: - new_tvars.append(t) - all_tvars.add(t) - return new_tvars - - def flatten_tvars(lists: list[list[T]]) -> list[T]: result: list[T] = [] for lst in lists: diff --git a/mypy/typeops.py b/mypy/typeops.py index ee544c6740bb..0e0bc348942e 100644 --- a/mypy/typeops.py +++ b/mypy/typeops.py @@ -76,12 +76,18 @@ def is_recursive_pair(s: Type, t: Type) -> bool: isinstance(get_proper_type(t), (Instance, UnionType)) or isinstance(t, TypeAliasType) and t.is_recursive + # Tuple types are special, they can cause an infinite recursion even if + # the other type is not recursive, because of the tuple fallback that is + # calculated "on the fly". + or isinstance(get_proper_type(s), TupleType) ) if isinstance(t, TypeAliasType) and t.is_recursive: return ( isinstance(get_proper_type(s), (Instance, UnionType)) or isinstance(s, TypeAliasType) and s.is_recursive + # Same as above. + or isinstance(get_proper_type(t), TupleType) ) return False @@ -99,19 +105,18 @@ def tuple_fallback(typ: TupleType) -> Instance: unpacked_type = get_proper_type(item.type) if isinstance(unpacked_type, TypeVarTupleType): items.append(unpacked_type.upper_bound) - elif isinstance(unpacked_type, TupleType): - # TODO: might make sense to do recursion here to support nested unpacks - # of tuple constants - items.extend(unpacked_type.items) elif ( isinstance(unpacked_type, Instance) and unpacked_type.type.fullname == "builtins.tuple" ): items.append(unpacked_type.args[0]) + elif isinstance(unpacked_type, (AnyType, UninhabitedType)): + continue else: - raise NotImplementedError + raise NotImplementedError(unpacked_type) else: items.append(item) + # TODO: we should really use a union here, tuple types are special. 
return Instance(info, [join_type_list(items)], extra_attrs=typ.partial_fallback.extra_attrs) @@ -297,7 +302,7 @@ class B(A): pass return cast(F, func) self_param_type = get_proper_type(func.arg_types[0]) - variables: Sequence[TypeVarLikeType] = [] + variables: Sequence[TypeVarLikeType] if func.variables and supported_self_type(self_param_type): from mypy.infer import infer_type_arguments @@ -306,44 +311,40 @@ class B(A): pass original_type = erase_to_bound(self_param_type) original_type = get_proper_type(original_type) - all_ids = func.type_var_ids() - typeargs = infer_type_arguments(all_ids, self_param_type, original_type, is_supertype=True) + # Find which of method type variables appear in the type of "self". + self_ids = {tv.id for tv in get_all_type_vars(self_param_type)} + self_vars = [tv for tv in func.variables if tv.id in self_ids] + + # Solve for these type arguments using the actual class or instance type. + typeargs = infer_type_arguments( + self_vars, self_param_type, original_type, is_supertype=True + ) if ( is_classmethod - # TODO: why do we need the extra guards here? and any(isinstance(get_proper_type(t), UninhabitedType) for t in typeargs) and isinstance(original_type, (Instance, TypeVarType, TupleType)) ): - # In case we call a classmethod through an instance x, fallback to type(x) + # In case we call a classmethod through an instance x, fallback to type(x). typeargs = infer_type_arguments( - all_ids, self_param_type, TypeType(original_type), is_supertype=True + self_vars, self_param_type, TypeType(original_type), is_supertype=True ) - ids = [tid for tid in all_ids if any(tid == t.id for t in get_type_vars(self_param_type))] - - # Technically, some constrains might be unsolvable, make them . + # Update the method signature with the solutions found. + # Technically, some constraints might be unsolvable, make them . 
to_apply = [t if t is not None else UninhabitedType() for t in typeargs] - - def expand(target: Type) -> Type: - return expand_type(target, {id: to_apply[all_ids.index(id)] for id in ids}) - - arg_types = [expand(x) for x in func.arg_types[1:]] - ret_type = expand(func.ret_type) - variables = [v for v in func.variables if v.id not in ids] + func = expand_type(func, {tv.id: arg for tv, arg in zip(self_vars, to_apply)}) + variables = [v for v in func.variables if v not in self_vars] else: - arg_types = func.arg_types[1:] - ret_type = func.ret_type variables = func.variables original_type = get_proper_type(original_type) if isinstance(original_type, CallableType) and original_type.is_type_obj(): original_type = TypeType.make_normalized(original_type.ret_type) res = func.copy_modified( - arg_types=arg_types, + arg_types=func.arg_types[1:], arg_kinds=func.arg_kinds[1:], arg_names=func.arg_names[1:], variables=variables, - ret_type=ret_type, bound_args=[original_type], ) return cast(F, res) @@ -594,10 +595,8 @@ def true_only(t: Type) -> ProperType: else: ret_type = _get_type_special_method_bool_ret_type(t) - if ret_type and ret_type.can_be_false and not ret_type.can_be_true: - new_t = copy_type(t) - new_t.can_be_true = False - return new_t + if ret_type and not ret_type.can_be_true: + return UninhabitedType(line=t.line, column=t.column) new_t = copy_type(t) new_t.can_be_false = False @@ -629,10 +628,8 @@ def false_only(t: Type) -> ProperType: else: ret_type = _get_type_special_method_bool_ret_type(t) - if ret_type and ret_type.can_be_true and not ret_type.can_be_false: - new_t = copy_type(t) - new_t.can_be_false = False - return new_t + if ret_type and not ret_type.can_be_false: + return UninhabitedType(line=t.line) new_t = copy_type(t) new_t.can_be_true = False @@ -704,7 +701,7 @@ def callable_type( fdef: FuncItem, fallback: Instance, ret_type: Type | None = None ) -> CallableType: # TODO: somewhat unfortunate duplication with prepare_method_signature in semanal - if fdef.info and not fdef.is_static and fdef.arg_names: + if fdef.info and (not fdef.is_static or fdef.name == "__new__") and fdef.arg_names: self_type: Type = fill_typevars(fdef.info) if fdef.is_class or fdef.name == "__new__": self_type = TypeType.make_normalized(self_type) @@ -944,22 +941,34 @@ def coerce_to_literal(typ: Type) -> Type: def get_type_vars(tp: Type) -> list[TypeVarType]: - return tp.accept(TypeVarExtractor()) + return cast("list[TypeVarType]", tp.accept(TypeVarExtractor())) + +def get_all_type_vars(tp: Type) -> list[TypeVarLikeType]: + # TODO: should we always use this function instead of get_type_vars() above? 
+ return tp.accept(TypeVarExtractor(include_all=True)) -class TypeVarExtractor(TypeQuery[List[TypeVarType]]): - def __init__(self) -> None: + +class TypeVarExtractor(TypeQuery[List[TypeVarLikeType]]): + def __init__(self, include_all: bool = False) -> None: super().__init__(self._merge) + self.include_all = include_all - def _merge(self, iter: Iterable[list[TypeVarType]]) -> list[TypeVarType]: + def _merge(self, iter: Iterable[list[TypeVarLikeType]]) -> list[TypeVarLikeType]: out = [] for item in iter: out.extend(item) return out - def visit_type_var(self, t: TypeVarType) -> list[TypeVarType]: + def visit_type_var(self, t: TypeVarType) -> list[TypeVarLikeType]: return [t] + def visit_param_spec(self, t: ParamSpecType) -> list[TypeVarLikeType]: + return [t] if self.include_all else [] + + def visit_type_var_tuple(self, t: TypeVarTupleType) -> list[TypeVarLikeType]: + return [t] if self.include_all else [] + def custom_special_method(typ: Type, name: str, check_all: bool = False) -> bool: """Does this type have a custom special method such as __format__() or __eq__()? diff --git a/mypy/types.py b/mypy/types.py index 5fbdd385826c..fb360fb892f1 100644 --- a/mypy/types.py +++ b/mypy/types.py @@ -9,6 +9,7 @@ Any, ClassVar, Dict, + Final, Iterable, NamedTuple, NewType, @@ -17,7 +18,7 @@ Union, cast, ) -from typing_extensions import Final, Self, TypeAlias as _TypeAlias, TypeGuard, overload +from typing_extensions import Self, TypeAlias as _TypeAlias, TypeGuard, overload import mypy.nodes from mypy.bogus_type import Bogus @@ -150,7 +151,12 @@ ) # Mypyc fixed-width native int types (compatible with builtins.int) -MYPYC_NATIVE_INT_NAMES: Final = ("mypy_extensions.i64", "mypy_extensions.i32") +MYPYC_NATIVE_INT_NAMES: Final = ( + "mypy_extensions.i64", + "mypy_extensions.i32", + "mypy_extensions.i16", + "mypy_extensions.u8", +) DATACLASS_TRANSFORM_NAMES: Final = ( "typing.dataclass_transform", @@ -254,7 +260,7 @@ def can_be_false_default(self) -> bool: return True def accept(self, visitor: TypeVisitor[T]) -> T: - raise RuntimeError("Not implemented") + raise RuntimeError("Not implemented", type(self)) def __repr__(self) -> str: return self.accept(TypeStrVisitor(options=Options())) @@ -316,6 +322,7 @@ def _expand_once(self) -> Type: assert isinstance(self.alias.target, Instance) # type: ignore[misc] return self.alias.target.copy_modified(args=self.args) + # TODO: this logic duplicates the one in expand_type_by_instance(). if self.alias.tvar_tuple_index is None: mapping = {v.id: s for (v, s) in zip(self.alias.alias_tvars, self.args)} else: @@ -1038,8 +1045,12 @@ class UnpackType(ProperType): """Type operator Unpack from PEP646. Can be either with Unpack[] or unpacking * syntax. - The inner type should be either a TypeVarTuple, a constant size - tuple, or a variable length tuple, or a union of one of those. + The inner type should be either a TypeVarTuple, or a variable length tuple. + In an exceptional case of callable star argument it can be a fixed length tuple. + + Note: the above restrictions are only guaranteed by normalizations after semantic + analysis, if your code needs to handle UnpackType *during* semantic analysis, it is + wild west, technically anything can be present in the wrapped type. """ __slots__ = ["type"] @@ -1538,9 +1549,6 @@ class FormalArgument(NamedTuple): required: bool -# TODO: should this take bound typevars too? what would this take? -# ex: class Z(Generic[P, T]): ...; Z[[V], V] -# What does a typevar even mean in this context? 
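Aside for readers following the `UnpackType` and `Parameters` hunks around this point: both internal types correspond to user-level spellings from PEP 612 and PEP 646. The sketch below shows only those spellings; the names `Z`, `z`, `first`, `P`, `T` and `Ts` are illustrative, `typing_extensions` is assumed on interpreters older than 3.10/3.11, and nothing here is mypy API — it is just the kind of annotation the changed code analyzes.

```python
from __future__ import annotations

from typing import Generic, Tuple, TypeVar
from typing_extensions import ParamSpec, TypeVarTuple, Unpack

P = ParamSpec("P")
T = TypeVar("T")
Ts = TypeVarTuple("Ts")


class Z(Generic[P, T]):
    """A class parameterized by a ParamSpec and an ordinary TypeVar."""


# PEP 612: the bracketed argument list is what mypy stores internally as a
# Parameters "literal" (compare the Z[[int, str, Whatever]] comment in typeanal).
z: Z[[int, str], bool]


# PEP 646: Unpack wraps a TypeVarTuple or a variable-length tuple; mypy wraps
# the inner part in UnpackType.
def first(*args: Unpack[Tuple[int, Unpack[Ts]]]) -> int:
    return args[0]
```

The bracketed list in `Z[[int, str], bool]` is the surface form of a `Parameters` literal, while the argument to `Unpack[...]` is what ends up wrapped in `UnpackType`.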
class Parameters(ProperType): """Type that represents the parameters to a function. @@ -1552,7 +1560,10 @@ class Parameters(ProperType): "arg_names", "min_args", "is_ellipsis_args", + # TODO: variables don't really belong here, but they are used to allow hacky support + # for forall . Foo[[x: T], T] by capturing generic callable with ParamSpec, see #15909 "variables", + "imprecise_arg_kinds", ) def __init__( @@ -1563,6 +1574,7 @@ def __init__( *, variables: Sequence[TypeVarLikeType] | None = None, is_ellipsis_args: bool = False, + imprecise_arg_kinds: bool = False, line: int = -1, column: int = -1, ) -> None: @@ -1571,9 +1583,11 @@ def __init__( self.arg_kinds = arg_kinds self.arg_names = list(arg_names) assert len(arg_types) == len(arg_kinds) == len(arg_names) + assert not any(isinstance(t, Parameters) for t in arg_types) self.min_args = arg_kinds.count(ARG_POS) self.is_ellipsis_args = is_ellipsis_args self.variables = variables or [] + self.imprecise_arg_kinds = imprecise_arg_kinds def copy_modified( self, @@ -1583,6 +1597,7 @@ def copy_modified( *, variables: Bogus[Sequence[TypeVarLikeType]] = _dummy, is_ellipsis_args: Bogus[bool] = _dummy, + imprecise_arg_kinds: Bogus[bool] = _dummy, ) -> Parameters: return Parameters( arg_types=arg_types if arg_types is not _dummy else self.arg_types, @@ -1592,9 +1607,14 @@ def copy_modified( is_ellipsis_args if is_ellipsis_args is not _dummy else self.is_ellipsis_args ), variables=variables if variables is not _dummy else self.variables, + imprecise_arg_kinds=( + imprecise_arg_kinds + if imprecise_arg_kinds is not _dummy + else self.imprecise_arg_kinds + ), ) - # the following are copied from CallableType. Is there a way to decrease code duplication? + # TODO: here is a lot of code duplication with Callable type, fix this. def var_arg(self) -> FormalArgument | None: """The formal argument for *args.""" for position, (type, kind) in enumerate(zip(self.arg_types, self.arg_kinds)): @@ -1688,6 +1708,7 @@ def serialize(self) -> JsonDict: "arg_kinds": [int(x.value) for x in self.arg_kinds], "arg_names": self.arg_names, "variables": [tv.serialize() for tv in self.variables], + "imprecise_arg_kinds": self.imprecise_arg_kinds, } @classmethod @@ -1698,6 +1719,7 @@ def deserialize(cls, data: JsonDict) -> Parameters: [ArgKind(x) for x in data["arg_kinds"]], data["arg_names"], variables=[cast(TypeVarLikeType, deserialize_type(v)) for v in data["variables"]], + imprecise_arg_kinds=data["imprecise_arg_kinds"], ) def __hash__(self) -> int: @@ -1754,6 +1776,7 @@ class CallableType(FunctionLike): "type_guard", # T, if -> TypeGuard[T] (ret_type is bool in this case). "from_concatenate", # whether this callable is from a concatenate object # (this is used for error messages) + "imprecise_arg_kinds", "unpack_kwargs", # Was an Unpack[...] with **kwargs used to define this callable? ) @@ -1778,10 +1801,16 @@ def __init__( def_extras: dict[str, Any] | None = None, type_guard: Type | None = None, from_concatenate: bool = False, + imprecise_arg_kinds: bool = False, unpack_kwargs: bool = False, ) -> None: super().__init__(line, column) assert len(arg_types) == len(arg_kinds) == len(arg_names) + for t, k in zip(arg_types, arg_kinds): + if isinstance(t, ParamSpecType): + assert not t.prefix.arg_types + # TODO: should we assert that only ARG_STAR contain ParamSpecType? + # See testParamSpecJoin, that relies on passing e.g `P.args` as plain argument. 
if variables is None: variables = [] self.arg_types = list(arg_types) @@ -1799,6 +1828,7 @@ def __init__( self.special_sig = special_sig self.from_type_type = from_type_type self.from_concatenate = from_concatenate + self.imprecise_arg_kinds = imprecise_arg_kinds if not bound_args: bound_args = () self.bound_args = bound_args @@ -1841,6 +1871,7 @@ def copy_modified( def_extras: Bogus[dict[str, Any]] = _dummy, type_guard: Bogus[Type | None] = _dummy, from_concatenate: Bogus[bool] = _dummy, + imprecise_arg_kinds: Bogus[bool] = _dummy, unpack_kwargs: Bogus[bool] = _dummy, ) -> CT: modified = CallableType( @@ -1866,6 +1897,11 @@ def copy_modified( from_concatenate=( from_concatenate if from_concatenate is not _dummy else self.from_concatenate ), + imprecise_arg_kinds=( + imprecise_arg_kinds + if imprecise_arg_kinds is not _dummy + else self.imprecise_arg_kinds + ), unpack_kwargs=unpack_kwargs if unpack_kwargs is not _dummy else self.unpack_kwargs, ) # Optimization: Only NewTypes are supported as subtypes since @@ -2027,36 +2063,20 @@ def param_spec(self) -> ParamSpecType | None: if not isinstance(arg_type, ParamSpecType): return None - # sometimes paramspectypes are analyzed in from mysterious places, - # e.g. def f(prefix..., *args: P.args, **kwargs: P.kwargs) -> ...: ... - prefix = arg_type.prefix - if not prefix.arg_types: - # TODO: confirm that all arg kinds are positional - prefix = Parameters(self.arg_types[:-2], self.arg_kinds[:-2], self.arg_names[:-2]) - + # Prepend prefix for def f(prefix..., *args: P.args, **kwargs: P.kwargs) -> ... + # TODO: confirm that all arg kinds are positional + prefix = Parameters(self.arg_types[:-2], self.arg_kinds[:-2], self.arg_names[:-2]) return arg_type.copy_modified(flavor=ParamSpecFlavor.BARE, prefix=prefix) - def expand_param_spec( - self, c: CallableType | Parameters, no_prefix: bool = False - ) -> CallableType: + def expand_param_spec(self, c: Parameters) -> CallableType: variables = c.variables - - if no_prefix: - return self.copy_modified( - arg_types=c.arg_types, - arg_kinds=c.arg_kinds, - arg_names=c.arg_names, - is_ellipsis_args=c.is_ellipsis_args, - variables=[*variables, *self.variables], - ) - else: - return self.copy_modified( - arg_types=self.arg_types[:-2] + c.arg_types, - arg_kinds=self.arg_kinds[:-2] + c.arg_kinds, - arg_names=self.arg_names[:-2] + c.arg_names, - is_ellipsis_args=c.is_ellipsis_args, - variables=[*variables, *self.variables], - ) + return self.copy_modified( + arg_types=self.arg_types[:-2] + c.arg_types, + arg_kinds=self.arg_kinds[:-2] + c.arg_kinds, + arg_names=self.arg_names[:-2] + c.arg_names, + is_ellipsis_args=c.is_ellipsis_args, + variables=[*variables, *self.variables], + ) def with_unpacked_kwargs(self) -> NormalizedCallableType: if not self.unpack_kwargs: @@ -2079,6 +2099,72 @@ def with_unpacked_kwargs(self) -> NormalizedCallableType: ) ) + def with_normalized_var_args(self) -> Self: + var_arg = self.var_arg() + if not var_arg or not isinstance(var_arg.typ, UnpackType): + return self + unpacked = get_proper_type(var_arg.typ.type) + if not isinstance(unpacked, TupleType): + # Note that we don't normalize *args: *tuple[X, ...] -> *args: X, + # this should be done once in semanal_typeargs.py for user-defined types, + # and we ourselves should never construct such type. + return self + unpack_index = find_unpack_in_list(unpacked.items) + if unpack_index == 0 and len(unpacked.items) > 1: + # Already normalized. 
+ return self + + # Boilerplate: + var_arg_index = self.arg_kinds.index(ARG_STAR) + types_prefix = self.arg_types[:var_arg_index] + kinds_prefix = self.arg_kinds[:var_arg_index] + names_prefix = self.arg_names[:var_arg_index] + types_suffix = self.arg_types[var_arg_index + 1 :] + kinds_suffix = self.arg_kinds[var_arg_index + 1 :] + names_suffix = self.arg_names[var_arg_index + 1 :] + no_name: str | None = None # to silence mypy + + # Now we have something non-trivial to do. + if unpack_index is None: + # Plain *Tuple[X, Y, Z] -> replace with ARG_POS completely + types_middle = unpacked.items + kinds_middle = [ARG_POS] * len(unpacked.items) + names_middle = [no_name] * len(unpacked.items) + else: + # *Tuple[X, *Ts, Y, Z] or *Tuple[X, *tuple[T, ...], X, Z], here + # we replace the prefix by ARG_POS (this is how some places expect + # Callables to be represented) + nested_unpack = unpacked.items[unpack_index] + assert isinstance(nested_unpack, UnpackType) + nested_unpacked = get_proper_type(nested_unpack.type) + if unpack_index == len(unpacked.items) - 1: + # Normalize also single item tuples like + # *args: *Tuple[*tuple[X, ...]] -> *args: X + # *args: *Tuple[*Ts] -> *args: *Ts + # This may be not strictly necessary, but these are very verbose. + if isinstance(nested_unpacked, Instance): + assert nested_unpacked.type.fullname == "builtins.tuple" + new_unpack = nested_unpacked.args[0] + else: + if not isinstance(nested_unpacked, TypeVarTupleType): + # We found a non-nomralized tuple type, this means this method + # is called during semantic analysis (e.g. from get_proper_type()) + # there is no point in normalizing callables at this stage. + return self + new_unpack = nested_unpack + else: + new_unpack = UnpackType( + unpacked.copy_modified(items=unpacked.items[unpack_index:]) + ) + types_middle = unpacked.items[:unpack_index] + [new_unpack] + kinds_middle = [ARG_POS] * unpack_index + [ARG_STAR] + names_middle = [no_name] * unpack_index + [self.arg_names[var_arg_index]] + return self.copy_modified( + arg_types=types_prefix + types_middle + types_suffix, + arg_kinds=kinds_prefix + kinds_middle + kinds_suffix, + arg_names=names_prefix + names_middle + names_suffix, + ) + def __hash__(self) -> int: # self.is_type_obj() will fail if self.fallback.type is a FakeInfo if isinstance(self.fallback.type, FakeInfo): @@ -2132,6 +2218,7 @@ def serialize(self) -> JsonDict: "def_extras": dict(self.def_extras), "type_guard": self.type_guard.serialize() if self.type_guard is not None else None, "from_concatenate": self.from_concatenate, + "imprecise_arg_kinds": self.imprecise_arg_kinds, "unpack_kwargs": self.unpack_kwargs, } @@ -2155,6 +2242,7 @@ def deserialize(cls, data: JsonDict) -> CallableType: deserialize_type(data["type_guard"]) if data["type_guard"] is not None else None ), from_concatenate=data["from_concatenate"], + imprecise_arg_kinds=data["imprecise_arg_kinds"], unpack_kwargs=data["unpack_kwargs"], ) @@ -2433,6 +2521,7 @@ def copy_modified( *, fallback: Instance | None = None, item_types: list[Type] | None = None, + item_names: list[str] | None = None, required_keys: set[str] | None = None, ) -> TypedDictType: if fallback is None: @@ -2443,6 +2532,9 @@ def copy_modified( items = dict(zip(self.items, item_types)) if required_keys is None: required_keys = self.required_keys + if item_names is not None: + items = {k: v for (k, v) in items.items() if k in item_names} + required_keys &= set(item_names) return TypedDictType(items, required_keys, fallback, self.line, self.column) def 
create_anonymous_fallback(self) -> Instance: @@ -2957,7 +3049,7 @@ def get_proper_types( # to make it easier to gradually get modules working with mypyc. # Import them here, after the types are defined. # This is intended as a re-export also. -from mypy.type_visitor import ( # noqa: F811,F401 +from mypy.type_visitor import ( # noqa: F811 ALL_STRATEGY as ALL_STRATEGY, ANY_STRATEGY as ANY_STRATEGY, BoolTypeQuery as BoolTypeQuery, @@ -3187,7 +3279,7 @@ def visit_overloaded(self, t: Overloaded) -> str: return f"Overload({', '.join(a)})" def visit_tuple_type(self, t: TupleType) -> str: - s = self.list_str(t.items) + s = self.list_str(t.items) or "()" tuple_name = "tuple" if self.options.use_lowercase_names() else "Tuple" if t.partial_fallback and t.partial_fallback.type: fallback_name = t.partial_fallback.type.fullname @@ -3422,6 +3514,20 @@ def flatten_nested_unions( return flat_items +def find_unpack_in_list(items: Sequence[Type]) -> int | None: + unpack_index: int | None = None + for i, item in enumerate(items): + if isinstance(item, UnpackType): + # We cannot fail here, so we must check this in an earlier + # semanal phase. + # Funky code here avoids mypyc narrowing the type of unpack_index. + old_index = unpack_index + assert old_index is None + # Don't return so that we can also sanity check there is only one. + unpack_index = i + return unpack_index + + def flatten_nested_tuples(types: Sequence[Type]) -> list[Type]: """Recursively flatten TupleTypes nested with Unpack. @@ -3475,6 +3581,30 @@ def callable_with_ellipsis(any_type: AnyType, ret_type: Type, fallback: Instance ) +def remove_dups(types: list[T]) -> list[T]: + if len(types) <= 1: + return types + # Get unique elements in order of appearance + all_types: set[T] = set() + new_types: list[T] = [] + for t in types: + if t not in all_types: + new_types.append(t) + all_types.add(t) + return new_types + + +def type_vars_as_args(type_vars: Sequence[TypeVarLikeType]) -> tuple[Type, ...]: + """Represent type variables as they would appear in a type argument list.""" + args: list[Type] = [] + for tv in type_vars: + if isinstance(tv, TypeVarTupleType): + args.append(UnpackType(tv)) + else: + args.append(tv) + return tuple(args) + + # This cyclic import is unfortunate, but to avoid it we would need to move away all uses # of get_proper_type() from types.py. Majority of them have been removed, but few remaining # are quite tricky to get rid of, but ultimately we want to do it at some point. diff --git a/mypy/types_utils.py b/mypy/types_utils.py index 43bca05d6bf9..f289ac3e9ed1 100644 --- a/mypy/types_utils.py +++ b/mypy/types_utils.py @@ -54,7 +54,7 @@ def strip_type(typ: Type) -> Type: def is_invalid_recursive_alias(seen_nodes: set[TypeAlias], target: Type) -> bool: - """Flag aliases like A = Union[int, A] (and similar mutual aliases). + """Flag aliases like A = Union[int, A], T = tuple[int, *T] (and similar mutual aliases). Such aliases don't make much sense, and cause problems in later phases. 
""" @@ -64,9 +64,15 @@ def is_invalid_recursive_alias(seen_nodes: set[TypeAlias], target: Type) -> bool assert target.alias, f"Unfixed type alias {target.type_ref}" return is_invalid_recursive_alias(seen_nodes | {target.alias}, get_proper_type(target)) assert isinstance(target, ProperType) - if not isinstance(target, UnionType): + if not isinstance(target, (UnionType, TupleType)): return False - return any(is_invalid_recursive_alias(seen_nodes, item) for item in target.items) + if isinstance(target, UnionType): + return any(is_invalid_recursive_alias(seen_nodes, item) for item in target.items) + for item in target.items: + if isinstance(item, UnpackType): + if is_invalid_recursive_alias(seen_nodes, item.type): + return True + return False def is_bad_type_type_item(item: Type) -> bool: @@ -101,10 +107,10 @@ def is_generic_instance(tp: Type) -> bool: return isinstance(tp, Instance) and bool(tp.args) -def is_optional(t: Type) -> bool: +def is_overlapping_none(t: Type) -> bool: t = get_proper_type(t) - return isinstance(t, UnionType) and any( - isinstance(get_proper_type(e), NoneType) for e in t.items + return isinstance(t, NoneType) or ( + isinstance(t, UnionType) and any(isinstance(get_proper_type(e), NoneType) for e in t.items) ) diff --git a/mypy/typeshed/stdlib/VERSIONS b/mypy/typeshed/stdlib/VERSIONS index 86e8da78677c..49433e346765 100644 --- a/mypy/typeshed/stdlib/VERSIONS +++ b/mypy/typeshed/stdlib/VERSIONS @@ -112,7 +112,7 @@ dbm: 2.7- decimal: 2.7- difflib: 2.7- dis: 2.7- -distutils: 2.7- +distutils: 2.7-3.11 distutils.command.bdist_msi: 2.7-3.10 distutils.command.bdist_wininst: 2.7-3.9 doctest: 2.7- @@ -147,7 +147,7 @@ html: 3.0- http: 3.0- imaplib: 2.7- imghdr: 2.7- -imp: 2.7- +imp: 2.7-3.11 importlib: 2.7- importlib.metadata: 3.8- importlib.metadata._meta: 3.10- diff --git a/mypy/typeshed/stdlib/_ast.pyi b/mypy/typeshed/stdlib/_ast.pyi index 7bc47266d713..05e2a08fdc88 100644 --- a/mypy/typeshed/stdlib/_ast.pyi +++ b/mypy/typeshed/stdlib/_ast.pyi @@ -1,13 +1,14 @@ import sys +import typing_extensions from typing import Any, ClassVar -from typing_extensions import Literal, TypeAlias +from typing_extensions import Literal PyCF_ONLY_AST: Literal[1024] if sys.version_info >= (3, 8): PyCF_TYPE_COMMENTS: Literal[4096] PyCF_ALLOW_TOP_LEVEL_AWAIT: Literal[8192] -_Identifier: TypeAlias = str +_Identifier: typing_extensions.TypeAlias = str class AST: if sys.version_info >= (3, 10): @@ -59,31 +60,43 @@ class Expression(mod): class stmt(AST): ... 
class FunctionDef(stmt): - if sys.version_info >= (3, 10): + if sys.version_info >= (3, 12): + __match_args__ = ("name", "args", "body", "decorator_list", "returns", "type_comment", "type_params") + elif sys.version_info >= (3, 10): __match_args__ = ("name", "args", "body", "decorator_list", "returns", "type_comment") name: _Identifier args: arguments body: list[stmt] decorator_list: list[expr] returns: expr | None + if sys.version_info >= (3, 12): + type_params: list[type_param] class AsyncFunctionDef(stmt): - if sys.version_info >= (3, 10): + if sys.version_info >= (3, 12): + __match_args__ = ("name", "args", "body", "decorator_list", "returns", "type_comment", "type_params") + elif sys.version_info >= (3, 10): __match_args__ = ("name", "args", "body", "decorator_list", "returns", "type_comment") name: _Identifier args: arguments body: list[stmt] decorator_list: list[expr] returns: expr | None + if sys.version_info >= (3, 12): + type_params: list[type_param] class ClassDef(stmt): - if sys.version_info >= (3, 10): + if sys.version_info >= (3, 12): + __match_args__ = ("name", "bases", "keywords", "body", "decorator_list", "type_params") + elif sys.version_info >= (3, 10): __match_args__ = ("name", "bases", "keywords", "body", "decorator_list") name: _Identifier bases: list[expr] keywords: list[keyword] body: list[stmt] decorator_list: list[expr] + if sys.version_info >= (3, 12): + type_params: list[type_param] class Return(stmt): if sys.version_info >= (3, 10): @@ -366,10 +379,10 @@ class Attribute(expr): ctx: expr_context if sys.version_info >= (3, 9): - _Slice: TypeAlias = expr + _Slice: typing_extensions.TypeAlias = expr else: class slice(AST): ... - _Slice: TypeAlias = slice + _Slice: typing_extensions.TypeAlias = slice class Slice(_Slice): if sys.version_info >= (3, 10): @@ -526,7 +539,7 @@ if sys.version_info >= (3, 10): class pattern(AST): ... # Without the alias, Pyright complains variables named pattern are recursively defined - _Pattern: TypeAlias = pattern + _Pattern: typing_extensions.TypeAlias = pattern class match_case(AST): __match_args__ = ("pattern", "guard", "body") @@ -571,3 +584,25 @@ if sys.version_info >= (3, 10): class MatchOr(pattern): __match_args__ = ("patterns",) patterns: list[pattern] + +if sys.version_info >= (3, 12): + class type_param(AST): ... + + class TypeVar(type_param): + __match_args__ = ("name", "bound") + name: _Identifier + bound: expr | None + + class ParamSpec(type_param): + __match_args__ = ("name",) + name: _Identifier + + class TypeVarTuple(type_param): + __match_args__ = ("name",) + name: _Identifier + + class TypeAlias(stmt): + __match_args__ = ("name", "typeparams", "value") + name: Name + type_params: list[type_param] + value: expr diff --git a/mypy/typeshed/stdlib/_collections_abc.pyi b/mypy/typeshed/stdlib/_collections_abc.pyi index 05b5421c21f3..2b57f157a0e4 100644 --- a/mypy/typeshed/stdlib/_collections_abc.pyi +++ b/mypy/typeshed/stdlib/_collections_abc.pyi @@ -1,7 +1,7 @@ import sys from abc import abstractmethod from types import MappingProxyType -from typing import ( # noqa: Y022,Y038 +from typing import ( # noqa: Y022,Y038,Y057 AbstractSet as Set, AsyncGenerator as AsyncGenerator, AsyncIterable as AsyncIterable, @@ -69,6 +69,7 @@ _VT_co = TypeVar("_VT_co", covariant=True) # Value type covariant containers. @final class dict_keys(KeysView[_KT_co], Generic[_KT_co, _VT_co]): # undocumented + def __eq__(self, __value: object) -> bool: ... 
if sys.version_info >= (3, 10): @property def mapping(self) -> MappingProxyType[_KT_co, _VT_co]: ... @@ -81,6 +82,7 @@ class dict_values(ValuesView[_VT_co], Generic[_KT_co, _VT_co]): # undocumented @final class dict_items(ItemsView[_KT_co, _VT_co], Generic[_KT_co, _VT_co]): # undocumented + def __eq__(self, __value: object) -> bool: ... if sys.version_info >= (3, 10): @property def mapping(self) -> MappingProxyType[_KT_co, _VT_co]: ... diff --git a/mypy/typeshed/stdlib/_csv.pyi b/mypy/typeshed/stdlib/_csv.pyi index c9b9f47e6217..19ea487e1530 100644 --- a/mypy/typeshed/stdlib/_csv.pyi +++ b/mypy/typeshed/stdlib/_csv.pyi @@ -1,3 +1,4 @@ +import sys from _typeshed import SupportsWrite from collections.abc import Iterable, Iterator from typing import Any @@ -9,6 +10,9 @@ QUOTE_ALL: Literal[1] QUOTE_MINIMAL: Literal[0] QUOTE_NONE: Literal[3] QUOTE_NONNUMERIC: Literal[2] +if sys.version_info >= (3, 12): + QUOTE_STRINGS: Literal[4] + QUOTE_NOTNULL: Literal[5] # Ideally this would be `QUOTE_ALL | QUOTE_MINIMAL | QUOTE_NONE | QUOTE_NONNUMERIC` # However, using literals in situations like these can cause false-positives (see #7258) diff --git a/mypy/typeshed/stdlib/_ctypes.pyi b/mypy/typeshed/stdlib/_ctypes.pyi index 8e8bcbe84bd4..756ee86d3342 100644 --- a/mypy/typeshed/stdlib/_ctypes.pyi +++ b/mypy/typeshed/stdlib/_ctypes.pyi @@ -22,6 +22,9 @@ RTLD_LOCAL: int if sys.version_info >= (3, 11): CTYPES_MAX_ARGCOUNT: int +if sys.version_info >= (3, 12): + SIZEOF_TIME_T: int + if sys.platform == "win32": # Description, Source, HelpFile, HelpContext, scode _COMError_Details: TypeAlias = tuple[str | None, str | None, str | None, int | None, int | None] diff --git a/mypy/typeshed/stdlib/_decimal.pyi b/mypy/typeshed/stdlib/_decimal.pyi index 60c609456954..9a90760bd2c2 100644 --- a/mypy/typeshed/stdlib/_decimal.pyi +++ b/mypy/typeshed/stdlib/_decimal.pyi @@ -73,6 +73,7 @@ class Decimal: def from_float(cls, __f: float) -> Self: ... def __bool__(self) -> bool: ... def compare(self, other: _Decimal, context: Context | None = None) -> Decimal: ... + def __hash__(self) -> int: ... def as_tuple(self) -> DecimalTuple: ... def as_integer_ratio(self) -> tuple[int, int]: ... def to_eng_string(self, context: Context | None = None) -> str: ... diff --git a/mypy/typeshed/stdlib/_socket.pyi b/mypy/typeshed/stdlib/_socket.pyi index f7b0e6901bf4..7a0ede62838c 100644 --- a/mypy/typeshed/stdlib/_socket.pyi +++ b/mypy/typeshed/stdlib/_socket.pyi @@ -692,3 +692,28 @@ if sys.platform != "win32" or sys.version_info >= (3, 8): def if_nameindex() -> list[tuple[int, str]]: ... def if_nametoindex(__name: str) -> int: ... def if_indextoname(__index: int) -> str: ... 
+ +if sys.version_info >= (3, 12): + IP_PKTINFO: int + IP_UNBLOCK_SOURCE: int + IP_BLOCK_SOURCE: int + IP_ADD_SOURCE_MEMBERSHIP: int + IP_DROP_SOURCE_MEMBERSHIP: int + if sys.platform == "win32": + AF_HYPERV: int + HV_PROTOCOL_RAW: int + HVSOCKET_CONNECT_TIMEOUT: int + HVSOCKET_CONNECT_TIMEOUT_MAX: int + HVSOCKET_CONNECTED_SUSPEND: int + HVSOCKET_ADDRESS_FLAG_PASSTHRU: int + HV_GUID_ZERO: str + HV_GUID_WILDCARD: str + HV_GUID_BROADCAST: str + HV_GUID_CHILDREN: str + HV_GUID_LOOPBACK: str + HV_GUID_PARENT: str + else: + ETHERTYPE_ARP: int + ETHERTYPE_IP: int + ETHERTYPE_IPV6: int + ETHERTYPE_VLAN: int diff --git a/mypy/typeshed/stdlib/_thread.pyi b/mypy/typeshed/stdlib/_thread.pyi index 152362edcaea..dba8664fbf13 100644 --- a/mypy/typeshed/stdlib/_thread.pyi +++ b/mypy/typeshed/stdlib/_thread.pyi @@ -43,3 +43,6 @@ if sys.version_info >= (3, 8): @property def thread(self) -> Thread | None: ... _excepthook: Callable[[_ExceptHookArgs], Any] + +if sys.version_info >= (3, 12): + def daemon_threads_allowed() -> bool: ... diff --git a/mypy/typeshed/stdlib/_typeshed/__init__.pyi b/mypy/typeshed/stdlib/_typeshed/__init__.pyi index 5d03142c6d71..7ae67292e8cd 100644 --- a/mypy/typeshed/stdlib/_typeshed/__init__.pyi +++ b/mypy/typeshed/stdlib/_typeshed/__init__.pyi @@ -36,6 +36,19 @@ Incomplete: TypeAlias = Any # To describe a function parameter that is unused and will work with anything. Unused: TypeAlias = object +# Used to mark arguments that default to a sentinel value. This prevents +# stubtest from complaining about the default value not matching. +# +# def foo(x: int | None = sentinel) -> None: ... +# +# In cases where the sentinel object is exported and can be used by user code, +# a construct like this is better: +# +# _SentinelType = NewType("_SentinelType", object) +# sentinel: _SentinelType +# def foo(x: int | None | _SentinelType = ...) -> None: ... +sentinel = Any # noqa: Y026 + # stable class IdentityFunction(Protocol): def __call__(self, __x: _T) -> _T: ... diff --git a/mypy/typeshed/stdlib/_weakref.pyi b/mypy/typeshed/stdlib/_weakref.pyi index b6044fac4628..ce0f681248ab 100644 --- a/mypy/typeshed/stdlib/_weakref.pyi +++ b/mypy/typeshed/stdlib/_weakref.pyi @@ -11,17 +11,21 @@ _T = TypeVar("_T") @final class CallableProxyType(Generic[_C]): # "weakcallableproxy" + def __eq__(self, __value: object) -> bool: ... def __getattr__(self, attr: str) -> Any: ... __call__: _C @final class ProxyType(Generic[_T]): # "weakproxy" + def __eq__(self, __value: object) -> bool: ... def __getattr__(self, attr: str) -> Any: ... class ReferenceType(Generic[_T]): __callback__: Callable[[ReferenceType[_T]], Any] def __new__(cls, __o: _T, __callback: Callable[[ReferenceType[_T]], Any] | None = ...) -> Self: ... def __call__(self) -> _T | None: ... + def __eq__(self, __value: object) -> bool: ... + def __hash__(self) -> int: ... if sys.version_info >= (3, 9): def __class_getitem__(cls, item: Any) -> GenericAlias: ... 
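# A minimal, hedged sketch of the NewType-based sentinel pattern described in
# the _typeshed comment above. The names _SentinelType, MISSING and configure()
# are hypothetical; they only illustrate how user code can tell "argument
# omitted" apart from an explicit None while still type-checking cleanly.
from __future__ import annotations

from typing import NewType

_SentinelType = NewType("_SentinelType", object)
MISSING = _SentinelType(object())  # runtime value; a stub would only declare "MISSING: _SentinelType"


def configure(timeout: float | None | _SentinelType = MISSING) -> None:
    if timeout is MISSING:  # the caller did not pass the argument at all
        timeout = 30.0  # hypothetical default
    print(timeout)


configure()      # prints 30.0: the default kicks in
configure(None)  # prints None: the timeout was explicitly disabled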
diff --git a/mypy/typeshed/stdlib/_winapi.pyi b/mypy/typeshed/stdlib/_winapi.pyi index ca1e61f0f19f..b51d844701ac 100644 --- a/mypy/typeshed/stdlib/_winapi.pyi +++ b/mypy/typeshed/stdlib/_winapi.pyi @@ -137,6 +137,34 @@ if sys.platform == "win32": LCMAP_TRADITIONAL_CHINESE: int LCMAP_UPPERCASE: int + if sys.version_info >= (3, 12): + COPYFILE2_CALLBACK_CHUNK_STARTED: Literal[1] + COPYFILE2_CALLBACK_CHUNK_FINISHED: Literal[2] + COPYFILE2_CALLBACK_STREAM_STARTED: Literal[3] + COPYFILE2_CALLBACK_STREAM_FINISHED: Literal[4] + COPYFILE2_CALLBACK_POLL_CONTINUE: Literal[5] + COPYFILE2_CALLBACK_ERROR: Literal[6] + + COPYFILE2_PROGRESS_CONTINUE: Literal[0] + COPYFILE2_PROGRESS_CANCEL: Literal[1] + COPYFILE2_PROGRESS_STOP: Literal[2] + COPYFILE2_PROGRESS_QUIET: Literal[3] + COPYFILE2_PROGRESS_PAUSE: Literal[4] + + COPY_FILE_FAIL_IF_EXISTS: Literal[0x1] + COPY_FILE_RESTARTABLE: Literal[0x2] + COPY_FILE_OPEN_SOURCE_FOR_WRITE: Literal[0x4] + COPY_FILE_ALLOW_DECRYPTED_DESTINATION: Literal[0x8] + COPY_FILE_COPY_SYMLINK: Literal[0x800] + COPY_FILE_NO_BUFFERING: Literal[0x1000] + COPY_FILE_REQUEST_SECURITY_PRIVILEGES: Literal[0x2000] + COPY_FILE_RESUME_FROM_PAUSE: Literal[0x4000] + COPY_FILE_NO_OFFLOAD: Literal[0x40000] + COPY_FILE_REQUEST_COMPRESSED_TRAFFIC: Literal[0x10000000] + + ERROR_ACCESS_DENIED: Literal[5] + ERROR_PRIVILEGE_NOT_HELD: Literal[1314] + def CloseHandle(__handle: int) -> None: ... @overload def ConnectNamedPipe(handle: int, overlapped: Literal[True]) -> Overlapped: ... @@ -224,3 +252,6 @@ if sys.platform == "win32": def GetOverlappedResult(self, __wait: bool) -> tuple[int, int]: ... def cancel(self) -> None: ... def getbuffer(self) -> bytes | None: ... + + if sys.version_info >= (3, 12): + def CopyFile2(existing_file_name: str, new_file_name: str, flags: int, progress_routine: int | None = None) -> int: ... diff --git a/mypy/typeshed/stdlib/abc.pyi b/mypy/typeshed/stdlib/abc.pyi index ec04d8f85d12..43893a298341 100644 --- a/mypy/typeshed/stdlib/abc.pyi +++ b/mypy/typeshed/stdlib/abc.pyi @@ -31,7 +31,7 @@ def abstractmethod(funcobj: _FuncT) -> _FuncT: ... class abstractclassmethod(classmethod[_T, _P, _R_co]): __isabstractmethod__: Literal[True] - def __init__(self, callable: Callable[Concatenate[_T, _P], _R_co]) -> None: ... + def __init__(self, callable: Callable[Concatenate[type[_T], _P], _R_co]) -> None: ... class abstractstaticmethod(staticmethod[_P, _R_co]): __isabstractmethod__: Literal[True] diff --git a/mypy/typeshed/stdlib/argparse.pyi b/mypy/typeshed/stdlib/argparse.pyi index c986b9cdb082..0004250b17a9 100644 --- a/mypy/typeshed/stdlib/argparse.pyi +++ b/mypy/typeshed/stdlib/argparse.pyi @@ -1,4 +1,5 @@ import sys +from _typeshed import sentinel from collections.abc import Callable, Generator, Iterable, Sequence from re import Pattern from typing import IO, Any, Generic, NewType, NoReturn, Protocol, TypeVar, overload @@ -85,7 +86,7 @@ class _ActionsContainer: self, *name_or_flags: str, action: _ActionStr | type[Action] = ..., - nargs: int | _NArgsStr | _SUPPRESS_T = ..., + nargs: int | _NArgsStr | _SUPPRESS_T | None = None, const: Any = ..., default: Any = ..., type: Callable[[str], _T] | FileType = ..., @@ -97,8 +98,16 @@ class _ActionsContainer: version: str = ..., **kwargs: Any, ) -> Action: ... - def add_argument_group(self, *args: Any, **kwargs: Any) -> _ArgumentGroup: ... - def add_mutually_exclusive_group(self, **kwargs: Any) -> _MutuallyExclusiveGroup: ... 
+ def add_argument_group( + self, + title: str | None = None, + description: str | None = None, + *, + prefix_chars: str = ..., + argument_default: Any = ..., + conflict_handler: str = ..., + ) -> _ArgumentGroup: ... + def add_mutually_exclusive_group(self, *, required: bool = False) -> _MutuallyExclusiveGroup: ... def _add_action(self, action: _ActionT) -> _ActionT: ... def _remove_action(self, action: Action) -> None: ... def _add_container_actions(self, container: _ActionsContainer) -> None: ... @@ -161,7 +170,7 @@ class ArgumentParser(_AttributeHolder, _ActionsContainer): add_help: bool = True, allow_abbrev: bool = True, ) -> None: ... - # Ignore errors about overlapping overloads + @overload def parse_args(self, args: Sequence[str] | None = None, namespace: None = None) -> Namespace: ... # type: ignore[misc] @overload @@ -201,16 +210,27 @@ class ArgumentParser(_AttributeHolder, _ActionsContainer): def print_help(self, file: IO[str] | None = None) -> None: ... def format_usage(self) -> str: ... def format_help(self) -> str: ... - def parse_known_args( - self, args: Sequence[str] | None = None, namespace: Namespace | None = None - ) -> tuple[Namespace, list[str]]: ... + @overload + def parse_known_args(self, args: Sequence[str] | None = None, namespace: None = None) -> tuple[Namespace, list[str]]: ... # type: ignore[misc] + @overload + def parse_known_args(self, args: Sequence[str] | None, namespace: _N) -> tuple[_N, list[str]]: ... + @overload + def parse_known_args(self, *, namespace: _N) -> tuple[_N, list[str]]: ... def convert_arg_line_to_args(self, arg_line: str) -> list[str]: ... def exit(self, status: int = 0, message: str | None = None) -> NoReturn: ... def error(self, message: str) -> NoReturn: ... - def parse_intermixed_args(self, args: Sequence[str] | None = None, namespace: Namespace | None = None) -> Namespace: ... - def parse_known_intermixed_args( - self, args: Sequence[str] | None = None, namespace: Namespace | None = None - ) -> tuple[Namespace, list[str]]: ... + @overload + def parse_intermixed_args(self, args: Sequence[str] | None = None, namespace: None = None) -> Namespace: ... # type: ignore[misc] + @overload + def parse_intermixed_args(self, args: Sequence[str] | None, namespace: _N) -> _N: ... + @overload + def parse_intermixed_args(self, *, namespace: _N) -> _N: ... + @overload + def parse_known_intermixed_args(self, args: Sequence[str] | None = None, namespace: None = None) -> tuple[Namespace, list[str]]: ... # type: ignore[misc] + @overload + def parse_known_intermixed_args(self, args: Sequence[str] | None, namespace: _N) -> tuple[_N, list[str]]: ... + @overload + def parse_known_intermixed_args(self, *, namespace: _N) -> tuple[_N, list[str]]: ... # undocumented def _get_optional_actions(self) -> list[Action]: ... def _get_positional_actions(self) -> list[Action]: ... @@ -315,7 +335,21 @@ class Action(_AttributeHolder): if sys.version_info >= (3, 9): def format_usage(self) -> str: ... -if sys.version_info >= (3, 9): +if sys.version_info >= (3, 12): + class BooleanOptionalAction(Action): + def __init__( + self, + option_strings: Sequence[str], + dest: str, + default: _T | str | None = None, + type: Callable[[str], _T] | FileType | None = sentinel, # noqa: Y011 + choices: Iterable[_T] | None = sentinel, # noqa: Y011 + required: bool = False, + help: str | None = None, + metavar: str | tuple[str, ...] | None = sentinel, # noqa: Y011 + ) -> None: ... 
+ +elif sys.version_info >= (3, 9): class BooleanOptionalAction(Action): def __init__( self, @@ -350,7 +384,14 @@ class _ArgumentGroup(_ActionsContainer): title: str | None _group_actions: list[Action] def __init__( - self, container: _ActionsContainer, title: str | None = None, description: str | None = None, **kwargs: Any + self, + container: _ActionsContainer, + title: str | None = None, + description: str | None = None, + *, + prefix_chars: str = ..., + argument_default: Any = ..., + conflict_handler: str = ..., ) -> None: ... # undocumented diff --git a/mypy/typeshed/stdlib/array.pyi b/mypy/typeshed/stdlib/array.pyi index 8b003503bc9b..b533f9240073 100644 --- a/mypy/typeshed/stdlib/array.pyi +++ b/mypy/typeshed/stdlib/array.pyi @@ -6,6 +6,9 @@ from collections.abc import Iterable from typing import Any, Generic, MutableSequence, TypeVar, overload # noqa: Y022 from typing_extensions import Literal, Self, SupportsIndex, TypeAlias +if sys.version_info >= (3, 12): + from types import GenericAlias + _IntTypeCode: TypeAlias = Literal["b", "B", "h", "H", "i", "I", "l", "L", "q", "Q"] _FloatTypeCode: TypeAlias = Literal["f", "d"] _UnicodeTypeCode: TypeAlias = Literal["u"] @@ -70,6 +73,7 @@ class array(MutableSequence[_T], Generic[_T]): def __setitem__(self, __key: slice, __value: array[_T]) -> None: ... def __delitem__(self, __key: SupportsIndex | slice) -> None: ... def __add__(self, __value: array[_T]) -> array[_T]: ... + def __eq__(self, __value: object) -> bool: ... def __ge__(self, __value: array[_T]) -> bool: ... def __gt__(self, __value: array[_T]) -> bool: ... def __iadd__(self, __value: array[_T]) -> Self: ... # type: ignore[override] @@ -82,5 +86,7 @@ class array(MutableSequence[_T], Generic[_T]): def __deepcopy__(self, __unused: Any) -> array[_T]: ... def __buffer__(self, __flags: int) -> memoryview: ... def __release_buffer__(self, __buffer: memoryview) -> None: ... + if sys.version_info >= (3, 12): + def __class_getitem__(cls, item: Any) -> GenericAlias: ... ArrayType = array diff --git a/mypy/typeshed/stdlib/ast.pyi b/mypy/typeshed/stdlib/ast.pyi index ea899e150f97..377138141340 100644 --- a/mypy/typeshed/stdlib/ast.pyi +++ b/mypy/typeshed/stdlib/ast.pyi @@ -3,7 +3,7 @@ import sys from _ast import * from _typeshed import ReadableBuffer, Unused from collections.abc import Iterator -from typing import Any, TypeVar, overload +from typing import Any, TypeVar as _TypeVar, overload from typing_extensions import Literal if sys.version_info >= (3, 8): @@ -168,7 +168,7 @@ class NodeTransformer(NodeVisitor): # The usual return type is AST | None, but Iterable[AST] # is also allowed in some cases -- this needs to be mapped. 
-_T = TypeVar("_T", bound=AST) +_T = _TypeVar("_T", bound=AST) if sys.version_info >= (3, 8): @overload diff --git a/mypy/typeshed/stdlib/asyncio/__init__.pyi b/mypy/typeshed/stdlib/asyncio/__init__.pyi index 2ce066cac982..c11465184389 100644 --- a/mypy/typeshed/stdlib/asyncio/__init__.pyi +++ b/mypy/typeshed/stdlib/asyncio/__init__.pyi @@ -36,8 +36,8 @@ _T = TypeVar("_T") # Aliases imported by multiple submodules in typeshed if sys.version_info >= (3, 12): - _AwaitableLike: TypeAlias = Awaitable[_T] - _CoroutineLike: TypeAlias = Coroutine[Any, Any, _T] + _AwaitableLike: TypeAlias = Awaitable[_T] # noqa: Y047 + _CoroutineLike: TypeAlias = Coroutine[Any, Any, _T] # noqa: Y047 else: _AwaitableLike: TypeAlias = Generator[Any, None, _T] | Awaitable[_T] _CoroutineLike: TypeAlias = Generator[Any, None, _T] | Coroutine[Any, Any, _T] diff --git a/mypy/typeshed/stdlib/asyncio/base_events.pyi b/mypy/typeshed/stdlib/asyncio/base_events.pyi index fd765fdb0614..e2b55da8c718 100644 --- a/mypy/typeshed/stdlib/asyncio/base_events.pyi +++ b/mypy/typeshed/stdlib/asyncio/base_events.pyi @@ -107,7 +107,48 @@ class BaseEventLoop(AbstractEventLoop): flags: int = 0, ) -> list[tuple[AddressFamily, SocketKind, int, str, tuple[str, int] | tuple[str, int, int, int]]]: ... async def getnameinfo(self, sockaddr: tuple[str, int] | tuple[str, int, int, int], flags: int = 0) -> tuple[str, str]: ... - if sys.version_info >= (3, 11): + if sys.version_info >= (3, 12): + @overload + async def create_connection( + self, + protocol_factory: Callable[[], _ProtocolT], + host: str = ..., + port: int = ..., + *, + ssl: _SSLContext = None, + family: int = 0, + proto: int = 0, + flags: int = 0, + sock: None = None, + local_addr: tuple[str, int] | None = None, + server_hostname: str | None = None, + ssl_handshake_timeout: float | None = None, + ssl_shutdown_timeout: float | None = None, + happy_eyeballs_delay: float | None = None, + interleave: int | None = None, + all_errors: bool = False, + ) -> tuple[Transport, _ProtocolT]: ... + @overload + async def create_connection( + self, + protocol_factory: Callable[[], _ProtocolT], + host: None = None, + port: None = None, + *, + ssl: _SSLContext = None, + family: int = 0, + proto: int = 0, + flags: int = 0, + sock: socket, + local_addr: None = None, + server_hostname: str | None = None, + ssl_handshake_timeout: float | None = None, + ssl_shutdown_timeout: float | None = None, + happy_eyeballs_delay: float | None = None, + interleave: int | None = None, + all_errors: bool = False, + ) -> tuple[Transport, _ProtocolT]: ... + elif sys.version_info >= (3, 11): @overload async def create_connection( self, @@ -263,7 +304,7 @@ class BaseEventLoop(AbstractEventLoop): server_hostname: str | None = None, ssl_handshake_timeout: float | None = None, ssl_shutdown_timeout: float | None = None, - ) -> Transport: ... + ) -> Transport | None: ... async def connect_accepted_socket( self, protocol_factory: Callable[[], _ProtocolT], @@ -317,7 +358,7 @@ class BaseEventLoop(AbstractEventLoop): server_side: bool = False, server_hostname: str | None = None, ssl_handshake_timeout: float | None = None, - ) -> Transport: ... + ) -> Transport | None: ... async def connect_accepted_socket( self, protocol_factory: Callable[[], _ProtocolT], @@ -426,5 +467,7 @@ class BaseEventLoop(AbstractEventLoop): # Debug flag management. def get_debug(self) -> bool: ... def set_debug(self, enabled: bool) -> None: ... 
- if sys.version_info >= (3, 9): + if sys.version_info >= (3, 12): + async def shutdown_default_executor(self, timeout: float | None = None) -> None: ... + elif sys.version_info >= (3, 9): async def shutdown_default_executor(self) -> None: ... diff --git a/mypy/typeshed/stdlib/asyncio/constants.pyi b/mypy/typeshed/stdlib/asyncio/constants.pyi index af209fa9ee62..60d8529209c2 100644 --- a/mypy/typeshed/stdlib/asyncio/constants.pyi +++ b/mypy/typeshed/stdlib/asyncio/constants.pyi @@ -11,6 +11,8 @@ if sys.version_info >= (3, 11): SSL_SHUTDOWN_TIMEOUT: float FLOW_CONTROL_HIGH_WATER_SSL_READ: Literal[256] FLOW_CONTROL_HIGH_WATER_SSL_WRITE: Literal[512] +if sys.version_info >= (3, 12): + THREAD_JOIN_TIMEOUT: Literal[300] class _SendfileMode(enum.Enum): UNSUPPORTED: int diff --git a/mypy/typeshed/stdlib/asyncio/events.pyi b/mypy/typeshed/stdlib/asyncio/events.pyi index 11112bb2e87d..cde63b279b0d 100644 --- a/mypy/typeshed/stdlib/asyncio/events.pyi +++ b/mypy/typeshed/stdlib/asyncio/events.pyi @@ -76,6 +76,8 @@ class Handle: def cancel(self) -> None: ... def _run(self) -> None: ... def cancelled(self) -> bool: ... + if sys.version_info >= (3, 12): + def get_context(self) -> Context: ... class TimerHandle(Handle): def __init__( @@ -86,6 +88,7 @@ class TimerHandle(Handle): loop: AbstractEventLoop, context: Context | None = None, ) -> None: ... + def __hash__(self) -> int: ... def when(self) -> float: ... def __lt__(self, other: TimerHandle) -> bool: ... def __le__(self, other: TimerHandle) -> bool: ... @@ -358,7 +361,7 @@ class AbstractEventLoop: server_hostname: str | None = None, ssl_handshake_timeout: float | None = None, ssl_shutdown_timeout: float | None = None, - ) -> Transport: ... + ) -> Transport | None: ... async def create_unix_server( self, protocol_factory: _ProtocolFactory, @@ -418,7 +421,7 @@ class AbstractEventLoop: server_side: bool = False, server_hostname: str | None = None, ssl_handshake_timeout: float | None = None, - ) -> Transport: ... + ) -> Transport | None: ... async def create_unix_server( self, protocol_factory: _ProtocolFactory, diff --git a/mypy/typeshed/stdlib/asyncio/futures.pyi b/mypy/typeshed/stdlib/asyncio/futures.pyi index 79209f5ed4fb..af05425d02a2 100644 --- a/mypy/typeshed/stdlib/asyncio/futures.pyi +++ b/mypy/typeshed/stdlib/asyncio/futures.pyi @@ -31,7 +31,7 @@ def isfuture(obj: object) -> TypeGuard[Future[Any]]: ... class Future(Awaitable[_T], Iterable[_T]): _state: str @property - def _exception(self) -> BaseException: ... + def _exception(self) -> BaseException | None: ... _blocking: bool @property def _log_traceback(self) -> bool: ... diff --git a/mypy/typeshed/stdlib/asyncio/streams.pyi b/mypy/typeshed/stdlib/asyncio/streams.pyi index f30c57305d93..804be1ca5065 100644 --- a/mypy/typeshed/stdlib/asyncio/streams.pyi +++ b/mypy/typeshed/stdlib/asyncio/streams.pyi @@ -148,7 +148,16 @@ class StreamWriter: async def wait_closed(self) -> None: ... def get_extra_info(self, name: str, default: Any = None) -> Any: ... async def drain(self) -> None: ... - if sys.version_info >= (3, 11): + if sys.version_info >= (3, 12): + async def start_tls( + self, + sslcontext: ssl.SSLContext, + *, + server_hostname: str | None = None, + ssl_handshake_timeout: float | None = None, + ssl_shutdown_timeout: float | None = None, + ) -> None: ... + elif sys.version_info >= (3, 11): async def start_tls( self, sslcontext: ssl.SSLContext, *, server_hostname: str | None = None, ssl_handshake_timeout: float | None = None ) -> None: ... 
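# A hedged usage sketch for the version-gated asyncio stubs above. The helper
# below is hypothetical; it only shows how calling code can guard the
# ssl_shutdown_timeout keyword that StreamWriter.start_tls() accepts on
# Python 3.12+ (the method itself exists on 3.11+).
from __future__ import annotations

import asyncio
import ssl
import sys


async def upgrade_to_tls(writer: asyncio.StreamWriter, ctx: ssl.SSLContext) -> None:
    if sys.version_info >= (3, 12):
        # 3.12 adds an explicit TLS shutdown timeout next to the handshake timeout.
        await writer.start_tls(ctx, ssl_handshake_timeout=10.0, ssl_shutdown_timeout=10.0)
    else:
        await writer.start_tls(ctx, ssl_handshake_timeout=10.0)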
diff --git a/mypy/typeshed/stdlib/asyncio/taskgroups.pyi b/mypy/typeshed/stdlib/asyncio/taskgroups.pyi index 08ea8f66559c..47d9bb2f699e 100644 --- a/mypy/typeshed/stdlib/asyncio/taskgroups.pyi +++ b/mypy/typeshed/stdlib/asyncio/taskgroups.pyi @@ -1,5 +1,4 @@ -# This only exists in 3.11+. See VERSIONS. - +import sys from contextvars import Context from types import TracebackType from typing import TypeVar @@ -8,7 +7,10 @@ from typing_extensions import Self from . import _CoroutineLike from .tasks import Task -__all__ = ["TaskGroup"] +if sys.version_info >= (3, 12): + __all__ = ("TaskGroup",) +else: + __all__ = ["TaskGroup"] _T = TypeVar("_T") diff --git a/mypy/typeshed/stdlib/asyncio/tasks.pyi b/mypy/typeshed/stdlib/asyncio/tasks.pyi index d8c101f281fc..5ea30d3791de 100644 --- a/mypy/typeshed/stdlib/asyncio/tasks.pyi +++ b/mypy/typeshed/stdlib/asyncio/tasks.pyi @@ -285,7 +285,26 @@ else: # since the only reason why `asyncio.Future` is invariant is the `set_result()` method, # and `asyncio.Task.set_result()` always raises. class Task(Future[_T_co], Generic[_T_co]): # type: ignore[type-var] # pyright: ignore[reportGeneralTypeIssues] - if sys.version_info >= (3, 8): + if sys.version_info >= (3, 12): + def __init__( + self, + coro: _TaskCompatibleCoro[_T_co], + *, + loop: AbstractEventLoop = ..., + name: str | None, + context: Context | None = None, + eager_start: bool = False, + ) -> None: ... + elif sys.version_info >= (3, 11): + def __init__( + self, + coro: _TaskCompatibleCoro[_T_co], + *, + loop: AbstractEventLoop = ..., + name: str | None, + context: Context | None = None, + ) -> None: ... + elif sys.version_info >= (3, 8): def __init__( self, coro: _TaskCompatibleCoro[_T_co], *, loop: AbstractEventLoop = ..., name: str | None = ... ) -> None: ... @@ -295,6 +314,8 @@ class Task(Future[_T_co], Generic[_T_co]): # type: ignore[type-var] # pyright: def get_coro(self) -> _TaskCompatibleCoro[_T_co]: ... def get_name(self) -> str: ... def set_name(self, __value: object) -> None: ... + if sys.version_info >= (3, 12): + def get_context(self) -> Context: ... def get_stack(self, *, limit: int | None = None) -> list[FrameType]: ... def print_stack(self, *, limit: int | None = None, file: TextIO | None = None) -> None: ... diff --git a/mypy/typeshed/stdlib/builtins.pyi b/mypy/typeshed/stdlib/builtins.pyi index 0676aba1277e..66c644d09a4d 100644 --- a/mypy/typeshed/stdlib/builtins.pyi +++ b/mypy/typeshed/stdlib/builtins.pyi @@ -53,7 +53,17 @@ from typing import ( # noqa: Y022 overload, type_check_only, ) -from typing_extensions import Concatenate, Literal, ParamSpec, Self, SupportsIndex, TypeAlias, TypeGuard, final +from typing_extensions import ( + Concatenate, + Literal, + ParamSpec, + Self, + SupportsIndex, + TypeAlias, + TypeGuard, + TypeVarTuple, + final, +) if sys.version_info >= (3, 9): from types import GenericAlias @@ -121,6 +131,9 @@ class staticmethod(Generic[_P, _R_co]): @property def __isabstractmethod__(self) -> bool: ... def __init__(self, __f: Callable[_P, _R_co]) -> None: ... + @overload + def __get__(self, __instance: None, __owner: type) -> Callable[_P, _R_co]: ... + @overload def __get__(self, __instance: _T, __owner: type[_T] | None = None) -> Callable[_P, _R_co]: ... if sys.version_info >= (3, 10): __name__: str @@ -131,16 +144,19 @@ class staticmethod(Generic[_P, _R_co]): class classmethod(Generic[_T, _P, _R_co]): @property - def __func__(self) -> Callable[Concatenate[_T, _P], _R_co]: ... + def __func__(self) -> Callable[Concatenate[type[_T], _P], _R_co]: ... 
@property def __isabstractmethod__(self) -> bool: ... - def __init__(self, __f: Callable[Concatenate[_T, _P], _R_co]) -> None: ... + def __init__(self, __f: Callable[Concatenate[type[_T], _P], _R_co]) -> None: ... + @overload def __get__(self, __instance: _T, __owner: type[_T] | None = None) -> Callable[_P, _R_co]: ... + @overload + def __get__(self, __instance: None, __owner: type[_T]) -> Callable[_P, _R_co]: ... if sys.version_info >= (3, 10): __name__: str __qualname__: str @property - def __wrapped__(self) -> Callable[Concatenate[_T, _P], _R_co]: ... + def __wrapped__(self) -> Callable[Concatenate[type[_T], _P], _R_co]: ... class type: @property @@ -187,6 +203,8 @@ class type: if sys.version_info >= (3, 10): def __or__(self, __value: Any) -> types.UnionType: ... def __ror__(self, __value: Any) -> types.UnionType: ... + if sys.version_info >= (3, 12): + __type_params__: tuple[TypeVar | ParamSpec | TypeVarTuple, ...] class super: @overload @@ -244,6 +262,9 @@ class int: signed: bool = False, ) -> Self: ... + if sys.version_info >= (3, 12): + def is_integer(self) -> Literal[True]: ... + def __add__(self, __value: int) -> int: ... def __sub__(self, __value: int) -> int: ... def __mul__(self, __value: int) -> int: ... @@ -300,6 +321,7 @@ class int: def __float__(self) -> float: ... def __int__(self) -> int: ... def __abs__(self) -> int: ... + def __hash__(self) -> int: ... def __bool__(self) -> bool: ... def __index__(self) -> int: ... @@ -363,6 +385,7 @@ class float: def __int__(self) -> int: ... def __float__(self) -> float: ... def __abs__(self) -> float: ... + def __hash__(self) -> int: ... def __bool__(self) -> bool: ... class complex: @@ -402,6 +425,7 @@ class complex: def __neg__(self) -> complex: ... def __pos__(self) -> complex: ... def __abs__(self) -> float: ... + def __hash__(self) -> int: ... def __bool__(self) -> bool: ... if sys.version_info >= (3, 11): def __complex__(self) -> complex: ... @@ -489,6 +513,7 @@ class str(Sequence[str]): def __ge__(self, __value: str) -> bool: ... def __getitem__(self, __key: SupportsIndex | slice) -> str: ... def __gt__(self, __value: str) -> bool: ... + def __hash__(self) -> int: ... def __iter__(self) -> Iterator[str]: ... # type: ignore[misc] def __le__(self, __value: str) -> bool: ... def __len__(self) -> int: ... @@ -582,6 +607,7 @@ class bytes(Sequence[int]): def maketrans(__frm: ReadableBuffer, __to: ReadableBuffer) -> bytes: ... def __len__(self) -> int: ... def __iter__(self) -> Iterator[int]: ... + def __hash__(self) -> int: ... @overload def __getitem__(self, __key: SupportsIndex) -> int: ... @overload @@ -761,6 +787,8 @@ class memoryview(Sequence[int]): def __contains__(self, __x: object) -> bool: ... def __iter__(self) -> Iterator[int]: ... def __len__(self) -> int: ... + def __eq__(self, __value: object) -> bool: ... + def __hash__(self) -> int: ... @overload def __setitem__(self, __key: slice, __value: ReadableBuffer) -> None: ... @overload @@ -828,6 +856,7 @@ class slice: def __init__(self, __stop: Any) -> None: ... @overload def __init__(self, __start: Any, __stop: Any, __step: Any = ...) -> None: ... + def __eq__(self, __value: object) -> bool: ... __hash__: ClassVar[None] # type: ignore[assignment] def indices(self, __len: SupportsIndex) -> tuple[int, int, int]: ... @@ -844,6 +873,8 @@ class tuple(Sequence[_T_co], Generic[_T_co]): def __le__(self, __value: tuple[_T_co, ...]) -> bool: ... def __gt__(self, __value: tuple[_T_co, ...]) -> bool: ... def __ge__(self, __value: tuple[_T_co, ...]) -> bool: ... 
+ def __eq__(self, __value: object) -> bool: ... + def __hash__(self) -> int: ... @overload def __add__(self, __value: tuple[_T_co, ...]) -> tuple[_T_co, ...]: ... @overload @@ -874,6 +905,8 @@ class function: if sys.version_info >= (3, 10): @property def __builtins__(self) -> dict[str, Any]: ... + if sys.version_info >= (3, 12): + __type_params__: tuple[TypeVar | ParamSpec | TypeVarTuple, ...] __module__: str # mypy uses `builtins.function.__get__` to represent methods, properties, and getset_descriptors so we type the return as Any. @@ -930,6 +963,7 @@ class list(MutableSequence[_T], Generic[_T]): def __ge__(self, __value: list[_T]) -> bool: ... def __lt__(self, __value: list[_T]) -> bool: ... def __le__(self, __value: list[_T]) -> bool: ... + def __eq__(self, __value: object) -> bool: ... if sys.version_info >= (3, 9): def __class_getitem__(cls, __item: Any) -> GenericAlias: ... @@ -969,25 +1003,36 @@ class dict(MutableMapping[_KT, _VT], Generic[_KT, _VT]): @overload def fromkeys(cls, __iterable: Iterable[_T], __value: _S) -> dict[_T, _S]: ... # Positional-only in dict, but not in MutableMapping - @overload + @overload # type: ignore[override] def get(self, __key: _KT) -> _VT | None: ... @overload - def get(self, __key: _KT, __default: _VT | _T) -> _VT | _T: ... + def get(self, __key: _KT, __default: _VT) -> _VT: ... + @overload + def get(self, __key: _KT, __default: _T) -> _VT | _T: ... @overload def pop(self, __key: _KT) -> _VT: ... @overload - def pop(self, __key: _KT, __default: _VT | _T) -> _VT | _T: ... + def pop(self, __key: _KT, __default: _VT) -> _VT: ... + @overload + def pop(self, __key: _KT, __default: _T) -> _VT | _T: ... def __len__(self) -> int: ... def __getitem__(self, __key: _KT) -> _VT: ... def __setitem__(self, __key: _KT, __value: _VT) -> None: ... def __delitem__(self, __key: _KT) -> None: ... def __iter__(self) -> Iterator[_KT]: ... + def __eq__(self, __value: object) -> bool: ... if sys.version_info >= (3, 8): def __reversed__(self) -> Iterator[_KT]: ... __hash__: ClassVar[None] # type: ignore[assignment] if sys.version_info >= (3, 9): def __class_getitem__(cls, __item: Any) -> GenericAlias: ... + @overload + def __or__(self, __value: Mapping[_KT, _VT]) -> dict[_KT, _VT]: ... + @overload def __or__(self, __value: Mapping[_T1, _T2]) -> dict[_KT | _T1, _VT | _T2]: ... + @overload + def __ror__(self, __value: Mapping[_KT, _VT]) -> dict[_KT, _VT]: ... + @overload def __ror__(self, __value: Mapping[_T1, _T2]) -> dict[_KT | _T1, _VT | _T2]: ... # dict.__ior__ should be kept roughly in line with MutableMapping.update() @overload # type: ignore[misc] @@ -1030,6 +1075,7 @@ class set(MutableSet[_T], Generic[_T]): def __lt__(self, __value: AbstractSet[object]) -> bool: ... def __ge__(self, __value: AbstractSet[object]) -> bool: ... def __gt__(self, __value: AbstractSet[object]) -> bool: ... + def __eq__(self, __value: object) -> bool: ... __hash__: ClassVar[None] # type: ignore[assignment] if sys.version_info >= (3, 9): def __class_getitem__(cls, __item: Any) -> GenericAlias: ... @@ -1058,6 +1104,8 @@ class frozenset(AbstractSet[_T_co], Generic[_T_co]): def __lt__(self, __value: AbstractSet[object]) -> bool: ... def __ge__(self, __value: AbstractSet[object]) -> bool: ... def __gt__(self, __value: AbstractSet[object]) -> bool: ... + def __eq__(self, __value: object) -> bool: ... + def __hash__(self) -> int: ... if sys.version_info >= (3, 9): def __class_getitem__(cls, __item: Any) -> GenericAlias: ... 
@@ -1083,6 +1131,8 @@ class range(Sequence[int]): def count(self, __value: int) -> int: ... def index(self, __value: int) -> int: ... # type: ignore[override] def __len__(self) -> int: ... + def __eq__(self, __value: object) -> bool: ... + def __hash__(self) -> int: ... def __contains__(self, __key: object) -> bool: ... def __iter__(self) -> Iterator[int]: ... @overload @@ -1826,6 +1876,8 @@ class ImportError(Exception): name: str | None path: str | None msg: str # undocumented + if sys.version_info >= (3, 12): + name_from: str | None # undocumented class LookupError(Exception): ... class MemoryError(Exception): ... diff --git a/mypy/typeshed/stdlib/calendar.pyi b/mypy/typeshed/stdlib/calendar.pyi index 255a12d3348a..3f881393e99f 100644 --- a/mypy/typeshed/stdlib/calendar.pyi +++ b/mypy/typeshed/stdlib/calendar.pyi @@ -1,4 +1,5 @@ import datetime +import enum import sys from _typeshed import Unused from collections.abc import Iterable, Sequence @@ -35,6 +36,23 @@ __all__ = [ if sys.version_info >= (3, 10): __all__ += ["FRIDAY", "MONDAY", "SATURDAY", "SUNDAY", "THURSDAY", "TUESDAY", "WEDNESDAY"] +if sys.version_info >= (3, 12): + __all__ += [ + "Day", + "Month", + "JANUARY", + "FEBRUARY", + "MARCH", + "APRIL", + "MAY", + "JUNE", + "JULY", + "AUGUST", + "SEPTEMBER", + "OCTOBER", + "NOVEMBER", + "DECEMBER", + ] _LocaleType: TypeAlias = tuple[str | None, str | None] @@ -134,12 +152,55 @@ day_abbr: Sequence[str] month_name: Sequence[str] month_abbr: Sequence[str] -MONDAY: Literal[0] -TUESDAY: Literal[1] -WEDNESDAY: Literal[2] -THURSDAY: Literal[3] -FRIDAY: Literal[4] -SATURDAY: Literal[5] -SUNDAY: Literal[6] +if sys.version_info >= (3, 12): + class Month(enum.IntEnum): + JANUARY: Literal[1] + FEBRUARY: Literal[2] + MARCH: Literal[3] + APRIL: Literal[4] + MAY: Literal[5] + JUNE: Literal[6] + JULY: Literal[7] + AUGUST: Literal[8] + SEPTEMBER: Literal[9] + OCTOBER: Literal[10] + NOVEMBER: Literal[11] + DECEMBER: Literal[12] + JANUARY = Month.JANUARY + FEBRUARY = Month.FEBRUARY + MARCH = Month.MARCH + APRIL = Month.APRIL + MAY = Month.MAY + JUNE = Month.JUNE + JULY = Month.JULY + AUGUST = Month.AUGUST + SEPTEMBER = Month.SEPTEMBER + OCTOBER = Month.OCTOBER + NOVEMBER = Month.NOVEMBER + DECEMBER = Month.DECEMBER + + class Day(enum.IntEnum): + MONDAY: Literal[0] + TUESDAY: Literal[1] + WEDNESDAY: Literal[2] + THURSDAY: Literal[3] + FRIDAY: Literal[4] + SATURDAY: Literal[5] + SUNDAY: Literal[6] + MONDAY = Day.MONDAY + TUESDAY = Day.TUESDAY + WEDNESDAY = Day.WEDNESDAY + THURSDAY = Day.THURSDAY + FRIDAY = Day.FRIDAY + SATURDAY = Day.SATURDAY + SUNDAY = Day.SUNDAY +else: + MONDAY: Literal[0] + TUESDAY: Literal[1] + WEDNESDAY: Literal[2] + THURSDAY: Literal[3] + FRIDAY: Literal[4] + SATURDAY: Literal[5] + SUNDAY: Literal[6] EPOCH: Literal[1970] diff --git a/mypy/typeshed/stdlib/codecs.pyi b/mypy/typeshed/stdlib/codecs.pyi index 3f6d2d3d16b7..c9b6a4a82da6 100644 --- a/mypy/typeshed/stdlib/codecs.pyi +++ b/mypy/typeshed/stdlib/codecs.pyi @@ -96,6 +96,7 @@ class _IncrementalDecoder(Protocol): def __call__(self, errors: str = ...) -> IncrementalDecoder: ... class CodecInfo(tuple[_Encoder, _Decoder, _StreamReader, _StreamWriter]): + _is_text_encoding: bool @property def encode(self) -> _Encoder: ... 
@property diff --git a/mypy/typeshed/stdlib/collections/__init__.pyi b/mypy/typeshed/stdlib/collections/__init__.pyi index d5ca17c749eb..8ceecd1f354e 100644 --- a/mypy/typeshed/stdlib/collections/__init__.pyi +++ b/mypy/typeshed/stdlib/collections/__init__.pyi @@ -1,6 +1,6 @@ import sys from _collections_abc import dict_items, dict_keys, dict_values -from _typeshed import SupportsKeysAndGetItem, SupportsRichComparison, SupportsRichComparisonT +from _typeshed import SupportsItems, SupportsKeysAndGetItem, SupportsRichComparison, SupportsRichComparisonT from typing import Any, Generic, NoReturn, TypeVar, overload from typing_extensions import Self, SupportsIndex, final @@ -83,8 +83,14 @@ class UserDict(MutableMapping[_KT, _VT], Generic[_KT, _VT]): @overload def fromkeys(cls, iterable: Iterable[_T], value: _S) -> UserDict[_T, _S]: ... if sys.version_info >= (3, 9): + @overload + def __or__(self, other: UserDict[_KT, _VT] | dict[_KT, _VT]) -> Self: ... + @overload def __or__(self, other: UserDict[_T1, _T2] | dict[_T1, _T2]) -> UserDict[_KT | _T1, _VT | _T2]: ... - def __ror__(self, other: UserDict[_T1, _T2] | dict[_T1, _T2]) -> UserDict[_KT | _T1, _VT | _T2]: ... # type: ignore[misc] + @overload # type: ignore[misc] + def __ror__(self, other: UserDict[_KT, _VT] | dict[_KT, _VT]) -> Self: ... + @overload # type: ignore[misc] + def __ror__(self, other: UserDict[_T1, _T2] | dict[_T1, _T2]) -> UserDict[_KT | _T1, _VT | _T2]: ... # UserDict.__ior__ should be kept roughly in line with MutableMapping.update() @overload # type: ignore[misc] def __ior__(self, other: SupportsKeysAndGetItem[_KT, _VT]) -> Self: ... @@ -147,6 +153,7 @@ class UserString(Sequence[UserString]): def __gt__(self, string: str | UserString) -> bool: ... def __ge__(self, string: str | UserString) -> bool: ... def __eq__(self, string: object) -> bool: ... + def __hash__(self) -> int: ... def __contains__(self, char: object) -> bool: ... def __len__(self) -> int: ... def __getitem__(self, index: SupportsIndex | slice) -> Self: ... @@ -251,6 +258,7 @@ class deque(MutableSequence[_T], Generic[_T]): def __le__(self, __value: deque[_T]) -> bool: ... def __gt__(self, __value: deque[_T]) -> bool: ... def __ge__(self, __value: deque[_T]) -> bool: ... + def __eq__(self, __value: object) -> bool: ... if sys.version_info >= (3, 9): def __class_getitem__(cls, __item: Any) -> GenericAlias: ... @@ -299,10 +307,10 @@ class Counter(dict[_T, int], Generic[_T]): def __pos__(self) -> Counter[_T]: ... def __neg__(self) -> Counter[_T]: ... # several type: ignores because __iadd__ is supposedly incompatible with __add__, etc. - def __iadd__(self, other: Counter[_T]) -> Self: ... # type: ignore[misc] - def __isub__(self, other: Counter[_T]) -> Self: ... - def __iand__(self, other: Counter[_T]) -> Self: ... - def __ior__(self, other: Counter[_T]) -> Self: ... # type: ignore[override,misc] + def __iadd__(self, other: SupportsItems[_T, int]) -> Self: ... # type: ignore[misc] + def __isub__(self, other: SupportsItems[_T, int]) -> Self: ... + def __iand__(self, other: SupportsItems[_T, int]) -> Self: ... + def __ior__(self, other: SupportsItems[_T, int]) -> Self: ... # type: ignore[override,misc] if sys.version_info >= (3, 10): def total(self) -> int: ... def __le__(self, other: Counter[Any]) -> bool: ... @@ -359,6 +367,7 @@ class OrderedDict(dict[_KT, _VT], Reversible[_KT], Generic[_KT, _VT]): def setdefault(self: OrderedDict[_KT, _T | None], key: _KT, default: None = None) -> _T | None: ... @overload def setdefault(self, key: _KT, default: _VT) -> _VT: ... 
+ def __eq__(self, __value: object) -> bool: ... class defaultdict(dict[_KT, _VT], Generic[_KT, _VT]): default_factory: Callable[[], _VT] | None @@ -391,6 +400,15 @@ class defaultdict(dict[_KT, _VT], Generic[_KT, _VT]): def __missing__(self, __key: _KT) -> _VT: ... def __copy__(self) -> Self: ... def copy(self) -> Self: ... + if sys.version_info >= (3, 9): + @overload + def __or__(self, __value: Mapping[_KT, _VT]) -> Self: ... + @overload + def __or__(self, __value: Mapping[_T1, _T2]) -> defaultdict[_KT | _T1, _VT | _T2]: ... + @overload + def __ror__(self, __value: Mapping[_KT, _VT]) -> Self: ... + @overload + def __ror__(self, __value: Mapping[_T1, _T2]) -> defaultdict[_KT | _T1, _VT | _T2]: ... class ChainMap(MutableMapping[_KT, _VT], Generic[_KT, _VT]): maps: list[MutableMapping[_KT, _VT]] @@ -414,7 +432,9 @@ class ChainMap(MutableMapping[_KT, _VT], Generic[_KT, _VT]): @overload def pop(self, key: _KT) -> _VT: ... @overload - def pop(self, key: _KT, default: _VT | _T) -> _VT | _T: ... + def pop(self, key: _KT, default: _VT) -> _VT: ... + @overload + def pop(self, key: _KT, default: _T) -> _VT | _T: ... def copy(self) -> Self: ... __copy__ = copy # All arguments to `fromkeys` are passed to `dict.fromkeys` at runtime, so the signature should be kept in line with `dict.fromkeys`. @@ -425,7 +445,13 @@ class ChainMap(MutableMapping[_KT, _VT], Generic[_KT, _VT]): @overload def fromkeys(cls, __iterable: Iterable[_T], __value: _S) -> ChainMap[_T, _S]: ... if sys.version_info >= (3, 9): + @overload + def __or__(self, other: Mapping[_KT, _VT]) -> Self: ... + @overload def __or__(self, other: Mapping[_T1, _T2]) -> ChainMap[_KT | _T1, _VT | _T2]: ... + @overload + def __ror__(self, other: Mapping[_KT, _VT]) -> Self: ... + @overload def __ror__(self, other: Mapping[_T1, _T2]) -> ChainMap[_KT | _T1, _VT | _T2]: ... # ChainMap.__ior__ should be kept roughly in line with MutableMapping.update() @overload # type: ignore[misc] diff --git a/mypy/typeshed/stdlib/contextvars.pyi b/mypy/typeshed/stdlib/contextvars.pyi index ef6e2700e667..63b5f80aea6c 100644 --- a/mypy/typeshed/stdlib/contextvars.pyi +++ b/mypy/typeshed/stdlib/contextvars.pyi @@ -18,16 +18,21 @@ class ContextVar(Generic[_T]): def __init__(self, name: str) -> None: ... @overload def __init__(self, name: str, *, default: _T) -> None: ... + def __hash__(self) -> int: ... @property def name(self) -> str: ... @overload def get(self) -> _T: ... if sys.version_info >= (3, 8): @overload - def get(self, default: _D | _T) -> _D | _T: ... + def get(self, default: _T) -> _T: ... + @overload + def get(self, default: _D) -> _D | _T: ... else: @overload - def get(self, __default: _D | _T) -> _D | _T: ... + def get(self, __default: _T) -> _T: ... + @overload + def get(self, __default: _D) -> _D | _T: ... def set(self, __value: _T) -> Token[_T]: ... def reset(self, __token: Token[_T]) -> None: ... @@ -52,7 +57,9 @@ def copy_context() -> Context: ... class Context(Mapping[ContextVar[Any], Any]): def __init__(self) -> None: ... @overload - def get(self, __key: ContextVar[_T]) -> _T | None: ... + def get(self, __key: ContextVar[_T], __default: None = None) -> _T | None: ... # type: ignore[misc] # overlapping overloads + @overload + def get(self, __key: ContextVar[_T], __default: _T) -> _T: ... @overload def get(self, __key: ContextVar[_T], __default: _D) -> _T | _D: ... def run(self, callable: Callable[_P, _T], *args: _P.args, **kwargs: _P.kwargs) -> _T: ... 
@@ -60,3 +67,4 @@ class Context(Mapping[ContextVar[Any], Any]): def __getitem__(self, __key: ContextVar[_T]) -> _T: ... def __iter__(self) -> Iterator[ContextVar[Any]]: ... def __len__(self) -> int: ... + def __eq__(self, __value: object) -> bool: ... diff --git a/mypy/typeshed/stdlib/csv.pyi b/mypy/typeshed/stdlib/csv.pyi index 59f2e7a3c96b..139ba7af2208 100644 --- a/mypy/typeshed/stdlib/csv.pyi +++ b/mypy/typeshed/stdlib/csv.pyi @@ -21,6 +21,9 @@ from _csv import ( unregister_dialect as unregister_dialect, writer as writer, ) + +if sys.version_info >= (3, 12): + from _csv import QUOTE_STRINGS as QUOTE_STRINGS, QUOTE_NOTNULL as QUOTE_NOTNULL from _typeshed import SupportsWrite from collections.abc import Collection, Iterable, Iterator, Mapping, Sequence from typing import Any, Generic, TypeVar, overload @@ -57,6 +60,8 @@ __all__ = [ "DictWriter", "unix_dialect", ] +if sys.version_info >= (3, 12): + __all__ += ["QUOTE_STRINGS", "QUOTE_NOTNULL"] _T = TypeVar("_T") diff --git a/mypy/typeshed/stdlib/ctypes/__init__.pyi b/mypy/typeshed/stdlib/ctypes/__init__.pyi index 7a185a5b523e..b14fb93c8163 100644 --- a/mypy/typeshed/stdlib/ctypes/__init__.pyi +++ b/mypy/typeshed/stdlib/ctypes/__init__.pyi @@ -181,6 +181,9 @@ class c_bool(_SimpleCData[bool]): if sys.platform == "win32": class HRESULT(_SimpleCData[int]): ... # TODO undocumented +if sys.version_info >= (3, 12): + c_time_t: type[c_int32 | c_int64] + class py_object(_CanCastTo, _SimpleCData[_T]): ... class BigEndianStructure(Structure): ... class LittleEndianStructure(Structure): ... diff --git a/mypy/typeshed/stdlib/datetime.pyi b/mypy/typeshed/stdlib/datetime.pyi index 2bb2264c97b1..36577c5b7e1b 100644 --- a/mypy/typeshed/stdlib/datetime.pyi +++ b/mypy/typeshed/stdlib/datetime.pyi @@ -35,6 +35,8 @@ class timezone(tzinfo): def tzname(self, __dt: datetime | None) -> str: ... def utcoffset(self, __dt: datetime | None) -> timedelta: ... def dst(self, __dt: datetime | None) -> None: ... + def __hash__(self) -> int: ... + def __eq__(self, __value: object) -> bool: ... if sys.version_info >= (3, 11): UTC: timezone @@ -86,6 +88,7 @@ class date: def __lt__(self, __value: date) -> bool: ... def __ge__(self, __value: date) -> bool: ... def __gt__(self, __value: date) -> bool: ... + def __eq__(self, __value: object) -> bool: ... if sys.version_info >= (3, 8): def __add__(self, __value: timedelta) -> Self: ... def __radd__(self, __value: timedelta) -> Self: ... @@ -106,6 +109,7 @@ class date: @overload def __sub__(self, __value: date) -> timedelta: ... + def __hash__(self) -> int: ... def weekday(self) -> int: ... def isoweekday(self) -> int: ... if sys.version_info >= (3, 9): @@ -143,6 +147,8 @@ class time: def __lt__(self, __value: time) -> bool: ... def __ge__(self, __value: time) -> bool: ... def __gt__(self, __value: time) -> bool: ... + def __eq__(self, __value: object) -> bool: ... + def __hash__(self) -> int: ... def isoformat(self, timespec: str = ...) -> str: ... @classmethod def fromisoformat(cls, __time_string: str) -> Self: ... @@ -216,7 +222,9 @@ class timedelta: def __lt__(self, __value: timedelta) -> bool: ... def __ge__(self, __value: timedelta) -> bool: ... def __gt__(self, __value: timedelta) -> bool: ... + def __eq__(self, __value: object) -> bool: ... def __bool__(self) -> bool: ... + def __hash__(self) -> int: ... class datetime(date): min: ClassVar[datetime] @@ -306,6 +314,8 @@ class datetime(date): def __lt__(self, __value: datetime) -> bool: ... # type: ignore[override] def __ge__(self, __value: datetime) -> bool: ... 
# type: ignore[override] def __gt__(self, __value: datetime) -> bool: ... # type: ignore[override] + def __eq__(self, __value: object) -> bool: ... + def __hash__(self) -> int: ... if sys.version_info >= (3, 8): @overload # type: ignore[override] def __sub__(self, __value: timedelta) -> Self: ... diff --git a/mypy/typeshed/stdlib/dis.pyi b/mypy/typeshed/stdlib/dis.pyi index d153771e676b..ab101a517a6f 100644 --- a/mypy/typeshed/stdlib/dis.pyi +++ b/mypy/typeshed/stdlib/dis.pyi @@ -29,9 +29,12 @@ __all__ = [ "opmap", "HAVE_ARGUMENT", "EXTENDED_ARG", - "hasnargs", "stack_effect", ] +if sys.version_info >= (3, 12): + __all__ += ["hasarg", "hasexc"] +else: + __all__ += ["hasnargs"] # Strictly this should not have to include Callable, but mypy doesn't use FunctionType # for functions (python/mypy#3171) diff --git a/mypy/typeshed/stdlib/distutils/__init__.pyi b/mypy/typeshed/stdlib/distutils/__init__.pyi index e69de29bb2d1..328a5b783441 100644 --- a/mypy/typeshed/stdlib/distutils/__init__.pyi +++ b/mypy/typeshed/stdlib/distutils/__init__.pyi @@ -0,0 +1,5 @@ +# Attempts to improve these stubs are probably not the best use of time: +# - distutils is deleted in Python 3.12 and newer +# - Most users already do not use stdlib distutils, due to setuptools monkeypatching +# - We have very little quality assurance on these stubs, since due to the two above issues +# we allowlist all distutils errors in stubtest. diff --git a/mypy/typeshed/stdlib/doctest.pyi b/mypy/typeshed/stdlib/doctest.pyi index 88d066fdc23c..f3c05781ad92 100644 --- a/mypy/typeshed/stdlib/doctest.pyi +++ b/mypy/typeshed/stdlib/doctest.pyi @@ -85,6 +85,7 @@ class Example: indent: int = 0, options: dict[int, bool] | None = None, ) -> None: ... + def __hash__(self) -> int: ... def __eq__(self, other: object) -> bool: ... class DocTest: @@ -103,6 +104,7 @@ class DocTest: lineno: int | None, docstring: str | None, ) -> None: ... + def __hash__(self) -> int: ... def __lt__(self, other: DocTest) -> bool: ... def __eq__(self, other: object) -> bool: ... @@ -210,6 +212,7 @@ class DocTestCase(unittest.TestCase): ) -> None: ... def runTest(self) -> None: ... def format_failure(self, err: str) -> str: ... + def __hash__(self) -> int: ... def __eq__(self, other: object) -> bool: ... class SkipDocTestCase(DocTestCase): diff --git a/mypy/typeshed/stdlib/email/charset.pyi b/mypy/typeshed/stdlib/email/charset.pyi index e612847c75b6..f8de016ab8bf 100644 --- a/mypy/typeshed/stdlib/email/charset.pyi +++ b/mypy/typeshed/stdlib/email/charset.pyi @@ -1,4 +1,6 @@ -from collections.abc import Iterator +from collections.abc import Callable, Iterator +from email.message import Message +from typing import overload __all__ = ["Charset", "add_alias", "add_charset", "add_codec"] @@ -14,11 +16,14 @@ class Charset: input_codec: str | None output_codec: str | None def __init__(self, input_charset: str = "us-ascii") -> None: ... - def get_body_encoding(self) -> str: ... + def get_body_encoding(self) -> str | Callable[[Message], None]: ... def get_output_charset(self) -> str | None: ... def header_encode(self, string: str) -> str: ... - def header_encode_lines(self, string: str, maxlengths: Iterator[int]) -> list[str]: ... - def body_encode(self, string: str) -> str: ... + def header_encode_lines(self, string: str, maxlengths: Iterator[int]) -> list[str | None]: ... + @overload + def body_encode(self, string: None) -> None: ... + @overload + def body_encode(self, string: str | bytes) -> str: ... def __eq__(self, other: object) -> bool: ... 
def __ne__(self, __value: object) -> bool: ... diff --git a/mypy/typeshed/stdlib/email/generator.pyi b/mypy/typeshed/stdlib/email/generator.pyi index 8362dd9c4ff6..faa6551fc925 100644 --- a/mypy/typeshed/stdlib/email/generator.pyi +++ b/mypy/typeshed/stdlib/email/generator.pyi @@ -1,11 +1,12 @@ from _typeshed import SupportsWrite from email.message import Message from email.policy import Policy +from typing_extensions import Self __all__ = ["Generator", "DecodedGenerator", "BytesGenerator"] class Generator: - def clone(self, fp: SupportsWrite[str]) -> Generator: ... + def clone(self, fp: SupportsWrite[str]) -> Self: ... def write(self, s: str) -> None: ... def __init__( self, @@ -17,9 +18,7 @@ class Generator: ) -> None: ... def flatten(self, msg: Message, unixfrom: bool = False, linesep: str | None = None) -> None: ... -class BytesGenerator: - def clone(self, fp: SupportsWrite[bytes]) -> BytesGenerator: ... - def write(self, s: str) -> None: ... +class BytesGenerator(Generator): def __init__( self, outfp: SupportsWrite[bytes], @@ -28,7 +27,6 @@ class BytesGenerator: *, policy: Policy | None = None, ) -> None: ... - def flatten(self, msg: Message, unixfrom: bool = False, linesep: str | None = None) -> None: ... class DecodedGenerator(Generator): def __init__( diff --git a/mypy/typeshed/stdlib/email/policy.pyi b/mypy/typeshed/stdlib/email/policy.pyi index 4df3c1e48b07..804044031fcd 100644 --- a/mypy/typeshed/stdlib/email/policy.pyi +++ b/mypy/typeshed/stdlib/email/policy.pyi @@ -5,6 +5,7 @@ from email.errors import MessageDefect from email.header import Header from email.message import Message from typing import Any +from typing_extensions import Self __all__ = ["Compat32", "compat32", "Policy", "EmailPolicy", "default", "strict", "SMTP", "HTTP"] @@ -25,7 +26,7 @@ class Policy(metaclass=ABCMeta): mangle_from_: bool = ..., message_factory: Callable[[Policy], Message] | None = ..., ) -> None: ... - def clone(self, **kw: Any) -> Policy: ... + def clone(self, **kw: Any) -> Self: ... def handle_defect(self, obj: Message, defect: MessageDefect) -> None: ... def register_defect(self, obj: Message, defect: MessageDefect) -> None: ... def header_max_count(self, name: str) -> int | None: ... @@ -52,7 +53,7 @@ compat32: Compat32 class EmailPolicy(Policy): utf8: bool refold_source: str - header_factory: Callable[[str, str], str] + header_factory: Callable[[str, Any], Any] content_manager: ContentManager def __init__( self, @@ -69,9 +70,9 @@ class EmailPolicy(Policy): content_manager: ContentManager = ..., ) -> None: ... def header_source_parse(self, sourcelines: list[str]) -> tuple[str, str]: ... - def header_store_parse(self, name: str, value: str) -> tuple[str, str]: ... - def header_fetch_parse(self, name: str, value: str) -> str: ... - def fold(self, name: str, value: str) -> str: ... + def header_store_parse(self, name: str, value: Any) -> tuple[str, Any]: ... + def header_fetch_parse(self, name: str, value: str) -> Any: ... + def fold(self, name: str, value: str) -> Any: ... def fold_binary(self, name: str, value: str) -> bytes: ... default: EmailPolicy diff --git a/mypy/typeshed/stdlib/email/utils.pyi b/mypy/typeshed/stdlib/email/utils.pyi index 090ddf9e31bc..186e768050be 100644 --- a/mypy/typeshed/stdlib/email/utils.pyi +++ b/mypy/typeshed/stdlib/email/utils.pyi @@ -1,5 +1,6 @@ import datetime import sys +from _typeshed import Unused from email import _ParamType from email.charset import Charset from typing import overload @@ -51,9 +52,15 @@ else: def mktime_tz(data: _PDTZ) -> int: ... 
def formatdate(timeval: float | None = None, localtime: bool = False, usegmt: bool = False) -> str: ... def format_datetime(dt: datetime.datetime, usegmt: bool = False) -> str: ... -def localtime(dt: datetime.datetime | None = None, isdst: int = -1) -> datetime.datetime: ... + +if sys.version_info >= (3, 12): + def localtime(dt: datetime.datetime | None = None, isdst: Unused = None) -> datetime.datetime: ... + +else: + def localtime(dt: datetime.datetime | None = None, isdst: int = -1) -> datetime.datetime: ... + def make_msgid(idstring: str | None = None, domain: str | None = None) -> str: ... -def decode_rfc2231(s: str) -> tuple[str | None, str | None, str]: ... +def decode_rfc2231(s: str) -> tuple[str | None, str | None, str]: ... # May return list[str]. See issue #10431 for details. def encode_rfc2231(s: str, charset: str | None = None, language: str | None = None) -> str: ... def collapse_rfc2231_value(value: _ParamType, errors: str = "replace", fallback_charset: str = "us-ascii") -> str: ... def decode_params(params: list[tuple[str, str]]) -> list[tuple[str, _ParamType]]: ... diff --git a/mypy/typeshed/stdlib/enum.pyi b/mypy/typeshed/stdlib/enum.pyi index 383c336ed2c7..a8ba7bf157c2 100644 --- a/mypy/typeshed/stdlib/enum.pyi +++ b/mypy/typeshed/stdlib/enum.pyi @@ -2,9 +2,8 @@ import _typeshed import sys import types from _typeshed import SupportsKeysAndGetItem, Unused -from abc import ABCMeta from builtins import property as _builtins_property -from collections.abc import Iterable, Iterator, Mapping +from collections.abc import Callable, Iterable, Iterator, Mapping from typing import Any, Generic, TypeVar, overload from typing_extensions import Literal, Self, TypeAlias @@ -34,6 +33,9 @@ if sys.version_info >= (3, 11): "verify", ] +if sys.version_info >= (3, 12): + __all__ += ["pickle_by_enum_name", "pickle_by_global_name"] + _EnumMemberT = TypeVar("_EnumMemberT") _EnumerationT = TypeVar("_EnumerationT", bound=type[Enum]) @@ -73,12 +75,8 @@ class _EnumDict(dict[str, Any]): @overload def update(self, members: Iterable[tuple[str, Any]], **more_members: Any) -> None: ... -# Note: EnumMeta actually subclasses type directly, not ABCMeta. -# This is a temporary workaround to allow multiple creation of enums with builtins -# such as str as mixins, which due to the handling of ABCs of builtin types, cause -# spurious inconsistent metaclass structure. See #1595. # Structurally: Iterable[T], Reversible[T], Container[T] where T is the enum itself -class EnumMeta(ABCMeta): +class EnumMeta(type): if sys.version_info >= (3, 11): def __new__( metacls: type[_typeshed.Self], @@ -187,8 +185,12 @@ class Enum(metaclass=EnumMeta): # and in practice using `object` here has the same effect as using `Any`. def __new__(cls, value: object) -> Self: ... def __dir__(self) -> list[str]: ... + def __hash__(self) -> int: ... def __format__(self, format_spec: str) -> str: ... def __reduce_ex__(self, proto: Unused) -> tuple[Any, ...]: ... + if sys.version_info >= (3, 12): + def __copy__(self) -> Self: ... + def __deepcopy__(self, memo: Any) -> Self: ... if sys.version_info >= (3, 11): class ReprEnum(Enum): ... @@ -234,6 +236,8 @@ if sys.version_info >= (3, 11): _value_: str @_magic_enum_attr def value(self) -> str: ... + @staticmethod + def _generate_next_value_(name: str, start: int, count: int, last_values: list[str]) -> str: ... class EnumCheck(StrEnum): CONTINUOUS: str @@ -289,3 +293,7 @@ class auto(IntFlag): @_magic_enum_attr def value(self) -> Any: ... def __new__(cls) -> Self: ... 
+ +if sys.version_info >= (3, 12): + def pickle_by_global_name(self: Enum, proto: int) -> str: ... + def pickle_by_enum_name(self: _EnumMemberT, proto: int) -> tuple[Callable[..., Any], tuple[type[_EnumMemberT], str]]: ... diff --git a/mypy/typeshed/stdlib/errno.pyi b/mypy/typeshed/stdlib/errno.pyi index 28874d44ff5f..84d2b44a6a61 100644 --- a/mypy/typeshed/stdlib/errno.pyi +++ b/mypy/typeshed/stdlib/errno.pyi @@ -91,9 +91,15 @@ ECANCELED: int # undocumented ENOTRECOVERABLE: int # undocumented EOWNERDEAD: int # undocumented +if sys.platform == "sunos5" or sys.platform == "solaris": # noqa: Y008 + ELOCKUNMAPPED: int + ENOTACTIVE: int + if sys.platform != "win32": ENOTBLK: int EMULTIHOP: int + +if sys.platform == "darwin": # All of the below are undocumented EAUTH: int EBADARCH: int @@ -112,9 +118,8 @@ if sys.platform != "win32": EPWROFF: int ERPCMISMATCH: int ESHLIBVERS: int - - if sys.platform != "darwin" or sys.version_info >= (3, 11): - EQFULL: int # undocumented + if sys.version_info >= (3, 11): + EQFULL: int if sys.platform != "darwin": EDEADLOCK: int @@ -164,9 +169,6 @@ if sys.platform != "win32" and sys.platform != "darwin": ENOKEY: int ENOMEDIUM: int ERFKILL: int - EL: int - ELOCKUNMAPPED: int - ENOTACTIVE: int if sys.platform == "win32": # All of these are undocumented diff --git a/mypy/typeshed/stdlib/fcntl.pyi b/mypy/typeshed/stdlib/fcntl.pyi index 01443083f48d..6aec7515f330 100644 --- a/mypy/typeshed/stdlib/fcntl.pyi +++ b/mypy/typeshed/stdlib/fcntl.pyi @@ -20,6 +20,9 @@ if sys.platform != "win32": F_SETOWN: int F_UNLCK: int F_WRLCK: int + + F_GETLEASE: int + F_SETLEASE: int if sys.platform == "darwin": F_FULLFSYNC: int F_NOCACHE: int @@ -30,11 +33,9 @@ if sys.platform != "win32": F_SETSIG: int F_SHLCK: int F_SETLK64: int - F_SETLEASE: int F_GETSIG: int F_NOTIFY: int F_EXLCK: int - F_GETLEASE: int F_GETLK64: int if sys.version_info >= (3, 8): F_ADD_SEALS: int diff --git a/mypy/typeshed/stdlib/ftplib.pyi b/mypy/typeshed/stdlib/ftplib.pyi index 36a213d48680..2d2ffa9aff03 100644 --- a/mypy/typeshed/stdlib/ftplib.pyi +++ b/mypy/typeshed/stdlib/ftplib.pyi @@ -87,7 +87,7 @@ class FTP: def makepasv(self) -> tuple[str, int]: ... def login(self, user: str = "", passwd: str = "", acct: str = "") -> str: ... # In practice, `rest` can actually be anything whose str() is an integer sequence, so to make it simple we allow integers. - def ntransfercmd(self, cmd: str, rest: int | str | None = None) -> tuple[socket, int]: ... + def ntransfercmd(self, cmd: str, rest: int | str | None = None) -> tuple[socket, int | None]: ... def transfercmd(self, cmd: str, rest: int | str | None = None) -> socket: ... def retrbinary( self, cmd: str, callback: Callable[[bytes], object], blocksize: int = 8192, rest: int | str | None = None @@ -118,7 +118,20 @@ class FTP: def close(self) -> None: ... class FTP_TLS(FTP): - if sys.version_info >= (3, 9): + if sys.version_info >= (3, 12): + def __init__( + self, + host: str = "", + user: str = "", + passwd: str = "", + acct: str = "", + *, + context: SSLContext | None = None, + timeout: float = ..., + source_address: tuple[str, int] | None = None, + encoding: str = "utf-8", + ) -> None: ...
+ elif sys.version_info >= (3, 9): def __init__( self, host: str = "", diff --git a/mypy/typeshed/stdlib/functools.pyi b/mypy/typeshed/stdlib/functools.pyi index d01fd8ce55cb..1b4e59b7c120 100644 --- a/mypy/typeshed/stdlib/functools.pyi +++ b/mypy/typeshed/stdlib/functools.pyi @@ -70,22 +70,47 @@ if sys.version_info >= (3, 8): else: def lru_cache(maxsize: int | None = 128, typed: bool = False) -> Callable[[Callable[..., _T]], _lru_cache_wrapper[_T]]: ... -WRAPPER_ASSIGNMENTS: tuple[ - Literal["__module__"], Literal["__name__"], Literal["__qualname__"], Literal["__doc__"], Literal["__annotations__"] -] +if sys.version_info >= (3, 12): + WRAPPER_ASSIGNMENTS: tuple[ + Literal["__module__"], + Literal["__name__"], + Literal["__qualname__"], + Literal["__doc__"], + Literal["__annotations__"], + Literal["__type_params__"], + ] +else: + WRAPPER_ASSIGNMENTS: tuple[ + Literal["__module__"], Literal["__name__"], Literal["__qualname__"], Literal["__doc__"], Literal["__annotations__"] + ] WRAPPER_UPDATES: tuple[Literal["__dict__"]] -def update_wrapper( - wrapper: _T, - wrapped: _AnyCallable, - assigned: Sequence[str] = ("__module__", "__name__", "__qualname__", "__doc__", "__annotations__"), - updated: Sequence[str] = ("__dict__",), -) -> _T: ... -def wraps( - wrapped: _AnyCallable, - assigned: Sequence[str] = ("__module__", "__name__", "__qualname__", "__doc__", "__annotations__"), - updated: Sequence[str] = ("__dict__",), -) -> IdentityFunction: ... +if sys.version_info >= (3, 12): + def update_wrapper( + wrapper: _T, + wrapped: _AnyCallable, + assigned: Sequence[str] = ("__module__", "__name__", "__qualname__", "__doc__", "__annotations__", "__type_params__"), + updated: Sequence[str] = ("__dict__",), + ) -> _T: ... + def wraps( + wrapped: _AnyCallable, + assigned: Sequence[str] = ("__module__", "__name__", "__qualname__", "__doc__", "__annotations__", "__type_params__"), + updated: Sequence[str] = ("__dict__",), + ) -> IdentityFunction: ... + +else: + def update_wrapper( + wrapper: _T, + wrapped: _AnyCallable, + assigned: Sequence[str] = ("__module__", "__name__", "__qualname__", "__doc__", "__annotations__"), + updated: Sequence[str] = ("__dict__",), + ) -> _T: ... + def wraps( + wrapped: _AnyCallable, + assigned: Sequence[str] = ("__module__", "__name__", "__qualname__", "__doc__", "__annotations__"), + updated: Sequence[str] = ("__dict__",), + ) -> IdentityFunction: ... + def total_ordering(cls: type[_T]) -> type[_T]: ... def cmp_to_key(mycmp: Callable[[_T, _T], int]) -> Callable[[_T], SupportsAllComparisons]: ... diff --git a/mypy/typeshed/stdlib/http/__init__.pyi b/mypy/typeshed/stdlib/http/__init__.pyi index d4b44f2eb99b..4310c79b9269 100644 --- a/mypy/typeshed/stdlib/http/__init__.pyi +++ b/mypy/typeshed/stdlib/http/__init__.pyi @@ -79,6 +79,17 @@ class HTTPStatus(IntEnum): EARLY_HINTS: Literal[103] IM_A_TEAPOT: Literal[418] TOO_EARLY: Literal[425] + if sys.version_info >= (3, 12): + @property + def is_informational(self) -> bool: ... + @property + def is_success(self) -> bool: ... + @property + def is_redirection(self) -> bool: ... + @property + def is_client_error(self) -> bool: ... + @property + def is_server_error(self) -> bool: ... 
if sys.version_info >= (3, 11): class HTTPMethod(StrEnum): diff --git a/mypy/typeshed/stdlib/http/client.pyi b/mypy/typeshed/stdlib/http/client.pyi index 9c7c0c1c4a12..4b5ed3d8bda0 100644 --- a/mypy/typeshed/stdlib/http/client.pyi +++ b/mypy/typeshed/stdlib/http/client.pyi @@ -1,6 +1,7 @@ import email.message import io import ssl +import sys import types from _typeshed import ReadableBuffer, SupportsRead, WriteableBuffer from collections.abc import Callable, Iterable, Iterator, Mapping @@ -114,6 +115,10 @@ class HTTPResponse(io.BufferedIOBase, BinaryIO): # type: ignore[misc] # incomp chunk_left: int | None length: int | None will_close: bool + # url is set on instances of the class in urllib.request.AbstractHTTPHandler.do_open + # to match urllib.response.addinfourl's interface. + # It's not set in HTTPResponse.__init__ or any other method on the class + url: str def __init__(self, sock: socket, debuglevel: int = 0, method: str | None = None, url: str | None = None) -> None: ... def peek(self, n: int = -1) -> bytes: ... def read(self, amt: int | None = None) -> bytes: ... @@ -175,19 +180,31 @@ class HTTPConnection: class HTTPSConnection(HTTPConnection): # Can be `None` if `.connect()` was not called: sock: ssl.SSLSocket | Any - def __init__( - self, - host: str, - port: int | None = None, - key_file: str | None = None, - cert_file: str | None = None, - timeout: float | None = ..., - source_address: tuple[str, int] | None = None, - *, - context: ssl.SSLContext | None = None, - check_hostname: bool | None = None, - blocksize: int = 8192, - ) -> None: ... + if sys.version_info >= (3, 12): + def __init__( + self, + host: str, + port: str | None = None, + *, + timeout: float | None = ..., + source_address: tuple[str, int] | None = None, + context: ssl.SSLContext | None = None, + blocksize: int = 8192, + ) -> None: ... + else: + def __init__( + self, + host: str, + port: int | None = None, + key_file: str | None = None, + cert_file: str | None = None, + timeout: float | None = ..., + source_address: tuple[str, int] | None = None, + *, + context: ssl.SSLContext | None = None, + check_hostname: bool | None = None, + blocksize: int = 8192, + ) -> None: ... class HTTPException(Exception): ... diff --git a/mypy/typeshed/stdlib/importlib/machinery.pyi b/mypy/typeshed/stdlib/importlib/machinery.pyi index 5aaefce87e3a..f5037da00d5f 100644 --- a/mypy/typeshed/stdlib/importlib/machinery.pyi +++ b/mypy/typeshed/stdlib/importlib/machinery.pyi @@ -148,3 +148,4 @@ class ExtensionFileLoader(importlib.abc.ExecutionLoader): def exec_module(self, module: types.ModuleType) -> None: ... def get_code(self, fullname: str) -> None: ... def __eq__(self, other: object) -> bool: ... + def __hash__(self) -> int: ... diff --git a/mypy/typeshed/stdlib/importlib/metadata/__init__.pyi b/mypy/typeshed/stdlib/importlib/metadata/__init__.pyi index 083453cd3c9a..0f8a6f56cf88 100644 --- a/mypy/typeshed/stdlib/importlib/metadata/__init__.pyi +++ b/mypy/typeshed/stdlib/importlib/metadata/__init__.pyi @@ -66,6 +66,9 @@ class EntryPoint(_EntryPointBase): extras: list[str] = ..., ) -> bool: ... # undocumented + def __hash__(self) -> int: ... + def __eq__(self, other: object) -> bool: ... + if sys.version_info >= (3, 10): class EntryPoints(list[EntryPoint]): # use as list is deprecated since 3.10 # int argument is deprecated since 3.10 @@ -177,6 +180,7 @@ class MetadataPathFinder(DistributionFinder): def invalidate_caches(cls) -> None: ... class PathDistribution(Distribution): + _path: Path def __init__(self, path: Path) -> None: ... 
def read_text(self, filename: StrPath) -> str: ... def locate_file(self, path: StrPath) -> PathLike[str]: ... diff --git a/mypy/typeshed/stdlib/inspect.pyi b/mypy/typeshed/stdlib/inspect.pyi index 2d004a8e6b57..601d23e786ac 100644 --- a/mypy/typeshed/stdlib/inspect.pyi +++ b/mypy/typeshed/stdlib/inspect.pyi @@ -2,6 +2,7 @@ import dis import enum import sys import types +from _typeshed import StrPath from collections import OrderedDict from collections.abc import AsyncGenerator, Awaitable, Callable, Coroutine, Generator, Mapping, Sequence, Set as AbstractSet from types import ( @@ -127,8 +128,21 @@ if sys.version_info >= (3, 11): "walktree", ] + if sys.version_info >= (3, 12): + __all__ += [ + "markcoroutinefunction", + "AGEN_CLOSED", + "AGEN_CREATED", + "AGEN_RUNNING", + "AGEN_SUSPENDED", + "getasyncgenlocals", + "getasyncgenstate", + "BufferFlags", + ] + _P = ParamSpec("_P") _T = TypeVar("_T") +_F = TypeVar("_F", bound=Callable[..., Any]) _T_cont = TypeVar("_T_cont", contravariant=True) _V_cont = TypeVar("_V_cont", contravariant=True) @@ -177,12 +191,15 @@ if sys.version_info >= (3, 11): @overload def getmembers_static(object: object, predicate: _GetMembersPredicate | None = None) -> _GetMembersReturn: ... -def getmodulename(path: str) -> str | None: ... +def getmodulename(path: StrPath) -> str | None: ... def ismodule(object: object) -> TypeGuard[ModuleType]: ... def isclass(object: object) -> TypeGuard[type[Any]]: ... def ismethod(object: object) -> TypeGuard[MethodType]: ... def isfunction(object: object) -> TypeGuard[FunctionType]: ... +if sys.version_info >= (3, 12): + def markcoroutinefunction(func: _F) -> _F: ... + if sys.version_info >= (3, 8): @overload def isgeneratorfunction(obj: Callable[..., Generator[Any, Any, Any]]) -> bool: ... @@ -337,6 +354,7 @@ class Signature: def from_callable(cls, obj: _IntrospectableCallable, *, follow_wrapped: bool = True) -> Self: ... def __eq__(self, other: object) -> bool: ... + def __hash__(self) -> int: ... if sys.version_info >= (3, 10): def get_annotations( @@ -359,6 +377,17 @@ class _ParameterKind(enum.IntEnum): @property def description(self) -> str: ... +if sys.version_info >= (3, 12): + AGEN_CREATED: Literal["AGEN_CREATED"] + AGEN_RUNNING: Literal["AGEN_RUNNING"] + AGEN_SUSPENDED: Literal["AGEN_SUSPENDED"] + AGEN_CLOSED: Literal["AGEN_CLOSED"] + + def getasyncgenstate( + agen: AsyncGenerator[Any, Any] + ) -> Literal["AGEN_CREATED", "AGEN_RUNNING", "AGEN_SUSPENDED", "AGEN_CLOSED"]: ... + def getasyncgenlocals(agen: AsyncGeneratorType[Any, Any]) -> dict[str, Any]: ... + class Parameter: def __init__(self, name: str, kind: _ParameterKind, *, default: Any = ..., annotation: Any = ...) -> None: ... empty = _empty @@ -385,6 +414,7 @@ class Parameter: annotation: Any = ..., ) -> Self: ... def __eq__(self, other: object) -> bool: ... + def __hash__(self) -> int: ... class BoundArguments: arguments: OrderedDict[str, Any] diff --git a/mypy/typeshed/stdlib/ipaddress.pyi b/mypy/typeshed/stdlib/ipaddress.pyi index 7a4146885b29..945e8bcbbdee 100644 --- a/mypy/typeshed/stdlib/ipaddress.pyi +++ b/mypy/typeshed/stdlib/ipaddress.pyi @@ -34,6 +34,7 @@ class _IPAddressBase: class _BaseAddress(_IPAddressBase, SupportsInt): def __init__(self, address: object) -> None: ... def __add__(self, other: int) -> Self: ... + def __hash__(self) -> int: ... def __int__(self) -> int: ... def __sub__(self, other: int) -> Self: ... 
if sys.version_info >= (3, 9): @@ -77,6 +78,7 @@ class _BaseNetwork(_IPAddressBase, Container[_A], Iterable[_A], Generic[_A]): def __getitem__(self, n: int) -> _A: ... def __iter__(self) -> Iterator[_A]: ... def __eq__(self, other: object) -> bool: ... + def __hash__(self) -> int: ... def __lt__(self, other: Self) -> bool: ... if sys.version_info >= (3, 11): def __ge__(self, other: Self) -> bool: ... @@ -147,7 +149,10 @@ class _BaseV4: class IPv4Address(_BaseV4, _BaseAddress): ... class IPv4Network(_BaseV4, _BaseNetwork[IPv4Address]): ... -class IPv4Interface(IPv4Address, _BaseInterface[IPv4Address, IPv4Network]): ... + +class IPv4Interface(IPv4Address, _BaseInterface[IPv4Address, IPv4Network]): + def __eq__(self, other: object) -> bool: ... + def __hash__(self) -> int: ... class _BaseV6: @property @@ -168,11 +173,16 @@ class IPv6Address(_BaseV6, _BaseAddress): @property def scope_id(self) -> str | None: ... + def __hash__(self) -> int: ... + def __eq__(self, other: object) -> bool: ... + class IPv6Network(_BaseV6, _BaseNetwork[IPv6Address]): @property def is_site_local(self) -> bool: ... -class IPv6Interface(IPv6Address, _BaseInterface[IPv6Address, IPv6Network]): ... +class IPv6Interface(IPv6Address, _BaseInterface[IPv6Address, IPv6Network]): + def __eq__(self, other: object) -> bool: ... + def __hash__(self) -> int: ... def v4_int_to_packed(address: int) -> bytes: ... def v6_int_to_packed(address: int) -> bytes: ... diff --git a/mypy/typeshed/stdlib/json/__init__.pyi b/mypy/typeshed/stdlib/json/__init__.pyi index dc0cdff926d4..63e9718ee151 100644 --- a/mypy/typeshed/stdlib/json/__init__.pyi +++ b/mypy/typeshed/stdlib/json/__init__.pyi @@ -1,4 +1,3 @@ -import sys from _typeshed import SupportsRead, SupportsWrite from collections.abc import Callable from typing import Any @@ -7,8 +6,6 @@ from .decoder import JSONDecodeError as JSONDecodeError, JSONDecoder as JSONDeco from .encoder import JSONEncoder as JSONEncoder __all__ = ["dump", "dumps", "load", "loads", "JSONDecoder", "JSONDecodeError", "JSONEncoder"] -if sys.version_info >= (3, 12): - __all__ += ["AttrDict"] def dumps( obj: Any, @@ -62,9 +59,3 @@ def load( **kwds: Any, ) -> Any: ... def detect_encoding(b: bytes | bytearray) -> str: ... # undocumented - -if sys.version_info >= (3, 12): - class AttrDict(dict[str, Any]): - def __getattr__(self, name: str) -> Any: ... - def __setattr__(self, name: str, value: Any) -> None: ... - def __delattr__(self, name: str) -> None: ... diff --git a/mypy/typeshed/stdlib/linecache.pyi b/mypy/typeshed/stdlib/linecache.pyi index 8e317dd38990..2e050e13b621 100644 --- a/mypy/typeshed/stdlib/linecache.pyi +++ b/mypy/typeshed/stdlib/linecache.pyi @@ -1,5 +1,6 @@ import sys -from typing import Any, Protocol +from collections.abc import Callable +from typing import Any from typing_extensions import TypeAlias if sys.version_info >= (3, 9): @@ -10,8 +11,7 @@ else: _ModuleGlobals: TypeAlias = dict[str, Any] _ModuleMetadata: TypeAlias = tuple[int, float | None, list[str], str] -class _SourceLoader(Protocol): - def __call__(self) -> str | None: ... 
+_SourceLoader: TypeAlias = tuple[Callable[[], str | None]] cache: dict[str, _SourceLoader | _ModuleMetadata] # undocumented diff --git a/mypy/typeshed/stdlib/locale.pyi b/mypy/typeshed/stdlib/locale.pyi index c6cc7cacfb1d..3753700ea889 100644 --- a/mypy/typeshed/stdlib/locale.pyi +++ b/mypy/typeshed/stdlib/locale.pyi @@ -15,7 +15,6 @@ __all__ = [ "str", "atof", "atoi", - "format", "format_string", "currency", "normalize", @@ -32,6 +31,9 @@ __all__ = [ if sys.version_info >= (3, 11): __all__ += ["getencoding"] +if sys.version_info < (3, 12): + __all__ += ["format"] + # This module defines a function "str()", which is why "str" can't be used # as a type annotation or type alias. from builtins import str as _str @@ -123,7 +125,12 @@ def normalize(localename: _str) -> _str: ... def resetlocale(category: int = ...) -> None: ... def strcoll(__os1: _str, __os2: _str) -> int: ... def strxfrm(__string: _str) -> _str: ... -def format(percent: _str, value: float | Decimal, grouping: bool = False, monetary: bool = False, *additional: Any) -> _str: ... + +if sys.version_info < (3, 12): + def format( + percent: _str, value: float | Decimal, grouping: bool = False, monetary: bool = False, *additional: Any + ) -> _str: ... + def format_string(f: _str, val: Any, grouping: bool = False, monetary: bool = False) -> _str: ... def currency(val: float | Decimal, symbol: bool = True, grouping: bool = False, international: bool = False) -> _str: ... def delocalize(string: _str) -> _str: ... diff --git a/mypy/typeshed/stdlib/logging/__init__.pyi b/mypy/typeshed/stdlib/logging/__init__.pyi index 6ebd305aacb8..db797d4180ea 100644 --- a/mypy/typeshed/stdlib/logging/__init__.pyi +++ b/mypy/typeshed/stdlib/logging/__init__.pyi @@ -60,6 +60,8 @@ __all__ = [ if sys.version_info >= (3, 11): __all__ += ["getLevelNamesMapping"] +if sys.version_info >= (3, 12): + __all__ += ["getHandlerByName", "getHandlerNames"] _SysExcInfoType: TypeAlias = tuple[type[BaseException], BaseException, TracebackType | None] | tuple[None, None, None] _ExcInfoType: TypeAlias = None | bool | _SysExcInfoType | BaseException @@ -83,7 +85,10 @@ class Filterer: filters: list[_FilterType] def addFilter(self, filter: _FilterType) -> None: ... def removeFilter(self, filter: _FilterType) -> None: ... - def filter(self, record: LogRecord) -> bool: ... + if sys.version_info >= (3, 12): + def filter(self, record: LogRecord) -> bool | LogRecord: ... + else: + def filter(self, record: LogRecord) -> bool: ... class Manager: # undocumented root: RootLogger @@ -111,6 +116,8 @@ class Logger(Filterer): def isEnabledFor(self, level: int) -> bool: ... def getEffectiveLevel(self) -> int: ... def getChild(self, suffix: str) -> Self: ... # see python/typing#980 + if sys.version_info >= (3, 12): + def getChildren(self) -> set[Logger]: ... if sys.version_info >= (3, 8): def debug( self, @@ -324,6 +331,10 @@ class Handler(Filterer): def format(self, record: LogRecord) -> str: ... def emit(self, record: LogRecord) -> None: ... +if sys.version_info >= (3, 12): + def getHandlerByName(name: str) -> Handler | None: ... + def getHandlerNames() -> frozenset[str]: ... + class Formatter: converter: Callable[[float | None], struct_time] _fmt: str | None # undocumented @@ -370,7 +381,10 @@ class Filter: name: str # undocumented nlen: int # undocumented def __init__(self, name: str = "") -> None: ... - def filter(self, record: LogRecord) -> bool: ... + if sys.version_info >= (3, 12): + def filter(self, record: LogRecord) -> bool | LogRecord: ... 
+ else: + def filter(self, record: LogRecord) -> bool: ... class LogRecord: # args can be set to None by logging.handlers.QueueHandler diff --git a/mypy/typeshed/stdlib/multiprocessing/connection.pyi b/mypy/typeshed/stdlib/multiprocessing/connection.pyi index d034373712e0..28696fe6a3a3 100644 --- a/mypy/typeshed/stdlib/multiprocessing/connection.pyi +++ b/mypy/typeshed/stdlib/multiprocessing/connection.pyi @@ -52,7 +52,12 @@ class Listener: self, exc_type: type[BaseException] | None, exc_value: BaseException | None, exc_tb: types.TracebackType | None ) -> None: ... -def deliver_challenge(connection: Connection, authkey: bytes) -> None: ... +if sys.version_info >= (3, 12): + def deliver_challenge(connection: Connection, authkey: bytes, digest_name: str = "sha256") -> None: ... + +else: + def deliver_challenge(connection: Connection, authkey: bytes) -> None: ... + def answer_challenge(connection: Connection, authkey: bytes) -> None: ... def wait( object_list: Iterable[Connection | socket.socket | int], timeout: float | None = None diff --git a/mypy/typeshed/stdlib/multiprocessing/managers.pyi b/mypy/typeshed/stdlib/multiprocessing/managers.pyi index 27a903fb9987..9cfc1ebbdd5e 100644 --- a/mypy/typeshed/stdlib/multiprocessing/managers.pyi +++ b/mypy/typeshed/stdlib/multiprocessing/managers.pyi @@ -73,14 +73,18 @@ class DictProxy(BaseProxy, MutableMapping[_KT, _VT]): def __delitem__(self, __key: _KT) -> None: ... def __iter__(self) -> Iterator[_KT]: ... def copy(self) -> dict[_KT, _VT]: ... - @overload + @overload # type: ignore[override] def get(self, __key: _KT) -> _VT | None: ... @overload - def get(self, __key: _KT, __default: _VT | _T) -> _VT | _T: ... + def get(self, __key: _KT, __default: _VT) -> _VT: ... + @overload + def get(self, __key: _KT, __default: _T) -> _VT | _T: ... @overload def pop(self, __key: _KT) -> _VT: ... @overload - def pop(self, __key: _KT, __default: _VT | _T) -> _VT | _T: ... + def pop(self, __key: _KT, __default: _VT) -> _VT: ... + @overload + def pop(self, __key: _KT, __default: _T) -> _VT | _T: ... def keys(self) -> list[_KT]: ... # type: ignore[override] def items(self) -> list[tuple[_KT, _VT]]: ... # type: ignore[override] def values(self) -> list[_VT]: ... # type: ignore[override] diff --git a/mypy/typeshed/stdlib/multiprocessing/queues.pyi b/mypy/typeshed/stdlib/multiprocessing/queues.pyi index a26ab7173232..8e72d15f25f6 100644 --- a/mypy/typeshed/stdlib/multiprocessing/queues.pyi +++ b/mypy/typeshed/stdlib/multiprocessing/queues.pyi @@ -22,6 +22,8 @@ class Queue(Generic[_T]): def close(self) -> None: ... def join_thread(self) -> None: ... def cancel_join_thread(self) -> None: ... + if sys.version_info >= (3, 12): + def __class_getitem__(cls, __item: Any) -> GenericAlias: ... class JoinableQueue(Queue[_T]): def task_done(self) -> None: ... 
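Note on the logging change above: on 3.12 both Filterer.filter and Filter.filter may hand back a LogRecord, which is then logged in place of the original. A minimal sketch of a filter written against the 3.12 stubs (the RedactingFilter class and the logger name are illustrative assumptions, not part of the patch; check it with --python-version 3.12):

from __future__ import annotations

import logging
import sys


class RedactingFilter(logging.Filter):
    # Hypothetical filter; it shows the 3.12 contract in which filter()
    # may return a LogRecord instead of a plain bool.
    def filter(self, record: logging.LogRecord) -> bool | logging.LogRecord:
        msg = record.getMessage()
        if "secret" not in msg:
            return True  # keep the record unchanged
        record.msg = msg.replace("secret", "[redacted]")
        record.args = None
        # 3.12+ uses a returned LogRecord in place of the original;
        # older versions only understand a truthy/falsy result.
        return record if sys.version_info >= (3, 12) else True


logger = logging.getLogger("demo")
logger.addFilter(RedactingFilter())
logger.warning("leaked secret=%s", "hunter2")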
diff --git a/mypy/typeshed/stdlib/opcode.pyi b/mypy/typeshed/stdlib/opcode.pyi index 1232454e71ea..f852489ffacf 100644 --- a/mypy/typeshed/stdlib/opcode.pyi +++ b/mypy/typeshed/stdlib/opcode.pyi @@ -14,9 +14,12 @@ __all__ = [ "opmap", "HAVE_ARGUMENT", "EXTENDED_ARG", - "hasnargs", "stack_effect", ] +if sys.version_info >= (3, 12): + __all__ += ["hasarg", "hasexc"] +else: + __all__ += ["hasnargs"] if sys.version_info >= (3, 9): cmp_op: tuple[Literal["<"], Literal["<="], Literal["=="], Literal["!="], Literal[">"], Literal[">="]] @@ -42,6 +45,11 @@ hasjabs: list[int] haslocal: list[int] hascompare: list[int] hasfree: list[int] +if sys.version_info >= (3, 12): + hasarg: list[int] + hasexc: list[int] +else: + hasnargs: list[int] opname: list[str] opmap: dict[str, int] @@ -53,5 +61,3 @@ if sys.version_info >= (3, 8): else: def stack_effect(__opcode: int, __oparg: int | None = None) -> int: ... - -hasnargs: list[int] diff --git a/mypy/typeshed/stdlib/os/__init__.pyi b/mypy/typeshed/stdlib/os/__init__.pyi index efe80d82ffba..994595aae781 100644 --- a/mypy/typeshed/stdlib/os/__init__.pyi +++ b/mypy/typeshed/stdlib/os/__init__.pyi @@ -912,7 +912,7 @@ else: @property def si_code(self) -> int: ... - def waitid(__idtype: int, __ident: int, __options: int) -> waitid_result: ... + def waitid(__idtype: int, __ident: int, __options: int) -> waitid_result | None: ... def wait3(options: int) -> tuple[int, int, Any]: ... def wait4(pid: int, options: int) -> tuple[int, int, Any]: ... diff --git a/mypy/typeshed/stdlib/pathlib.pyi b/mypy/typeshed/stdlib/pathlib.pyi index 7aec66b584e3..a509ec3af9f2 100644 --- a/mypy/typeshed/stdlib/pathlib.pyi +++ b/mypy/typeshed/stdlib/pathlib.pyi @@ -39,6 +39,7 @@ class PurePath(PathLike[str]): @property def stem(self) -> str: ... def __new__(cls, *args: StrPath) -> Self: ... + def __hash__(self) -> int: ... def __eq__(self, other: object) -> bool: ... def __fspath__(self) -> str: ... def __lt__(self, other: PurePath) -> bool: ... @@ -55,7 +56,11 @@ class PurePath(PathLike[str]): if sys.version_info >= (3, 9): def is_relative_to(self, *other: StrPath) -> bool: ... - def match(self, path_pattern: str) -> bool: ... + if sys.version_info >= (3, 12): + def match(self, path_pattern: str, *, case_sensitive: bool | None = None) -> bool: ... + else: + def match(self, path_pattern: str) -> bool: ... + def relative_to(self, *other: StrPath) -> Self: ... def with_name(self, name: str) -> Self: ... if sys.version_info >= (3, 9): @@ -70,6 +75,9 @@ class PurePath(PathLike[str]): if sys.version_info >= (3, 9) and sys.version_info < (3, 11): def __class_getitem__(cls, type: Any) -> GenericAlias: ... + if sys.version_info >= (3, 12): + def with_segments(self, *args: StrPath) -> Self: ... + class PurePosixPath(PurePath): ... class PureWindowsPath(PurePath): ... @@ -86,8 +94,15 @@ class Path(PurePath): def stat(self) -> stat_result: ... def chmod(self, mode: int) -> None: ... - def exists(self) -> bool: ... - def glob(self, pattern: str) -> Generator[Self, None, None]: ... + if sys.version_info >= (3, 12): + def exists(self, *, follow_symlinks: bool = True) -> bool: ... + def glob(self, pattern: str, *, case_sensitive: bool | None = None) -> Generator[Self, None, None]: ... + def rglob(self, pattern: str, *, case_sensitive: bool | None = None) -> Generator[Self, None, None]: ... + else: + def exists(self) -> bool: ... + def glob(self, pattern: str) -> Generator[Self, None, None]: ... + def rglob(self, pattern: str) -> Generator[Self, None, None]: ... + def is_dir(self) -> bool: ... 
def is_file(self) -> bool: ... def is_symlink(self) -> bool: ... @@ -95,6 +110,9 @@ class Path(PurePath): def is_fifo(self) -> bool: ... def is_block_device(self) -> bool: ... def is_char_device(self) -> bool: ... + if sys.version_info >= (3, 12): + def is_junction(self) -> bool: ... + def iterdir(self) -> Generator[Self, None, None]: ... def lchmod(self, mode: int) -> None: ... def lstat(self) -> stat_result: ... @@ -159,6 +177,10 @@ class Path(PurePath): # so it's safer to pretend they don't exist def owner(self) -> str: ... def group(self) -> str: ... + + # This method does "exist" on Windows on <3.12, but always raises NotImplementedError + # On py312+, it works properly on Windows, as with all other platforms + if sys.platform != "win32" or sys.version_info >= (3, 12): def is_mount(self) -> bool: ... if sys.version_info >= (3, 9): @@ -171,7 +193,6 @@ class Path(PurePath): def replace(self, target: str | PurePath) -> None: ... def resolve(self, strict: bool = False) -> Self: ... - def rglob(self, pattern: str) -> Generator[Self, None, None]: ... def rmdir(self) -> None: ... def symlink_to(self, target: StrOrBytesPath, target_is_directory: bool = False) -> None: ... if sys.version_info >= (3, 10): diff --git a/mypy/typeshed/stdlib/pdb.pyi b/mypy/typeshed/stdlib/pdb.pyi index 405c45ca01ac..4cc708d9d5fe 100644 --- a/mypy/typeshed/stdlib/pdb.pyi +++ b/mypy/typeshed/stdlib/pdb.pyi @@ -125,6 +125,9 @@ class Pdb(Bdb, Cmd): def sigint_handler(self, signum: signal.Signals, frame: FrameType) -> None: ... def message(self, msg: str) -> None: ... def error(self, msg: str) -> None: ... + if sys.version_info >= (3, 12): + def set_convenience_variable(self, frame: FrameType, name: str, value: Any) -> None: ... + def _select_frame(self, number: int) -> None: ... def _getval_except(self, arg: str, frame: FrameType | None = None) -> object: ... def _print_lines( @@ -168,7 +171,10 @@ class Pdb(Bdb, Cmd): def find_function(funcname: str, filename: str) -> tuple[str, str, int] | None: ... def main() -> None: ... def help() -> None: ... -def getsourcelines(obj: _SourceObjectType) -> tuple[list[str], int]: ... + +if sys.version_info < (3, 10): + def getsourcelines(obj: _SourceObjectType) -> tuple[list[str], int]: ... + def lasti2lineno(code: CodeType, lasti: int) -> int: ... class _rstr(str): diff --git a/mypy/typeshed/stdlib/pkgutil.pyi b/mypy/typeshed/stdlib/pkgutil.pyi index f9808c9e5de8..59f1f734cf90 100644 --- a/mypy/typeshed/stdlib/pkgutil.pyi +++ b/mypy/typeshed/stdlib/pkgutil.pyi @@ -12,12 +12,12 @@ __all__ = [ "walk_packages", "iter_modules", "get_data", - "ImpImporter", - "ImpLoader", "read_code", "extend_path", "ModuleInfo", ] +if sys.version_info < (3, 12): + __all__ += ["ImpImporter", "ImpLoader"] _PathT = TypeVar("_PathT", bound=Iterable[str]) @@ -28,11 +28,12 @@ class ModuleInfo(NamedTuple): def extend_path(path: _PathT, name: str) -> _PathT: ... -class ImpImporter: - def __init__(self, path: str | None = None) -> None: ... +if sys.version_info < (3, 12): + class ImpImporter: + def __init__(self, path: str | None = None) -> None: ... -class ImpLoader: - def __init__(self, fullname: str, file: IO[str], filename: str, etc: tuple[str, str, int]) -> None: ... + class ImpLoader: + def __init__(self, fullname: str, file: IO[str], filename: str, etc: tuple[str, str, int]) -> None: ... def find_loader(fullname: str) -> Loader | None: ... def get_importer(path_item: str) -> PathEntryFinder | None: ... 
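Note on the pathlib change above: several 3.12-only keywords (exists(follow_symlinks=...), glob/rglob(case_sensitive=...), match(case_sensitive=...)) plus is_junction() and with_segments() are now gated on sys.version_info. A rough sketch of version-gated caller code that lines up with those stubs (the directory and patterns are made up for illustration):

import sys
from pathlib import Path

stub_root = Path("mypy/typeshed/stdlib")  # illustrative path

if sys.version_info >= (3, 12):
    # case_sensitive= only exists on 3.12+, so this call is checked against
    # the new signature introduced above.
    stubs = sorted(stub_root.glob("*.PYI", case_sensitive=False))
else:
    stubs = sorted(stub_root.glob("*.pyi"))

print(len(stubs), "stub files found")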
diff --git a/mypy/typeshed/stdlib/plistlib.pyi b/mypy/typeshed/stdlib/plistlib.pyi index 5b76c935f76e..bd5525484514 100644 --- a/mypy/typeshed/stdlib/plistlib.pyi +++ b/mypy/typeshed/stdlib/plistlib.pyi @@ -102,6 +102,7 @@ if sys.version_info >= (3, 8): def __init__(self, data: int) -> None: ... def __index__(self) -> int: ... def __reduce__(self) -> tuple[type[Self], tuple[int]]: ... + def __hash__(self) -> int: ... def __eq__(self, other: object) -> bool: ... class InvalidFileException(ValueError): diff --git a/mypy/typeshed/stdlib/pydoc.pyi b/mypy/typeshed/stdlib/pydoc.pyi index ed97f1918e01..7791c977aa8b 100644 --- a/mypy/typeshed/stdlib/pydoc.pyi +++ b/mypy/typeshed/stdlib/pydoc.pyi @@ -61,6 +61,7 @@ class Doc: def getdocloc(self, object: object, basedir: str = ...) -> str | None: ... class HTMLRepr(Repr): + def __init__(self) -> None: ... def escape(self, text: str) -> str: ... def repr(self, object: object) -> str: ... def repr1(self, x: object, level: complex) -> str: ... @@ -148,6 +149,7 @@ class HTMLDoc(Doc): def filelink(self, url: str, path: str) -> str: ... class TextRepr(Repr): + def __init__(self) -> None: ... def repr1(self, x: object, level: complex) -> str: ... def repr_string(self, x: str, level: complex) -> str: ... def repr_str(self, x: str, level: complex) -> str: ... @@ -195,12 +197,24 @@ def resolve(thing: str | object, forceload: bool = ...) -> tuple[object, str] | def render_doc( thing: str | object, title: str = "Python Library Documentation: %s", forceload: bool = ..., renderer: Doc | None = None ) -> str: ... -def doc( - thing: str | object, - title: str = "Python Library Documentation: %s", - forceload: bool = ..., - output: SupportsWrite[str] | None = None, -) -> None: ... + +if sys.version_info >= (3, 12): + def doc( + thing: str | object, + title: str = "Python Library Documentation: %s", + forceload: bool = ..., + output: SupportsWrite[str] | None = None, + is_cli: bool = False, + ) -> None: ... + +else: + def doc( + thing: str | object, + title: str = "Python Library Documentation: %s", + forceload: bool = ..., + output: SupportsWrite[str] | None = None, + ) -> None: ... + def writedoc(thing: str | object, forceload: bool = ...) -> None: ... def writedocs(dir: str, pkgpath: str = "", done: Any | None = None) -> None: ... @@ -216,7 +230,11 @@ class Helper: def __call__(self, request: str | Helper | object = ...) -> None: ... def interact(self) -> None: ... def getline(self, prompt: str) -> str: ... - def help(self, request: Any) -> None: ... + if sys.version_info >= (3, 12): + def help(self, request: Any, is_cli: bool = False) -> None: ... + else: + def help(self, request: Any) -> None: ... + def intro(self) -> None: ... def list(self, items: _list[str], columns: int = 4, width: int = 80) -> None: ... def listkeywords(self) -> None: ... diff --git a/mypy/typeshed/stdlib/re.pyi b/mypy/typeshed/stdlib/re.pyi index 4e53141ade84..29ee8b66815e 100644 --- a/mypy/typeshed/stdlib/re.pyi +++ b/mypy/typeshed/stdlib/re.pyi @@ -175,6 +175,8 @@ class Pattern(Generic[AnyStr]): def subn(self, repl: AnyStr | Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int = 0) -> tuple[AnyStr, int]: ... def __copy__(self) -> Pattern[AnyStr]: ... def __deepcopy__(self, __memo: Any) -> Pattern[AnyStr]: ... + def __eq__(self, __value: object) -> bool: ... + def __hash__(self) -> int: ... if sys.version_info >= (3, 9): def __class_getitem__(cls, item: Any) -> GenericAlias: ... 
diff --git a/mypy/typeshed/stdlib/shelve.pyi b/mypy/typeshed/stdlib/shelve.pyi index 82d0b03f4049..b162b3a85766 100644 --- a/mypy/typeshed/stdlib/shelve.pyi +++ b/mypy/typeshed/stdlib/shelve.pyi @@ -15,8 +15,10 @@ class Shelf(MutableMapping[str, _VT]): ) -> None: ... def __iter__(self) -> Iterator[str]: ... def __len__(self) -> int: ... + @overload # type: ignore[override] + def get(self, key: str, default: None = None) -> _VT | None: ... # type: ignore[misc] # overlapping overloads @overload - def get(self, key: str) -> _VT | None: ... + def get(self, key: str, default: _VT) -> _VT: ... @overload def get(self, key: str, default: _T) -> _VT | _T: ... def __getitem__(self, key: str) -> _VT: ... diff --git a/mypy/typeshed/stdlib/shutil.pyi b/mypy/typeshed/stdlib/shutil.pyi index ef716d4049dd..38c50d51b129 100644 --- a/mypy/typeshed/stdlib/shutil.pyi +++ b/mypy/typeshed/stdlib/shutil.pyi @@ -2,6 +2,7 @@ import os import sys from _typeshed import BytesPath, FileDescriptorOrPath, StrOrBytesPath, StrPath, SupportsRead, SupportsWrite from collections.abc import Callable, Iterable, Sequence +from tarfile import _TarfileFilter from typing import Any, AnyStr, NamedTuple, Protocol, TypeVar, overload from typing_extensions import TypeAlias @@ -192,9 +193,9 @@ def register_archive_format( ) -> None: ... def unregister_archive_format(name: str) -> None: ... -if sys.version_info >= (3, 12): +if sys.version_info >= (3, 8): def unpack_archive( - filename: StrPath, extract_dir: StrPath | None = None, format: str | None = None, *, filter: str | None = None + filename: StrPath, extract_dir: StrPath | None = None, format: str | None = None, *, filter: _TarfileFilter | None = None ) -> None: ... else: diff --git a/mypy/typeshed/stdlib/socket.pyi b/mypy/typeshed/stdlib/socket.pyi index 6c897b919909..da06ce2c2b06 100644 --- a/mypy/typeshed/stdlib/socket.pyi +++ b/mypy/typeshed/stdlib/socket.pyi @@ -201,6 +201,7 @@ if sys.platform != "win32" and sys.platform != "darwin": TCP_LINGER2 as TCP_LINGER2, TCP_QUICKACK as TCP_QUICKACK, TCP_SYNCNT as TCP_SYNCNT, + TCP_USER_TIMEOUT as TCP_USER_TIMEOUT, TCP_WINDOW_CLAMP as TCP_WINDOW_CLAMP, ) if sys.platform != "win32": @@ -438,6 +439,38 @@ if sys.platform == "win32": SIO_LOOPBACK_FAST_PATH as SIO_LOOPBACK_FAST_PATH, SIO_RCVALL as SIO_RCVALL, ) +if sys.version_info >= (3, 12): + from _socket import ( + IP_ADD_SOURCE_MEMBERSHIP as IP_ADD_SOURCE_MEMBERSHIP, + IP_BLOCK_SOURCE as IP_BLOCK_SOURCE, + IP_DROP_SOURCE_MEMBERSHIP as IP_DROP_SOURCE_MEMBERSHIP, + IP_PKTINFO as IP_PKTINFO, + IP_UNBLOCK_SOURCE as IP_UNBLOCK_SOURCE, + ) + + if sys.platform == "win32": + from _socket import ( + HV_GUID_BROADCAST as HV_GUID_BROADCAST, + HV_GUID_CHILDREN as HV_GUID_CHILDREN, + HV_GUID_LOOPBACK as HV_GUID_LOOPBACK, + HV_GUID_PARENT as HV_GUID_PARENT, + HV_GUID_WILDCARD as HV_GUID_WILDCARD, + HV_GUID_ZERO as HV_GUID_ZERO, + HV_PROTOCOL_RAW as HV_PROTOCOL_RAW, + HVSOCKET_ADDRESS_FLAG_PASSTHRU as HVSOCKET_ADDRESS_FLAG_PASSTHRU, + HVSOCKET_CONNECT_TIMEOUT as HVSOCKET_CONNECT_TIMEOUT, + HVSOCKET_CONNECT_TIMEOUT_MAX as HVSOCKET_CONNECT_TIMEOUT_MAX, + HVSOCKET_CONNECTED_SUSPEND as HVSOCKET_CONNECTED_SUSPEND, + ) + else: + from _socket import ( + ETHERTYPE_ARP as ETHERTYPE_ARP, + ETHERTYPE_IP as ETHERTYPE_IP, + ETHERTYPE_IPV6 as ETHERTYPE_IPV6, + ETHERTYPE_VLAN as ETHERTYPE_VLAN, + ) +if sys.version_info >= (3, 11) and sys.platform == "darwin": + from _socket import TCP_CONNECTION_INFO as TCP_CONNECTION_INFO # Re-exported from errno EBADF: int @@ -489,6 +522,8 @@ class AddressFamily(IntEnum): AF_LINK: 
int if sys.platform != "darwin": AF_BLUETOOTH: int + if sys.platform == "win32" and sys.version_info >= (3, 12): + AF_HYPERV: int AF_INET = AddressFamily.AF_INET AF_INET6 = AddressFamily.AF_INET6 @@ -540,6 +575,9 @@ if sys.platform != "win32" or sys.version_info >= (3, 9): if sys.platform != "darwin": AF_BLUETOOTH = AddressFamily.AF_BLUETOOTH +if sys.platform == "win32" and sys.version_info >= (3, 12): + AF_HYPERV = AddressFamily.AF_HYPERV + class SocketKind(IntEnum): SOCK_STREAM: int SOCK_DGRAM: int diff --git a/mypy/typeshed/stdlib/socketserver.pyi b/mypy/typeshed/stdlib/socketserver.pyi index 3799d82a0065..6a932f66cd09 100644 --- a/mypy/typeshed/stdlib/socketserver.pyi +++ b/mypy/typeshed/stdlib/socketserver.pyi @@ -28,6 +28,8 @@ if sys.platform != "win32": "UnixDatagramServer", "UnixStreamServer", ] + if sys.version_info >= (3, 12): + __all__ += ["ForkingUnixStreamServer", "ForkingUnixDatagramServer"] _RequestType: TypeAlias = _socket | tuple[bytes, _socket] _AfUnixAddress: TypeAlias = str | ReadableBuffer # address acceptable for an AF_UNIX socket @@ -124,6 +126,9 @@ class ThreadingMixIn: if sys.platform != "win32": class ForkingTCPServer(ForkingMixIn, TCPServer): ... class ForkingUDPServer(ForkingMixIn, UDPServer): ... + if sys.version_info >= (3, 12): + class ForkingUnixStreamServer(ForkingMixIn, UnixStreamServer): ... + class ForkingUnixDatagramServer(ForkingMixIn, UnixDatagramServer): ... class ThreadingTCPServer(ThreadingMixIn, TCPServer): ... class ThreadingUDPServer(ThreadingMixIn, UDPServer): ... diff --git a/mypy/typeshed/stdlib/sqlite3/dbapi2.pyi b/mypy/typeshed/stdlib/sqlite3/dbapi2.pyi index 24974f787c62..41f731e21e26 100644 --- a/mypy/typeshed/stdlib/sqlite3/dbapi2.pyi +++ b/mypy/typeshed/stdlib/sqlite3/dbapi2.pyi @@ -196,6 +196,25 @@ if sys.version_info >= (3, 11): SQLITE_WARNING: int SQLITE_WARNING_AUTOINDEX: int +if sys.version_info >= (3, 12): + LEGACY_TRANSACTION_CONTROL: int + SQLITE_DBCONFIG_DEFENSIVE: int + SQLITE_DBCONFIG_DQS_DDL: int + SQLITE_DBCONFIG_DQS_DML: int + SQLITE_DBCONFIG_ENABLE_FKEY: int + SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER: int + SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION: int + SQLITE_DBCONFIG_ENABLE_QPSG: int + SQLITE_DBCONFIG_ENABLE_TRIGGER: int + SQLITE_DBCONFIG_ENABLE_VIEW: int + SQLITE_DBCONFIG_LEGACY_ALTER_TABLE: int + SQLITE_DBCONFIG_LEGACY_FILE_FORMAT: int + SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE: int + SQLITE_DBCONFIG_RESET_DATABASE: int + SQLITE_DBCONFIG_TRIGGER_EQP: int + SQLITE_DBCONFIG_TRUSTED_SCHEMA: int + SQLITE_DBCONFIG_WRITABLE_SCHEMA: int + # Can take or return anything depending on what's in the registry. @overload def adapt(__obj: Any, __proto: Any) -> Any: ... @@ -214,8 +233,9 @@ def connect( ) -> Connection: ... def enable_callback_tracebacks(__enable: bool) -> None: ... -# takes a pos-or-keyword argument because there is a C wrapper -def enable_shared_cache(enable: int) -> None: ... +if sys.version_info < (3, 12): + # takes a pos-or-keyword argument because there is a C wrapper + def enable_shared_cache(enable: int) -> None: ... if sys.version_info >= (3, 10): def register_adapter(__type: type[_T], __adapter: _Adapter[_T]) -> None: ... @@ -279,6 +299,11 @@ class Connection: isolation_level: str | None # one of '', 'DEFERRED', 'IMMEDIATE' or 'EXCLUSIVE' @property def total_changes(self) -> int: ... + if sys.version_info >= (3, 12): + @property + def autocommit(self) -> int: ... + @autocommit.setter + def autocommit(self, val: int) -> None: ... 
row_factory: Any text_factory: Any def __init__( @@ -356,6 +381,9 @@ class Connection: def getlimit(self, __category: int) -> int: ... def serialize(self, *, name: str = "main") -> bytes: ... def deserialize(self, __data: ReadableBuffer, *, name: str = "main") -> None: ... + if sys.version_info >= (3, 12): + def getconfig(self, __op: int) -> bool: ... + def setconfig(self, __op: int, __enable: bool = True) -> bool: ... def __call__(self, __sql: str) -> _Statement: ... def __enter__(self) -> Self: ... @@ -419,6 +447,7 @@ class Row: def __getitem__(self, __key: int | str) -> Any: ... @overload def __getitem__(self, __key: slice) -> tuple[Any, ...]: ... + def __hash__(self) -> int: ... def __iter__(self) -> Iterator[Any]: ... def __len__(self) -> int: ... # These return NotImplemented for anything that is not a Row. diff --git a/mypy/typeshed/stdlib/sre_parse.pyi b/mypy/typeshed/stdlib/sre_parse.pyi index 56f10bb41d57..8ef65223dc34 100644 --- a/mypy/typeshed/stdlib/sre_parse.pyi +++ b/mypy/typeshed/stdlib/sre_parse.pyi @@ -87,25 +87,39 @@ class Tokenizer: def seek(self, index: int) -> None: ... def error(self, msg: str, offset: int = 0) -> _Error: ... - if sys.version_info >= (3, 11): + if sys.version_info >= (3, 12): + def checkgroupname(self, name: str, offset: int) -> None: ... + elif sys.version_info >= (3, 11): def checkgroupname(self, name: str, offset: int, nested: int) -> None: ... def fix_flags(src: str | bytes, flags: int) -> int: ... _TemplateType: TypeAlias = tuple[list[tuple[int, int]], list[str | None]] _TemplateByteType: TypeAlias = tuple[list[tuple[int, int]], list[bytes | None]] -if sys.version_info >= (3, 8): - def parse(str: str, flags: int = 0, state: State | None = None) -> SubPattern: ... + +if sys.version_info >= (3, 12): + @overload + def parse_template(source: str, pattern: _Pattern[Any]) -> _TemplateType: ... + @overload + def parse_template(source: bytes, pattern: _Pattern[Any]) -> _TemplateByteType: ... + +elif sys.version_info >= (3, 8): @overload def parse_template(source: str, state: _Pattern[Any]) -> _TemplateType: ... @overload def parse_template(source: bytes, state: _Pattern[Any]) -> _TemplateByteType: ... else: - def parse(str: str, flags: int = 0, pattern: Pattern | None = None) -> SubPattern: ... @overload def parse_template(source: str, pattern: _Pattern[Any]) -> _TemplateType: ... @overload def parse_template(source: bytes, pattern: _Pattern[Any]) -> _TemplateByteType: ... -def expand_template(template: _TemplateType, match: Match[Any]) -> str: ... +if sys.version_info >= (3, 8): + def parse(str: str, flags: int = 0, state: State | None = None) -> SubPattern: ... + +else: + def parse(str: str, flags: int = 0, pattern: Pattern | None = None) -> SubPattern: ... + +if sys.version_info < (3, 12): + def expand_template(template: _TemplateType, match: Match[Any]) -> str: ... diff --git a/mypy/typeshed/stdlib/ssl.pyi b/mypy/typeshed/stdlib/ssl.pyi index 20b8802bd7b9..1c49b130e48f 100644 --- a/mypy/typeshed/stdlib/ssl.pyi +++ b/mypy/typeshed/stdlib/ssl.pyi @@ -44,18 +44,20 @@ class SSLCertVerificationError(SSLError, ValueError): CertificateError = SSLCertVerificationError -def wrap_socket( - sock: socket.socket, - keyfile: StrOrBytesPath | None = None, - certfile: StrOrBytesPath | None = None, - server_side: bool = False, - cert_reqs: int = ..., - ssl_version: int = ..., - ca_certs: str | None = None, - do_handshake_on_connect: bool = True, - suppress_ragged_eofs: bool = True, - ciphers: str | None = None, -) -> SSLSocket: ... 
+if sys.version_info < (3, 12): + def wrap_socket( + sock: socket.socket, + keyfile: StrOrBytesPath | None = None, + certfile: StrOrBytesPath | None = None, + server_side: bool = False, + cert_reqs: int = ..., + ssl_version: int = ..., + ca_certs: str | None = None, + do_handshake_on_connect: bool = True, + suppress_ragged_eofs: bool = True, + ciphers: str | None = None, + ) -> SSLSocket: ... + def create_default_context( purpose: Purpose = ..., *, @@ -95,7 +97,10 @@ else: _create_default_https_context: Callable[..., SSLContext] def RAND_bytes(__n: int) -> bytes: ... -def RAND_pseudo_bytes(__n: int) -> tuple[bytes, bool]: ... + +if sys.version_info < (3, 12): + def RAND_pseudo_bytes(__n: int) -> tuple[bytes, bool]: ... + def RAND_status() -> bool: ... def RAND_egd(path: str) -> None: ... def RAND_add(__string: str | ReadableBuffer, __entropy: float) -> None: ... @@ -198,6 +203,11 @@ class Options(enum.IntFlag): OP_ENABLE_MIDDLEBOX_COMPAT: int if sys.platform == "linux": OP_IGNORE_UNEXPECTED_EOF: int + if sys.version_info >= (3, 12): + OP_LEGACY_SERVER_CONNECT: int + if sys.version_info >= (3, 12) and sys.platform != "linux": + OP_ENABLE_KTLS: int + OP_IGNORE_UNEXPECTED_EOF: int OP_ALL: Options OP_NO_SSLv2: Options @@ -216,6 +226,11 @@ if sys.version_info >= (3, 8): OP_ENABLE_MIDDLEBOX_COMPAT: Options if sys.platform == "linux": OP_IGNORE_UNEXPECTED_EOF: Options +if sys.version_info >= (3, 12): + OP_LEGACY_SERVER_CONNECT: Options +if sys.version_info >= (3, 12) and sys.platform != "linux": + OP_ENABLE_KTLS: Options + OP_IGNORE_UNEXPECTED_EOF: Options HAS_NEVER_CHECK_COMMON_NAME: bool HAS_SSLv2: bool @@ -421,7 +436,7 @@ class SSLContext: server_side: bool = False, do_handshake_on_connect: bool = True, suppress_ragged_eofs: bool = True, - server_hostname: str | None = None, + server_hostname: str | bytes | None = None, session: SSLSession | None = None, ) -> SSLSocket: ... def wrap_bio( @@ -429,7 +444,7 @@ class SSLContext: incoming: MemoryBIO, outgoing: MemoryBIO, server_side: bool = False, - server_hostname: str | None = None, + server_hostname: str | bytes | None = None, session: SSLSession | None = None, ) -> SSLObject: ... def session_stats(self) -> dict[str, int]: ... @@ -485,6 +500,7 @@ class SSLSession: def time(self) -> int: ... @property def timeout(self) -> int: ... + def __eq__(self, __value: object) -> bool: ... class SSLErrorNumber(enum.IntEnum): SSL_ERROR_EOF: int diff --git a/mypy/typeshed/stdlib/statistics.pyi b/mypy/typeshed/stdlib/statistics.pyi index af5fcec6ad0c..07174f4531b9 100644 --- a/mypy/typeshed/stdlib/statistics.pyi +++ b/mypy/typeshed/stdlib/statistics.pyi @@ -113,6 +113,7 @@ if sys.version_info >= (3, 8): __radd__ = __add__ def __rsub__(self, x2: float | NormalDist) -> NormalDist: ... __rmul__ = __mul__ + def __hash__(self) -> int: ... if sys.version_info >= (3, 12): def correlation( diff --git a/mypy/typeshed/stdlib/sys.pyi b/mypy/typeshed/stdlib/sys.pyi index c2fdbeccca72..ca049124053a 100644 --- a/mypy/typeshed/stdlib/sys.pyi +++ b/mypy/typeshed/stdlib/sys.pyi @@ -321,7 +321,7 @@ if sys.version_info < (3, 9): if sys.version_info >= (3, 8): # Doesn't exist at runtime, but exported in the stubs so pytest etc. can annotate their code more easily. - class UnraisableHookArgs: + class UnraisableHookArgs(Protocol): exc_type: type[BaseException] exc_value: BaseException | None exc_traceback: TracebackType | None @@ -359,3 +359,13 @@ if sys.version_info < (3, 8): # as part of the response to CVE-2020-10735 def set_int_max_str_digits(maxdigits: int) -> None: ... 
def get_int_max_str_digits() -> int: ... + +if sys.version_info >= (3, 12): + def getunicodeinternedsize() -> int: ... + def deactivate_stack_trampoline() -> None: ... + def is_stack_trampoline_active() -> bool: ... + # It always exists, but raises on non-linux platforms: + if sys.platform == "linux": + def activate_stack_trampoline(__backend: str) -> None: ... + else: + def activate_stack_trampoline(__backend: str) -> NoReturn: ... diff --git a/mypy/typeshed/stdlib/tarfile.pyi b/mypy/typeshed/stdlib/tarfile.pyi index 5cf1d55cac63..d9d9641ac698 100644 --- a/mypy/typeshed/stdlib/tarfile.pyi +++ b/mypy/typeshed/stdlib/tarfile.pyi @@ -7,7 +7,7 @@ from collections.abc import Callable, Iterable, Iterator, Mapping from gzip import _ReadableFileobj as _GzipReadableFileobj, _WritableFileobj as _GzipWritableFileobj from types import TracebackType from typing import IO, ClassVar, Protocol, overload -from typing_extensions import Literal, Self +from typing_extensions import Literal, Self, TypeAlias __all__ = [ "TarFile", @@ -26,6 +26,21 @@ __all__ = [ "DEFAULT_FORMAT", "open", ] +if sys.version_info >= (3, 12): + __all__ += [ + "fully_trusted_filter", + "data_filter", + "tar_filter", + "FilterError", + "AbsoluteLinkError", + "OutsideDestinationError", + "SpecialFileError", + "AbsolutePathError", + "LinkOutsideDestinationError", + ] + +_FilterFunction: TypeAlias = Callable[[TarInfo, str], TarInfo | None] +_TarfileFilter: TypeAlias = Literal["fully_trusted", "tar", "data"] | _FilterFunction class _Fileobj(Protocol): def read(self, __size: int) -> bytes: ... @@ -125,6 +140,7 @@ class TarFile: debug: int | None errorlevel: int | None offset: int # undocumented + extraction_filter: _FilterFunction | None def __init__( self, name: StrOrBytesPath | None = None, @@ -275,12 +291,32 @@ class TarFile: def getnames(self) -> _list[str]: ... def list(self, verbose: bool = True, *, members: _list[TarInfo] | None = None) -> None: ... def next(self) -> TarInfo | None: ... - def extractall( - self, path: StrOrBytesPath = ".", members: Iterable[TarInfo] | None = None, *, numeric_owner: bool = False - ) -> None: ... - def extract( - self, member: str | TarInfo, path: StrOrBytesPath = "", set_attrs: bool = True, *, numeric_owner: bool = False - ) -> None: ... + if sys.version_info >= (3, 8): + def extractall( + self, + path: StrOrBytesPath = ".", + members: Iterable[TarInfo] | None = None, + *, + numeric_owner: bool = False, + filter: _TarfileFilter | None = ..., + ) -> None: ... + def extract( + self, + member: str | TarInfo, + path: StrOrBytesPath = "", + set_attrs: bool = True, + *, + numeric_owner: bool = False, + filter: _TarfileFilter | None = ..., + ) -> None: ... + else: + def extractall( + self, path: StrOrBytesPath = ".", members: Iterable[TarInfo] | None = None, *, numeric_owner: bool = False + ) -> None: ... + def extract( + self, member: str | TarInfo, path: StrOrBytesPath = "", set_attrs: bool = True, *, numeric_owner: bool = False + ) -> None: ... + def _extract_member( self, tarinfo: TarInfo, targetpath: str, set_attrs: bool = True, numeric_owner: bool = False ) -> None: ... # undocumented @@ -324,6 +360,31 @@ class StreamError(TarError): ... class ExtractError(TarError): ... class HeaderError(TarError): ... +if sys.version_info >= (3, 8): + class FilterError(TarError): + # This attribute is only set directly on the subclasses, but the documentation guarantees + # that it is always present on FilterError. 
+ tarinfo: TarInfo + + class AbsolutePathError(FilterError): + def __init__(self, tarinfo: TarInfo) -> None: ... + + class OutsideDestinationError(FilterError): + def __init__(self, tarinfo: TarInfo, path: str) -> None: ... + + class SpecialFileError(FilterError): + def __init__(self, tarinfo: TarInfo) -> None: ... + + class AbsoluteLinkError(FilterError): + def __init__(self, tarinfo: TarInfo) -> None: ... + + class LinkOutsideDestinationError(FilterError): + def __init__(self, tarinfo: TarInfo, path: str) -> None: ... + + def fully_trusted_filter(member: TarInfo, dest_path: str) -> TarInfo: ... + def tar_filter(member: TarInfo, dest_path: str) -> TarInfo: ... + def data_filter(member: TarInfo, dest_path: str) -> TarInfo: ... + class TarInfo: name: str path: str @@ -353,6 +414,21 @@ class TarInfo: def linkpath(self) -> str: ... @linkpath.setter def linkpath(self, linkname: str) -> None: ... + if sys.version_info >= (3, 8): + def replace( + self, + *, + name: str = ..., + mtime: int = ..., + mode: int = ..., + linkname: str = ..., + uid: int = ..., + gid: int = ..., + uname: str = ..., + gname: str = ..., + deep: bool = True, + ) -> Self: ... + def get_info(self) -> Mapping[str, str | int | bytes | Mapping[str, str]]: ... if sys.version_info >= (3, 8): def tobuf(self, format: int | None = 2, encoding: str | None = "utf-8", errors: str = "surrogateescape") -> bytes: ... diff --git a/mypy/typeshed/stdlib/tempfile.pyi b/mypy/typeshed/stdlib/tempfile.pyi index cd27e91fbc75..ea04303683b5 100644 --- a/mypy/typeshed/stdlib/tempfile.pyi +++ b/mypy/typeshed/stdlib/tempfile.pyi @@ -1,10 +1,21 @@ import io import sys -from _typeshed import BytesPath, GenericPath, ReadableBuffer, StrPath, WriteableBuffer +from _typeshed import ( + BytesPath, + GenericPath, + OpenBinaryMode, + OpenBinaryModeReading, + OpenBinaryModeUpdating, + OpenBinaryModeWriting, + OpenTextMode, + ReadableBuffer, + StrPath, + WriteableBuffer, +) from collections.abc import Iterable, Iterator from types import TracebackType from typing import IO, Any, AnyStr, Generic, overload -from typing_extensions import Literal, Self, TypeAlias +from typing_extensions import Literal, Self if sys.version_info >= (3, 9): from types import GenericAlias @@ -30,13 +41,54 @@ TMP_MAX: int tempdir: str | None template: str -_StrMode: TypeAlias = Literal["r", "w", "a", "x", "r+", "w+", "a+", "x+", "rt", "wt", "at", "xt", "r+t", "w+t", "a+t", "x+t"] -_BytesMode: TypeAlias = Literal["rb", "wb", "ab", "xb", "r+b", "w+b", "a+b", "x+b"] +if sys.version_info >= (3, 12): + @overload + def NamedTemporaryFile( + mode: OpenTextMode, + buffering: int = -1, + encoding: str | None = None, + newline: str | None = None, + suffix: AnyStr | None = None, + prefix: AnyStr | None = None, + dir: GenericPath[AnyStr] | None = None, + delete: bool = True, + *, + errors: str | None = None, + delete_on_close: bool = True, + ) -> _TemporaryFileWrapper[str]: ... + @overload + def NamedTemporaryFile( + mode: OpenBinaryMode = "w+b", + buffering: int = -1, + encoding: str | None = None, + newline: str | None = None, + suffix: AnyStr | None = None, + prefix: AnyStr | None = None, + dir: GenericPath[AnyStr] | None = None, + delete: bool = True, + *, + errors: str | None = None, + delete_on_close: bool = True, + ) -> _TemporaryFileWrapper[bytes]: ... 
+ @overload + def NamedTemporaryFile( + mode: str = "w+b", + buffering: int = -1, + encoding: str | None = None, + newline: str | None = None, + suffix: AnyStr | None = None, + prefix: AnyStr | None = None, + dir: GenericPath[AnyStr] | None = None, + delete: bool = True, + *, + errors: str | None = None, + delete_on_close: bool = True, + ) -> _TemporaryFileWrapper[Any]: ... -if sys.version_info >= (3, 8): +elif sys.version_info >= (3, 8): @overload def NamedTemporaryFile( - mode: _StrMode, + mode: OpenTextMode, buffering: int = -1, encoding: str | None = None, newline: str | None = None, @@ -49,7 +101,7 @@ if sys.version_info >= (3, 8): ) -> _TemporaryFileWrapper[str]: ... @overload def NamedTemporaryFile( - mode: _BytesMode = "w+b", + mode: OpenBinaryMode = "w+b", buffering: int = -1, encoding: str | None = None, newline: str | None = None, @@ -77,7 +129,7 @@ if sys.version_info >= (3, 8): else: @overload def NamedTemporaryFile( - mode: _StrMode, + mode: OpenTextMode, buffering: int = -1, encoding: str | None = None, newline: str | None = None, @@ -88,7 +140,7 @@ else: ) -> _TemporaryFileWrapper[str]: ... @overload def NamedTemporaryFile( - mode: _BytesMode = "w+b", + mode: OpenBinaryMode = "w+b", buffering: int = -1, encoding: str | None = None, newline: str | None = None, @@ -112,10 +164,11 @@ else: if sys.platform == "win32": TemporaryFile = NamedTemporaryFile else: + # See the comments for builtins.open() for an explanation of the overloads. if sys.version_info >= (3, 8): @overload def TemporaryFile( - mode: _StrMode, + mode: OpenTextMode, buffering: int = -1, encoding: str | None = None, newline: str | None = None, @@ -124,11 +177,11 @@ else: dir: GenericPath[AnyStr] | None = None, *, errors: str | None = None, - ) -> IO[str]: ... + ) -> io.TextIOWrapper: ... @overload def TemporaryFile( - mode: _BytesMode = "w+b", - buffering: int = -1, + mode: OpenBinaryMode, + buffering: Literal[0], encoding: str | None = None, newline: str | None = None, suffix: AnyStr | None = None, @@ -136,7 +189,54 @@ else: dir: GenericPath[AnyStr] | None = None, *, errors: str | None = None, - ) -> IO[bytes]: ... + ) -> io.FileIO: ... + @overload + def TemporaryFile( + *, + buffering: Literal[0], + encoding: str | None = None, + newline: str | None = None, + suffix: AnyStr | None = None, + prefix: AnyStr | None = None, + dir: GenericPath[AnyStr] | None = None, + errors: str | None = None, + ) -> io.FileIO: ... + @overload + def TemporaryFile( + mode: OpenBinaryModeWriting, + buffering: Literal[-1, 1] = -1, + encoding: str | None = None, + newline: str | None = None, + suffix: AnyStr | None = None, + prefix: AnyStr | None = None, + dir: GenericPath[AnyStr] | None = None, + *, + errors: str | None = None, + ) -> io.BufferedWriter: ... + @overload + def TemporaryFile( + mode: OpenBinaryModeReading, + buffering: Literal[-1, 1] = -1, + encoding: str | None = None, + newline: str | None = None, + suffix: AnyStr | None = None, + prefix: AnyStr | None = None, + dir: GenericPath[AnyStr] | None = None, + *, + errors: str | None = None, + ) -> io.BufferedReader: ... + @overload + def TemporaryFile( + mode: OpenBinaryModeUpdating = "w+b", + buffering: Literal[-1, 1] = -1, + encoding: str | None = None, + newline: str | None = None, + suffix: AnyStr | None = None, + prefix: AnyStr | None = None, + dir: GenericPath[AnyStr] | None = None, + *, + errors: str | None = None, + ) -> io.BufferedRandom: ... 
@overload def TemporaryFile( mode: str = "w+b", @@ -152,40 +252,84 @@ else: else: @overload def TemporaryFile( - mode: _StrMode, - buffering: int = ..., - encoding: str | None = ..., - newline: str | None = ..., - suffix: AnyStr | None = ..., - prefix: AnyStr | None = ..., - dir: GenericPath[AnyStr] | None = ..., - ) -> IO[str]: ... + mode: OpenTextMode, + buffering: int = -1, + encoding: str | None = None, + newline: str | None = None, + suffix: AnyStr | None = None, + prefix: AnyStr | None = None, + dir: GenericPath[AnyStr] | None = None, + ) -> io.TextIOWrapper: ... + @overload + def TemporaryFile( + mode: OpenBinaryMode, + buffering: Literal[0], + encoding: str | None = None, + newline: str | None = None, + suffix: AnyStr | None = None, + prefix: AnyStr | None = None, + dir: GenericPath[AnyStr] | None = None, + ) -> io.FileIO: ... + @overload + def TemporaryFile( + *, + buffering: Literal[0], + encoding: str | None = None, + newline: str | None = None, + suffix: AnyStr | None = None, + prefix: AnyStr | None = None, + dir: GenericPath[AnyStr] | None = None, + ) -> io.FileIO: ... + @overload + def TemporaryFile( + mode: OpenBinaryModeUpdating = "w+b", + buffering: Literal[-1, 1] = -1, + encoding: str | None = None, + newline: str | None = None, + suffix: AnyStr | None = None, + prefix: AnyStr | None = None, + dir: GenericPath[AnyStr] | None = None, + ) -> io.BufferedRandom: ... + @overload + def TemporaryFile( + mode: OpenBinaryModeWriting, + buffering: Literal[-1, 1] = -1, + encoding: str | None = None, + newline: str | None = None, + suffix: AnyStr | None = None, + prefix: AnyStr | None = None, + dir: GenericPath[AnyStr] | None = None, + ) -> io.BufferedWriter: ... @overload def TemporaryFile( - mode: _BytesMode = ..., - buffering: int = ..., - encoding: str | None = ..., - newline: str | None = ..., - suffix: AnyStr | None = ..., - prefix: AnyStr | None = ..., - dir: GenericPath[AnyStr] | None = ..., - ) -> IO[bytes]: ... + mode: OpenBinaryModeReading, + buffering: Literal[-1, 1] = -1, + encoding: str | None = None, + newline: str | None = None, + suffix: AnyStr | None = None, + prefix: AnyStr | None = None, + dir: GenericPath[AnyStr] | None = None, + ) -> io.BufferedReader: ... @overload def TemporaryFile( - mode: str = ..., - buffering: int = ..., - encoding: str | None = ..., - newline: str | None = ..., - suffix: AnyStr | None = ..., - prefix: AnyStr | None = ..., - dir: GenericPath[AnyStr] | None = ..., + mode: str = "w+b", + buffering: int = -1, + encoding: str | None = None, + newline: str | None = None, + suffix: AnyStr | None = None, + prefix: AnyStr | None = None, + dir: GenericPath[AnyStr] | None = None, ) -> IO[Any]: ... class _TemporaryFileWrapper(Generic[AnyStr], IO[AnyStr]): file: IO[AnyStr] # io.TextIOWrapper, io.BufferedReader or io.BufferedWriter name: str delete: bool - def __init__(self, file: IO[AnyStr], name: str, delete: bool = True) -> None: ... + if sys.version_info >= (3, 12): + def __init__(self, file: IO[AnyStr], name: str, delete: bool = True, delete_on_close: bool = True) -> None: ... + else: + def __init__(self, file: IO[AnyStr], name: str, delete: bool = True) -> None: ... + def __enter__(self) -> Self: ... def __exit__(self, exc: type[BaseException] | None, value: BaseException | None, tb: TracebackType | None) -> None: ... def __getattr__(self, name: str) -> Any: ... @@ -236,6 +380,7 @@ else: # It does not actually derive from IO[AnyStr], but it does mostly behave # like one. 
class SpooledTemporaryFile(IO[AnyStr], _SpooledTemporaryFileBase): + _file: IO[AnyStr] @property def encoding(self) -> str: ... # undocumented @property @@ -246,7 +391,7 @@ class SpooledTemporaryFile(IO[AnyStr], _SpooledTemporaryFileBase): def __init__( self: SpooledTemporaryFile[bytes], max_size: int = 0, - mode: _BytesMode = "w+b", + mode: OpenBinaryMode = "w+b", buffering: int = -1, encoding: str | None = None, newline: str | None = None, @@ -260,7 +405,7 @@ class SpooledTemporaryFile(IO[AnyStr], _SpooledTemporaryFileBase): def __init__( self: SpooledTemporaryFile[str], max_size: int, - mode: _StrMode, + mode: OpenTextMode, buffering: int = -1, encoding: str | None = None, newline: str | None = None, @@ -275,7 +420,7 @@ class SpooledTemporaryFile(IO[AnyStr], _SpooledTemporaryFileBase): self: SpooledTemporaryFile[str], max_size: int = 0, *, - mode: _StrMode, + mode: OpenTextMode, buffering: int = -1, encoding: str | None = None, newline: str | None = None, @@ -319,7 +464,7 @@ class SpooledTemporaryFile(IO[AnyStr], _SpooledTemporaryFileBase): def __init__( self: SpooledTemporaryFile[bytes], max_size: int = 0, - mode: _BytesMode = "w+b", + mode: OpenBinaryMode = "w+b", buffering: int = -1, encoding: str | None = None, newline: str | None = None, @@ -331,7 +476,7 @@ class SpooledTemporaryFile(IO[AnyStr], _SpooledTemporaryFileBase): def __init__( self: SpooledTemporaryFile[str], max_size: int, - mode: _StrMode, + mode: OpenTextMode, buffering: int = -1, encoding: str | None = None, newline: str | None = None, @@ -344,7 +489,7 @@ class SpooledTemporaryFile(IO[AnyStr], _SpooledTemporaryFileBase): self: SpooledTemporaryFile[str], max_size: int = 0, *, - mode: _StrMode, + mode: OpenTextMode, buffering: int = -1, encoding: str | None = None, newline: str | None = None, @@ -425,7 +570,28 @@ class SpooledTemporaryFile(IO[AnyStr], _SpooledTemporaryFileBase): class TemporaryDirectory(Generic[AnyStr]): name: AnyStr - if sys.version_info >= (3, 10): + if sys.version_info >= (3, 12): + @overload + def __init__( + self: TemporaryDirectory[str], + suffix: str | None = None, + prefix: str | None = None, + dir: StrPath | None = None, + ignore_cleanup_errors: bool = False, + *, + delete: bool = True, + ) -> None: ... + @overload + def __init__( + self: TemporaryDirectory[bytes], + suffix: bytes | None = None, + prefix: bytes | None = None, + dir: BytesPath | None = None, + ignore_cleanup_errors: bool = False, + *, + delete: bool = True, + ) -> None: ... + elif sys.version_info >= (3, 10): @overload def __init__( self: TemporaryDirectory[str], diff --git a/mypy/typeshed/stdlib/threading.pyi b/mypy/typeshed/stdlib/threading.pyi index 6275e4552630..badd09cae051 100644 --- a/mypy/typeshed/stdlib/threading.pyi +++ b/mypy/typeshed/stdlib/threading.pyi @@ -37,6 +37,9 @@ if sys.version_info >= (3, 8): if sys.version_info >= (3, 10): __all__ += ["getprofile", "gettrace"] +if sys.version_info >= (3, 12): + __all__ += ["setprofile_all_threads", "settrace_all_threads"] + _profile_hook: ProfileFunction | None def active_count() -> int: ... @@ -53,6 +56,10 @@ if sys.version_info >= (3, 8): def settrace(func: TraceFunction) -> None: ... def setprofile(func: ProfileFunction | None) -> None: ... +if sys.version_info >= (3, 12): + def setprofile_all_threads(func: ProfileFunction | None) -> None: ... + def settrace_all_threads(func: TraceFunction) -> None: ... + if sys.version_info >= (3, 10): def gettrace() -> TraceFunction | None: ... def getprofile() -> ProfileFunction | None: ... 
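Illustrative usage sketch (not part of the patch): the tempfile hunks above track the Python 3.12 additions, namely the delete_on_close keyword on NamedTemporaryFile and the delete keyword on TemporaryDirectory. A minimal sketch, assuming Python 3.12; the file contents and names below are made up for illustration:

    import sys
    import tempfile

    if sys.version_info >= (3, 12):
        # With delete=True but delete_on_close=False the file survives close()
        # and can be reopened by name; it is removed when the with-block exits.
        with tempfile.NamedTemporaryFile(mode="w+", delete=True, delete_on_close=False) as f:
            f.write("scratch data")
            f.close()
            with open(f.name) as again:
                print(again.read())

        # delete=False creates the directory but skips cleanup on exit.
        with tempfile.TemporaryDirectory(delete=False) as path:
            print("kept:", path)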
diff --git a/mypy/typeshed/stdlib/tkinter/__init__.pyi b/mypy/typeshed/stdlib/tkinter/__init__.pyi index 3291b0c9dd98..a03c48c039dd 100644 --- a/mypy/typeshed/stdlib/tkinter/__init__.pyi +++ b/mypy/typeshed/stdlib/tkinter/__init__.pyi @@ -500,7 +500,7 @@ class Misc: bbox = grid_bbox def grid_columnconfigure( self, - index: _GridIndex, + index: _GridIndex | list[int] | tuple[int, ...], cnf: _GridIndexInfo = {}, *, minsize: _ScreenUnits = ..., @@ -510,7 +510,7 @@ class Misc: ) -> _GridIndexInfo | Any: ... # can be None but annoying to check def grid_rowconfigure( self, - index: _GridIndex, + index: _GridIndex | list[int] | tuple[int, ...], cnf: _GridIndexInfo = {}, *, minsize: _ScreenUnits = ..., @@ -1633,6 +1633,7 @@ class Canvas(Widget, XView, YView): activefill: str = ..., activestipple: str = ..., anchor: _Anchor = ..., + angle: float | str = ..., disabledfill: str = ..., disabledstipple: str = ..., fill: str = ..., @@ -1653,6 +1654,7 @@ class Canvas(Widget, XView, YView): activefill: str = ..., activestipple: str = ..., anchor: _Anchor = ..., + angle: float | str = ..., disabledfill: str = ..., disabledstipple: str = ..., fill: str = ..., diff --git a/mypy/typeshed/stdlib/tkinter/ttk.pyi b/mypy/typeshed/stdlib/tkinter/ttk.pyi index 009fdf51a440..bb416717a378 100644 --- a/mypy/typeshed/stdlib/tkinter/ttk.pyi +++ b/mypy/typeshed/stdlib/tkinter/ttk.pyi @@ -953,17 +953,15 @@ class _TreeviewColumnDict(TypedDict): anchor: tkinter._Anchor id: str -_TreeviewColumnId: TypeAlias = int | str # manual page: "COLUMN IDENTIFIERS" - class Treeview(Widget, tkinter.XView, tkinter.YView): def __init__( self, master: tkinter.Misc | None = None, *, class_: str = ..., - columns: str | list[str] | tuple[str, ...] = ..., + columns: str | list[str] | list[int] | list[str | int] | tuple[str | int, ...] = ..., cursor: tkinter._Cursor = ..., - displaycolumns: str | list[str] | tuple[str, ...] | list[int] | tuple[int, ...] = ..., + displaycolumns: str | int | list[str] | tuple[str, ...] | list[int] | tuple[int, ...] = ..., height: int = ..., name: str = ..., padding: _Padding = ..., @@ -983,9 +981,9 @@ class Treeview(Widget, tkinter.XView, tkinter.YView): self, cnf: dict[str, Any] | None = None, *, - columns: str | list[str] | tuple[str, ...] = ..., + columns: str | list[str] | list[int] | list[str | int] | tuple[str | int, ...] = ..., cursor: tkinter._Cursor = ..., - displaycolumns: str | list[str] | tuple[str, ...] | list[int] | tuple[int, ...] = ..., + displaycolumns: str | int | list[str] | tuple[str, ...] | list[int] | tuple[int, ...] = ..., height: int = ..., padding: _Padding = ..., selectmode: Literal["extended", "browse", "none"] = ..., @@ -998,23 +996,23 @@ class Treeview(Widget, tkinter.XView, tkinter.YView): @overload def configure(self, cnf: str) -> tuple[str, str, str, Any, Any]: ... config = configure - def bbox(self, item, column: _TreeviewColumnId | None = None) -> tuple[int, int, int, int] | Literal[""]: ... # type: ignore[override] - def get_children(self, item: str | None = None) -> tuple[str, ...]: ... - def set_children(self, item: str, *newchildren: str) -> None: ... + def bbox(self, item: str | int, column: str | int | None = None) -> tuple[int, int, int, int] | Literal[""]: ... # type: ignore[override] + def get_children(self, item: str | int | None = None) -> tuple[str, ...]: ... + def set_children(self, item: str | int, *newchildren: str | int) -> None: ... @overload - def column(self, column: _TreeviewColumnId, option: Literal["width", "minwidth"]) -> int: ... 
+ def column(self, column: str | int, option: Literal["width", "minwidth"]) -> int: ... @overload - def column(self, column: _TreeviewColumnId, option: Literal["stretch"]) -> bool: ... # actually 0 or 1 + def column(self, column: str | int, option: Literal["stretch"]) -> bool: ... # actually 0 or 1 @overload - def column(self, column: _TreeviewColumnId, option: Literal["anchor"]) -> _tkinter.Tcl_Obj: ... + def column(self, column: str | int, option: Literal["anchor"]) -> _tkinter.Tcl_Obj: ... @overload - def column(self, column: _TreeviewColumnId, option: Literal["id"]) -> str: ... + def column(self, column: str | int, option: Literal["id"]) -> str: ... @overload - def column(self, column: _TreeviewColumnId, option: str) -> Any: ... + def column(self, column: str | int, option: str) -> Any: ... @overload def column( self, - column: _TreeviewColumnId, + column: str | int, option: None = None, *, width: int = ..., @@ -1023,29 +1021,29 @@ class Treeview(Widget, tkinter.XView, tkinter.YView): anchor: tkinter._Anchor = ..., # id is read-only ) -> _TreeviewColumnDict | None: ... - def delete(self, *items: str) -> None: ... - def detach(self, *items: str) -> None: ... - def exists(self, item: str) -> bool: ... + def delete(self, *items: str | int) -> None: ... + def detach(self, *items: str | int) -> None: ... + def exists(self, item: str | int) -> bool: ... @overload # type: ignore[override] def focus(self, item: None = None) -> str: ... # can return empty string @overload - def focus(self, item: str) -> Literal[""]: ... + def focus(self, item: str | int) -> Literal[""]: ... @overload - def heading(self, column: _TreeviewColumnId, option: Literal["text"]) -> str: ... + def heading(self, column: str | int, option: Literal["text"]) -> str: ... @overload - def heading(self, column: _TreeviewColumnId, option: Literal["image"]) -> tuple[str] | str: ... + def heading(self, column: str | int, option: Literal["image"]) -> tuple[str] | str: ... @overload - def heading(self, column: _TreeviewColumnId, option: Literal["anchor"]) -> _tkinter.Tcl_Obj: ... + def heading(self, column: str | int, option: Literal["anchor"]) -> _tkinter.Tcl_Obj: ... @overload - def heading(self, column: _TreeviewColumnId, option: Literal["command"]) -> str: ... + def heading(self, column: str | int, option: Literal["command"]) -> str: ... @overload - def heading(self, column: _TreeviewColumnId, option: str) -> Any: ... + def heading(self, column: str | int, option: str) -> Any: ... @overload - def heading(self, column: _TreeviewColumnId, option: None = None) -> _TreeviewHeaderDict: ... # type: ignore[misc] + def heading(self, column: str | int, option: None = None) -> _TreeviewHeaderDict: ... # type: ignore[misc] @overload def heading( self, - column: _TreeviewColumnId, + column: str | int, option: None = None, *, text: str = ..., @@ -1058,14 +1056,14 @@ class Treeview(Widget, tkinter.XView, tkinter.YView): def identify_column(self, x: int) -> str: ... def identify_region(self, x: int, y: int) -> Literal["heading", "separator", "tree", "cell", "nothing"]: ... def identify_element(self, x: int, y: int) -> str: ... # don't know what possible return values are - def index(self, item: str) -> int: ... + def index(self, item: str | int) -> int: ... def insert( self, parent: str, index: int | Literal["end"], - iid: str | None = None, + iid: str | int | None = None, *, - id: str = ..., # same as iid + id: str | int = ..., # same as iid text: str = ..., image: tkinter._ImageSpec = ..., values: list[Any] | tuple[Any, ...] 
= ..., @@ -1073,23 +1071,23 @@ class Treeview(Widget, tkinter.XView, tkinter.YView): tags: str | list[str] | tuple[str, ...] = ..., ) -> str: ... @overload - def item(self, item: str, option: Literal["text"]) -> str: ... + def item(self, item: str | int, option: Literal["text"]) -> str: ... @overload - def item(self, item: str, option: Literal["image"]) -> tuple[str] | Literal[""]: ... + def item(self, item: str | int, option: Literal["image"]) -> tuple[str] | Literal[""]: ... @overload - def item(self, item: str, option: Literal["values"]) -> tuple[Any, ...] | Literal[""]: ... + def item(self, item: str | int, option: Literal["values"]) -> tuple[Any, ...] | Literal[""]: ... @overload - def item(self, item: str, option: Literal["open"]) -> bool: ... # actually 0 or 1 + def item(self, item: str | int, option: Literal["open"]) -> bool: ... # actually 0 or 1 @overload - def item(self, item: str, option: Literal["tags"]) -> tuple[str, ...] | Literal[""]: ... + def item(self, item: str | int, option: Literal["tags"]) -> tuple[str, ...] | Literal[""]: ... @overload - def item(self, item: str, option: str) -> Any: ... + def item(self, item: str | int, option: str) -> Any: ... @overload - def item(self, item: str, option: None = None) -> _TreeviewItemDict: ... # type: ignore[misc] + def item(self, item: str | int, option: None = None) -> _TreeviewItemDict: ... # type: ignore[misc] @overload def item( self, - item: str, + item: str | int, option: None = None, *, text: str = ..., @@ -1098,27 +1096,39 @@ class Treeview(Widget, tkinter.XView, tkinter.YView): open: bool = ..., tags: str | list[str] | tuple[str, ...] = ..., ) -> None: ... - def move(self, item: str, parent: str, index: int) -> None: ... + def move(self, item: str | int, parent: str, index: int) -> None: ... reattach = move - def next(self, item: str) -> str: ... # returning empty string means last item - def parent(self, item: str) -> str: ... - def prev(self, item: str) -> str: ... # returning empty string means first item - def see(self, item: str) -> None: ... + def next(self, item: str | int) -> str: ... # returning empty string means last item + def parent(self, item: str | int) -> str: ... + def prev(self, item: str | int) -> str: ... # returning empty string means first item + def see(self, item: str | int) -> None: ... if sys.version_info >= (3, 8): def selection(self) -> tuple[str, ...]: ... else: def selection(self, selop: Incomplete | None = ..., items: Incomplete | None = None) -> tuple[str, ...]: ... - def selection_set(self, items: str | list[str] | tuple[str, ...]) -> None: ... - def selection_add(self, items: str | list[str] | tuple[str, ...]) -> None: ... - def selection_remove(self, items: str | list[str] | tuple[str, ...]) -> None: ... - def selection_toggle(self, items: str | list[str] | tuple[str, ...]) -> None: ... @overload - def set(self, item: str, column: None = None, value: None = None) -> dict[str, Any]: ... + def selection_set(self, __items: list[str] | tuple[str, ...] | list[int] | tuple[int, ...]) -> None: ... + @overload + def selection_set(self, *items: str | int) -> None: ... + @overload + def selection_add(self, __items: list[str] | tuple[str, ...] | list[int] | tuple[int, ...]) -> None: ... + @overload + def selection_add(self, *items: str | int) -> None: ... + @overload + def selection_remove(self, __items: list[str] | tuple[str, ...] | list[int] | tuple[int, ...]) -> None: ... + @overload + def selection_remove(self, *items: str | int) -> None: ... 
+ @overload + def selection_toggle(self, __items: list[str] | tuple[str, ...] | list[int] | tuple[int, ...]) -> None: ... + @overload + def selection_toggle(self, *items: str | int) -> None: ... + @overload + def set(self, item: str | int, column: None = None, value: None = None) -> dict[str, Any]: ... @overload - def set(self, item: str, column: _TreeviewColumnId, value: None = None) -> Any: ... + def set(self, item: str | int, column: str | int, value: None = None) -> Any: ... @overload - def set(self, item: str, column: _TreeviewColumnId, value: Any) -> Literal[""]: ... + def set(self, item: str | int, column: str | int, value: Any) -> Literal[""]: ... # There's no tag_unbind() or 'add' argument for whatever reason. # Also, it's 'callback' instead of 'func' here. @overload @@ -1150,7 +1160,7 @@ class Treeview(Widget, tkinter.XView, tkinter.YView): @overload def tag_has(self, tagname: str, item: None = None) -> tuple[str, ...]: ... @overload - def tag_has(self, tagname: str, item: str) -> bool: ... + def tag_has(self, tagname: str, item: str | int) -> bool: ... class LabeledScale(Frame): label: Incomplete diff --git a/mypy/typeshed/stdlib/token.pyi b/mypy/typeshed/stdlib/token.pyi index fcd6ef87d217..85867a2b9744 100644 --- a/mypy/typeshed/stdlib/token.pyi +++ b/mypy/typeshed/stdlib/token.pyi @@ -73,6 +73,9 @@ if sys.version_info >= (3, 8): if sys.version_info >= (3, 10): __all__ += ["SOFT_KEYWORD"] +if sys.version_info >= (3, 12): + __all__ += ["EXCLAMATION", "FSTRING_END", "FSTRING_MIDDLE", "FSTRING_START"] + ENDMARKER: int NAME: int NUMBER: int @@ -145,6 +148,12 @@ if sys.version_info >= (3, 8): if sys.version_info >= (3, 10): SOFT_KEYWORD: int +if sys.version_info >= (3, 12): + EXCLAMATION: int + FSTRING_END: int + FSTRING_MIDDLE: int + FSTRING_START: int + def ISTERMINAL(x: int) -> bool: ... def ISNONTERMINAL(x: int) -> bool: ... def ISEOF(x: int) -> bool: ... diff --git a/mypy/typeshed/stdlib/tokenize.pyi b/mypy/typeshed/stdlib/tokenize.pyi index ba57402fb845..0028ed034ae6 100644 --- a/mypy/typeshed/stdlib/tokenize.pyi +++ b/mypy/typeshed/stdlib/tokenize.pyi @@ -83,6 +83,9 @@ if sys.version_info >= (3, 8): if sys.version_info >= (3, 10): __all__ += ["SOFT_KEYWORD"] +if sys.version_info >= (3, 12): + __all__ += ["EXCLAMATION", "FSTRING_END", "FSTRING_MIDDLE", "FSTRING_START"] + if sys.version_info >= (3, 8): from token import EXACT_TOKEN_TYPES as EXACT_TOKEN_TYPES else: diff --git a/mypy/typeshed/stdlib/traceback.pyi b/mypy/typeshed/stdlib/traceback.pyi index a6d6d3e168b3..47449dfe8143 100644 --- a/mypy/typeshed/stdlib/traceback.pyi +++ b/mypy/typeshed/stdlib/traceback.pyi @@ -1,5 +1,5 @@ import sys -from _typeshed import SupportsWrite +from _typeshed import SupportsWrite, Unused from collections.abc import Generator, Iterable, Iterator, Mapping from types import FrameType, TracebackType from typing import Any, overload @@ -84,7 +84,10 @@ def format_list(extracted_list: list[FrameSummary]) -> list[str]: ... def print_list(extracted_list: list[FrameSummary], file: SupportsWrite[str] | None = None) -> None: ... if sys.version_info >= (3, 10): - def format_exception_only(__exc: type[BaseException] | None, value: BaseException | None = ...) -> list[str]: ... + @overload + def format_exception_only(__exc: BaseException | None) -> list[str]: ... + @overload + def format_exception_only(__exc: Unused, value: BaseException | None) -> list[str]: ... else: def format_exception_only(etype: type[BaseException] | None, value: BaseException | None) -> list[str]: ... 
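Illustrative usage sketch (not part of the patch): the traceback hunk above splits format_exception_only() into two overloads on 3.10+, where the exception instance alone is accepted. A minimal sketch of both call styles; the ValueError is just an example:

    import sys
    import traceback

    try:
        int("not a number")
    except ValueError as exc:
        if sys.version_info >= (3, 10):
            # 3.10+: pass only the exception instance.
            lines = traceback.format_exception_only(exc)
        else:
            # Older releases require the (type, value) pair.
            lines = traceback.format_exception_only(type(exc), exc)
        print("".join(lines), end="")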
diff --git a/mypy/typeshed/stdlib/tracemalloc.pyi b/mypy/typeshed/stdlib/tracemalloc.pyi index 3dc8b8603fe5..6448a16ce11a 100644 --- a/mypy/typeshed/stdlib/tracemalloc.pyi +++ b/mypy/typeshed/stdlib/tracemalloc.pyi @@ -37,6 +37,7 @@ class Statistic: traceback: Traceback def __init__(self, traceback: Traceback, size: int, count: int) -> None: ... def __eq__(self, other: object) -> bool: ... + def __hash__(self) -> int: ... class StatisticDiff: count: int @@ -46,6 +47,7 @@ class StatisticDiff: traceback: Traceback def __init__(self, traceback: Traceback, size: int, size_diff: int, count: int, count_diff: int) -> None: ... def __eq__(self, other: object) -> bool: ... + def __hash__(self) -> int: ... _FrameTuple: TypeAlias = tuple[str, int] @@ -56,6 +58,7 @@ class Frame: def lineno(self) -> int: ... def __init__(self, frame: _FrameTuple) -> None: ... def __eq__(self, other: object) -> bool: ... + def __hash__(self) -> int: ... def __lt__(self, other: Frame) -> bool: ... if sys.version_info >= (3, 11): def __gt__(self, other: Frame) -> bool: ... @@ -80,6 +83,7 @@ class Trace: def traceback(self) -> Traceback: ... def __init__(self, trace: _TraceTuple) -> None: ... def __eq__(self, other: object) -> bool: ... + def __hash__(self) -> int: ... class Traceback(Sequence[Frame]): if sys.version_info >= (3, 9): @@ -97,6 +101,7 @@ class Traceback(Sequence[Frame]): def __contains__(self, frame: Frame) -> bool: ... # type: ignore[override] def __len__(self) -> int: ... def __eq__(self, other: object) -> bool: ... + def __hash__(self) -> int: ... def __lt__(self, other: Traceback) -> bool: ... if sys.version_info >= (3, 11): def __gt__(self, other: Traceback) -> bool: ... diff --git a/mypy/typeshed/stdlib/turtle.pyi b/mypy/typeshed/stdlib/turtle.pyi index 5df3e4b90cb5..80ea40879dee 100644 --- a/mypy/typeshed/stdlib/turtle.pyi +++ b/mypy/typeshed/stdlib/turtle.pyi @@ -1,3 +1,4 @@ +import sys from collections.abc import Callable, Sequence from tkinter import Canvas, Frame, Misc, PhotoImage, Scrollbar from typing import Any, ClassVar, overload @@ -249,6 +250,9 @@ class TNavigator: def reset(self) -> None: ... def degrees(self, fullcircle: float = 360.0) -> None: ... def radians(self) -> None: ... + if sys.version_info >= (3, 12): + def teleport(self, x: float | None = None, y: float | None = None, *, fill_gap: bool = False) -> None: ... + def forward(self, distance: float) -> None: ... def back(self, distance: float) -> None: ... def right(self, angle: float) -> None: ... @@ -321,6 +325,9 @@ class TPen: def color(self, r: float, g: float, b: float) -> None: ... @overload def color(self, color1: _Color, color2: _Color) -> None: ... + if sys.version_info >= (3, 12): + def teleport(self, x: float | None = None, y: float | None = None, *, fill_gap: bool = False) -> None: ... + def showturtle(self) -> None: ... def hideturtle(self) -> None: ... def isvisible(self) -> bool: ... 
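Illustrative usage sketch (not part of the patch): the turtle hunk above adds the 3.12 teleport() method to TNavigator and TPen. A minimal sketch, assuming Python 3.12 and an available display; the coordinates are arbitrary:

    import sys
    import turtle

    if sys.version_info >= (3, 12):
        t = turtle.Turtle()
        t.forward(50)
        # Jump to a new position without drawing a line; the heading is kept.
        t.teleport(-100, 75)
        # fill_gap controls how an in-progress fill treats the jump
        # (see the turtle documentation for the exact semantics).
        t.teleport(0, 0, fill_gap=True)
        turtle.done()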
diff --git a/mypy/typeshed/stdlib/types.pyi b/mypy/typeshed/stdlib/types.pyi index 43475d91279d..2f4bd1a88047 100644 --- a/mypy/typeshed/stdlib/types.pyi +++ b/mypy/typeshed/stdlib/types.pyi @@ -17,7 +17,7 @@ from importlib.machinery import ModuleSpec # pytype crashes if types.MappingProxyType inherits from collections.abc.Mapping instead of typing.Mapping from typing import Any, ClassVar, Generic, Mapping, Protocol, TypeVar, overload # noqa: Y022 -from typing_extensions import Literal, ParamSpec, final +from typing_extensions import Literal, ParamSpec, Self, TypeVarTuple, final __all__ = [ "FunctionType", @@ -63,17 +63,15 @@ if sys.version_info >= (3, 12): _T1 = TypeVar("_T1") _T2 = TypeVar("_T2") -_T_co = TypeVar("_T_co", covariant=True) -_T_contra = TypeVar("_T_contra", contravariant=True) _KT = TypeVar("_KT") _VT_co = TypeVar("_VT_co", covariant=True) -_V_co = TypeVar("_V_co", covariant=True) @final class _Cell: if sys.version_info >= (3, 8): def __init__(self, __contents: object = ...) -> None: ... + def __eq__(self, __value: object) -> bool: ... __hash__: ClassVar[None] # type: ignore[assignment] cell_contents: Any @@ -94,6 +92,8 @@ class FunctionType: if sys.version_info >= (3, 10): @property def __builtins__(self) -> dict[str, Any]: ... + if sys.version_info >= (3, 12): + __type_params__: tuple[TypeVar | ParamSpec | TypeVarTuple, ...] __module__: str def __init__( @@ -114,6 +114,8 @@ LambdaType = FunctionType @final class CodeType: + def __eq__(self, __value: object) -> bool: ... + def __hash__(self) -> int: ... @property def co_argcount(self) -> int: ... if sys.version_info >= (3, 8): @@ -327,6 +329,7 @@ class MappingProxyType(Mapping[_KT, _VT_co], Generic[_KT, _VT_co]): class SimpleNamespace: __hash__: ClassVar[None] # type: ignore[assignment] def __init__(self, **kwargs: Any) -> None: ... + def __eq__(self, __value: object) -> bool: ... def __getattribute__(self, __name: str) -> Any: ... def __setattr__(self, __name: str, __value: Any) -> None: ... def __delattr__(self, __name: str) -> None: ... @@ -349,46 +352,54 @@ class ModuleType: # using `builtins.__import__` or `importlib.import_module` less painful def __getattr__(self, name: str) -> Any: ... +_YieldT_co = TypeVar("_YieldT_co", covariant=True) +_SendT_contra = TypeVar("_SendT_contra", contravariant=True) +_ReturnT_co = TypeVar("_ReturnT_co", covariant=True) + @final -class GeneratorType(Generator[_T_co, _T_contra, _V_co]): +class GeneratorType(Generator[_YieldT_co, _SendT_contra, _ReturnT_co]): @property - def gi_yieldfrom(self) -> GeneratorType[_T_co, _T_contra, Any] | None: ... + def gi_yieldfrom(self) -> GeneratorType[_YieldT_co, _SendT_contra, Any] | None: ... if sys.version_info >= (3, 11): @property def gi_suspended(self) -> bool: ... __name__: str __qualname__: str - def __iter__(self) -> GeneratorType[_T_co, _T_contra, _V_co]: ... - def __next__(self) -> _T_co: ... - def send(self, __arg: _T_contra) -> _T_co: ... + def __iter__(self) -> Self: ... + def __next__(self) -> _YieldT_co: ... + def send(self, __arg: _SendT_contra) -> _YieldT_co: ... @overload def throw( self, __typ: type[BaseException], __val: BaseException | object = ..., __tb: TracebackType | None = ... - ) -> _T_co: ... + ) -> _YieldT_co: ... @overload - def throw(self, __typ: BaseException, __val: None = None, __tb: TracebackType | None = ...) -> _T_co: ... + def throw(self, __typ: BaseException, __val: None = None, __tb: TracebackType | None = ...) -> _YieldT_co: ... 
@final -class AsyncGeneratorType(AsyncGenerator[_T_co, _T_contra]): +class AsyncGeneratorType(AsyncGenerator[_YieldT_co, _SendT_contra]): @property def ag_await(self) -> Awaitable[Any] | None: ... __name__: str __qualname__: str - def __aiter__(self) -> AsyncGeneratorType[_T_co, _T_contra]: ... - def __anext__(self) -> Coroutine[Any, Any, _T_co]: ... - def asend(self, __val: _T_contra) -> Coroutine[Any, Any, _T_co]: ... + if sys.version_info >= (3, 12): + @property + def ag_suspended(self) -> bool: ... + + def __aiter__(self) -> Self: ... + def __anext__(self) -> Coroutine[Any, Any, _YieldT_co]: ... + def asend(self, __val: _SendT_contra) -> Coroutine[Any, Any, _YieldT_co]: ... @overload async def athrow( self, __typ: type[BaseException], __val: BaseException | object = ..., __tb: TracebackType | None = ... - ) -> _T_co: ... + ) -> _YieldT_co: ... @overload - async def athrow(self, __typ: BaseException, __val: None = None, __tb: TracebackType | None = ...) -> _T_co: ... + async def athrow(self, __typ: BaseException, __val: None = None, __tb: TracebackType | None = ...) -> _YieldT_co: ... def aclose(self) -> Coroutine[Any, Any, None]: ... if sys.version_info >= (3, 9): def __class_getitem__(cls, __item: Any) -> GenericAlias: ... @final -class CoroutineType(Coroutine[_T_co, _T_contra, _V_co]): +class CoroutineType(Coroutine[_YieldT_co, _SendT_contra, _ReturnT_co]): __name__: str __qualname__: str @property @@ -398,14 +409,14 @@ class CoroutineType(Coroutine[_T_co, _T_contra, _V_co]): def cr_suspended(self) -> bool: ... def close(self) -> None: ... - def __await__(self) -> Generator[Any, None, _V_co]: ... - def send(self, __arg: _T_contra) -> _T_co: ... + def __await__(self) -> Generator[Any, None, _ReturnT_co]: ... + def send(self, __arg: _SendT_contra) -> _YieldT_co: ... @overload def throw( self, __typ: type[BaseException], __val: BaseException | object = ..., __tb: TracebackType | None = ... - ) -> _T_co: ... + ) -> _YieldT_co: ... @overload - def throw(self, __typ: BaseException, __val: None = None, __tb: TracebackType | None = ...) -> _T_co: ... + def throw(self, __typ: BaseException, __val: None = None, __tb: TracebackType | None = ...) -> _YieldT_co: ... class _StaticFunctionType: # Fictional type to correct the type of MethodType.__func__. @@ -435,6 +446,8 @@ class MethodType: def __qualname__(self) -> str: ... # inherited from the added function def __init__(self, __func: Callable[..., Any], __obj: object) -> None: ... def __call__(self, *args: Any, **kwargs: Any) -> Any: ... + def __eq__(self, __value: object) -> bool: ... + def __hash__(self) -> int: ... @final class BuiltinFunctionType: @@ -445,6 +458,8 @@ class BuiltinFunctionType: @property def __qualname__(self) -> str: ... def __call__(self, *args: Any, **kwargs: Any) -> Any: ... + def __eq__(self, __value: object) -> bool: ... + def __hash__(self) -> int: ... BuiltinMethodType = BuiltinFunctionType @@ -472,6 +487,7 @@ class MethodWrapperType: def __call__(self, *args: Any, **kwargs: Any) -> Any: ... def __eq__(self, __value: object) -> bool: ... def __ne__(self, __value: object) -> bool: ... + def __hash__(self) -> int: ... @final class MethodDescriptorType: @@ -596,6 +612,8 @@ if sys.version_info >= (3, 9): def __parameters__(self) -> tuple[Any, ...]: ... def __init__(self, origin: type, args: Any) -> None: ... def __getitem__(self, __typeargs: Any) -> GenericAlias: ... + def __eq__(self, __value: object) -> bool: ... + def __hash__(self) -> int: ... 
if sys.version_info >= (3, 11): @property def __unpacked__(self) -> bool: ... @@ -619,3 +637,5 @@ if sys.version_info >= (3, 10): def __args__(self) -> tuple[Any, ...]: ... def __or__(self, __value: Any) -> UnionType: ... def __ror__(self, __value: Any) -> UnionType: ... + def __eq__(self, __value: object) -> bool: ... + def __hash__(self) -> int: ... diff --git a/mypy/typeshed/stdlib/typing.pyi b/mypy/typeshed/stdlib/typing.pyi index db042dc440ae..a9bffdf5214f 100644 --- a/mypy/typeshed/stdlib/typing.pyi +++ b/mypy/typeshed/stdlib/typing.pyi @@ -226,12 +226,14 @@ if sys.version_info >= (3, 10): @property def __origin__(self) -> ParamSpec: ... def __init__(self, origin: ParamSpec) -> None: ... + def __eq__(self, other: object) -> bool: ... @_final class ParamSpecKwargs: @property def __origin__(self) -> ParamSpec: ... def __init__(self, origin: ParamSpec) -> None: ... + def __eq__(self, other: object) -> bool: ... @_final class ParamSpec: @@ -289,10 +291,8 @@ _S = TypeVar("_S") _KT = TypeVar("_KT") # Key type. _VT = TypeVar("_VT") # Value type. _T_co = TypeVar("_T_co", covariant=True) # Any type covariant containers. -_V_co = TypeVar("_V_co", covariant=True) # Any type covariant containers. _KT_co = TypeVar("_KT_co", covariant=True) # Key type covariant containers. _VT_co = TypeVar("_VT_co", covariant=True) # Value type covariant containers. -_T_contra = TypeVar("_T_contra", contravariant=True) # Ditto contravariant. _TC = TypeVar("_TC", bound=Type[object]) def no_type_check(arg: _F) -> _F: ... @@ -397,20 +397,24 @@ class Reversible(Iterable[_T_co], Protocol[_T_co]): @abstractmethod def __reversed__(self) -> Iterator[_T_co]: ... -class Generator(Iterator[_T_co], Generic[_T_co, _T_contra, _V_co]): - def __next__(self) -> _T_co: ... +_YieldT_co = TypeVar("_YieldT_co", covariant=True) +_SendT_contra = TypeVar("_SendT_contra", contravariant=True) +_ReturnT_co = TypeVar("_ReturnT_co", covariant=True) + +class Generator(Iterator[_YieldT_co], Generic[_YieldT_co, _SendT_contra, _ReturnT_co]): + def __next__(self) -> _YieldT_co: ... @abstractmethod - def send(self, __value: _T_contra) -> _T_co: ... + def send(self, __value: _SendT_contra) -> _YieldT_co: ... @overload @abstractmethod def throw( self, __typ: Type[BaseException], __val: BaseException | object = None, __tb: TracebackType | None = None - ) -> _T_co: ... + ) -> _YieldT_co: ... @overload @abstractmethod - def throw(self, __typ: BaseException, __val: None = None, __tb: TracebackType | None = None) -> _T_co: ... + def throw(self, __typ: BaseException, __val: None = None, __tb: TracebackType | None = None) -> _YieldT_co: ... def close(self) -> None: ... - def __iter__(self) -> Generator[_T_co, _T_contra, _V_co]: ... + def __iter__(self) -> Generator[_YieldT_co, _SendT_contra, _ReturnT_co]: ... @property def gi_code(self) -> CodeType: ... @property @@ -425,7 +429,7 @@ class Awaitable(Protocol[_T_co]): @abstractmethod def __await__(self) -> Generator[Any, None, _T_co]: ... -class Coroutine(Awaitable[_V_co], Generic[_T_co, _T_contra, _V_co]): +class Coroutine(Awaitable[_ReturnT_co], Generic[_YieldT_co, _SendT_contra, _ReturnT_co]): __name__: str __qualname__: str @property @@ -437,15 +441,15 @@ class Coroutine(Awaitable[_V_co], Generic[_T_co, _T_contra, _V_co]): @property def cr_running(self) -> bool: ... @abstractmethod - def send(self, __value: _T_contra) -> _T_co: ... + def send(self, __value: _SendT_contra) -> _YieldT_co: ... 
@overload @abstractmethod def throw( self, __typ: Type[BaseException], __val: BaseException | object = None, __tb: TracebackType | None = None - ) -> _T_co: ... + ) -> _YieldT_co: ... @overload @abstractmethod - def throw(self, __typ: BaseException, __val: None = None, __tb: TracebackType | None = None) -> _T_co: ... + def throw(self, __typ: BaseException, __val: None = None, __tb: TracebackType | None = None) -> _YieldT_co: ... @abstractmethod def close(self) -> None: ... @@ -453,7 +457,10 @@ class Coroutine(Awaitable[_V_co], Generic[_T_co, _T_contra, _V_co]): # The parameters correspond to Generator, but the 4th is the original type. @type_check_only class AwaitableGenerator( - Awaitable[_V_co], Generator[_T_co, _T_contra, _V_co], Generic[_T_co, _T_contra, _V_co, _S], metaclass=ABCMeta + Awaitable[_ReturnT_co], + Generator[_YieldT_co, _SendT_contra, _ReturnT_co], + Generic[_YieldT_co, _SendT_contra, _ReturnT_co, _S], + metaclass=ABCMeta, ): ... @runtime_checkable @@ -467,18 +474,18 @@ class AsyncIterator(AsyncIterable[_T_co], Protocol[_T_co]): def __anext__(self) -> Awaitable[_T_co]: ... def __aiter__(self) -> AsyncIterator[_T_co]: ... -class AsyncGenerator(AsyncIterator[_T_co], Generic[_T_co, _T_contra]): - def __anext__(self) -> Awaitable[_T_co]: ... +class AsyncGenerator(AsyncIterator[_YieldT_co], Generic[_YieldT_co, _SendT_contra]): + def __anext__(self) -> Awaitable[_YieldT_co]: ... @abstractmethod - def asend(self, __value: _T_contra) -> Awaitable[_T_co]: ... + def asend(self, __value: _SendT_contra) -> Awaitable[_YieldT_co]: ... @overload @abstractmethod def athrow( self, __typ: Type[BaseException], __val: BaseException | object = None, __tb: TracebackType | None = None - ) -> Awaitable[_T_co]: ... + ) -> Awaitable[_YieldT_co]: ... @overload @abstractmethod - def athrow(self, __typ: BaseException, __val: None = None, __tb: TracebackType | None = None) -> Awaitable[_T_co]: ... + def athrow(self, __typ: BaseException, __val: None = None, __tb: TracebackType | None = None) -> Awaitable[_YieldT_co]: ... def aclose(self) -> Awaitable[None]: ... @property def ag_await(self) -> Any: ... @@ -558,6 +565,7 @@ class AbstractSet(Collection[_T_co], Generic[_T_co]): def __or__(self, other: AbstractSet[_T]) -> AbstractSet[_T_co | _T]: ... def __sub__(self, other: AbstractSet[Any]) -> AbstractSet[_T_co]: ... def __xor__(self, other: AbstractSet[_T]) -> AbstractSet[_T_co | _T]: ... + def __eq__(self, other: object) -> bool: ... def isdisjoint(self, other: Iterable[Any]) -> bool: ... class MutableSet(AbstractSet[_T], Generic[_T]): @@ -631,6 +639,7 @@ class Mapping(Collection[_KT], Generic[_KT, _VT_co]): def keys(self) -> KeysView[_KT]: ... def values(self) -> ValuesView[_VT_co]: ... def __contains__(self, __key: object) -> bool: ... + def __eq__(self, __other: object) -> bool: ... class MutableMapping(Mapping[_KT, _VT], Generic[_KT, _VT]): @abstractmethod @@ -641,7 +650,9 @@ class MutableMapping(Mapping[_KT, _VT], Generic[_KT, _VT]): @overload def pop(self, __key: _KT) -> _VT: ... @overload - def pop(self, __key: _KT, default: _VT | _T) -> _VT | _T: ... + def pop(self, __key: _KT, default: _VT) -> _VT: ... + @overload + def pop(self, __key: _KT, default: _T) -> _VT | _T: ... def popitem(self) -> tuple[_KT, _VT]: ... # This overload should be allowed only if the value type is compatible with None. 
# @@ -853,9 +864,9 @@ class NamedTuple(tuple[Any, ...]): if sys.version_info >= (3, 12): __orig_bases__: ClassVar[tuple[Any, ...]] @overload - def __init__(self, typename: str, fields: Iterable[tuple[str, Any]] = ...) -> None: ... + def __init__(self, __typename: str, __fields: Iterable[tuple[str, Any]]) -> None: ... @overload - def __init__(self, typename: str, fields: None = None, **kwargs: Any) -> None: ... + def __init__(self, __typename: str, __fields: None = None, **kwargs: Any) -> None: ... @classmethod def _make(cls, iterable: Iterable[Any]) -> typing_extensions.Self: ... if sys.version_info >= (3, 8): @@ -889,8 +900,16 @@ class _TypedDict(Mapping[str, object], metaclass=ABCMeta): def keys(self) -> dict_keys[str, object]: ... def values(self) -> dict_values[str, object]: ... if sys.version_info >= (3, 9): + @overload def __or__(self, __value: typing_extensions.Self) -> typing_extensions.Self: ... - def __ior__(self, __value: typing_extensions.Self) -> typing_extensions.Self: ... + @overload + def __or__(self, __value: dict[str, Any]) -> dict[str, object]: ... + @overload + def __ror__(self, __value: typing_extensions.Self) -> typing_extensions.Self: ... + @overload + def __ror__(self, __value: dict[str, Any]) -> dict[str, object]: ... + # supposedly incompatible definitions of __or__ and __ior__ + def __ior__(self, __value: typing_extensions.Self) -> typing_extensions.Self: ... # type: ignore[misc] @_final class ForwardRef: @@ -915,6 +934,7 @@ class ForwardRef: def _evaluate(self, globalns: dict[str, Any] | None, localns: dict[str, Any] | None) -> Any | None: ... def __eq__(self, other: object) -> bool: ... + def __hash__(self) -> int: ... if sys.version_info >= (3, 11): def __or__(self, other: Any) -> _SpecialForm: ... def __ror__(self, other: Any) -> _SpecialForm: ... @@ -946,3 +966,7 @@ if sys.version_info >= (3, 12): if sys.version_info >= (3, 10): def __or__(self, right: Any) -> _SpecialForm: ... def __ror__(self, left: Any) -> _SpecialForm: ... + +if sys.version_info >= (3, 13): + def is_protocol(__tp: type) -> bool: ... + def get_protocol_members(__tp: type) -> frozenset[str]: ... 
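Illustrative usage sketch (not part of the patch): the typing hunk above declares is_protocol() and get_protocol_members() for 3.13, and the typing_extensions stub below provides the same names as a backport. A minimal sketch, assuming a typing_extensions release that ships these helpers; SupportsClose is a made-up protocol:

    from typing import Protocol

    from typing_extensions import get_protocol_members, is_protocol

    class SupportsClose(Protocol):
        def close(self) -> None: ...

    assert is_protocol(SupportsClose)
    assert not is_protocol(int)
    # Returns a frozenset of member names, matching frozenset[str] in the stub.
    print(get_protocol_members(SupportsClose))  # frozenset({'close'})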
diff --git a/mypy/typeshed/stdlib/typing_extensions.pyi b/mypy/typeshed/stdlib/typing_extensions.pyi index 93087a45a108..9320dc50b6bb 100644 --- a/mypy/typeshed/stdlib/typing_extensions.pyi +++ b/mypy/typeshed/stdlib/typing_extensions.pyi @@ -4,26 +4,52 @@ import sys import typing from _collections_abc import dict_items, dict_keys, dict_values from _typeshed import IdentityFunction, Incomplete -from collections.abc import Iterable -from typing import ( # noqa: Y022,Y039 +from typing import ( # noqa: Y022,Y037,Y038,Y039 + IO as IO, TYPE_CHECKING as TYPE_CHECKING, + AbstractSet as AbstractSet, Any as Any, + AnyStr as AnyStr, AsyncContextManager as AsyncContextManager, AsyncGenerator as AsyncGenerator, AsyncIterable as AsyncIterable, AsyncIterator as AsyncIterator, Awaitable as Awaitable, - Callable, + BinaryIO as BinaryIO, + Callable as Callable, ChainMap as ChainMap, ClassVar as ClassVar, + Collection as Collection, + Container as Container, ContextManager as ContextManager, Coroutine as Coroutine, Counter as Counter, DefaultDict as DefaultDict, Deque as Deque, - Mapping, + Dict as Dict, + ForwardRef as ForwardRef, + FrozenSet as FrozenSet, + Generator as Generator, + Generic as Generic, + Hashable as Hashable, + ItemsView as ItemsView, + Iterable as Iterable, + Iterator as Iterator, + KeysView as KeysView, + List as List, + Mapping as Mapping, + MappingView as MappingView, + Match as Match, + MutableMapping as MutableMapping, + MutableSequence as MutableSequence, + MutableSet as MutableSet, NoReturn as NoReturn, - Sequence, + Optional as Optional, + Pattern as Pattern, + Reversible as Reversible, + Sequence as Sequence, + Set as Set, + Sized as Sized, SupportsAbs as SupportsAbs, SupportsBytes as SupportsBytes, SupportsComplex as SupportsComplex, @@ -31,8 +57,15 @@ from typing import ( # noqa: Y022,Y039 SupportsInt as SupportsInt, SupportsRound as SupportsRound, Text as Text, + TextIO as TextIO, + Tuple as Tuple, Type as Type, + Union as Union, + ValuesView as ValuesView, _Alias, + cast as cast, + no_type_check as no_type_check, + no_type_check_decorator as no_type_check_decorator, overload as overload, type_check_only, ) @@ -109,11 +142,50 @@ __all__ = [ "get_original_bases", "get_overloads", "get_type_hints", + "AbstractSet", + "AnyStr", + "BinaryIO", + "Callable", + "Collection", + "Container", + "Dict", + "ForwardRef", + "FrozenSet", + "Generator", + "Generic", + "Hashable", + "IO", + "ItemsView", + "Iterable", + "Iterator", + "KeysView", + "List", + "Mapping", + "MappingView", + "Match", + "MutableMapping", + "MutableSequence", + "MutableSet", + "Optional", + "Pattern", + "Reversible", + "Sequence", + "Set", + "Sized", + "TextIO", + "Tuple", + "Union", + "ValuesView", + "cast", + "get_protocol_members", + "is_protocol", + "no_type_check", + "no_type_check_decorator", ] _T = typing.TypeVar("_T") _F = typing.TypeVar("_F", bound=Callable[..., Any]) -_TC = typing.TypeVar("_TC", bound=Type[object]) +_TC = typing.TypeVar("_TC", bound=type[object]) # unfortunately we have to duplicate this class definition from typing.pyi or we break pytype class _SpecialForm: @@ -161,8 +233,16 @@ class _TypedDict(Mapping[str, object], metaclass=abc.ABCMeta): def values(self) -> dict_values[str, object]: ... def __delitem__(self, k: Never) -> None: ... if sys.version_info >= (3, 9): + @overload def __or__(self, __value: Self) -> Self: ... - def __ior__(self, __value: Self) -> Self: ... + @overload + def __or__(self, __value: dict[str, Any]) -> dict[str, object]: ... 
+ @overload + def __ror__(self, __value: Self) -> Self: ... + @overload + def __ror__(self, __value: dict[str, Any]) -> dict[str, object]: ... + # supposedly incompatible definitions of `__ior__` and `__or__`: + def __ior__(self, __value: Self) -> Self: ... # type: ignore[misc] # TypedDict is a (non-subscriptable) special form. TypedDict: object @@ -403,3 +483,9 @@ else: # Not actually a Protocol at runtime; see # https://github.com/python/typeshed/issues/10224 for why we're defining it this way def __buffer__(self, __flags: int) -> memoryview: ... + +if sys.version_info >= (3, 13): + from typing import get_protocol_members as get_protocol_members, is_protocol as is_protocol +else: + def is_protocol(__tp: type) -> bool: ... + def get_protocol_members(__tp: type) -> frozenset[str]: ... diff --git a/mypy/typeshed/stdlib/unittest/case.pyi b/mypy/typeshed/stdlib/unittest/case.pyi index 45c39e3f3010..1f58f266ee89 100644 --- a/mypy/typeshed/stdlib/unittest/case.pyi +++ b/mypy/typeshed/stdlib/unittest/case.pyi @@ -86,6 +86,7 @@ class TestCase: _testMethodDoc: str def __init__(self, methodName: str = "runTest") -> None: ... def __eq__(self, other: object) -> bool: ... + def __hash__(self) -> int: ... def setUp(self) -> None: ... def tearDown(self) -> None: ... @classmethod @@ -304,6 +305,8 @@ class FunctionTestCase(TestCase): description: str | None = None, ) -> None: ... def runTest(self) -> None: ... + def __hash__(self) -> int: ... + def __eq__(self, other: object) -> bool: ... class _AssertRaisesContext(Generic[_E]): exception: _E diff --git a/mypy/typeshed/stdlib/unittest/mock.pyi b/mypy/typeshed/stdlib/unittest/mock.pyi index 1f554da52d5d..66120197b269 100644 --- a/mypy/typeshed/stdlib/unittest/mock.pyi +++ b/mypy/typeshed/stdlib/unittest/mock.pyi @@ -3,13 +3,14 @@ from collections.abc import Awaitable, Callable, Coroutine, Iterable, Mapping, S from contextlib import _GeneratorContextManager from types import TracebackType from typing import Any, Generic, TypeVar, overload -from typing_extensions import Final, Literal, Self, TypeAlias +from typing_extensions import Final, Literal, ParamSpec, Self, TypeAlias _T = TypeVar("_T") _TT = TypeVar("_TT", bound=type[Any]) _R = TypeVar("_R") _F = TypeVar("_F", bound=Callable[..., Any]) _AF = TypeVar("_AF", bound=Callable[..., Coroutine[Any, Any, Any]]) +_P = ParamSpec("_P") if sys.version_info >= (3, 8): __all__ = ( @@ -233,8 +234,10 @@ class _patch(Generic[_T]): def copy(self) -> _patch[_T]: ... @overload def __call__(self, func: _TT) -> _TT: ... + # If new==DEFAULT, this should add a MagicMock parameter to the function + # arguments. See the _patch_default_new class below for this functionality. @overload - def __call__(self, func: Callable[..., _R]) -> Callable[..., _R]: ... + def __call__(self, func: Callable[_P, _R]) -> Callable[_P, _R]: ... if sys.version_info >= (3, 8): def decoration_helper( self, patched: _patch[Any], args: Sequence[Any], keywargs: Any @@ -256,6 +259,22 @@ class _patch(Generic[_T]): def start(self) -> _T: ... def stop(self) -> None: ... +if sys.version_info >= (3, 8): + _Mock: TypeAlias = MagicMock | AsyncMock +else: + _Mock: TypeAlias = MagicMock + +# This class does not exist at runtime, it's a hack to make this work: +# @patch("foo") +# def bar(..., mock: MagicMock) -> None: ... +class _patch_default_new(_patch[_Mock]): + @overload + def __call__(self, func: _TT) -> _TT: ... 
+ # Can't use the following as ParamSpec is only allowed as last parameter: + # def __call__(self, func: Callable[_P, _R]) -> Callable[Concatenate[_P, MagicMock], _R]: ... + @overload + def __call__(self, func: Callable[..., _R]) -> Callable[..., _R]: ... + class _patch_dict: in_dict: Any values: Any @@ -272,11 +291,8 @@ class _patch_dict: start: Any stop: Any -if sys.version_info >= (3, 8): - _Mock: TypeAlias = MagicMock | AsyncMock -else: - _Mock: TypeAlias = MagicMock - +# This class does not exist at runtime, it's a hack to add methods to the +# patch() function. class _patcher: TEST_PREFIX: str dict: type[_patch_dict] @@ -306,7 +322,7 @@ class _patcher: autospec: Any | None = ..., new_callable: Any | None = ..., **kwargs: Any, - ) -> _patch[_Mock]: ... + ) -> _patch_default_new: ... @overload @staticmethod def object( # type: ignore[misc] @@ -373,7 +389,11 @@ if sys.version_info >= (3, 8): class AsyncMagicMixin(MagicMixin): def __init__(self, *args: Any, **kw: Any) -> None: ... - class AsyncMock(AsyncMockMixin, AsyncMagicMixin, Mock): ... + class AsyncMock(AsyncMockMixin, AsyncMagicMixin, Mock): + # Improving the `reset_mock` signature. + # It is defined on `AsyncMockMixin` with `*args, **kwargs`, which is not ideal. + # But, `NonCallableMock` super-class has the better version. + def reset_mock(self, visited: Any = None, *, return_value: bool = False, side_effect: bool = False) -> None: ... class MagicProxy: name: str diff --git a/mypy/typeshed/stdlib/urllib/request.pyi b/mypy/typeshed/stdlib/urllib/request.pyi index 8f99c5837871..079c9755528c 100644 --- a/mypy/typeshed/stdlib/urllib/request.pyi +++ b/mypy/typeshed/stdlib/urllib/request.pyi @@ -173,7 +173,7 @@ class HTTPPasswordMgr: def add_password(self, realm: str, uri: str | Sequence[str], user: str, passwd: str) -> None: ... def find_user_password(self, realm: str, authuri: str) -> tuple[str | None, str | None]: ... def is_suburi(self, base: str, test: str) -> bool: ... # undocumented - def reduce_uri(self, uri: str, default_port: bool = True) -> str: ... # undocumented + def reduce_uri(self, uri: str, default_port: bool = True) -> tuple[str, str]: ... # undocumented class HTTPPasswordMgrWithDefaultRealm(HTTPPasswordMgr): def add_password(self, realm: str | None, uri: str | Sequence[str], user: str, passwd: str) -> None: ... @@ -184,7 +184,7 @@ class HTTPPasswordMgrWithPriorAuth(HTTPPasswordMgrWithDefaultRealm): self, realm: str | None, uri: str | Sequence[str], user: str, passwd: str, is_authenticated: bool = False ) -> None: ... def update_authenticated(self, uri: str | Sequence[str], is_authenticated: bool = False) -> None: ... - def is_authenticated(self, authuri: str) -> bool: ... + def is_authenticated(self, authuri: str) -> bool | None: ... class AbstractBasicAuthHandler: rx: ClassVar[Pattern[str]] # undocumented @@ -212,7 +212,7 @@ class AbstractDigestAuthHandler: def http_error_auth_reqed(self, auth_header: str, host: str, req: Request, headers: HTTPMessage) -> None: ... def retry_http_digest_auth(self, req: Request, auth: str) -> _UrlopenRet | None: ... def get_cnonce(self, nonce: str) -> str: ... - def get_authorization(self, req: Request, chal: Mapping[str, str]) -> str: ... + def get_authorization(self, req: Request, chal: Mapping[str, str]) -> str | None: ... def get_algorithm_impls(self, algorithm: str) -> tuple[Callable[[str], str], Callable[[str, str], str]]: ... def get_entity_digest(self, data: ReadableBuffer | None, chal: Mapping[str, str]) -> str | None: ... 
@@ -269,7 +269,7 @@ class ftpwrapper: # undocumented def file_close(self) -> None: ... def init(self) -> None: ... def real_close(self) -> None: ... - def retrfile(self, file: str, type: str) -> tuple[addclosehook, int]: ... + def retrfile(self, file: str, type: str) -> tuple[addclosehook, int | None]: ... class FTPHandler(BaseHandler): def ftp_open(self, req: Request) -> addinfourl: ... diff --git a/mypy/typeshed/stdlib/uuid.pyi b/mypy/typeshed/stdlib/uuid.pyi index 935e44e80dfa..e1ea424f9680 100644 --- a/mypy/typeshed/stdlib/uuid.pyi +++ b/mypy/typeshed/stdlib/uuid.pyi @@ -1,11 +1,9 @@ +import builtins import sys from _typeshed import Unused from enum import Enum from typing_extensions import TypeAlias -# Because UUID has properties called int and bytes we need to rename these temporarily. -_Int: TypeAlias = int -_Bytes: TypeAlias = bytes _FieldsType: TypeAlias = tuple[int, int, int, int, int, int] class SafeUUID(Enum): @@ -17,54 +15,55 @@ class UUID: def __init__( self, hex: str | None = None, - bytes: _Bytes | None = None, - bytes_le: _Bytes | None = None, + bytes: builtins.bytes | None = None, + bytes_le: builtins.bytes | None = None, fields: _FieldsType | None = None, - int: _Int | None = None, - version: _Int | None = None, + int: builtins.int | None = None, + version: builtins.int | None = None, *, is_safe: SafeUUID = ..., ) -> None: ... @property def is_safe(self) -> SafeUUID: ... @property - def bytes(self) -> _Bytes: ... + def bytes(self) -> builtins.bytes: ... @property - def bytes_le(self) -> _Bytes: ... + def bytes_le(self) -> builtins.bytes: ... @property - def clock_seq(self) -> _Int: ... + def clock_seq(self) -> builtins.int: ... @property - def clock_seq_hi_variant(self) -> _Int: ... + def clock_seq_hi_variant(self) -> builtins.int: ... @property - def clock_seq_low(self) -> _Int: ... + def clock_seq_low(self) -> builtins.int: ... @property def fields(self) -> _FieldsType: ... @property def hex(self) -> str: ... @property - def int(self) -> _Int: ... + def int(self) -> builtins.int: ... @property - def node(self) -> _Int: ... + def node(self) -> builtins.int: ... @property - def time(self) -> _Int: ... + def time(self) -> builtins.int: ... @property - def time_hi_version(self) -> _Int: ... + def time_hi_version(self) -> builtins.int: ... @property - def time_low(self) -> _Int: ... + def time_low(self) -> builtins.int: ... @property - def time_mid(self) -> _Int: ... + def time_mid(self) -> builtins.int: ... @property def urn(self) -> str: ... @property def variant(self) -> str: ... @property - def version(self) -> _Int | None: ... - def __int__(self) -> _Int: ... + def version(self) -> builtins.int | None: ... + def __int__(self) -> builtins.int: ... def __eq__(self, other: object) -> bool: ... def __lt__(self, other: UUID) -> bool: ... def __le__(self, other: UUID) -> bool: ... def __gt__(self, other: UUID) -> bool: ... def __ge__(self, other: UUID) -> bool: ... + def __hash__(self) -> builtins.int: ... if sys.version_info >= (3, 9): def getnode() -> int: ... @@ -72,7 +71,7 @@ if sys.version_info >= (3, 9): else: def getnode(*, getters: Unused = None) -> int: ... # undocumented -def uuid1(node: _Int | None = None, clock_seq: _Int | None = None) -> UUID: ... +def uuid1(node: int | None = None, clock_seq: int | None = None) -> UUID: ... if sys.version_info >= (3, 12): def uuid3(namespace: UUID, name: str | bytes) -> UUID: ... 
@@ -96,3 +95,6 @@ RESERVED_NCS: str RFC_4122: str RESERVED_MICROSOFT: str RESERVED_FUTURE: str + +if sys.version_info >= (3, 12): + def main() -> None: ... diff --git a/mypy/typeshed/stdlib/weakref.pyi b/mypy/typeshed/stdlib/weakref.pyi index 13f48fe85a8d..ecb98d4269d5 100644 --- a/mypy/typeshed/stdlib/weakref.pyi +++ b/mypy/typeshed/stdlib/weakref.pyi @@ -45,6 +45,7 @@ class WeakMethod(ref[_CallableT], Generic[_CallableT]): def __call__(self) -> _CallableT | None: ... def __eq__(self, other: object) -> bool: ... def __ne__(self, other: object) -> bool: ... + def __hash__(self) -> int: ... class WeakValueDictionary(MutableMapping[_KT, _VT]): @overload @@ -74,7 +75,9 @@ class WeakValueDictionary(MutableMapping[_KT, _VT]): @overload def pop(self, key: _KT) -> _VT: ... @overload - def pop(self, key: _KT, default: _VT | _T = ...) -> _VT | _T: ... + def pop(self, key: _KT, default: _VT) -> _VT: ... + @overload + def pop(self, key: _KT, default: _T) -> _VT | _T: ... if sys.version_info >= (3, 9): def __or__(self, other: Mapping[_T1, _T2]) -> WeakValueDictionary[_KT | _T1, _VT | _T2]: ... def __ror__(self, other: Mapping[_T1, _T2]) -> WeakValueDictionary[_KT | _T1, _VT | _T2]: ... @@ -117,7 +120,9 @@ class WeakKeyDictionary(MutableMapping[_KT, _VT]): @overload def pop(self, key: _KT) -> _VT: ... @overload - def pop(self, key: _KT, default: _VT | _T = ...) -> _VT | _T: ... + def pop(self, key: _KT, default: _VT) -> _VT: ... + @overload + def pop(self, key: _KT, default: _T) -> _VT | _T: ... if sys.version_info >= (3, 9): def __or__(self, other: Mapping[_T1, _T2]) -> WeakKeyDictionary[_KT | _T1, _VT | _T2]: ... def __ror__(self, other: Mapping[_T1, _T2]) -> WeakKeyDictionary[_KT | _T1, _VT | _T2]: ... diff --git a/mypy/typeshed/stdlib/webbrowser.pyi b/mypy/typeshed/stdlib/webbrowser.pyi index 02edd42e7d59..99c7bb5846b6 100644 --- a/mypy/typeshed/stdlib/webbrowser.pyi +++ b/mypy/typeshed/stdlib/webbrowser.pyi @@ -43,8 +43,12 @@ class UnixBrowser(BaseBrowser): class Mozilla(UnixBrowser): ... -class Galeon(UnixBrowser): - raise_opts: list[str] +if sys.version_info < (3, 12): + class Galeon(UnixBrowser): + raise_opts: list[str] + + class Grail(BaseBrowser): + def open(self, url: str, new: int = 0, autoraise: bool = True) -> bool: ... class Chrome(UnixBrowser): ... class Opera(UnixBrowser): ... @@ -53,9 +57,6 @@ class Elinks(UnixBrowser): ... class Konqueror(BaseBrowser): def open(self, url: str, new: int = 0, autoraise: bool = True) -> bool: ... -class Grail(BaseBrowser): - def open(self, url: str, new: int = 0, autoraise: bool = True) -> bool: ... - if sys.platform == "win32": class WindowsDefault(BaseBrowser): def open(self, url: str, new: int = 0, autoraise: bool = True) -> bool: ... diff --git a/mypy/typeshed/stdlib/winreg.pyi b/mypy/typeshed/stdlib/winreg.pyi index 70ea6a1ced11..337bd9706050 100644 --- a/mypy/typeshed/stdlib/winreg.pyi +++ b/mypy/typeshed/stdlib/winreg.pyi @@ -98,3 +98,4 @@ if sys.platform == "win32": ) -> bool | None: ... def Close(self) -> None: ... def Detach(self) -> int: ... + def __hash__(self) -> int: ... diff --git a/mypy/typeshed/stdlib/xml/etree/ElementTree.pyi b/mypy/typeshed/stdlib/xml/etree/ElementTree.pyi index 2cf8dbbe7025..d8ff2f5b6090 100644 --- a/mypy/typeshed/stdlib/xml/etree/ElementTree.pyi +++ b/mypy/typeshed/stdlib/xml/etree/ElementTree.pyi @@ -142,6 +142,7 @@ class QName: def __gt__(self, other: QName | str) -> bool: ... def __ge__(self, other: QName | str) -> bool: ... def __eq__(self, other: object) -> bool: ... + def __hash__(self) -> int: ... 
class ElementTree: def __init__(self, element: Element | None = None, file: _FileRead | None = None) -> None: ... diff --git a/mypy/typeshed/stdlib/zipimport.pyi b/mypy/typeshed/stdlib/zipimport.pyi index ee97faace379..0189bfe712b5 100644 --- a/mypy/typeshed/stdlib/zipimport.pyi +++ b/mypy/typeshed/stdlib/zipimport.pyi @@ -17,8 +17,10 @@ class zipimporter: else: def __init__(self, path: StrOrBytesPath) -> None: ... - def find_loader(self, fullname: str, path: str | None = None) -> tuple[zipimporter | None, list[str]]: ... # undocumented - def find_module(self, fullname: str, path: str | None = None) -> zipimporter | None: ... + if sys.version_info < (3, 12): + def find_loader(self, fullname: str, path: str | None = None) -> tuple[zipimporter | None, list[str]]: ... # undocumented + def find_module(self, fullname: str, path: str | None = None) -> zipimporter | None: ... + def get_code(self, fullname: str) -> CodeType: ... def get_data(self, pathname: str) -> bytes: ... def get_filename(self, fullname: str) -> str: ... diff --git a/mypy/typeshed/stdlib/zoneinfo/__init__.pyi b/mypy/typeshed/stdlib/zoneinfo/__init__.pyi index fe994be3e8ff..a95530ed461a 100644 --- a/mypy/typeshed/stdlib/zoneinfo/__init__.pyi +++ b/mypy/typeshed/stdlib/zoneinfo/__init__.pyi @@ -17,9 +17,9 @@ class ZoneInfo(tzinfo): @classmethod def no_cache(cls, key: str) -> Self: ... @classmethod - def from_file(cls, __fobj: _IOBytes, key: str | None = ...) -> Self: ... + def from_file(cls, __fobj: _IOBytes, key: str | None = None) -> Self: ... @classmethod - def clear_cache(cls, *, only_keys: Iterable[str] | None = ...) -> None: ... + def clear_cache(cls, *, only_keys: Iterable[str] | None = None) -> None: ... def tzname(self, __dt: datetime | None) -> str | None: ... def utcoffset(self, __dt: datetime | None) -> timedelta | None: ... def dst(self, __dt: datetime | None) -> timedelta | None: ... @@ -30,7 +30,7 @@ class ZoneInfo(tzinfo): def reset_tzpath(to: Sequence[StrPath] | None = None) -> None: ... def available_timezones() -> set[str]: ... -TZPATH: Sequence[str] +TZPATH: tuple[str, ...] class ZoneInfoNotFoundError(KeyError): ... class InvalidTZPathWarning(RuntimeWarning): ... diff --git a/mypy/typeshed/stubs/mypy-extensions/mypy_extensions.pyi b/mypy/typeshed/stubs/mypy-extensions/mypy_extensions.pyi index 40e24645fb77..b6358a0022f3 100644 --- a/mypy/typeshed/stubs/mypy-extensions/mypy_extensions.pyi +++ b/mypy/typeshed/stubs/mypy-extensions/mypy_extensions.pyi @@ -146,3 +146,73 @@ class i32: def __ge__(self, x: i32) -> bool: ... def __gt__(self, x: i32) -> bool: ... def __index__(self) -> int: ... + +class i16: + @overload + def __new__(cls, __x: str | ReadableBuffer | SupportsInt | SupportsIndex | SupportsTrunc = ...) -> i16: ... + @overload + def __new__(cls, __x: str | bytes | bytearray, base: SupportsIndex) -> i16: ... + + def __add__(self, x: i16) -> i16: ... + def __radd__(self, x: i16) -> i16: ... + def __sub__(self, x: i16) -> i16: ... + def __rsub__(self, x: i16) -> i16: ... + def __mul__(self, x: i16) -> i16: ... + def __rmul__(self, x: i16) -> i16: ... + def __floordiv__(self, x: i16) -> i16: ... + def __rfloordiv__(self, x: i16) -> i16: ... + def __mod__(self, x: i16) -> i16: ... + def __rmod__(self, x: i16) -> i16: ... + def __and__(self, x: i16) -> i16: ... + def __rand__(self, x: i16) -> i16: ... + def __or__(self, x: i16) -> i16: ... + def __ror__(self, x: i16) -> i16: ... + def __xor__(self, x: i16) -> i16: ... + def __rxor__(self, x: i16) -> i16: ... + def __lshift__(self, x: i16) -> i16: ... 
+ def __rlshift__(self, x: i16) -> i16: ... + def __rshift__(self, x: i16) -> i16: ... + def __rrshift__(self, x: i16) -> i16: ... + def __neg__(self) -> i16: ... + def __invert__(self) -> i16: ... + def __pos__(self) -> i16: ... + def __lt__(self, x: i16) -> bool: ... + def __le__(self, x: i16) -> bool: ... + def __ge__(self, x: i16) -> bool: ... + def __gt__(self, x: i16) -> bool: ... + def __index__(self) -> int: ... + +class u8: + @overload + def __new__(cls, __x: str | ReadableBuffer | SupportsInt | SupportsIndex | SupportsTrunc = ...) -> u8: ... + @overload + def __new__(cls, __x: str | bytes | bytearray, base: SupportsIndex) -> u8: ... + + def __add__(self, x: u8) -> u8: ... + def __radd__(self, x: u8) -> u8: ... + def __sub__(self, x: u8) -> u8: ... + def __rsub__(self, x: u8) -> u8: ... + def __mul__(self, x: u8) -> u8: ... + def __rmul__(self, x: u8) -> u8: ... + def __floordiv__(self, x: u8) -> u8: ... + def __rfloordiv__(self, x: u8) -> u8: ... + def __mod__(self, x: u8) -> u8: ... + def __rmod__(self, x: u8) -> u8: ... + def __and__(self, x: u8) -> u8: ... + def __rand__(self, x: u8) -> u8: ... + def __or__(self, x: u8) -> u8: ... + def __ror__(self, x: u8) -> u8: ... + def __xor__(self, x: u8) -> u8: ... + def __rxor__(self, x: u8) -> u8: ... + def __lshift__(self, x: u8) -> u8: ... + def __rlshift__(self, x: u8) -> u8: ... + def __rshift__(self, x: u8) -> u8: ... + def __rrshift__(self, x: u8) -> u8: ... + def __neg__(self) -> u8: ... + def __invert__(self) -> u8: ... + def __pos__(self) -> u8: ... + def __lt__(self, x: u8) -> bool: ... + def __le__(self, x: u8) -> bool: ... + def __ge__(self, x: u8) -> bool: ... + def __gt__(self, x: u8) -> bool: ... + def __index__(self) -> int: ... diff --git a/mypy/typestate.py b/mypy/typestate.py index 9f65481e5e94..b32fb0ef6df1 100644 --- a/mypy/typestate.py +++ b/mypy/typestate.py @@ -5,8 +5,8 @@ from __future__ import annotations -from typing import Dict, Set, Tuple -from typing_extensions import Final, TypeAlias as _TypeAlias +from typing import Dict, Final, Set, Tuple +from typing_extensions import TypeAlias as _TypeAlias from mypy.nodes import TypeInfo from mypy.server.trigger import make_trigger @@ -93,6 +93,9 @@ class TypeState: inferring: Final[list[tuple[Type, Type]]] # Whether to use joins or unions when solving constraints, see checkexpr.py for details. infer_unions: bool + # Whether to use new type inference algorithm that can infer polymorphic types. + # This is temporary and will be removed soon when new algorithm is more polished. + infer_polymorphic: bool # N.B: We do all of the accesses to these properties through # TypeState, instead of making these classmethods and accessing @@ -110,6 +113,7 @@ def __init__(self) -> None: self._assuming_proper = [] self.inferring = [] self.infer_unions = False + self.infer_polymorphic = False def is_assumed_subtype(self, left: Type, right: Type) -> bool: for l, r in reversed(self._assuming): @@ -311,7 +315,7 @@ def add_all_protocol_deps(self, deps: dict[str, set[str]]) -> None: def reset_global_state() -> None: """Reset most existing global state. - Currently most of it is in this module. Few exceptions are strict optional status and + Currently most of it is in this module. Few exceptions are strict optional status and functools.lru_cache. 
""" type_state.reset_all_subtype_caches() diff --git a/mypy/typevartuples.py b/mypy/typevartuples.py index ac5f4e43c3bf..bcb5e96b615c 100644 --- a/mypy/typevartuples.py +++ b/mypy/typevartuples.py @@ -4,32 +4,17 @@ from typing import Sequence -from mypy.nodes import ARG_POS, ARG_STAR from mypy.types import ( - CallableType, Instance, ProperType, Type, UnpackType, + find_unpack_in_list, get_proper_type, split_with_prefix_and_suffix, ) -def find_unpack_in_list(items: Sequence[Type]) -> int | None: - unpack_index: int | None = None - for i, item in enumerate(items): - if isinstance(item, UnpackType): - # We cannot fail here, so we must check this in an earlier - # semanal phase. - # Funky code here avoids mypyc narrowing the type of unpack_index. - old_index = unpack_index - assert old_index is None - # Don't return so that we can also sanity check there is only one. - unpack_index = i - return unpack_index - - def split_with_instance( typ: Instance, ) -> tuple[tuple[Type, ...], tuple[Type, ...], tuple[Type, ...]]: @@ -179,20 +164,3 @@ def extract_unpack(types: Sequence[Type]) -> ProperType | None: if isinstance(types[0], UnpackType): return get_proper_type(types[0].type) return None - - -def replace_starargs(callable: CallableType, types: list[Type]) -> CallableType: - star_index = callable.arg_kinds.index(ARG_STAR) - arg_kinds = ( - callable.arg_kinds[:star_index] - + [ARG_POS] * len(types) - + callable.arg_kinds[star_index + 1 :] - ) - arg_names = ( - callable.arg_names[:star_index] - + [None] * len(types) - + callable.arg_names[star_index + 1 :] - ) - arg_types = callable.arg_types[:star_index] + types + callable.arg_types[star_index + 1 :] - - return callable.copy_modified(arg_types=arg_types, arg_names=arg_names, arg_kinds=arg_kinds) diff --git a/mypy/util.py b/mypy/util.py index 2c225c7fe651..d0f2f8c6cc36 100644 --- a/mypy/util.py +++ b/mypy/util.py @@ -11,8 +11,8 @@ import sys import time from importlib import resources as importlib_resources -from typing import IO, Callable, Container, Iterable, Sequence, Sized, TypeVar -from typing_extensions import Final, Literal +from typing import IO, Callable, Container, Final, Iterable, Sequence, Sized, TypeVar +from typing_extensions import Literal try: import curses @@ -308,17 +308,6 @@ def get_prefix(fullname: str) -> str: return fullname.rsplit(".", 1)[0] -def get_top_two_prefixes(fullname: str) -> tuple[str, str]: - """Return one and two component prefixes of a fully qualified name. - - Given 'a.b.c.d', return ('a', 'a.b'). - - If fullname has only one component, return (fullname, fullname). - """ - components = fullname.split(".", 3) - return components[0], ".".join(components[:2]) - - def correct_relative_import( cur_mod_id: str, relative: int, target: str, is_cur_package_init_file: bool ) -> tuple[str, bool]: @@ -421,10 +410,10 @@ def get_unique_redefinition_name(name: str, existing: Container[str]) -> str: def check_python_version(program: str) -> None: """Report issues with the Python used to run mypy, dmypy, or stubgen""" # Check for known bad Python versions. 
- if sys.version_info[:2] < (3, 7): + if sys.version_info[:2] < (3, 8): sys.exit( - "Running {name} with Python 3.6 or lower is not supported; " - "please upgrade to 3.7 or newer".format(name=program) + "Running {name} with Python 3.7 or lower is not supported; " + "please upgrade to 3.8 or newer".format(name=program) ) @@ -820,3 +809,20 @@ def plural_s(s: int | Sized) -> str: return "s" else: return "" + + +def quote_docstring(docstr: str) -> str: + """Returns docstring correctly encapsulated in a single or double quoted form.""" + # Uses repr to get hint on the correct quotes and escape everything properly. + # Creating multiline string for prettier output. + docstr_repr = "\n".join(re.split(r"(?<=[^\\])\\n", repr(docstr))) + + if docstr_repr.startswith("'"): + # Enforce double quotes when it's safe to do so. + # That is when double quotes are not in the string + # or when it doesn't end with a single quote. + if '"' not in docstr_repr[1:-1] and docstr_repr[-2] != "'": + return f'"""{docstr_repr[1:-1]}"""' + return f"''{docstr_repr}''" + else: + return f'""{docstr_repr}""' diff --git a/mypy/version.py b/mypy/version.py index 826ba0020100..9271eba74aa1 100644 --- a/mypy/version.py +++ b/mypy/version.py @@ -8,7 +8,7 @@ # - Release versions have the form "1.2.3". # - Dev versions have the form "1.2.3+dev" (PLUS sign to conform to PEP 440). # - Before 1.0 we had the form "0.NNN". -__version__ = "1.4.0+dev" +__version__ = "1.6.0" base_version = __version__ mypy_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) diff --git a/mypy_self_check.ini b/mypy_self_check.ini index d20fcd60a9cb..fcdbe641d6d6 100644 --- a/mypy_self_check.ini +++ b/mypy_self_check.ini @@ -6,9 +6,11 @@ show_traceback = True pretty = True always_false = MYPYC plugins = misc/proper_plugin.py -python_version = 3.7 +python_version = 3.8 exclude = mypy/typeshed/|mypyc/test-data/|mypyc/lib-rt/ +new_type_inference = True enable_error_code = ignore-without-code,redundant-expr +show_error_code_links = True [mypy-mypy.visitor] # See docstring for NodeVisitor for motivation. 
diff --git a/mypyc/.readthedocs.yaml b/mypyc/.readthedocs.yaml new file mode 100644 index 000000000000..90831dfd7069 --- /dev/null +++ b/mypyc/.readthedocs.yaml @@ -0,0 +1,18 @@ +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +version: 2 + +build: + os: ubuntu-22.04 + tools: + python: "3.11" + +sphinx: + configuration: mypyc/doc/conf.py + +formats: [pdf, htmlzip, epub] + +python: + install: + - requirements: docs/requirements-docs.txt diff --git a/mypyc/analysis/attrdefined.py b/mypyc/analysis/attrdefined.py index 02e02a82a4f9..350158246cdb 100644 --- a/mypyc/analysis/attrdefined.py +++ b/mypyc/analysis/attrdefined.py @@ -63,8 +63,7 @@ def foo(self) -> int: from __future__ import annotations -from typing import Set, Tuple -from typing_extensions import Final +from typing import Final, Set, Tuple from mypyc.analysis.dataflow import ( CFG, @@ -414,7 +413,7 @@ def update_always_defined_attrs_using_subclasses(cl: ClassIR, seen: set[ClassIR] seen.add(cl) -def detect_undefined_bitmap(cl: ClassIR, seen: Set[ClassIR]) -> None: +def detect_undefined_bitmap(cl: ClassIR, seen: set[ClassIR]) -> None: if cl.is_trait: return diff --git a/mypyc/build.py b/mypyc/build.py index 5fc041e2dcf2..9889577d4add 100644 --- a/mypyc/build.py +++ b/mypyc/build.py @@ -40,23 +40,31 @@ from mypyc.namegen import exported_name from mypyc.options import CompilerOptions -if TYPE_CHECKING: - from distutils.core import Extension as _distutils_Extension - from typing_extensions import TypeAlias +if sys.version_info < (3, 12): + if TYPE_CHECKING: + from distutils.core import Extension as _distutils_Extension + from typing_extensions import TypeAlias - from setuptools import Extension as _setuptools_Extension + from setuptools import Extension as _setuptools_Extension - Extension: TypeAlias = Union[_setuptools_Extension, _distutils_Extension] + Extension: TypeAlias = Union[_setuptools_Extension, _distutils_Extension] - -try: - # Import setuptools so that it monkey-patch overrides distutils + try: + # Import setuptools so that it monkey-patch overrides distutils + import setuptools + except ImportError: + pass + from distutils import ccompiler, sysconfig +else: import setuptools -except ImportError: - if sys.version_info >= (3, 12): - # Raise on Python 3.12, since distutils will go away forever - raise -from distutils import ccompiler, sysconfig + from setuptools import Extension + from setuptools._distutils import ( + ccompiler as _ccompiler, # type: ignore[attr-defined] + sysconfig as _sysconfig, # type: ignore[attr-defined] + ) + + ccompiler = _ccompiler + sysconfig = _sysconfig def get_extension() -> type[Extension]: @@ -65,11 +73,13 @@ def get_extension() -> type[Extension]: use_setuptools = "setuptools" in sys.modules extension_class: type[Extension] - if not use_setuptools: + if sys.version_info < (3, 12) and not use_setuptools: import distutils.core extension_class = distutils.core.Extension else: + if not use_setuptools: + sys.exit("error: setuptools not installed") extension_class = setuptools.Extension return extension_class diff --git a/mypyc/codegen/cstring.py b/mypyc/codegen/cstring.py index e006f12e09ec..853787f8161d 100644 --- a/mypyc/codegen/cstring.py +++ b/mypyc/codegen/cstring.py @@ -21,7 +21,7 @@ from __future__ import annotations import string -from typing_extensions import Final +from typing import Final CHAR_MAP: Final = [f"\\{i:03o}" for i in range(256)] diff --git a/mypyc/codegen/emit.py b/mypyc/codegen/emit.py index 
a2e3d9849dca..7d41ee7e162b 100644 --- a/mypyc/codegen/emit.py +++ b/mypyc/codegen/emit.py @@ -5,8 +5,7 @@ import pprint import sys import textwrap -from typing import Callable -from typing_extensions import Final +from typing import Callable, Final from mypyc.codegen.literals import Literals from mypyc.common import ( @@ -35,6 +34,7 @@ is_dict_rprimitive, is_fixed_width_rtype, is_float_rprimitive, + is_int16_rprimitive, is_int32_rprimitive, is_int64_rprimitive, is_int_rprimitive, @@ -47,6 +47,7 @@ is_short_int_rprimitive, is_str_rprimitive, is_tuple_rprimitive, + is_uint8_rprimitive, object_rprimitive, optional_value_type, ) @@ -345,12 +346,6 @@ def tuple_c_declaration(self, rtuple: RTuple) -> list[str]: result.append(f"{self.ctype_spaced(typ)}f{i};") i += 1 result.append(f"}} {rtuple.struct_name};") - values = self.tuple_undefined_value_helper(rtuple) - result.append( - "static {} {} = {{ {} }};".format( - self.ctype(rtuple), self.tuple_undefined_value(rtuple), "".join(values) - ) - ) result.append("#endif") result.append("") @@ -470,23 +465,20 @@ def tuple_undefined_check_cond( return check def tuple_undefined_value(self, rtuple: RTuple) -> str: - return "tuple_undefined_" + rtuple.unique_id + """Undefined tuple value suitable in an expression.""" + return f"({rtuple.struct_name}) {self.c_initializer_undefined_value(rtuple)}" - def tuple_undefined_value_helper(self, rtuple: RTuple) -> list[str]: - res = [] - # see tuple_c_declaration() - if len(rtuple.types) == 0: - return [self.c_undefined_value(int_rprimitive)] - for item in rtuple.types: - if not isinstance(item, RTuple): - res.append(self.c_undefined_value(item)) - else: - sub_list = self.tuple_undefined_value_helper(item) - res.append("{ ") - res.extend(sub_list) - res.append(" }") - res.append(", ") - return res[:-1] + def c_initializer_undefined_value(self, rtype: RType) -> str: + """Undefined value represented in a form suitable for variable initialization.""" + if isinstance(rtype, RTuple): + if not rtype.types: + # Empty tuples contain a flag so that they can still indicate + # error values. + return f"{{ {int_rprimitive.c_undefined} }}" + items = ", ".join([self.c_initializer_undefined_value(t) for t in rtype.types]) + return f"{{ {items} }}" + else: + return self.c_undefined_value(rtype) # Higher-level operations @@ -909,28 +901,43 @@ def emit_unbox( self.emit_line(f" {dest} = 1;") elif is_int64_rprimitive(typ): # Whether we are borrowing or not makes no difference. + assert not optional # Not supported for overlapping error values if declare_dest: self.emit_line(f"int64_t {dest};") self.emit_line(f"{dest} = CPyLong_AsInt64({src});") - # TODO: Handle 'optional' - # TODO: Handle 'failure' + if not isinstance(error, AssignHandler): + self.emit_unbox_failure_with_overlapping_error_value(dest, typ, failure) elif is_int32_rprimitive(typ): # Whether we are borrowing or not makes no difference. + assert not optional # Not supported for overlapping error values if declare_dest: self.emit_line(f"int32_t {dest};") self.emit_line(f"{dest} = CPyLong_AsInt32({src});") - # TODO: Handle 'optional' - # TODO: Handle 'failure' + if not isinstance(error, AssignHandler): + self.emit_unbox_failure_with_overlapping_error_value(dest, typ, failure) + elif is_int16_rprimitive(typ): + # Whether we are borrowing or not makes no difference. 
+ assert not optional # Not supported for overlapping error values + if declare_dest: + self.emit_line(f"int16_t {dest};") + self.emit_line(f"{dest} = CPyLong_AsInt16({src});") + if not isinstance(error, AssignHandler): + self.emit_unbox_failure_with_overlapping_error_value(dest, typ, failure) + elif is_uint8_rprimitive(typ): + # Whether we are borrowing or not makes no difference. + assert not optional # Not supported for overlapping error values + if declare_dest: + self.emit_line(f"uint8_t {dest};") + self.emit_line(f"{dest} = CPyLong_AsUInt8({src});") + if not isinstance(error, AssignHandler): + self.emit_unbox_failure_with_overlapping_error_value(dest, typ, failure) elif is_float_rprimitive(typ): + assert not optional # Not supported for overlapping error values if declare_dest: - self.emit_line("double {};".format(dest)) + self.emit_line(f"double {dest};") # TODO: Don't use __float__ and __index__ self.emit_line(f"{dest} = PyFloat_AsDouble({src});") - self.emit_lines( - f"if ({dest} == -1.0 && PyErr_Occurred()) {{", f"{dest} = -113.0;", "}" - ) - # TODO: Handle 'optional' - # TODO: Handle 'failure' + self.emit_lines(f"if ({dest} == -1.0 && PyErr_Occurred()) {{", failure, "}") elif isinstance(typ, RTuple): self.declare_tuple_struct(typ) if declare_dest: @@ -1015,7 +1022,7 @@ def emit_box( self.emit_lines(f"{declaration}{dest} = Py_None;") if not can_borrow: self.emit_inc_ref(dest, object_rprimitive) - elif is_int32_rprimitive(typ): + elif is_int32_rprimitive(typ) or is_int16_rprimitive(typ) or is_uint8_rprimitive(typ): self.emit_line(f"{declaration}{dest} = PyLong_FromLong({src});") elif is_int64_rprimitive(typ): self.emit_line(f"{declaration}{dest} = PyLong_FromLongLong({src});") @@ -1146,6 +1153,13 @@ def _emit_traceback( if DEBUG_ERRORS: self.emit_line('assert(PyErr_Occurred() != NULL && "failure w/o err!");') + def emit_unbox_failure_with_overlapping_error_value( + self, dest: str, typ: RType, failure: str + ) -> None: + self.emit_line(f"if ({dest} == {self.c_error_value(typ)} && PyErr_Occurred()) {{") + self.emit_line(failure) + self.emit_line("}") + def c_array_initializer(components: list[str], *, indented: bool = False) -> str: """Construct an initializer for a C array variable. diff --git a/mypyc/codegen/emitclass.py b/mypyc/codegen/emitclass.py index 6a272d1aee2b..62e1b4b2dea1 100644 --- a/mypyc/codegen/emitclass.py +++ b/mypyc/codegen/emitclass.py @@ -18,7 +18,7 @@ generate_richcompare_wrapper, generate_set_del_item_wrapper, ) -from mypyc.common import BITMAP_BITS, BITMAP_TYPE, NATIVE_PREFIX, PREFIX, REG_PREFIX, use_fastcall +from mypyc.common import BITMAP_BITS, BITMAP_TYPE, NATIVE_PREFIX, PREFIX, REG_PREFIX from mypyc.ir.class_ir import ClassIR, VTableEntries from mypyc.ir.func_ir import FUNC_CLASSMETHOD, FUNC_STATICMETHOD, FuncDecl, FuncIR from mypyc.ir.rtypes import RTuple, RType, object_rprimitive @@ -270,7 +270,7 @@ def emit_line() -> None: # that isn't what we want. # XXX: there is no reason for the __weakref__ stuff to be mixed up with __dict__ - if cl.has_dict: + if cl.has_dict and not has_managed_dict(cl, emitter): # __dict__ lives right after the struct and __weakref__ lives right after that # TODO: They should get members in the struct instead of doing this nonsense. 
weak_offset = f"{base_size} + sizeof(PyObject *)" @@ -284,8 +284,9 @@ def emit_line() -> None: fields["tp_members"] = members_name fields["tp_basicsize"] = f"{base_size} + 2*sizeof(PyObject *)" - fields["tp_dictoffset"] = base_size - fields["tp_weaklistoffset"] = weak_offset + if emitter.capi_version < (3, 12): + fields["tp_dictoffset"] = base_size + fields["tp_weaklistoffset"] = weak_offset else: fields["tp_basicsize"] = base_size @@ -341,6 +342,8 @@ def emit_line() -> None: # This is just a placeholder to please CPython. It will be # overridden during setup. fields["tp_call"] = "PyVectorcall_Call" + if has_managed_dict(cl, emitter): + flags.append("Py_TPFLAGS_MANAGED_DICT") fields["tp_flags"] = " | ".join(flags) emitter.emit_line(f"static PyTypeObject {emitter.type_struct_name(cl)}_template_ = {{") @@ -578,7 +581,12 @@ def generate_setup_for_class( for base in reversed(cl.base_mro): for attr, rtype in base.attributes.items(): - emitter.emit_line(rf"self->{emitter.attr(attr)} = {emitter.c_undefined_value(rtype)};") + value = emitter.c_undefined_value(rtype) + + # We don't need to set this field to NULL since tp_alloc() already + # zero-initializes `self`. + if value != "NULL": + emitter.emit_line(rf"self->{emitter.attr(attr)} = {value};") # Initialize attributes to default values, if necessary if defaults_fn is not None: @@ -725,7 +733,9 @@ def generate_traverse_for_class(cl: ClassIR, func_name: str, emitter: Emitter) - for base in reversed(cl.base_mro): for attr, rtype in base.attributes.items(): emitter.emit_gc_visit(f"self->{emitter.attr(attr)}", rtype) - if cl.has_dict: + if has_managed_dict(cl, emitter): + emitter.emit_line("_PyObject_VisitManagedDict((PyObject *)self, visit, arg);") + elif cl.has_dict: struct_name = cl.struct_name(emitter.names) # __dict__ lives right after the struct and __weakref__ lives right after that emitter.emit_gc_visit( @@ -746,7 +756,9 @@ def generate_clear_for_class(cl: ClassIR, func_name: str, emitter: Emitter) -> N for base in reversed(cl.base_mro): for attr, rtype in base.attributes.items(): emitter.emit_gc_clear(f"self->{emitter.attr(attr)}", rtype) - if cl.has_dict: + if has_managed_dict(cl, emitter): + emitter.emit_line("_PyObject_ClearManagedDict((PyObject *)self);") + elif cl.has_dict: struct_name = cl.struct_name(emitter.names) # __dict__ lives right after the struct and __weakref__ lives right after that emitter.emit_gc_clear( @@ -782,11 +794,7 @@ def generate_methods_table(cl: ClassIR, name: str, emitter: Emitter) -> None: continue emitter.emit_line(f'{{"{fn.name}",') emitter.emit_line(f" (PyCFunction){PREFIX}{fn.cname(emitter.names)},") - if use_fastcall(emitter.capi_version): - flags = ["METH_FASTCALL"] - else: - flags = ["METH_VARARGS"] - flags.append("METH_KEYWORDS") + flags = ["METH_FASTCALL", "METH_KEYWORDS"] if fn.decl.kind == FUNC_STATICMETHOD: flags.append("METH_STATIC") elif fn.decl.kind == FUNC_CLASSMETHOD: @@ -1035,3 +1043,15 @@ def generate_property_setter( ) emitter.emit_line("return 0;") emitter.emit_line("}") + + +def has_managed_dict(cl: ClassIR, emitter: Emitter) -> bool: + """Should the class get the Py_TPFLAGS_MANAGED_DICT flag?""" + # On 3.11 and earlier the flag doesn't exist and we use + # tp_dictoffset instead. If a class inherits from Exception, the + # flag conflicts with tp_dictoffset set in the base class. 
+ return ( + emitter.capi_version >= (3, 12) + and cl.has_dict + and cl.builtin_base != "PyBaseExceptionObject" + ) diff --git a/mypyc/codegen/emitfunc.py b/mypyc/codegen/emitfunc.py index 7e6d775d74b4..b4d31544b196 100644 --- a/mypyc/codegen/emitfunc.py +++ b/mypyc/codegen/emitfunc.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing_extensions import Final +from typing import Final from mypyc.analysis.blockfreq import frequently_executed_blocks from mypyc.codegen.emit import DEBUG_ERRORS, Emitter, TracebackAndGotoHandler, c_array_initializer @@ -686,10 +686,10 @@ def visit_float_op(self, op: FloatOp) -> None: lhs = self.reg(op.lhs) rhs = self.reg(op.rhs) if op.op != FloatOp.MOD: - self.emit_line("%s = %s %s %s;" % (dest, lhs, op.op_str[op.op], rhs)) + self.emit_line(f"{dest} = {lhs} {op.op_str[op.op]} {rhs};") else: # TODO: This may set errno as a side effect, that is a little sketchy. - self.emit_line("%s = fmod(%s, %s);" % (dest, lhs, rhs)) + self.emit_line(f"{dest} = fmod({lhs}, {rhs});") def visit_float_neg(self, op: FloatNeg) -> None: dest = self.reg(op) @@ -700,7 +700,7 @@ def visit_float_comparison_op(self, op: FloatComparisonOp) -> None: dest = self.reg(op) lhs = self.reg(op.lhs) rhs = self.reg(op.rhs) - self.emit_line("%s = %s %s %s;" % (dest, lhs, op.op_str[op.op], rhs)) + self.emit_line(f"{dest} = {lhs} {op.op_str[op.op]} {rhs};") def visit_load_mem(self, op: LoadMem) -> None: dest = self.reg(op) diff --git a/mypyc/codegen/emitmodule.py b/mypyc/codegen/emitmodule.py index 0e80ff6da1f2..caf2058ea7c4 100644 --- a/mypyc/codegen/emitmodule.py +++ b/mypyc/codegen/emitmodule.py @@ -43,7 +43,6 @@ TOP_LEVEL_NAME, shared_lib_name, short_id_from_name, - use_fastcall, use_vectorcall, ) from mypyc.errors import Errors @@ -51,7 +50,7 @@ from mypyc.ir.func_ir import FuncIR from mypyc.ir.module_ir import ModuleIR, ModuleIRs, deserialize_modules from mypyc.ir.ops import DeserMaps, LoadLiteral -from mypyc.ir.rtypes import RTuple, RType +from mypyc.ir.rtypes import RType from mypyc.irbuild.main import build_ir from mypyc.irbuild.mapper import Mapper from mypyc.irbuild.prepare import load_type_map @@ -1052,11 +1051,7 @@ def declare_finals( def final_definition(self, module: str, name: str, typ: RType, emitter: Emitter) -> str: static_name = emitter.static_name(name, module) # Here we rely on the fact that undefined value and error value are always the same - if isinstance(typ, RTuple): - # We need to inline because initializer must be static - undefined = "{{ {} }}".format("".join(emitter.tuple_undefined_value_helper(typ))) - else: - undefined = emitter.c_undefined_value(typ) + undefined = emitter.c_initializer_undefined_value(typ) return f"{emitter.ctype_spaced(typ)}{static_name} = {undefined};" def declare_static_pyobject(self, identifier: str, emitter: Emitter) -> None: @@ -1111,8 +1106,8 @@ def is_fastcall_supported(fn: FuncIR, capi_version: tuple[int, int]) -> bool: # We can use vectorcalls (PEP 590) when supported return use_vectorcall(capi_version) # TODO: Support fastcall for __init__. 
- return use_fastcall(capi_version) and fn.name != "__init__" - return use_fastcall(capi_version) + return fn.name != "__init__" + return True def collect_literals(fn: FuncIR, literals: Literals) -> None: diff --git a/mypyc/codegen/literals.py b/mypyc/codegen/literals.py index 8f84089221c3..1f0c3bc6ec7b 100644 --- a/mypyc/codegen/literals.py +++ b/mypyc/codegen/literals.py @@ -1,7 +1,7 @@ from __future__ import annotations -from typing import FrozenSet, List, Tuple, Union -from typing_extensions import Final, TypeGuard +from typing import Final, FrozenSet, Tuple, Union +from typing_extensions import TypeGuard # Supported Python literal types. All tuple / frozenset items must have supported # literal types as well, but we can't represent the type precisely. @@ -140,7 +140,7 @@ def encoded_complex_values(self) -> list[str]: def encoded_tuple_values(self) -> list[str]: return self._encode_collection_values(self.tuple_literals) - def encoded_frozenset_values(self) -> List[str]: + def encoded_frozenset_values(self) -> list[str]: return self._encode_collection_values(self.frozenset_literals) def _encode_collection_values( diff --git a/mypyc/common.py b/mypyc/common.py index 05e13370cb98..3d07f6c3d0d3 100644 --- a/mypyc/common.py +++ b/mypyc/common.py @@ -2,8 +2,7 @@ import sys import sysconfig -from typing import Any, Dict -from typing_extensions import Final +from typing import Any, Dict, Final from mypy.util import unnamed_function @@ -99,11 +98,6 @@ def short_name(name: str) -> str: return name -def use_fastcall(capi_version: tuple[int, int]) -> bool: - # We can use METH_FASTCALL for faster wrapper functions on Python 3.7+. - return capi_version >= (3, 7) - - def use_vectorcall(capi_version: tuple[int, int]) -> bool: # We can use vectorcalls to make calls on Python 3.8+ (PEP 590). return capi_version >= (3, 8) diff --git a/mypyc/doc/differences_from_python.rst b/mypyc/doc/differences_from_python.rst index 16faae60303f..f1d4d05a3a87 100644 --- a/mypyc/doc/differences_from_python.rst +++ b/mypyc/doc/differences_from_python.rst @@ -268,19 +268,27 @@ used in compiled code, or there are some limitations. You can partially work around some of these limitations by running your code in interpreted mode. -Operator overloading -******************** +Nested classes +************** -Native classes can only use these dunder methods to override operators: +Nested classes are not supported. -* ``__eq__`` -* ``__ne__`` -* ``__getitem__`` -* ``__setitem__`` +Conditional functions or classes +******************************** -.. note:: +Function and class definitions guarded by an if-statement are not supported. + +Dunder methods +************** - This limitation will be lifted in the future. +Native classes **cannot** use these dunders. If defined, they will not +work as expected. + +* ``__del__`` +* ``__index__`` +* ``__getattr__``, ``__getattribute__`` +* ``__setattr__`` +* ``__delattr__`` Generator expressions ********************* @@ -299,10 +307,16 @@ Descriptors Native classes can't contain arbitrary descriptors. Properties, static methods and class methods are supported. -Stack introspection -******************* +Introspection +************* + +Various methods of introspection may break by using mypyc. Here's an +non-exhaustive list of what won't work: -Frames of compiled functions can't be inspected using ``inspect``. 
+- Instance ``__annotations__`` is usually not kept +- Frames of compiled functions can't be inspected using ``inspect`` +- Compiled methods aren't considered methods by ``inspect.ismethod`` +- ``inspect.signature`` chokes on compiled functions Profiling hooks and tracing *************************** diff --git a/mypyc/doc/float_operations.rst b/mypyc/doc/float_operations.rst index 915c184ae8e7..feae5a806c70 100644 --- a/mypyc/doc/float_operations.rst +++ b/mypyc/doc/float_operations.rst @@ -14,6 +14,8 @@ Construction * ``float(x: int)`` * ``float(x: i64)`` * ``float(x: i32)`` +* ``float(x: i16)`` +* ``float(x: u8)`` * ``float(x: str)`` * ``float(x: float)`` (no-op) @@ -28,8 +30,10 @@ Functions --------- * ``int(f)`` -* ``i32(f)`` (convert to ``i32``) -* ``i64(f)`` (convert to ``i64``) +* ``i64(f)`` (convert to 64-bit signed integer) +* ``i32(f)`` (convert to 32-bit signed integer) +* ``i16(f)`` (convert to 16-bit signed integer) +* ``u8(f)`` (convert to 8-bit unsigned integer) * ``abs(f)`` * ``math.sin(f)`` * ``math.cos(f)`` diff --git a/mypyc/doc/int_operations.rst b/mypyc/doc/int_operations.rst index 058fdbd511dd..eb875f5c9452 100644 --- a/mypyc/doc/int_operations.rst +++ b/mypyc/doc/int_operations.rst @@ -8,14 +8,17 @@ Mypyc supports these integer types: * ``int`` (arbitrary-precision integer) * ``i64`` (64-bit signed integer) * ``i32`` (32-bit signed integer) +* ``i16`` (16-bit signed integer) +* ``u8`` (8-bit unsigned integer) -``i64`` and ``i32`` are *native integer types* and must be imported -from the ``mypy_extensions`` module. ``int`` corresponds to the Python -``int`` type, but uses a more efficient runtime representation (tagged -pointer). Native integer types are value types. All integer types have -optimized primitive operations, but the native integer types are more -efficient than ``int``, since they don't require range or bounds -checks. +``i64``, ``i32``, ``i16`` and ``u8`` are *native integer types* and +are available in the ``mypy_extensions`` module. ``int`` corresponds +to the Python ``int`` type, but uses a more efficient runtime +representation (tagged pointer). Native integer types are value types. + +All integer types have optimized primitive operations, but the native +integer types are more efficient than ``int``, since they don't +require range or bounds checks. Operations on integers that are listed here have fast, optimized implementations. Other integer operations use generic implementations @@ -31,6 +34,8 @@ Construction * ``int(x: float)`` * ``int(x: i64)`` * ``int(x: i32)`` +* ``int(x: i16)`` +* ``int(x: u8)`` * ``int(x: str)`` * ``int(x: str, base: int)`` * ``int(x: int)`` (no-op) @@ -39,19 +44,34 @@ Construction * ``i64(x: int)`` * ``i64(x: float)`` +* ``i64(x: i64)`` (no-op) * ``i64(x: i32)`` +* ``i64(x: i16)`` +* ``i64(x: u8)`` * ``i64(x: str)`` * ``i64(x: str, base: int)`` -* ``i64(x: i64)`` (no-op) ``i32`` type: * ``i32(x: int)`` * ``i32(x: float)`` * ``i32(x: i64)`` (truncate) +* ``i32(x: i32)`` (no-op) +* ``i32(x: i16)`` +* ``i32(x: u8)`` * ``i32(x: str)`` * ``i32(x: str, base: int)`` -* ``i32(x: i32)`` (no-op) + +``i16`` type: + +* ``i16(x: int)`` +* ``i16(x: float)`` +* ``i16(x: i64)`` (truncate) +* ``i16(x: i32)`` (truncate) +* ``i16(x: i16)`` (no-op) +* ``i16(x: u8)`` +* ``i16(x: str)`` +* ``i16(x: str, base: int)`` Conversions from ``int`` to a native integer type raise ``OverflowError`` if the value is too large or small. 
Conversions from @@ -65,6 +85,8 @@ Implicit conversions ``int`` values can be implicitly converted to a native integer type, for convenience. This means that these are equivalent:: + from mypy_extensions import i64 + def implicit() -> None: # Implicit conversion of 0 (int) to i64 x: i64 = 0 @@ -92,18 +114,23 @@ Operators * Comparisons (``==``, ``!=``, ``<``, etc.) * Augmented assignment (``x += y``, etc.) -If one of the above native integer operations overflows or underflows, -the behavior is undefined. Native integer types should only be used if -all possible values are small enough for the type. For this reason, -the arbitrary-precision ``int`` type is recommended unless the -performance of integer operations is critical. +If one of the above native integer operations overflows or underflows +with signed operands, the behavior is undefined. Signed native integer +types should only be used if all possible values are small enough for +the type. For this reason, the arbitrary-precision ``int`` type is +recommended for signed values unless the performance of integer +operations is critical. + +Operations on unsigned integers (``u8``) wrap around on overflow. It's a compile-time error to mix different native integer types in a binary operation such as addition. An explicit conversion is required:: - def add(x: i64, y: i32) -> None: - a = x + y # Error (i64 + i32) - b = x + i64(y) # OK + from mypy_extensions import i64, i32 + + def add(x: i64, y: i32) -> None: + a = x + y # Error (i64 + i32) + b = x + i64(y) # OK You can freely mix a native integer value and an arbitrary-precision ``int`` value in an operation. The native integer type is "sticky" diff --git a/mypyc/doc/native_classes.rst b/mypyc/doc/native_classes.rst index 2b4a0892b790..b2935a6f7185 100644 --- a/mypyc/doc/native_classes.rst +++ b/mypyc/doc/native_classes.rst @@ -63,6 +63,8 @@ classes: * ``IndexError`` * ``LookupError`` * ``UserWarning`` +* ``typing.NamedTuple`` +* ``enum.Enum`` By default, a non-native class can't inherit a native class, and you can't inherit from a native class outside the compilation unit that @@ -104,6 +106,11 @@ through an instance. Example:: print(o.cv) # OK (2) o.cv = 3 # Error! +.. tip:: + + Constant class variables can be declared using ``typing.Final`` or + ``typing.Final[]``. + Generic native classes ---------------------- @@ -150,9 +157,10 @@ decorators can be used with native classes, however: * ``mypy_extensions.trait`` (for defining :ref:`trait types `) * ``mypy_extensions.mypyc_attr`` (see :ref:`above `) * ``dataclasses.dataclass`` +* ``@attr.s(auto_attribs=True)`` -Dataclasses have partial native support, and they aren't as efficient -as pure native classes. +Dataclasses and attrs classes have partial native support, and they aren't as +efficient as pure native classes. .. 
note:: diff --git a/mypyc/doc/using_type_annotations.rst b/mypyc/doc/using_type_annotations.rst index 6c9277786751..04c923819d54 100644 --- a/mypyc/doc/using_type_annotations.rst +++ b/mypyc/doc/using_type_annotations.rst @@ -32,6 +32,8 @@ implementations: * ``int`` (:ref:`native operations `) * ``i64`` (:ref:`documentation `, :ref:`native operations `) * ``i32`` (:ref:`documentation `, :ref:`native operations `) +* ``i16`` (:ref:`documentation `, :ref:`native operations `) +* ``u8`` (:ref:`documentation `, :ref:`native operations `) * ``float`` (:ref:`native operations `) * ``bool`` (:ref:`native operations `) * ``str`` (:ref:`native operations `) @@ -193,8 +195,8 @@ Traits have some special properties: * You shouldn't create instances of traits (though mypyc does not prevent it yet). -* Traits can subclass other traits, but they can't subclass non-trait - classes (other than ``object``). +* Traits can subclass other traits or native classes, but the MRO must be + linear (just like with native classes). * Accessing methods or attributes through a trait type is somewhat less efficient than through a native class type, but this is much @@ -342,11 +344,12 @@ Examples:: Native integer types -------------------- -You can use the native integer types ``i64`` (64-bit signed integer) -and ``i32`` (32-bit signed integer) if you know that integer values -will always fit within fixed bounds. These types are faster than the +You can use the native integer types ``i64`` (64-bit signed integer), +``i32`` (32-bit signed integer), ``i16`` (16-bit signed integer), and +``u8`` (8-bit unsigned integer) if you know that integer values will +always fit within fixed bounds. These types are faster than the arbitrary-precision ``int`` type, since they don't require overflow -checks on operations. ``i32`` may also use less memory than ``int`` +checks on operations. They may also use less memory than ``int`` values. The types are imported from the ``mypy_extensions`` module (installed via ``pip install mypy_extensions``). diff --git a/mypyc/ir/class_ir.py b/mypyc/ir/class_ir.py index a5ac2133ce13..682e30629118 100644 --- a/mypyc/ir/class_ir.py +++ b/mypyc/ir/class_ir.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import List, NamedTuple, Optional +from typing import List, NamedTuple from mypyc.common import PROPSET_PREFIX, JsonDict from mypyc.ir.func_ir import FuncDecl, FuncIR, FuncSignature @@ -73,7 +73,7 @@ class VTableMethod(NamedTuple): cls: "ClassIR" name: str method: FuncIR - shadow_method: Optional[FuncIR] + shadow_method: FuncIR | None VTableEntries = List[VTableMethod] @@ -192,7 +192,7 @@ def __init__( # bitmap for types such as native ints that can't have a dedicated error # value that doesn't overlap a valid value. The bitmap is used if the # value of an attribute is the same as the error value. 
- self.bitmap_attrs: List[str] = [] + self.bitmap_attrs: list[str] = [] def __repr__(self) -> str: return ( diff --git a/mypyc/ir/func_ir.py b/mypyc/ir/func_ir.py index dbb45fc7ec29..44847c7bb0b3 100644 --- a/mypyc/ir/func_ir.py +++ b/mypyc/ir/func_ir.py @@ -2,8 +2,7 @@ from __future__ import annotations -from typing import Sequence -from typing_extensions import Final +from typing import Final, Sequence from mypy.nodes import ARG_POS, ArgKind, Block, FuncDef from mypyc.common import BITMAP_BITS, JsonDict, bitmap_name, get_id_from_name, short_id_from_name @@ -86,7 +85,7 @@ def real_args(self) -> tuple[RuntimeArg, ...]: return self.args[: -self.num_bitmap_args] return self.args - def bound_sig(self) -> "FuncSignature": + def bound_sig(self) -> FuncSignature: if self.num_bitmap_args: return FuncSignature(self.args[1 : -self.num_bitmap_args], self.ret_type) else: diff --git a/mypyc/ir/ops.py b/mypyc/ir/ops.py index 6007f8a4ce04..d80c479211b7 100644 --- a/mypyc/ir/ops.py +++ b/mypyc/ir/ops.py @@ -12,8 +12,7 @@ from __future__ import annotations from abc import abstractmethod -from typing import TYPE_CHECKING, Dict, Generic, List, NamedTuple, Sequence, TypeVar, Union -from typing_extensions import Final +from typing import TYPE_CHECKING, Final, Generic, List, NamedTuple, Sequence, TypeVar, Union from mypy_extensions import trait @@ -1163,6 +1162,7 @@ class ComparisonOp(RegisterOp): } signed_ops: Final = {"==": EQ, "!=": NEQ, "<": SLT, ">": SGT, "<=": SLE, ">=": SGE} + unsigned_ops: Final = {"==": EQ, "!=": NEQ, "<": ULT, ">": UGT, "<=": ULE, ">=": UGE} def __init__(self, lhs: Value, rhs: Value, op: int, line: int = -1) -> None: super().__init__(line) @@ -1204,10 +1204,10 @@ def __init__(self, lhs: Value, rhs: Value, op: int, line: int = -1) -> None: self.rhs = rhs self.op = op - def sources(self) -> List[Value]: + def sources(self) -> list[Value]: return [self.lhs, self.rhs] - def accept(self, visitor: "OpVisitor[T]") -> T: + def accept(self, visitor: OpVisitor[T]) -> T: return visitor.visit_float_op(self) @@ -1226,10 +1226,10 @@ def __init__(self, src: Value, line: int = -1) -> None: self.type = float_rprimitive self.src = src - def sources(self) -> List[Value]: + def sources(self) -> list[Value]: return [self.src] - def accept(self, visitor: "OpVisitor[T]") -> T: + def accept(self, visitor: OpVisitor[T]) -> T: return visitor.visit_float_neg(self) @@ -1254,10 +1254,10 @@ def __init__(self, lhs: Value, rhs: Value, op: int, line: int = -1) -> None: self.rhs = rhs self.op = op - def sources(self) -> List[Value]: + def sources(self) -> list[Value]: return [self.lhs, self.rhs] - def accept(self, visitor: "OpVisitor[T]") -> T: + def accept(self, visitor: OpVisitor[T]) -> T: return visitor.visit_float_comparison_op(self) @@ -1575,5 +1575,5 @@ def visit_keep_alive(self, op: KeepAlive) -> T: # (Serialization and deserialization *will* be used for incremental # compilation but so far it is not hooked up to anything.) 
class DeserMaps(NamedTuple): - classes: Dict[str, "ClassIR"] - functions: Dict[str, "FuncIR"] + classes: dict[str, "ClassIR"] + functions: dict[str, "FuncIR"] diff --git a/mypyc/ir/pprint.py b/mypyc/ir/pprint.py index 4d10a91835ca..c86060c49594 100644 --- a/mypyc/ir/pprint.py +++ b/mypyc/ir/pprint.py @@ -3,8 +3,7 @@ from __future__ import annotations from collections import defaultdict -from typing import Any, Sequence, Union -from typing_extensions import Final +from typing import Any, Final, Sequence, Union from mypyc.common import short_name from mypyc.ir.func_ir import FuncIR, all_values_full diff --git a/mypyc/ir/rtypes.py b/mypyc/ir/rtypes.py index 7ff82ac9b297..fa46feb0b59a 100644 --- a/mypyc/ir/rtypes.py +++ b/mypyc/ir/rtypes.py @@ -24,7 +24,7 @@ from abc import abstractmethod from typing import TYPE_CHECKING, ClassVar, Generic, TypeVar -from typing_extensions import Final +from typing_extensions import Final, TypeGuard from mypyc.common import IS_32_BIT_PLATFORM, PLATFORM_SIZE, JsonDict, short_name from mypyc.namegen import NameGenerator @@ -206,13 +206,12 @@ def __init__( self.error_overlap = error_overlap if ctype == "CPyTagged": self.c_undefined = "CPY_INT_TAG" - elif ctype in ("int32_t", "int64_t"): + elif ctype in ("int16_t", "int32_t", "int64_t"): # This is basically an arbitrary value that is pretty # unlikely to overlap with a real value. self.c_undefined = "-113" - elif ctype in ("CPyPtr", "uint32_t", "uint64_t"): - # TODO: For low-level integers, we need to invent an overlapping - # error value, similar to int64_t above. + elif ctype == "CPyPtr": + # TODO: Invent an overlapping error value? self.c_undefined = "0" elif ctype == "PyObject *": # Boxed types use the null pointer as the error value. @@ -223,6 +222,8 @@ def __init__( self.c_undefined = "NULL" elif ctype == "double": self.c_undefined = "-113.0" + elif ctype in ("uint8_t", "uint16_t", "uint32_t", "uint64_t"): + self.c_undefined = "239" # An arbitrary number else: assert False, "Unrecognized ctype: %r" % ctype @@ -290,8 +291,18 @@ def __hash__(self) -> int: # Low level integer types (correspond to C integer types) +int16_rprimitive: Final = RPrimitive( + "i16", + is_unboxed=True, + is_refcounted=False, + is_native_int=True, + is_signed=True, + ctype="int16_t", + size=2, + error_overlap=True, +) int32_rprimitive: Final = RPrimitive( - "int32", + "i32", is_unboxed=True, is_refcounted=False, is_native_int=True, @@ -301,7 +312,7 @@ def __hash__(self) -> int: error_overlap=True, ) int64_rprimitive: Final = RPrimitive( - "int64", + "i64", is_unboxed=True, is_refcounted=False, is_native_int=True, @@ -310,23 +321,49 @@ def __hash__(self) -> int: size=8, error_overlap=True, ) +uint8_rprimitive: Final = RPrimitive( + "u8", + is_unboxed=True, + is_refcounted=False, + is_native_int=True, + is_signed=False, + ctype="uint8_t", + size=1, + error_overlap=True, +) + +# The following unsigned native int types (u16, u32, u64) are not +# exposed to the user. They are for internal use within mypyc only. 
+ +u16_rprimitive: Final = RPrimitive( + "u16", + is_unboxed=True, + is_refcounted=False, + is_native_int=True, + is_signed=False, + ctype="uint16_t", + size=2, + error_overlap=True, +) uint32_rprimitive: Final = RPrimitive( - "uint32", + "u32", is_unboxed=True, is_refcounted=False, is_native_int=True, is_signed=False, ctype="uint32_t", size=4, + error_overlap=True, ) uint64_rprimitive: Final = RPrimitive( - "uint64", + "u64", is_unboxed=True, is_refcounted=False, is_native_int=True, is_signed=False, ctype="uint64_t", size=8, + error_overlap=True, ) # The C 'int' type @@ -432,7 +469,11 @@ def is_short_int_rprimitive(rtype: RType) -> bool: return rtype is short_int_rprimitive -def is_int32_rprimitive(rtype: RType) -> bool: +def is_int16_rprimitive(rtype: RType) -> TypeGuard[RPrimitive]: + return rtype is int16_rprimitive + + +def is_int32_rprimitive(rtype: RType) -> TypeGuard[RPrimitive]: return rtype is int32_rprimitive or ( rtype is c_pyssize_t_rprimitive and rtype._ctype == "int32_t" ) @@ -444,8 +485,17 @@ def is_int64_rprimitive(rtype: RType) -> bool: ) -def is_fixed_width_rtype(rtype: RType) -> bool: - return is_int32_rprimitive(rtype) or is_int64_rprimitive(rtype) +def is_fixed_width_rtype(rtype: RType) -> TypeGuard[RPrimitive]: + return ( + is_int64_rprimitive(rtype) + or is_int32_rprimitive(rtype) + or is_int16_rprimitive(rtype) + or is_uint8_rprimitive(rtype) + ) + + +def is_uint8_rprimitive(rtype: RType) -> TypeGuard[RPrimitive]: + return rtype is uint8_rprimitive def is_uint32_rprimitive(rtype: RType) -> bool: @@ -536,6 +586,10 @@ def visit_rprimitive(self, t: RPrimitive) -> str: return "8" # "8 byte integer" elif t._ctype == "int32_t": return "4" # "4 byte integer" + elif t._ctype == "int16_t": + return "2" # "2 byte integer" + elif t._ctype == "uint8_t": + return "U1" # "1 byte unsigned integer" elif t._ctype == "double": return "F" assert not t.is_unboxed, f"{t} unexpected unboxed type" @@ -970,3 +1024,15 @@ def deserialize(cls, data: JsonDict, ctx: DeserMaps) -> RArray: names=["ob_base", "ob_item", "allocated"], types=[PyVarObject, pointer_rprimitive, c_pyssize_t_rprimitive], ) + + +def check_native_int_range(rtype: RPrimitive, n: int) -> bool: + """Is n within the range of a native, fixed-width int type? + + Assume the type is a fixed-width int type. 
+ """ + if not rtype.is_signed: + return 0 <= n < (1 << (8 * rtype.size)) + else: + limit = 1 << (rtype.size * 8 - 1) + return -limit <= n < limit diff --git a/mypyc/irbuild/builder.py b/mypyc/irbuild/builder.py index 26aa17071974..8c68f91bf633 100644 --- a/mypyc/irbuild/builder.py +++ b/mypyc/irbuild/builder.py @@ -13,8 +13,8 @@ from __future__ import annotations from contextlib import contextmanager -from typing import Any, Callable, Iterator, Sequence, Union -from typing_extensions import Final, overload +from typing import Any, Callable, Final, Iterator, Sequence, Union +from typing_extensions import overload from mypy.build import Graph from mypy.maptype import map_instance_to_supertype @@ -159,7 +159,7 @@ def __init__( options: CompilerOptions, singledispatch_impls: dict[FuncDef, list[RegisterImplInfo]], ) -> None: - self.builder = LowLevelIRBuilder(current_module, mapper, options) + self.builder = LowLevelIRBuilder(current_module, errors, mapper, options) self.builders = [self.builder] self.symtables: list[dict[SymbolNode, SymbolTarget]] = [{}] self.runtime_args: list[list[RuntimeArg]] = [[]] @@ -224,6 +224,7 @@ def set_module(self, module_name: str, module_path: str) -> None: """ self.module_name = module_name self.module_path = module_path + self.builder.set_module(module_name, module_path) @overload def accept(self, node: Expression, *, can_borrow: bool = False) -> Value: @@ -535,16 +536,14 @@ def load_final_static( error_msg=f'value for final name "{error_name}" was not set', ) - def load_final_literal_value(self, val: int | str | bytes | float | bool, line: int) -> Value: - """Load value of a final name or class-level attribute.""" + def load_literal_value(self, val: int | str | bytes | float | complex | bool) -> Value: + """Load value of a final name, class-level attribute, or constant folded expression.""" if isinstance(val, bool): if val: return self.true() else: return self.false() elif isinstance(val, int): - # TODO: take care of negative integer initializers - # (probably easier to fix this in mypy itself). 
return self.builder.load_int(val) elif isinstance(val, float): return self.builder.load_float(val) @@ -552,8 +551,10 @@ def load_final_literal_value(self, val: int | str | bytes | float | bool, line: return self.builder.load_str(val) elif isinstance(val, bytes): return self.builder.load_bytes(val) + elif isinstance(val, complex): + return self.builder.load_complex(val) else: - assert False, "Unsupported final literal value" + assert False, "Unsupported literal value" def get_assignment_target( self, lvalue: Lvalue, line: int = -1, *, for_read: bool = False @@ -1013,7 +1014,7 @@ def emit_load_final( line: line number where loading occurs """ if final_var.final_value is not None: # this is safe even for non-native names - return self.load_final_literal_value(final_var.final_value, line) + return self.load_literal_value(final_var.final_value) elif native: return self.load_final_static(fullname, self.mapper.type_to_rtype(typ), line, name) else: @@ -1102,7 +1103,10 @@ def flatten_classes(self, arg: RefExpr | TupleExpr) -> list[ClassIR] | None: def enter(self, fn_info: FuncInfo | str = "") -> None: if isinstance(fn_info, str): fn_info = FuncInfo(name=fn_info) - self.builder = LowLevelIRBuilder(self.current_module, self.mapper, self.options) + self.builder = LowLevelIRBuilder( + self.current_module, self.errors, self.mapper, self.options + ) + self.builder.set_module(self.module_name, self.module_path) self.builders.append(self.builder) self.symtables.append({}) self.runtime_args.append([]) diff --git a/mypyc/irbuild/callable_class.py b/mypyc/irbuild/callable_class.py index d3ee54a208cd..599dbb81f767 100644 --- a/mypyc/irbuild/callable_class.py +++ b/mypyc/irbuild/callable_class.py @@ -17,7 +17,7 @@ def setup_callable_class(builder: IRBuilder) -> None: - """Generate an (incomplete) callable class representing function. + """Generate an (incomplete) callable class representing a function. This can be a nested function or a function within a non-extension class. Also set up the 'self' variable for that class. diff --git a/mypyc/irbuild/classdef.py b/mypyc/irbuild/classdef.py index 59b1c05a0ddb..fc2bb4a1fc2f 100644 --- a/mypyc/irbuild/classdef.py +++ b/mypyc/irbuild/classdef.py @@ -2,9 +2,9 @@ from __future__ import annotations +import typing_extensions from abc import abstractmethod -from typing import Callable -from typing_extensions import Final +from typing import Callable, Final from mypy.nodes import ( AssignmentStmt, @@ -498,6 +498,14 @@ def populate_non_ext_bases(builder: IRBuilder, cdef: ClassDef) -> Value: if builder.options.capi_version < (3, 8): # TypedDict was added to typing in Python 3.8. module = "typing_extensions" + # TypedDict is not a real type on typing_extensions 4.7.0+ + name = "_TypedDict" + if isinstance(typing_extensions.TypedDict, type): + raise RuntimeError( + "It looks like you may have an old version " + "of typing_extensions installed. " + "typing_extensions>=4.7.0 is required on Python 3.7." + ) else: # In Python 3.9 TypedDict is not a real type. 
name = "_TypedDict" diff --git a/mypyc/irbuild/constant_fold.py b/mypyc/irbuild/constant_fold.py index bc71052f5418..12a4b15dd40c 100644 --- a/mypyc/irbuild/constant_fold.py +++ b/mypyc/irbuild/constant_fold.py @@ -10,16 +10,12 @@ from __future__ import annotations -from typing import Union -from typing_extensions import Final +from typing import Final, Union -from mypy.constant_fold import ( - constant_fold_binary_int_op, - constant_fold_binary_str_op, - constant_fold_unary_float_op, - constant_fold_unary_int_op, -) +from mypy.constant_fold import constant_fold_binary_op, constant_fold_unary_op from mypy.nodes import ( + BytesExpr, + ComplexExpr, Expression, FloatExpr, IntExpr, @@ -31,10 +27,11 @@ Var, ) from mypyc.irbuild.builder import IRBuilder +from mypyc.irbuild.util import bytes_from_str # All possible result types of constant folding -ConstantValue = Union[int, str, float] -CONST_TYPES: Final = (int, str, float) +ConstantValue = Union[int, float, complex, str, bytes] +CONST_TYPES: Final = (int, float, complex, str, bytes) def constant_fold_expr(builder: IRBuilder, expr: Expression) -> ConstantValue | None: @@ -44,35 +41,55 @@ def constant_fold_expr(builder: IRBuilder, expr: Expression) -> ConstantValue | """ if isinstance(expr, IntExpr): return expr.value + if isinstance(expr, FloatExpr): + return expr.value if isinstance(expr, StrExpr): return expr.value - if isinstance(expr, FloatExpr): + if isinstance(expr, BytesExpr): + return bytes_from_str(expr.value) + if isinstance(expr, ComplexExpr): return expr.value elif isinstance(expr, NameExpr): node = expr.node if isinstance(node, Var) and node.is_final: - value = node.final_value - if isinstance(value, (CONST_TYPES)): - return value + final_value = node.final_value + if isinstance(final_value, (CONST_TYPES)): + return final_value elif isinstance(expr, MemberExpr): final = builder.get_final_ref(expr) if final is not None: fn, final_var, native = final if final_var.is_final: - value = final_var.final_value - if isinstance(value, (CONST_TYPES)): - return value + final_value = final_var.final_value + if isinstance(final_value, (CONST_TYPES)): + return final_value elif isinstance(expr, OpExpr): left = constant_fold_expr(builder, expr.left) right = constant_fold_expr(builder, expr.right) - if isinstance(left, int) and isinstance(right, int): - return constant_fold_binary_int_op(expr.op, left, right) - elif isinstance(left, str) and isinstance(right, str): - return constant_fold_binary_str_op(expr.op, left, right) + if left is not None and right is not None: + return constant_fold_binary_op_extended(expr.op, left, right) elif isinstance(expr, UnaryExpr): value = constant_fold_expr(builder, expr.expr) - if isinstance(value, int): - return constant_fold_unary_int_op(expr.op, value) - if isinstance(value, float): - return constant_fold_unary_float_op(expr.op, value) + if value is not None and not isinstance(value, bytes): + return constant_fold_unary_op(expr.op, value) + return None + + +def constant_fold_binary_op_extended( + op: str, left: ConstantValue, right: ConstantValue +) -> ConstantValue | None: + """Like mypy's constant_fold_binary_op(), but includes bytes support. + + mypy cannot use constant folded bytes easily so it's simpler to only support them in mypyc. 
+ """ + if not isinstance(left, bytes) and not isinstance(right, bytes): + return constant_fold_binary_op(op, left, right) + + if op == "+" and isinstance(left, bytes) and isinstance(right, bytes): + return left + right + elif op == "*" and isinstance(left, bytes) and isinstance(right, int): + return left * right + elif op == "*" and isinstance(left, int) and isinstance(right, bytes): + return left * right + return None diff --git a/mypyc/irbuild/expression.py b/mypyc/irbuild/expression.py index 4ebc422ed535..8d205b432d2d 100644 --- a/mypyc/irbuild/expression.py +++ b/mypyc/irbuild/expression.py @@ -55,7 +55,6 @@ Assign, BasicBlock, ComparisonOp, - Float, Integer, LoadAddress, LoadLiteral, @@ -92,7 +91,6 @@ tokenizer_printf_style, ) from mypyc.irbuild.specialize import apply_function_specialization, apply_method_specialization -from mypyc.irbuild.util import bytes_from_str from mypyc.primitives.bytes_ops import bytes_slice_op from mypyc.primitives.dict_ops import dict_get_item_op, dict_new_op, dict_set_item_op from mypyc.primitives.generic_ops import iter_op @@ -575,12 +573,8 @@ def try_constant_fold(builder: IRBuilder, expr: Expression) -> Value | None: Return None otherwise. """ value = constant_fold_expr(builder, expr) - if isinstance(value, int): - return builder.load_int(value) - elif isinstance(value, str): - return builder.load_str(value) - elif isinstance(value, float): - return Float(value) + if value is not None: + return builder.load_literal_value(value) return None @@ -662,10 +656,6 @@ def set_literal_values(builder: IRBuilder, items: Sequence[Expression]) -> list[ values.append(True) elif item.fullname == "builtins.False": values.append(False) - elif isinstance(item, (BytesExpr, FloatExpr, ComplexExpr)): - # constant_fold_expr() doesn't handle these (yet?) 
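As a quick illustration (not part of the diff): constant_fold_binary_op_extended() above hands non-bytes operands to mypy's constant_fold_binary_op() and folds only three bytes shapes itself: bytes + bytes, bytes * int, and int * bytes. That bytes-only layer in isolation (helper name ours):

from __future__ import annotations

def fold_bytes_op(op: str, left: object, right: object) -> bytes | None:
    # Return the folded result for the supported bytes cases, else None.
    if op == "+" and isinstance(left, bytes) and isinstance(right, bytes):
        return left + right
    if op == "*" and isinstance(left, bytes) and isinstance(right, int):
        return left * right
    if op == "*" and isinstance(left, int) and isinstance(right, bytes):
        return left * right
    return None

assert fold_bytes_op("+", b"ab", b"cd") == b"abcd"
assert fold_bytes_op("*", b"xy", 3) == b"xyxyxy"
assert fold_bytes_op("-", b"ab", b"cd") is None   # unsupported op: not folded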
- v = bytes_from_str(item.value) if isinstance(item, BytesExpr) else item.value - values.append(v) elif isinstance(item, TupleExpr): tuple_values = set_literal_values(builder, item.items) if tuple_values is not None: @@ -685,7 +675,6 @@ def precompute_set_literal(builder: IRBuilder, s: SetExpr) -> Value | None: Supported items: - Anything supported by irbuild.constant_fold.constant_fold_expr() - None, True, and False - - Float, byte, and complex literals - Tuple literals with only items listed above """ values = set_literal_values(builder, s.items) @@ -826,21 +815,30 @@ def transform_basic_comparison( return builder.compare_tagged(left, right, op, line) if is_fixed_width_rtype(left.type) and op in int_comparison_op_mapping: if right.type == left.type: - op_id = ComparisonOp.signed_ops[op] + if left.type.is_signed: + op_id = ComparisonOp.signed_ops[op] + else: + op_id = ComparisonOp.unsigned_ops[op] return builder.builder.comparison_op(left, right, op_id, line) elif isinstance(right, Integer): - op_id = ComparisonOp.signed_ops[op] + if left.type.is_signed: + op_id = ComparisonOp.signed_ops[op] + else: + op_id = ComparisonOp.unsigned_ops[op] return builder.builder.comparison_op( - left, Integer(right.value >> 1, left.type), op_id, line + left, builder.coerce(right, left.type, line), op_id, line ) elif ( is_fixed_width_rtype(right.type) and op in int_comparison_op_mapping and isinstance(left, Integer) ): - op_id = ComparisonOp.signed_ops[op] + if right.type.is_signed: + op_id = ComparisonOp.signed_ops[op] + else: + op_id = ComparisonOp.unsigned_ops[op] return builder.builder.comparison_op( - Integer(left.value >> 1, right.type), right, op_id, line + builder.coerce(left, right.type, line), right, op_id, line ) negate = False diff --git a/mypyc/irbuild/format_str_tokenizer.py b/mypyc/irbuild/format_str_tokenizer.py index 480c683aa164..0b46887811fb 100644 --- a/mypyc/irbuild/format_str_tokenizer.py +++ b/mypyc/irbuild/format_str_tokenizer.py @@ -3,7 +3,7 @@ from __future__ import annotations from enum import Enum, unique -from typing_extensions import Final +from typing import Final from mypy.checkstrformat import ( ConversionSpecifier, diff --git a/mypyc/irbuild/ll_builder.py b/mypyc/irbuild/ll_builder.py index aa152d32a144..984b6a4deec0 100644 --- a/mypyc/irbuild/ll_builder.py +++ b/mypyc/irbuild/ll_builder.py @@ -10,8 +10,7 @@ from __future__ import annotations -from typing import Callable, Optional, Sequence, Tuple -from typing_extensions import Final +from typing import Callable, Final, Optional, Sequence, Tuple from mypy.argmap import map_actuals_to_formals from mypy.nodes import ARG_POS, ARG_STAR, ARG_STAR2, ArgKind @@ -28,6 +27,7 @@ use_method_vectorcall, use_vectorcall, ) +from mypyc.errors import Errors from mypyc.ir.class_ir import ClassIR, all_concrete_classes from mypyc.ir.func_ir import FuncDecl, FuncSignature from mypyc.ir.ops import ( @@ -95,6 +95,7 @@ c_pointer_rprimitive, c_pyssize_t_rprimitive, c_size_t_rprimitive, + check_native_int_range, dict_rprimitive, float_rprimitive, int_rprimitive, @@ -104,6 +105,7 @@ is_dict_rprimitive, is_fixed_width_rtype, is_float_rprimitive, + is_int16_rprimitive, is_int32_rprimitive, is_int64_rprimitive, is_int_rprimitive, @@ -114,6 +116,7 @@ is_str_rprimitive, is_tagged, is_tuple_rprimitive, + is_uint8_rprimitive, list_rprimitive, none_rprimitive, object_pointer_rprimitive, @@ -146,6 +149,9 @@ py_vectorcall_op, ) from mypyc.primitives.int_ops import ( + int16_divide_op, + int16_mod_op, + int16_overflow, int32_divide_op, int32_mod_op, 
int32_overflow, @@ -156,6 +162,7 @@ int_to_int32_op, int_to_int64_op, ssize_t_to_int_op, + uint8_overflow, ) from mypyc.primitives.list_ops import list_build_op, list_extend_op, new_list_op from mypyc.primitives.misc_ops import bool_op, fast_isinstance_op, none_object_op @@ -213,8 +220,11 @@ class LowLevelIRBuilder: - def __init__(self, current_module: str, mapper: Mapper, options: CompilerOptions) -> None: + def __init__( + self, current_module: str, errors: Errors, mapper: Mapper, options: CompilerOptions + ) -> None: self.current_module = current_module + self.errors = errors self.mapper = mapper self.options = options self.args: list[Register] = [] @@ -225,6 +235,11 @@ def __init__(self, current_module: str, mapper: Mapper, options: CompilerOptions # temporaries. Use flush_keep_alives() to mark the end of the live range. self.keep_alives: list[Value] = [] + def set_module(self, module_name: str, module_path: str) -> None: + """Set the name and path of the current module.""" + self.module_name = module_name + self.module_path = module_path + # Basic operations def add(self, op: Op) -> Value: @@ -320,7 +335,9 @@ def coerce( and is_short_int_rprimitive(src_type) and is_fixed_width_rtype(target_type) ): - # TODO: range check + value = src.numeric_value() + if not check_native_int_range(target_type, value): + self.error(f'Value {value} is out of range for "{target_type}"', line) return Integer(src.value >> 1, target_type) elif is_int_rprimitive(src_type) and is_fixed_width_rtype(target_type): return self.coerce_int_to_fixed_width(src, target_type, line) @@ -410,10 +427,16 @@ def coerce_int_to_fixed_width(self, src: Value, target_type: RType, line: int) - # Add a range check when the target type is smaller than the source tyoe fast2, fast3 = BasicBlock(), BasicBlock() upper_bound = 1 << (size * 8 - 1) + if not target_type.is_signed: + upper_bound *= 2 check2 = self.add(ComparisonOp(src, Integer(upper_bound, src.type), ComparisonOp.SLT)) self.add(Branch(check2, fast2, slow, Branch.BOOL)) self.activate_block(fast2) - check3 = self.add(ComparisonOp(src, Integer(-upper_bound, src.type), ComparisonOp.SGE)) + if target_type.is_signed: + lower_bound = -upper_bound + else: + lower_bound = 0 + check3 = self.add(ComparisonOp(src, Integer(lower_bound, src.type), ComparisonOp.SGE)) self.add(Branch(check3, fast3, slow, Branch.BOOL)) self.activate_block(fast3) tmp = self.int_op( @@ -456,6 +479,14 @@ def coerce_int_to_fixed_width(self, src: Value, target_type: RType, line: int) - # Slow path just always generates an OverflowError self.call_c(int32_overflow, [], line) self.add(Unreachable()) + elif is_int16_rprimitive(target_type): + # Slow path just always generates an OverflowError + self.call_c(int16_overflow, [], line) + self.add(Unreachable()) + elif is_uint8_rprimitive(target_type): + # Slow path just always generates an OverflowError + self.call_c(uint8_overflow, [], line) + self.add(Unreachable()) else: assert False, target_type @@ -469,9 +500,13 @@ def coerce_short_int_to_fixed_width(self, src: Value, target_type: RType, line: assert False, (src.type, target_type) def coerce_fixed_width_to_int(self, src: Value, line: int) -> Value: - if is_int32_rprimitive(src.type) and PLATFORM_SIZE == 8: + if ( + (is_int32_rprimitive(src.type) and PLATFORM_SIZE == 8) + or is_int16_rprimitive(src.type) + or is_uint8_rprimitive(src.type) + ): # Simple case -- just sign extend and shift. 
- extended = self.add(Extend(src, c_pyssize_t_rprimitive, signed=True)) + extended = self.add(Extend(src, c_pyssize_t_rprimitive, signed=src.type.is_signed)) return self.int_op( int_rprimitive, extended, @@ -1303,9 +1338,8 @@ def binary_op(self, lreg: Value, rreg: Value, op: str, line: int) -> Value: if is_fixed_width_rtype(rtype) or is_tagged(rtype): return self.fixed_width_int_op(ltype, lreg, rreg, op_id, line) if isinstance(rreg, Integer): - # TODO: Check what kind of Integer return self.fixed_width_int_op( - ltype, lreg, Integer(rreg.value >> 1, ltype), op_id, line + ltype, lreg, self.coerce(rreg, ltype, line), op_id, line ) elif op in ComparisonOp.signed_ops: if is_int_rprimitive(rtype): @@ -1316,7 +1350,7 @@ def binary_op(self, lreg: Value, rreg: Value, op: str, line: int) -> Value: if is_fixed_width_rtype(rreg.type): return self.comparison_op(lreg, rreg, op_id, line) if isinstance(rreg, Integer): - return self.comparison_op(lreg, Integer(rreg.value >> 1, ltype), op_id, line) + return self.comparison_op(lreg, self.coerce(rreg, ltype, line), op_id, line) elif is_fixed_width_rtype(rtype): if op in FIXED_WIDTH_INT_BINARY_OPS: if op.endswith("="): @@ -1326,9 +1360,8 @@ def binary_op(self, lreg: Value, rreg: Value, op: str, line: int) -> Value: else: op_id = IntOp.DIV if isinstance(lreg, Integer): - # TODO: Check what kind of Integer return self.fixed_width_int_op( - rtype, Integer(lreg.value >> 1, rtype), rreg, op_id, line + rtype, self.coerce(lreg, rtype, line), rreg, op_id, line ) if is_tagged(ltype): return self.fixed_width_int_op(rtype, lreg, rreg, op_id, line) @@ -1342,7 +1375,7 @@ def binary_op(self, lreg: Value, rreg: Value, op: str, line: int) -> Value: lreg = self.coerce(lreg, rtype, line) op_id = ComparisonOp.signed_ops[op] if isinstance(lreg, Integer): - return self.comparison_op(Integer(lreg.value >> 1, rtype), rreg, op_id, line) + return self.comparison_op(self.coerce(lreg, rtype, line), rreg, op_id, line) if is_fixed_width_rtype(lreg.type): return self.comparison_op(lreg, rreg, op_id, line) @@ -1604,8 +1637,13 @@ def unary_op(self, value: Value, expr_op: str, line: int) -> Value: # Translate to '0 - x' return self.int_op(typ, Integer(0, typ), value, IntOp.SUB, line) elif expr_op == "~": - # Translate to 'x ^ -1' - return self.int_op(typ, value, Integer(-1, typ), IntOp.XOR, line) + if typ.is_signed: + # Translate to 'x ^ -1' + return self.int_op(typ, value, Integer(-1, typ), IntOp.XOR, line) + else: + # Translate to 'x ^ 0xff...' + mask = (1 << (typ.size * 8)) - 1 + return self.int_op(typ, value, Integer(mask, typ), IntOp.XOR, line) elif expr_op == "+": return value if is_float_rprimitive(typ): @@ -2018,7 +2056,9 @@ def float_mod(self, lhs: Value, rhs: Value, line: int) -> Value: def compare_floats(self, lhs: Value, rhs: Value, op: int, line: int) -> Value: return self.add(FloatComparisonOp(lhs, rhs, op, line)) - def fixed_width_int_op(self, type: RType, lhs: Value, rhs: Value, op: int, line: int) -> Value: + def fixed_width_int_op( + self, type: RPrimitive, lhs: Value, rhs: Value, op: int, line: int + ) -> Value: """Generate a binary op using Python fixed-width integer semantics. These may differ in overflow/rounding behavior from native/C ops. 
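As a quick illustration (not part of the diff): the unary_op() change above rewrites ~x for unsigned native ints as x ^ 0xff... instead of x ^ -1, since -1 is not representable in an unsigned type. The identity is easy to spot-check in plain Python for the u8 width (helper name ours):

def unsigned_invert(x: int, size: int = 1) -> int:
    # Bitwise-not within an unsigned width: x ^ mask, where mask = 2**(8*size) - 1.
    mask = (1 << (size * 8)) - 1
    return x ^ mask

for x in range(256):
    assert unsigned_invert(x) == (~x) & 0xFF   # u8: same result as wrapping ~x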
@@ -2030,31 +2070,60 @@ def fixed_width_int_op(self, type: RType, lhs: Value, rhs: Value, op: int, line: lhs = self.coerce(lhs, type, line) rhs = self.coerce(rhs, type, line) if op == IntOp.DIV: - # Inline simple division by a constant, so that C - # compilers can optimize more if isinstance(rhs, Integer) and rhs.value not in (-1, 0): - return self.inline_fixed_width_divide(type, lhs, rhs, line) + if not type.is_signed: + return self.int_op(type, lhs, rhs, IntOp.DIV, line) + else: + # Inline simple division by a constant, so that C + # compilers can optimize more + return self.inline_fixed_width_divide(type, lhs, rhs, line) if is_int64_rprimitive(type): prim = int64_divide_op elif is_int32_rprimitive(type): prim = int32_divide_op + elif is_int16_rprimitive(type): + prim = int16_divide_op + elif is_uint8_rprimitive(type): + self.check_for_zero_division(rhs, type, line) + return self.int_op(type, lhs, rhs, op, line) else: assert False, type return self.call_c(prim, [lhs, rhs], line) if op == IntOp.MOD: - # Inline simple % by a constant, so that C - # compilers can optimize more if isinstance(rhs, Integer) and rhs.value not in (-1, 0): - return self.inline_fixed_width_mod(type, lhs, rhs, line) + if not type.is_signed: + return self.int_op(type, lhs, rhs, IntOp.MOD, line) + else: + # Inline simple % by a constant, so that C + # compilers can optimize more + return self.inline_fixed_width_mod(type, lhs, rhs, line) if is_int64_rprimitive(type): prim = int64_mod_op elif is_int32_rprimitive(type): prim = int32_mod_op + elif is_int16_rprimitive(type): + prim = int16_mod_op + elif is_uint8_rprimitive(type): + self.check_for_zero_division(rhs, type, line) + return self.int_op(type, lhs, rhs, op, line) else: assert False, type return self.call_c(prim, [lhs, rhs], line) return self.int_op(type, lhs, rhs, op, line) + def check_for_zero_division(self, rhs: Value, type: RType, line: int) -> None: + err, ok = BasicBlock(), BasicBlock() + is_zero = self.binary_op(rhs, Integer(0, type), "==", line) + self.add(Branch(is_zero, err, ok, Branch.BOOL)) + self.activate_block(err) + self.add( + RaiseStandardError( + RaiseStandardError.ZERO_DIVISION_ERROR, "integer division or modulo by zero", line + ) + ) + self.add(Unreachable()) + self.activate_block(ok) + def inline_fixed_width_divide(self, type: RType, lhs: Value, rhs: Value, line: int) -> Value: # Perform floor division (native division truncates) res = Register(type) @@ -2321,6 +2390,9 @@ def _create_dict(self, keys: list[Value], values: list[Value], line: int) -> Val else: return self.call_c(dict_new_op, [], line) + def error(self, msg: str, line: int) -> None: + self.errors.error(msg, self.module_path, line) + def num_positional_args(arg_values: list[Value], arg_kinds: list[ArgKind] | None) -> int: if arg_kinds is None: diff --git a/mypyc/irbuild/mapper.py b/mypyc/irbuild/mapper.py index dddb35230fd5..5b77b4b1537b 100644 --- a/mypyc/irbuild/mapper.py +++ b/mypyc/irbuild/mapper.py @@ -32,6 +32,7 @@ bytes_rprimitive, dict_rprimitive, float_rprimitive, + int16_rprimitive, int32_rprimitive, int64_rprimitive, int_rprimitive, @@ -42,6 +43,7 @@ set_rprimitive, str_rprimitive, tuple_rprimitive, + uint8_rprimitive, ) @@ -102,6 +104,10 @@ def type_to_rtype(self, typ: Type | None) -> RType: return int64_rprimitive elif typ.type.fullname == "mypy_extensions.i32": return int32_rprimitive + elif typ.type.fullname == "mypy_extensions.i16": + return int16_rprimitive + elif typ.type.fullname == "mypy_extensions.u8": + return uint8_rprimitive else: return object_rprimitive 
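As a quick illustration (not part of the diff): both inline_fixed_width_divide()/inline_fixed_width_mod() above and the CPyInt16_Divide()/CPyInt16_Remainder() helpers later in this patch start from C-style truncating division and then adjust the result to Python's floor semantics when the operand signs differ. A worked restatement in Python (helper name ours; division by zero is simply left to raise):

from __future__ import annotations

def floor_divmod(x: int, y: int) -> tuple[int, int]:
    # Truncating quotient/remainder first (what C's / and % produce) ...
    q = abs(x) // abs(y)
    if (x < 0) != (y < 0):
        q = -q
    r = x - q * y
    # ... then the "adjust for Python semantics" step from the patch.
    if (x < 0) != (y < 0) and r != 0:
        q -= 1
        r += y
    return q, r

assert floor_divmod(7, 2) == (3, 1)
assert floor_divmod(-7, 2) == (-4, 1)    # C truncation would give (-3, -1)
assert floor_divmod(7, -2) == (-4, -1)   # matches Python's // and %
assert floor_divmod(-7, -2) == (3, -1)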
elif isinstance(typ, TupleType): diff --git a/mypyc/irbuild/specialize.py b/mypyc/irbuild/specialize.py index ff9df0cd597b..7c5958457886 100644 --- a/mypyc/irbuild/specialize.py +++ b/mypyc/irbuild/specialize.py @@ -44,11 +44,13 @@ ) from mypyc.ir.rtypes import ( RInstance, + RPrimitive, RTuple, RType, bool_rprimitive, c_int_rprimitive, dict_rprimitive, + int16_rprimitive, int32_rprimitive, int64_rprimitive, int_rprimitive, @@ -56,13 +58,16 @@ is_dict_rprimitive, is_fixed_width_rtype, is_float_rprimitive, + is_int16_rprimitive, is_int32_rprimitive, is_int64_rprimitive, is_int_rprimitive, is_list_rprimitive, + is_uint8_rprimitive, list_rprimitive, set_rprimitive, str_rprimitive, + uint8_rprimitive, ) from mypyc.irbuild.builder import IRBuilder from mypyc.irbuild.for_helpers import ( @@ -163,15 +168,20 @@ def translate_globals(builder: IRBuilder, expr: CallExpr, callee: RefExpr) -> Va @specialize_function("builtins.complex") @specialize_function("mypy_extensions.i64") @specialize_function("mypy_extensions.i32") +@specialize_function("mypy_extensions.i16") +@specialize_function("mypy_extensions.u8") def translate_builtins_with_unary_dunder( builder: IRBuilder, expr: CallExpr, callee: RefExpr ) -> Value | None: - """Specialize calls on native classes that implement the associated dunder.""" + """Specialize calls on native classes that implement the associated dunder. + + E.g. i64(x) gets specialized to x.__int__() if x is a native instance. + """ if len(expr.args) == 1 and expr.arg_kinds == [ARG_POS] and isinstance(callee, NameExpr): arg = expr.args[0] arg_typ = builder.node_type(arg) shortname = callee.fullname.split(".")[1] - if shortname in ("i64", "i32"): + if shortname in ("i64", "i32", "i16", "u8"): method = "__int__" else: method = f"__{shortname}__" @@ -446,7 +456,7 @@ def translate_sum_call(builder: IRBuilder, expr: CallExpr, callee: RefExpr) -> V # handle 'start' argument, if given if len(expr.args) == 2: # ensure call to sum() was properly constructed - if not expr.arg_kinds[1] in (ARG_POS, ARG_NAMED): + if expr.arg_kinds[1] not in (ARG_POS, ARG_NAMED): return None start_expr = expr.args[1] else: @@ -680,9 +690,12 @@ def translate_i64(builder: IRBuilder, expr: CallExpr, callee: RefExpr) -> Value arg_type = builder.node_type(arg) if is_int64_rprimitive(arg_type): return builder.accept(arg) - elif is_int32_rprimitive(arg_type): + elif is_int32_rprimitive(arg_type) or is_int16_rprimitive(arg_type): val = builder.accept(arg) return builder.add(Extend(val, int64_rprimitive, signed=True, line=expr.line)) + elif is_uint8_rprimitive(arg_type): + val = builder.accept(arg) + return builder.add(Extend(val, int64_rprimitive, signed=False, line=expr.line)) elif is_int_rprimitive(arg_type) or is_bool_rprimitive(arg_type): val = builder.accept(arg) return builder.coerce(val, int64_rprimitive, expr.line) @@ -700,12 +713,78 @@ def translate_i32(builder: IRBuilder, expr: CallExpr, callee: RefExpr) -> Value elif is_int64_rprimitive(arg_type): val = builder.accept(arg) return builder.add(Truncate(val, int32_rprimitive, line=expr.line)) + elif is_int16_rprimitive(arg_type): + val = builder.accept(arg) + return builder.add(Extend(val, int32_rprimitive, signed=True, line=expr.line)) + elif is_uint8_rprimitive(arg_type): + val = builder.accept(arg) + return builder.add(Extend(val, int32_rprimitive, signed=False, line=expr.line)) elif is_int_rprimitive(arg_type) or is_bool_rprimitive(arg_type): val = builder.accept(arg) + val = truncate_literal(val, int32_rprimitive) return builder.coerce(val, 
int32_rprimitive, expr.line) return None +@specialize_function("mypy_extensions.i16") +def translate_i16(builder: IRBuilder, expr: CallExpr, callee: RefExpr) -> Value | None: + if len(expr.args) != 1 or expr.arg_kinds[0] != ARG_POS: + return None + arg = expr.args[0] + arg_type = builder.node_type(arg) + if is_int16_rprimitive(arg_type): + return builder.accept(arg) + elif is_int32_rprimitive(arg_type) or is_int64_rprimitive(arg_type): + val = builder.accept(arg) + return builder.add(Truncate(val, int16_rprimitive, line=expr.line)) + elif is_uint8_rprimitive(arg_type): + val = builder.accept(arg) + return builder.add(Extend(val, int16_rprimitive, signed=False, line=expr.line)) + elif is_int_rprimitive(arg_type) or is_bool_rprimitive(arg_type): + val = builder.accept(arg) + val = truncate_literal(val, int16_rprimitive) + return builder.coerce(val, int16_rprimitive, expr.line) + return None + + +@specialize_function("mypy_extensions.u8") +def translate_u8(builder: IRBuilder, expr: CallExpr, callee: RefExpr) -> Value | None: + if len(expr.args) != 1 or expr.arg_kinds[0] != ARG_POS: + return None + arg = expr.args[0] + arg_type = builder.node_type(arg) + if is_uint8_rprimitive(arg_type): + return builder.accept(arg) + elif ( + is_int16_rprimitive(arg_type) + or is_int32_rprimitive(arg_type) + or is_int64_rprimitive(arg_type) + ): + val = builder.accept(arg) + return builder.add(Truncate(val, uint8_rprimitive, line=expr.line)) + elif is_int_rprimitive(arg_type) or is_bool_rprimitive(arg_type): + val = builder.accept(arg) + val = truncate_literal(val, uint8_rprimitive) + return builder.coerce(val, uint8_rprimitive, expr.line) + return None + + +def truncate_literal(value: Value, rtype: RPrimitive) -> Value: + """If value is an integer literal value, truncate it to given native int rtype. + + For example, truncate 256 into 0 if rtype is u8. + """ + if not isinstance(value, Integer): + return value # Not a literal, nothing to do + x = value.numeric_value() + max_unsigned = (1 << (rtype.size * 8)) - 1 + x = x & max_unsigned + if rtype.is_signed and x >= (max_unsigned + 1) // 2: + # Adjust to make it a negative value + x -= max_unsigned + 1 + return Integer(x, rtype) + + @specialize_function("builtins.int") def translate_int(builder: IRBuilder, expr: CallExpr, callee: RefExpr) -> Value | None: if len(expr.args) != 1 or expr.arg_kinds[0] != ARG_POS: @@ -732,7 +811,7 @@ def translate_bool(builder: IRBuilder, expr: CallExpr, callee: RefExpr) -> Value @specialize_function("builtins.float") -def translate_float(builder: IRBuilder, expr: CallExpr, callee: RefExpr) -> Optional[Value]: +def translate_float(builder: IRBuilder, expr: CallExpr, callee: RefExpr) -> Value | None: if len(expr.args) != 1 or expr.arg_kinds[0] != ARG_POS: return None arg = expr.args[0] diff --git a/mypyc/lib-rt/CPy.h b/mypyc/lib-rt/CPy.h index 7a3e16fe9d65..64b716945b94 100644 --- a/mypyc/lib-rt/CPy.h +++ b/mypyc/lib-rt/CPy.h @@ -41,7 +41,6 @@ typedef struct tuple_T3OOO { PyObject *f1; PyObject *f2; } tuple_T3OOO; -static tuple_T3OOO tuple_undefined_T3OOO = { NULL, NULL, NULL }; #endif // Our return tuple wrapper for dictionary iteration helper. @@ -52,7 +51,6 @@ typedef struct tuple_T3CIO { CPyTagged f1; // Last dict offset PyObject *f2; // Next dictionary key or value } tuple_T3CIO; -static tuple_T3CIO tuple_undefined_T3CIO = { 2, CPY_INT_TAG, NULL }; #endif // Same as above but for both key and value. 
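As a quick illustration (not part of the diff): truncate_literal() in the specialize.py hunk above wraps an integer literal into the target width and re-biases the top half for signed targets, so u8(256) is compiled as 0 and i16(32768) as -32768. The same arithmetic as standalone Python (helper name ours; the size/is_signed pairs are the usual u8/i16 widths):

def truncate(x: int, size: int, is_signed: bool) -> int:
    # Wrap x into the representable range of a native int of `size` bytes.
    max_unsigned = (1 << (size * 8)) - 1
    x = x & max_unsigned
    if is_signed and x >= (max_unsigned + 1) // 2:
        x -= max_unsigned + 1   # fold the top half into negative values
    return x

assert truncate(256, size=1, is_signed=False) == 0        # u8(256) -> 0
assert truncate(255, size=1, is_signed=False) == 255
assert truncate(32768, size=2, is_signed=True) == -32768  # i16 wraps around
assert truncate(65535, size=2, is_signed=True) == -1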
@@ -64,7 +62,6 @@ typedef struct tuple_T4CIOO { PyObject *f2; // Next dictionary key PyObject *f3; // Next dictionary value } tuple_T4CIOO; -static tuple_T4CIOO tuple_undefined_T4CIOO = { 2, CPY_INT_TAG, NULL, NULL }; #endif @@ -158,6 +155,12 @@ int32_t CPyLong_AsInt32(PyObject *o); int32_t CPyInt32_Divide(int32_t x, int32_t y); int32_t CPyInt32_Remainder(int32_t x, int32_t y); void CPyInt32_Overflow(void); +int16_t CPyLong_AsInt16(PyObject *o); +int16_t CPyInt16_Divide(int16_t x, int16_t y); +int16_t CPyInt16_Remainder(int16_t x, int16_t y); +void CPyInt16_Overflow(void); +uint8_t CPyLong_AsUInt8(PyObject *o); +void CPyUInt8_Overflow(void); double CPyTagged_TrueDivide(CPyTagged x, CPyTagged y); static inline int CPyTagged_CheckLong(CPyTagged x) { diff --git a/mypyc/lib-rt/exc_ops.c b/mypyc/lib-rt/exc_ops.c index 219914bf3470..d8307ecf21f8 100644 --- a/mypyc/lib-rt/exc_ops.c +++ b/mypyc/lib-rt/exc_ops.c @@ -24,6 +24,12 @@ void CPy_Reraise(void) { } void CPyErr_SetObjectAndTraceback(PyObject *type, PyObject *value, PyObject *traceback) { + if (!PyType_Check(type) && value == Py_None) { + // The first argument must be an exception instance + value = type; + type = (PyObject *)Py_TYPE(value); + } + // Set the value and traceback of an error. Because calling // PyErr_Restore takes away a reference to each object passed in // as an argument, we manually increase the reference count of @@ -230,7 +236,11 @@ void CPy_AddTraceback(const char *filename, const char *funcname, int line, PyOb return; error: +#if CPY_3_12_FEATURES + _PyErr_ChainExceptions1(exc); +#else _PyErr_ChainExceptions(exc, val, tb); +#endif } CPy_NOINLINE diff --git a/mypyc/lib-rt/int_ops.c b/mypyc/lib-rt/int_ops.c index 843d9b0d2230..b57d88c6ac93 100644 --- a/mypyc/lib-rt/int_ops.c +++ b/mypyc/lib-rt/int_ops.c @@ -308,10 +308,10 @@ PyObject *CPyBool_Str(bool b) { } static void CPyLong_NormalizeUnsigned(PyLongObject *v) { - Py_ssize_t i = v->ob_base.ob_size; - while (i > 0 && v->ob_digit[i - 1] == 0) + Py_ssize_t i = CPY_LONG_SIZE_UNSIGNED(v); + while (i > 0 && CPY_LONG_DIGIT(v, i - 1) == 0) i--; - v->ob_base.ob_size = i; + CPyLong_SetUnsignedSize(v, i); } // Bitwise op '&', '|' or '^' using the generic (slow) API @@ -361,8 +361,8 @@ static digit *GetIntDigits(CPyTagged n, Py_ssize_t *size, digit *buf) { return buf; } else { PyLongObject *obj = (PyLongObject *)CPyTagged_LongAsObject(n); - *size = obj->ob_base.ob_size; - return obj->ob_digit; + *size = CPY_LONG_SIZE_SIGNED(obj); + return &CPY_LONG_DIGIT(obj, 0); } } @@ -399,20 +399,20 @@ static CPyTagged BitwiseLongOp(CPyTagged a, CPyTagged b, char op) { Py_ssize_t i; if (op == '&') { for (i = 0; i < asize; i++) { - r->ob_digit[i] = adigits[i] & bdigits[i]; + CPY_LONG_DIGIT(r, i) = adigits[i] & bdigits[i]; } } else { if (op == '|') { for (i = 0; i < asize; i++) { - r->ob_digit[i] = adigits[i] | bdigits[i]; + CPY_LONG_DIGIT(r, i) = adigits[i] | bdigits[i]; } } else { for (i = 0; i < asize; i++) { - r->ob_digit[i] = adigits[i] ^ bdigits[i]; + CPY_LONG_DIGIT(r, i) = adigits[i] ^ bdigits[i]; } } for (; i < bsize; i++) { - r->ob_digit[i] = bdigits[i]; + CPY_LONG_DIGIT(r, i) = bdigits[i]; } } CPyLong_NormalizeUnsigned(r); @@ -521,7 +521,7 @@ int64_t CPyLong_AsInt64(PyObject *o) { Py_ssize_t size = Py_SIZE(lobj); if (likely(size == 1)) { // Fast path - return lobj->ob_digit[0]; + return CPY_LONG_DIGIT(lobj, 0); } else if (likely(size == 0)) { return 0; } @@ -576,14 +576,25 @@ int64_t CPyInt64_Remainder(int64_t x, int64_t y) { int32_t CPyLong_AsInt32(PyObject *o) { if 
(likely(PyLong_Check(o))) { + #if CPY_3_12_FEATURES + PyLongObject *lobj = (PyLongObject *)o; + size_t tag = CPY_LONG_TAG(lobj); + if (likely(tag == (1 << CPY_NON_SIZE_BITS))) { + // Fast path + return CPY_LONG_DIGIT(lobj, 0); + } else if (likely(tag == CPY_SIGN_ZERO)) { + return 0; + } + #else PyLongObject *lobj = (PyLongObject *)o; Py_ssize_t size = lobj->ob_base.ob_size; if (likely(size == 1)) { // Fast path - return lobj->ob_digit[0]; + return CPY_LONG_DIGIT(lobj, 0); } else if (likely(size == 0)) { return 0; } + #endif } // Slow path int overflow; @@ -641,6 +652,137 @@ void CPyInt32_Overflow() { PyErr_SetString(PyExc_OverflowError, "int too large to convert to i32"); } +int16_t CPyLong_AsInt16(PyObject *o) { + if (likely(PyLong_Check(o))) { + #if CPY_3_12_FEATURES + PyLongObject *lobj = (PyLongObject *)o; + size_t tag = CPY_LONG_TAG(lobj); + if (likely(tag == (1 << CPY_NON_SIZE_BITS))) { + // Fast path + digit x = CPY_LONG_DIGIT(lobj, 0); + if (x < 0x8000) + return x; + } else if (likely(tag == CPY_SIGN_ZERO)) { + return 0; + } + #else + PyLongObject *lobj = (PyLongObject *)o; + Py_ssize_t size = lobj->ob_base.ob_size; + if (likely(size == 1)) { + // Fast path + digit x = lobj->ob_digit[0]; + if (x < 0x8000) + return x; + } else if (likely(size == 0)) { + return 0; + } + #endif + } + // Slow path + int overflow; + long result = PyLong_AsLongAndOverflow(o, &overflow); + if (result > 0x7fff || result < -0x8000) { + overflow = 1; + result = -1; + } + if (result == -1) { + if (PyErr_Occurred()) { + return CPY_LL_INT_ERROR; + } else if (overflow) { + PyErr_SetString(PyExc_OverflowError, "int too large to convert to i16"); + return CPY_LL_INT_ERROR; + } + } + return result; +} + +int16_t CPyInt16_Divide(int16_t x, int16_t y) { + if (y == 0) { + PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); + return CPY_LL_INT_ERROR; + } + if (y == -1 && x == INT16_MIN) { + PyErr_SetString(PyExc_OverflowError, "integer division overflow"); + return CPY_LL_INT_ERROR; + } + int16_t d = x / y; + // Adjust for Python semantics + if (((x < 0) != (y < 0)) && d * y != x) { + d--; + } + return d; +} + +int16_t CPyInt16_Remainder(int16_t x, int16_t y) { + if (y == 0) { + PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); + return CPY_LL_INT_ERROR; + } + // Edge case: avoid core dump + if (y == -1 && x == INT16_MIN) { + return 0; + } + int16_t d = x % y; + // Adjust for Python semantics + if (((x < 0) != (y < 0)) && d != 0) { + d += y; + } + return d; +} + +void CPyInt16_Overflow() { + PyErr_SetString(PyExc_OverflowError, "int too large to convert to i16"); +} + + +uint8_t CPyLong_AsUInt8(PyObject *o) { + if (likely(PyLong_Check(o))) { + #if CPY_3_12_FEATURES + PyLongObject *lobj = (PyLongObject *)o; + size_t tag = CPY_LONG_TAG(lobj); + if (likely(tag == (1 << CPY_NON_SIZE_BITS))) { + // Fast path + digit x = CPY_LONG_DIGIT(lobj, 0); + if (x < 256) + return x; + } else if (likely(tag == CPY_SIGN_ZERO)) { + return 0; + } + #else + PyLongObject *lobj = (PyLongObject *)o; + Py_ssize_t size = lobj->ob_base.ob_size; + if (likely(size == 1)) { + // Fast path + digit x = lobj->ob_digit[0]; + if (x < 256) + return x; + } else if (likely(size == 0)) { + return 0; + } + #endif + } + // Slow path + int overflow; + long result = PyLong_AsLongAndOverflow(o, &overflow); + if (result < 0 || result >= 256) { + overflow = 1; + result = -1; + } + if (result == -1) { + if (PyErr_Occurred()) { + return CPY_LL_UINT_ERROR; + } else if (overflow) { + 
PyErr_SetString(PyExc_OverflowError, "int too large or small to convert to u8"); + return CPY_LL_UINT_ERROR; + } + } + return result; +} + +void CPyUInt8_Overflow() { + PyErr_SetString(PyExc_OverflowError, "int too large or small to convert to u8"); +} + double CPyTagged_TrueDivide(CPyTagged x, CPyTagged y) { if (unlikely(y == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "division by zero"); diff --git a/mypyc/lib-rt/misc_ops.c b/mypyc/lib-rt/misc_ops.c index 88a76fb210d7..f28eeb57e646 100644 --- a/mypyc/lib-rt/misc_ops.c +++ b/mypyc/lib-rt/misc_ops.c @@ -177,42 +177,6 @@ PyObject *CPyType_FromTemplate(PyObject *template, if (!name) goto error; - // If there is a metaclass other than type, we would like to call - // its __new__ function. Unfortunately there doesn't seem to be a - // good way to mix a C extension class and creating it via a - // metaclass. We need to do it anyways, though, in order to - // support subclassing Generic[T] prior to Python 3.7. - // - // We solve this with a kind of atrocious hack: create a parallel - // class using the metaclass, determine the bases of the real - // class by pulling them out of the parallel class, creating the - // real class, and then merging its dict back into the original - // class. There are lots of cases where this won't really work, - // but for the case of GenericMeta setting a bunch of properties - // on the class we should be fine. - if (metaclass != &PyType_Type) { - assert(bases && "non-type metaclasses require non-NULL bases"); - - PyObject *ns = PyDict_New(); - if (!ns) - goto error; - - if (bases != orig_bases) { - if (PyDict_SetItemString(ns, "__orig_bases__", orig_bases) < 0) - goto error; - } - - dummy_class = (PyTypeObject *)PyObject_CallFunctionObjArgs( - (PyObject *)metaclass, name, bases, ns, NULL); - Py_DECREF(ns); - if (!dummy_class) - goto error; - - Py_DECREF(bases); - bases = dummy_class->tp_bases; - Py_INCREF(bases); - } - // Allocate the type and then copy the main stuff in. t = (PyHeapTypeObject*)PyType_GenericAlloc(&PyType_Type, 0); if (!t) diff --git a/mypyc/lib-rt/mypyc_util.h b/mypyc/lib-rt/mypyc_util.h index 13672087fbbc..3c888a581a33 100644 --- a/mypyc/lib-rt/mypyc_util.h +++ b/mypyc/lib-rt/mypyc_util.h @@ -53,9 +53,12 @@ typedef PyObject CPyModule; // Tag bit used for long integers #define CPY_INT_TAG 1 -// Error value for fixed-width (low-level) integers +// Error value for signed fixed-width (low-level) integers #define CPY_LL_INT_ERROR -113 +// Error value for unsigned fixed-width (low-level) integers +#define CPY_LL_UINT_ERROR 239 + // Error value for floats #define CPY_FLOAT_ERROR -113.0 @@ -69,4 +72,47 @@ static inline CPyTagged CPyTagged_ShortFromSsize_t(Py_ssize_t x) { return x << 1; } +// Are we targeting Python 3.12 or newer? +#define CPY_3_12_FEATURES (PY_VERSION_HEX >= 0x030c0000) + +#if CPY_3_12_FEATURES + +// Same as macros in CPython internal/pycore_long.h, but with a CPY_ prefix +#define CPY_NON_SIZE_BITS 3 +#define CPY_SIGN_ZERO 1 +#define CPY_SIGN_NEGATIVE 2 +#define CPY_SIGN_MASK 3 + +#define CPY_LONG_DIGIT(o, n) ((o)->long_value.ob_digit[n]) + +// Only available on Python 3.12 and later +#define CPY_LONG_TAG(o) ((o)->long_value.lv_tag) +#define CPY_LONG_IS_NEGATIVE(o) (((o)->long_value.lv_tag & CPY_SIGN_MASK) == CPY_SIGN_NEGATIVE) +// Only available on Python 3.12 and later +#define CPY_LONG_SIZE(o) ((o)->long_value.lv_tag >> CPY_NON_SIZE_BITS) +// Number of digits; negative for negative ints +#define CPY_LONG_SIZE_SIGNED(o) (CPY_LONG_IS_NEGATIVE(o) ? 
-CPY_LONG_SIZE(o) : CPY_LONG_SIZE(o)) +// Number of digits, assuming int is non-negative +#define CPY_LONG_SIZE_UNSIGNED(o) CPY_LONG_SIZE(o) + +static inline void CPyLong_SetUnsignedSize(PyLongObject *o, Py_ssize_t n) { + if (n == 0) + o->long_value.lv_tag = CPY_SIGN_ZERO; + else + o->long_value.lv_tag = n << CPY_NON_SIZE_BITS; +} + +#else + +#define CPY_LONG_DIGIT(o, n) ((o)->ob_digit[n]) +#define CPY_LONG_IS_NEGATIVE(o) (((o)->ob_base.ob_size < 0) +#define CPY_LONG_SIZE_SIGNED(o) ((o)->ob_base.ob_size) +#define CPY_LONG_SIZE_UNSIGNED(o) ((o)->ob_base.ob_size) + +static inline void CPyLong_SetUnsignedSize(PyLongObject *o, Py_ssize_t n) { + o->ob_base.ob_size = n; +} + +#endif + #endif diff --git a/mypyc/lib-rt/pythonsupport.h b/mypyc/lib-rt/pythonsupport.h index 8a1159a98853..1d493b45b89d 100644 --- a/mypyc/lib-rt/pythonsupport.h +++ b/mypyc/lib-rt/pythonsupport.h @@ -13,6 +13,10 @@ #include #include "mypyc_util.h" +#if CPY_3_12_FEATURES +#include "internal/pycore_frame.h" +#endif + #ifdef __cplusplus extern "C" { #endif @@ -125,6 +129,65 @@ init_subclass(PyTypeObject *type, PyObject *kwds) return 0; } +#if CPY_3_12_FEATURES + +static inline Py_ssize_t +CPyLong_AsSsize_tAndOverflow(PyObject *vv, int *overflow) +{ + /* This version by Tim Peters */ + PyLongObject *v = (PyLongObject *)vv; + size_t x, prev; + Py_ssize_t res; + Py_ssize_t i; + int sign; + + *overflow = 0; + + res = -1; + i = CPY_LONG_TAG(v); + + // TODO: Combine zero and non-zero cases helow? + if (likely(i == (1 << CPY_NON_SIZE_BITS))) { + res = CPY_LONG_DIGIT(v, 0); + } else if (likely(i == CPY_SIGN_ZERO)) { + res = 0; + } else if (i == ((1 << CPY_NON_SIZE_BITS) | CPY_SIGN_NEGATIVE)) { + res = -(sdigit)CPY_LONG_DIGIT(v, 0); + } else { + sign = 1; + x = 0; + if (i & CPY_SIGN_NEGATIVE) { + sign = -1; + } + i >>= CPY_NON_SIZE_BITS; + while (--i >= 0) { + prev = x; + x = (x << PyLong_SHIFT) + CPY_LONG_DIGIT(v, i); + if ((x >> PyLong_SHIFT) != prev) { + *overflow = sign; + goto exit; + } + } + /* Haven't lost any bits, but casting to long requires extra + * care (see comment above). 
+ */ + if (x <= (size_t)CPY_TAGGED_MAX) { + res = (Py_ssize_t)x * sign; + } + else if (sign < 0 && x == CPY_TAGGED_ABS_MIN) { + res = CPY_TAGGED_MIN; + } + else { + *overflow = sign; + /* res is already set to -1 */ + } + } + exit: + return res; +} + +#else + // Adapted from longobject.c in Python 3.7.0 /* This function adapted from PyLong_AsLongLongAndOverflow, but with @@ -152,11 +215,11 @@ CPyLong_AsSsize_tAndOverflow(PyObject *vv, int *overflow) i = Py_SIZE(v); if (likely(i == 1)) { - res = v->ob_digit[0]; + res = CPY_LONG_DIGIT(v, 0); } else if (likely(i == 0)) { res = 0; } else if (i == -1) { - res = -(sdigit)v->ob_digit[0]; + res = -(sdigit)CPY_LONG_DIGIT(v, 0); } else { sign = 1; x = 0; @@ -166,7 +229,7 @@ CPyLong_AsSsize_tAndOverflow(PyObject *vv, int *overflow) } while (--i >= 0) { prev = x; - x = (x << PyLong_SHIFT) + v->ob_digit[i]; + x = (x << PyLong_SHIFT) + CPY_LONG_DIGIT(v, i); if ((x >> PyLong_SHIFT) != prev) { *overflow = sign; goto exit; @@ -190,6 +253,8 @@ CPyLong_AsSsize_tAndOverflow(PyObject *vv, int *overflow) return res; } +#endif + // Adapted from listobject.c in Python 3.7.0 static int list_resize(PyListObject *self, Py_ssize_t newsize) @@ -389,6 +454,31 @@ _CPyObject_HasAttrId(PyObject *v, _Py_Identifier *name) { _PyObject_CallMethodIdObjArgs((self), (name), (arg), NULL) #endif +#if CPY_3_12_FEATURES + +// These are copied from genobject.c in Python 3.12 + +/* Returns a borrowed reference */ +static inline PyCodeObject * +_PyGen_GetCode(PyGenObject *gen) { + _PyInterpreterFrame *frame = (_PyInterpreterFrame *)(gen->gi_iframe); + return frame->f_code; +} + +static int +gen_is_coroutine(PyObject *o) +{ + if (PyGen_CheckExact(o)) { + PyCodeObject *code = _PyGen_GetCode((PyGenObject*)o); + if (code->co_flags & CO_ITERABLE_COROUTINE) { + return 1; + } + } + return 0; +} + +#else + // Copied from genobject.c in Python 3.10 static int gen_is_coroutine(PyObject *o) @@ -402,6 +492,8 @@ gen_is_coroutine(PyObject *o) return 0; } +#endif + /* * This helper function returns an awaitable for `o`: * - `o` if `o` is a coroutine-object; diff --git a/mypyc/primitives/int_ops.py b/mypyc/primitives/int_ops.py index eff4b4ffd8ab..95f9cc5ff43f 100644 --- a/mypyc/primitives/int_ops.py +++ b/mypyc/primitives/int_ops.py @@ -19,6 +19,7 @@ bool_rprimitive, c_pyssize_t_rprimitive, float_rprimitive, + int16_rprimitive, int32_rprimitive, int64_rprimitive, int_rprimitive, @@ -37,7 +38,13 @@ # Constructors for builtins.int and native int types have the same behavior. In # interpreted mode, native int types are just aliases to 'int'. -for int_name in ("builtins.int", "mypy_extensions.i64", "mypy_extensions.i32"): +for int_name in ( + "builtins.int", + "mypy_extensions.i64", + "mypy_extensions.i32", + "mypy_extensions.i16", + "mypy_extensions.u8", +): # These int constructors produce object_rprimitives that then need to be unboxed # I guess unboxing ourselves would save a check and branch though? 
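As a quick illustration (not part of the diff): the CPY_* macros added to mypyc_util.h above describe how Python 3.12 packs a PyLong's sign and digit count into a single lv_tag word — the low CPY_NON_SIZE_BITS bits carry the sign (1 = zero, 2 = negative) and the remaining bits carry the digit count, which is why the one-digit non-negative fast path in CPyLong_AsInt32() checks tag == (1 << CPY_NON_SIZE_BITS). The packing restated in Python, reusing the constants from the patch (function names ours):

from __future__ import annotations

NON_SIZE_BITS = 3   # CPY_NON_SIZE_BITS
SIGN_ZERO = 1       # CPY_SIGN_ZERO
SIGN_NEGATIVE = 2   # CPY_SIGN_NEGATIVE
SIGN_MASK = 3       # CPY_SIGN_MASK

def pack_tag(ndigits: int, negative: bool) -> int:
    # Mirrors CPyLong_SetUnsignedSize(), plus the sign bits for negative values.
    if ndigits == 0:
        return SIGN_ZERO
    return (ndigits << NON_SIZE_BITS) | (SIGN_NEGATIVE if negative else 0)

def unpack_tag(tag: int) -> tuple[int, bool]:
    return tag >> NON_SIZE_BITS, (tag & SIGN_MASK) == SIGN_NEGATIVE

assert pack_tag(1, negative=False) == 1 << NON_SIZE_BITS   # the fast-path tag
assert unpack_tag(pack_tag(5, negative=True)) == (5, True)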
@@ -231,6 +238,20 @@ class IntComparisonOpDescription(NamedTuple): error_kind=ERR_MAGIC_OVERLAPPING, ) +int16_divide_op = custom_op( + arg_types=[int16_rprimitive, int16_rprimitive], + return_type=int16_rprimitive, + c_function_name="CPyInt16_Divide", + error_kind=ERR_MAGIC_OVERLAPPING, +) + +int16_mod_op = custom_op( + arg_types=[int16_rprimitive, int16_rprimitive], + return_type=int16_rprimitive, + c_function_name="CPyInt16_Remainder", + error_kind=ERR_MAGIC_OVERLAPPING, +) + # Convert tagged int (as PyObject *) to i64 int_to_int64_op = custom_op( arg_types=[object_rprimitive], @@ -267,3 +288,17 @@ class IntComparisonOpDescription(NamedTuple): c_function_name="CPyInt32_Overflow", error_kind=ERR_ALWAYS, ) + +int16_overflow = custom_op( + arg_types=[], + return_type=void_rtype, + c_function_name="CPyInt16_Overflow", + error_kind=ERR_ALWAYS, +) + +uint8_overflow = custom_op( + arg_types=[], + return_type=void_rtype, + c_function_name="CPyUInt8_Overflow", + error_kind=ERR_ALWAYS, +) diff --git a/mypyc/primitives/registry.py b/mypyc/primitives/registry.py index 1e2cf2695ee7..aa96b35aec56 100644 --- a/mypyc/primitives/registry.py +++ b/mypyc/primitives/registry.py @@ -37,8 +37,7 @@ from __future__ import annotations -from typing import List, NamedTuple, Optional, Tuple -from typing_extensions import Final +from typing import Final, NamedTuple from mypyc.ir.ops import StealsDescription from mypyc.ir.rtypes import RType @@ -50,16 +49,16 @@ class CFunctionDescription(NamedTuple): name: str - arg_types: List[RType] + arg_types: list[RType] return_type: RType - var_arg_type: Optional[RType] - truncated_type: Optional[RType] + var_arg_type: RType | None + truncated_type: RType | None c_function_name: str error_kind: int steals: StealsDescription is_borrowed: bool - ordering: Optional[List[int]] - extra_int_constants: List[Tuple[int, RType]] + ordering: list[int] | None + extra_int_constants: list[tuple[int, RType]] priority: int diff --git a/mypyc/test-data/driver/driver.py b/mypyc/test-data/driver/driver.py index 6717f402f72d..c9d179224a30 100644 --- a/mypyc/test-data/driver/driver.py +++ b/mypyc/test-data/driver/driver.py @@ -18,7 +18,7 @@ try: test_func() except Exception as e: - failures.append(sys.exc_info()) + failures.append((name, sys.exc_info())) if failures: from traceback import print_exception, format_tb @@ -32,12 +32,17 @@ def extract_line(tb): return m.group(1) # Sort failures by line number of test function. - failures = sorted(failures, key=lambda e: extract_line(e[2])) + failures = sorted(failures, key=lambda e: extract_line(e[1][2])) # If there are multiple failures, print stack traces of all but the final failure. - for e in failures[:-1]: + for name, e in failures[:-1]: + print(f'<< {name} >>') + sys.stdout.flush() print_exception(*e) print() + sys.stdout.flush() # Raise exception for the last failure. Test runner will show the traceback. 
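As a quick illustration (not part of the diff): the driver.py change above pairs each failing test's name with its sys.exc_info() so a "<< name >>" header can be printed before every traceback, with the final failure re-raised for the outer test runner. The pattern reduced to a minimal standalone form (test contents hypothetical; the real driver also sorts failures by line number):

import sys
from traceback import print_exception

def run_tests(tests: dict) -> None:
    failures = []
    for name, test_func in tests.items():
        try:
            test_func()
        except Exception:
            failures.append((name, sys.exc_info()))
    # Print all but the last failure under a "<< name >>" header ...
    for name, exc in failures[:-1]:
        print(f"<< {name} >>")
        print_exception(*exc)
    # ... and re-raise the last one so the caller sees its traceback.
    if failures:
        print(f"<< {failures[-1][0]} >>")
        raise failures[-1][1][1]

run_tests({"test_passes": lambda: None})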
- raise failures[-1][1] + print(f'<< {failures[-1][0]} >>') + sys.stdout.flush() + raise failures[-1][1][1] diff --git a/mypyc/test-data/exceptions.test b/mypyc/test-data/exceptions.test index 16bf8ba1eb89..ed43b86ebdb4 100644 --- a/mypyc/test-data/exceptions.test +++ b/mypyc/test-data/exceptions.test @@ -34,7 +34,7 @@ def f(x, y, z): x :: list y, z :: int r0 :: object - r1 :: int32 + r1 :: i32 r2 :: bit r3 :: object r4 :: bit @@ -528,10 +528,10 @@ def f(): L0: return 0 def g(): - r0 :: int64 + r0 :: i64 r1 :: bit r2 :: object - r3 :: int64 + r3 :: i64 L0: r0 = f() r1 = r0 == -113 @@ -542,7 +542,7 @@ L2: r2 = PyErr_Occurred() if not is_error(r2) goto L3 (error at g:7) else goto L1 L3: - r3 = :: int64 + r3 = :: i64 return r3 [case testExceptionWithNativeAttributeGetAndSet] @@ -612,16 +612,16 @@ def f(c: C) -> None: [out] def C.__init__(self, x, y): self :: __main__.C - x :: int32 - y :: int64 + x :: i32 + y :: i64 L0: self.x = x self.y = y return 1 def f(c): c :: __main__.C - r0 :: int32 - r1 :: int64 + r0 :: i32 + r1 :: i64 L0: r0 = c.x r1 = c.y @@ -636,15 +636,15 @@ def f(x: i64) -> i64: return y [out] def f(x): - x, r0, y :: int64 - __locals_bitmap0 :: uint32 + x, r0, y :: i64 + __locals_bitmap0 :: u32 r1 :: bit - r2, r3 :: uint32 + r2, r3 :: u32 r4 :: bit r5 :: bool - r6 :: int64 + r6 :: i64 L0: - r0 = :: int64 + r0 = :: i64 y = r0 __locals_bitmap0 = 0 r1 = x != 0 @@ -665,7 +665,7 @@ L4: L5: return y L6: - r6 = :: int64 + r6 = :: i64 return r6 [case testExceptionWithFloatAttribute] diff --git a/mypyc/test-data/fixtures/ir.py b/mypyc/test-data/fixtures/ir.py index 0b081b079bda..bf06613ad2a8 100644 --- a/mypyc/test-data/fixtures/ir.py +++ b/mypyc/test-data/fixtures/ir.py @@ -87,6 +87,8 @@ def __init__(self) -> None: pass @overload def __init__(self, x: object) -> None: pass def __add__(self, x: str) -> str: pass + def __mul__(self, x: int) -> str: pass + def __rmul__(self, x: int) -> str: pass def __eq__(self, x: object) -> bool: pass def __ne__(self, x: object) -> bool: pass def __lt__(self, x: str) -> bool: ... @@ -134,7 +136,9 @@ def __ge__(self, x: float) -> bool: ... class complex: def __init__(self, x: object, y: object = None) -> None: pass def __add__(self, n: complex) -> complex: pass + def __radd__(self, n: float) -> complex: pass def __sub__(self, n: complex) -> complex: pass + def __rsub__(self, n: float) -> complex: pass def __mul__(self, n: complex) -> complex: pass def __truediv__(self, n: complex) -> complex: pass def __neg__(self) -> complex: pass @@ -145,6 +149,8 @@ def __init__(self) -> None: ... @overload def __init__(self, x: object) -> None: ... def __add__(self, x: bytes) -> bytes: ... + def __mul__(self, x: int) -> bytes: ... + def __rmul__(self, x: int) -> bytes: ... def __eq__(self, x: object) -> bool: ... def __ne__(self, x: object) -> bool: ... 
@overload diff --git a/mypyc/test-data/fixtures/testutil.py b/mypyc/test-data/fixtures/testutil.py index 5a4b1d0f549e..7f00ee5aea00 100644 --- a/mypyc/test-data/fixtures/testutil.py +++ b/mypyc/test-data/fixtures/testutil.py @@ -7,7 +7,7 @@ Any, Iterator, TypeVar, Generator, Optional, List, Tuple, Sequence, Union, Callable, Awaitable, ) -from typing_extensions import Final +from typing import Final FLOAT_MAGIC: Final = -113.0 diff --git a/mypyc/test-data/irbuild-any.test b/mypyc/test-data/irbuild-any.test index 8274e3d5c619..98f3dae9ee88 100644 --- a/mypyc/test-data/irbuild-any.test +++ b/mypyc/test-data/irbuild-any.test @@ -51,7 +51,7 @@ def f(a, n, c): r5 :: int r6 :: str r7 :: object - r8 :: int32 + r8 :: i32 r9 :: bit L0: r0 = box(int, n) @@ -99,10 +99,10 @@ def f2(a, n, l): n :: int l :: list r0, r1, r2, r3, r4 :: object - r5 :: int32 + r5 :: i32 r6 :: bit r7 :: object - r8 :: int32 + r8 :: i32 r9, r10 :: bit r11 :: list r12 :: object diff --git a/mypyc/test-data/irbuild-basic.test b/mypyc/test-data/irbuild-basic.test index 496eca77e090..33fc8cfaa83b 100644 --- a/mypyc/test-data/irbuild-basic.test +++ b/mypyc/test-data/irbuild-basic.test @@ -713,7 +713,7 @@ def __top_level__(): r18 :: str r19 :: dict r20 :: str - r21 :: int32 + r21 :: i32 r22 :: bit r23 :: object_ptr r24 :: object_ptr[1] @@ -1146,7 +1146,7 @@ def f(x: Any, y: Any, z: Any) -> None: [out] def f(x, y, z): x, y, z :: object - r0 :: int32 + r0 :: i32 r1 :: bit L0: r0 = PyObject_SetItem(x, y, z) @@ -1172,13 +1172,16 @@ L0: [case testLoadComplex] def load() -> complex: - return 5j+1.0 + real = 1 + return 5j+real [out] def load(): + real :: int r0, r1, r2 :: object L0: + real = 2 r0 = 5j - r1 = box(float, 1.0) + r1 = box(int, real) r2 = PyNumber_Add(r0, r1) return r2 @@ -1422,13 +1425,13 @@ def lst(x: List[int]) -> int: [out] def obj(x): x :: object - r0 :: int32 + r0 :: i32 r1 :: bit r2 :: bool L0: r0 = PyObject_IsTrue(x) r1 = r0 >= 0 :: signed - r2 = truncate r0: int32 to builtins.bool + r2 = truncate r0: i32 to builtins.bool if r2 goto L1 else goto L2 :: bool L1: return 2 @@ -1530,7 +1533,7 @@ def opt_o(x): r0 :: object r1 :: bit r2 :: object - r3 :: int32 + r3 :: i32 r4 :: bit r5 :: bool L0: @@ -1541,7 +1544,7 @@ L1: r2 = cast(object, x) r3 = PyObject_IsTrue(r2) r4 = r3 >= 0 :: signed - r5 = truncate r3: int32 to builtins.bool + r5 = truncate r3: i32 to builtins.bool if r5 goto L2 else goto L3 :: bool L2: return 2 @@ -1613,7 +1616,7 @@ def __top_level__(): r5 :: dict r6 :: str r7 :: object - r8 :: int32 + r8 :: i32 r9 :: bit r10 :: dict r11 :: str @@ -1728,7 +1731,7 @@ def main() -> None: def foo(x): x :: union[int, str] r0 :: object - r1 :: int32 + r1 :: i32 r2 :: bit r3 :: bool r4 :: __main__.B @@ -1737,7 +1740,7 @@ L0: r0 = load_address PyLong_Type r1 = PyObject_IsInstance(x, r0) r2 = r1 >= 0 :: signed - r3 = truncate r1: int32 to builtins.bool + r3 = truncate r1: i32 to builtins.bool if r3 goto L1 else goto L2 :: bool L1: r4 = B() @@ -1900,7 +1903,7 @@ def g(): r8 :: str r9 :: object r10 :: dict - r11 :: int32 + r11 :: i32 r12 :: bit r13 :: tuple r14 :: object @@ -1930,7 +1933,7 @@ def h(): r6 :: str r7 :: object r8 :: dict - r9 :: int32 + r9 :: i32 r10 :: bit r11 :: object r12 :: tuple @@ -2059,7 +2062,7 @@ def f(): r26, r27 :: bit r28 :: int r29 :: object - r30 :: int32 + r30 :: i32 r31 :: bit r32 :: short_int L0: @@ -2158,7 +2161,7 @@ def f(): r26, r27 :: bit r28 :: int r29, r30 :: object - r31 :: int32 + r31 :: i32 r32 :: bit r33 :: short_int L0: @@ -2427,7 +2430,7 @@ def __top_level__(): r22, r23 :: object r24 :: 
dict r25 :: str - r26 :: int32 + r26 :: i32 r27 :: bit r28 :: str r29 :: dict @@ -2436,14 +2439,14 @@ def __top_level__(): r34 :: tuple r35 :: dict r36 :: str - r37 :: int32 + r37 :: i32 r38 :: bit r39 :: dict r40 :: str r41, r42, r43 :: object r44 :: dict r45 :: str - r46 :: int32 + r46 :: i32 r47 :: bit r48 :: str r49 :: dict @@ -2454,14 +2457,14 @@ def __top_level__(): r54, r55 :: object r56 :: dict r57 :: str - r58 :: int32 + r58 :: i32 r59 :: bit r60 :: list r61, r62, r63 :: object r64, r65, r66, r67 :: ptr r68 :: dict r69 :: str - r70 :: int32 + r70 :: i32 r71 :: bit L0: r0 = builtins :: module @@ -2629,7 +2632,7 @@ def A.__ne__(__mypyc_self__, rhs): __mypyc_self__ :: __main__.A rhs, r0, r1 :: object r2 :: bit - r3 :: int32 + r3 :: i32 r4 :: bit r5 :: bool r6 :: object @@ -2641,7 +2644,7 @@ L0: L1: r3 = PyObject_Not(r0) r4 = r3 >= 0 :: signed - r5 = truncate r3: int32 to builtins.bool + r5 = truncate r3: i32 to builtins.bool r6 = box(bool, r5) return r6 L2: @@ -2833,7 +2836,7 @@ def c(): r11 :: bool r12 :: dict r13 :: str - r14 :: int32 + r14 :: i32 r15 :: bit r16 :: str r17 :: object @@ -2883,7 +2886,7 @@ def __top_level__(): r18, r19 :: object r20 :: dict r21 :: str - r22 :: int32 + r22 :: i32 r23 :: bit L0: r0 = builtins :: module @@ -3154,7 +3157,7 @@ def lol(x: Any): def lol(x): x :: object r0, r1 :: str - r2 :: int32 + r2 :: i32 r3 :: bit r4 :: object L0: @@ -3459,13 +3462,13 @@ def f(x: object) -> bool: [out] def f(x): x :: object - r0 :: int32 + r0 :: i32 r1 :: bit r2 :: bool L0: r0 = PyObject_IsTrue(x) r1 = r0 >= 0 :: signed - r2 = truncate r0: int32 to builtins.bool + r2 = truncate r0: i32 to builtins.bool return r2 [case testLocalImports] @@ -3490,7 +3493,7 @@ def root(): r7 :: dict r8 :: str r9 :: object - r10 :: int32 + r10 :: i32 r11 :: bit r12 :: dict r13, r14 :: object @@ -3501,7 +3504,7 @@ def root(): r19 :: dict r20 :: str r21 :: object - r22 :: int32 + r22 :: i32 r23 :: bit L0: r0 = __main__.globals :: static @@ -3547,7 +3550,7 @@ def submodule(): r7 :: dict r8 :: str r9 :: object - r10 :: int32 + r10 :: i32 r11 :: bit r12 :: dict r13 :: str @@ -3586,14 +3589,14 @@ def f(x: object) -> bool: [out] def f(x): x, r0 :: object - r1 :: int32 + r1 :: i32 r2 :: bit r3 :: bool L0: r0 = load_address PyBool_Type r1 = PyObject_IsInstance(x, r0) r2 = r1 >= 0 :: signed - r3 = truncate r1: int32 to builtins.bool + r3 = truncate r1: i32 to builtins.bool return r3 [case testRangeObject] diff --git a/mypyc/test-data/irbuild-bool.test b/mypyc/test-data/irbuild-bool.test index 9257d8d63f7e..731d393d69ab 100644 --- a/mypyc/test-data/irbuild-bool.test +++ b/mypyc/test-data/irbuild-bool.test @@ -29,18 +29,18 @@ L0: return r0 def bool_to_i64(b): b :: bool - r0 :: int64 + r0 :: i64 L0: - r0 = extend b: builtins.bool to int64 + r0 = extend b: builtins.bool to i64 return r0 def i64_to_bool(n): - n :: int64 + n :: i64 r0 :: bit L0: r0 = n != 0 return r0 def bit_to_int(n1, n2): - n1, n2 :: int64 + n1, n2 :: i64 r0 :: bit r1 :: bool r2 :: int @@ -50,12 +50,12 @@ L0: r2 = extend r1: builtins.bool to builtins.int return r2 def bit_to_i64(n1, n2): - n1, n2 :: int64 + n1, n2 :: i64 r0 :: bit - r1 :: int64 + r1 :: i64 L0: r0 = n1 == n2 - r1 = extend r0: bit to int64 + r1 = extend r0: bit to i64 return r1 [case testConversionToBool] @@ -100,13 +100,13 @@ L0: return r3 def always_truthy_instance_to_bool(o): o :: __main__.C - r0 :: int32 + r0 :: i32 r1 :: bit r2 :: bool L0: r0 = PyObject_IsTrue(o) r1 = r0 >= 0 :: signed - r2 = truncate r0: int32 to builtins.bool + r2 = truncate r0: i32 to builtins.bool 
return r2 def instance_to_bool(o): o :: __main__.D @@ -236,20 +236,20 @@ L0: r2 = r1 == y return r2 def neq1(x, y): - x :: int64 + x :: i64 y :: bool - r0 :: int64 + r0 :: i64 r1 :: bit L0: - r0 = extend y: builtins.bool to int64 + r0 = extend y: builtins.bool to i64 r1 = x != r0 return r1 def neq2(x, y): x :: bool - y, r0 :: int64 + y, r0 :: i64 r1 :: bit L0: - r0 = extend x: builtins.bool to int64 + r0 = extend x: builtins.bool to i64 r1 = r0 != y return r1 @@ -327,19 +327,19 @@ L3: return r8 def gt1(x, y): x :: bool - y, r0 :: int64 + y, r0 :: i64 r1 :: bit L0: - r0 = extend x: builtins.bool to int64 + r0 = extend x: builtins.bool to i64 r1 = r0 < y :: signed return r1 def gt2(x, y): - x :: int64 + x :: i64 y :: bool - r0 :: int64 + r0 :: i64 r1 :: bit L0: - r0 = extend y: builtins.bool to int64 + r0 = extend y: builtins.bool to i64 r1 = x < r0 :: signed return r1 @@ -386,11 +386,11 @@ L0: r2 = CPyTagged_Invert(r1) return r2 def mixed_bitand(x, y): - x :: int64 + x :: i64 y :: bool - r0, r1 :: int64 + r0, r1 :: i64 L0: - r0 = extend y: builtins.bool to int64 + r0 = extend y: builtins.bool to i64 r1 = x & r0 return r1 diff --git a/mypyc/test-data/irbuild-bytes.test b/mypyc/test-data/irbuild-bytes.test index f13a1a956580..8e97a7f4a569 100644 --- a/mypyc/test-data/irbuild-bytes.test +++ b/mypyc/test-data/irbuild-bytes.test @@ -71,7 +71,7 @@ def neq(x: bytes, y: bytes) -> bool: [out] def eq(x, y): x, y :: bytes - r0 :: int32 + r0 :: i32 r1, r2 :: bit L0: r0 = CPyBytes_Compare(x, y) @@ -80,7 +80,7 @@ L0: return r2 def neq(x, y): x, y :: bytes - r0 :: int32 + r0 :: i32 r1, r2 :: bit L0: r0 = CPyBytes_Compare(x, y) diff --git a/mypyc/test-data/irbuild-classes.test b/mypyc/test-data/irbuild-classes.test index 0a7076e5f0ad..55e55dbf3286 100644 --- a/mypyc/test-data/irbuild-classes.test +++ b/mypyc/test-data/irbuild-classes.test @@ -213,7 +213,7 @@ def __top_level__(): r16, r17 :: object r18 :: dict r19 :: str - r20 :: int32 + r20 :: i32 r21 :: bit r22 :: object r23 :: str @@ -221,22 +221,22 @@ def __top_level__(): r26 :: bool r27 :: str r28 :: tuple - r29 :: int32 + r29 :: i32 r30 :: bit r31 :: dict r32 :: str - r33 :: int32 + r33 :: i32 r34 :: bit r35 :: object r36 :: str r37, r38 :: object r39 :: str r40 :: tuple - r41 :: int32 + r41 :: i32 r42 :: bit r43 :: dict r44 :: str - r45 :: int32 + r45 :: i32 r46 :: bit r47, r48 :: object r49 :: dict @@ -251,11 +251,11 @@ def __top_level__(): r60 :: bool r61, r62 :: str r63 :: tuple - r64 :: int32 + r64 :: i32 r65 :: bit r66 :: dict r67 :: str - r68 :: int32 + r68 :: i32 r69 :: bit L0: r0 = builtins :: module @@ -837,7 +837,7 @@ def Base.__ne__(__mypyc_self__, rhs): __mypyc_self__ :: __main__.Base rhs, r0, r1 :: object r2 :: bit - r3 :: int32 + r3 :: i32 r4 :: bit r5 :: bool r6 :: object @@ -849,7 +849,7 @@ L0: L1: r3 = PyObject_Not(r0) r4 = r3 >= 0 :: signed - r5 = truncate r3: int32 to builtins.bool + r5 = truncate r3: i32 to builtins.bool r6 = box(bool, r5) return r6 L2: @@ -957,7 +957,7 @@ def Derived.__ne__(__mypyc_self__, rhs): __mypyc_self__ :: __main__.Derived rhs, r0, r1 :: object r2 :: bit - r3 :: int32 + r3 :: i32 r4 :: bit r5 :: bool r6 :: object @@ -969,7 +969,7 @@ L0: L1: r3 = PyObject_Not(r0) r4 = r3 >= 0 :: signed - r5 = truncate r3: int32 to builtins.bool + r5 = truncate r3: i32 to builtins.bool r6 = box(bool, r5) return r6 L2: @@ -1029,7 +1029,7 @@ def foo(x: WelpDict) -> None: [out] def foo(x): x :: dict - r0 :: int32 + r0 :: i32 r1 :: bit L0: r0 = CPyDict_Update(x, x) diff --git a/mypyc/test-data/irbuild-constant-fold.test 
b/mypyc/test-data/irbuild-constant-fold.test index 866953f0c09a..97b13ab337c7 100644 --- a/mypyc/test-data/irbuild-constant-fold.test +++ b/mypyc/test-data/irbuild-constant-fold.test @@ -3,6 +3,7 @@ def bin_ops() -> None: add = 15 + 47 add_mul = (2 + 3) * 5 sub = 7 - 11 + div = 3 / 2 bit_and = 6 & 10 bit_or = 6 | 10 bit_xor = 6 ^ 10 @@ -25,11 +26,14 @@ def pow() -> None: p3 = 0**0 [out] def bin_ops(): - add, add_mul, sub, bit_and, bit_or, bit_xor, lshift, rshift, lshift0, rshift0 :: int + add, add_mul, sub :: int + div :: float + bit_and, bit_or, bit_xor, lshift, rshift, lshift0, rshift0 :: int L0: add = 124 add_mul = 50 sub = -8 + div = 1.5 bit_and = 4 bit_or = 28 bit_xor = 24 @@ -117,35 +121,28 @@ L0: [case testIntConstantFoldingUnsupportedCases] def error_cases() -> None: - div_by_zero = 5 // 0 + div_by_zero = 5 / 0 + floor_div_by_zero = 5 // 0 mod_by_zero = 5 % 0 lshift_neg = 6 << -1 rshift_neg = 7 >> -1 -def unsupported_div() -> None: - x = 4 / 6 - y = 10 / 5 def unsupported_pow() -> None: p = 3 ** (-1) [out] def error_cases(): - r0, div_by_zero, r1, mod_by_zero, r2, lshift_neg, r3, rshift_neg :: int + r0, div_by_zero :: float + r1, floor_div_by_zero, r2, mod_by_zero, r3, lshift_neg, r4, rshift_neg :: int L0: - r0 = CPyTagged_FloorDivide(10, 0) + r0 = CPyTagged_TrueDivide(10, 0) div_by_zero = r0 - r1 = CPyTagged_Remainder(10, 0) - mod_by_zero = r1 - r2 = CPyTagged_Lshift(12, -2) - lshift_neg = r2 - r3 = CPyTagged_Rshift(14, -2) - rshift_neg = r3 - return 1 -def unsupported_div(): - r0, x, r1, y :: float -L0: - r0 = CPyTagged_TrueDivide(8, 12) - x = r0 - r1 = CPyTagged_TrueDivide(20, 10) - y = r1 + r1 = CPyTagged_FloorDivide(10, 0) + floor_div_by_zero = r1 + r2 = CPyTagged_Remainder(10, 0) + mod_by_zero = r2 + r3 = CPyTagged_Lshift(12, -2) + lshift_neg = r3 + r4 = CPyTagged_Rshift(14, -2) + rshift_neg = r4 return 1 def unsupported_pow(): r0, r1, r2 :: object @@ -224,20 +221,260 @@ L0: a = 12 return 1 +[case testFloatConstantFolding] +from typing_extensions import Final + +N: Final = 1.5 +N2: Final = 1.5 * 2 + +def bin_ops() -> None: + add = 0.5 + 0.5 + add_mul = (1.5 + 3.5) * 5.0 + sub = 7.0 - 7.5 + div = 3.0 / 2.0 + floor_div = 3.0 // 2.0 +def bin_ops_neg() -> None: + add = 0.5 + -0.5 + add_mul = (-1.5 + 3.5) * -5.0 + add_mul2 = (1.5 + -3.5) * -5.0 + sub = 7.0 - -7.5 + div = 3.0 / -2.0 + floor_div = 3.0 // -2.0 +def unary_ops() -> None: + neg1 = -5.5 + neg2 = --1.5 + neg3 = -0.0 + pos = +5.5 +def pow() -> None: + p0 = 16.0**0 + p1 = 16.0**0.5 + p2 = (-5.0)**3 + p3 = 16.0**(-0) + p4 = 16.0**(-0.5) + p5 = (-2.0)**(-1) +def error_cases() -> None: + div = 2.0 / 0.0 + floor_div = 2.0 // 0.0 + power_imag = (-2.0)**0.5 + power_imag2 = (-2.0)**(-0.5) + power_overflow = 2.0**10000.0 +def final_floats() -> None: + add1 = N + 1.2 + add2 = N + N2 + add3 = -1.2 + N2 +[out] +def bin_ops(): + add, add_mul, sub, div, floor_div :: float +L0: + add = 1.0 + add_mul = 25.0 + sub = -0.5 + div = 1.5 + floor_div = 1.0 + return 1 +def bin_ops_neg(): + add, add_mul, add_mul2, sub, div, floor_div :: float +L0: + add = 0.0 + add_mul = -10.0 + add_mul2 = 10.0 + sub = 14.5 + div = -1.5 + floor_div = -2.0 + return 1 +def unary_ops(): + neg1, neg2, neg3, pos :: float +L0: + neg1 = -5.5 + neg2 = 1.5 + neg3 = -0.0 + pos = 5.5 + return 1 +def pow(): + p0, p1, p2, p3, p4, p5 :: float +L0: + p0 = 1.0 + p1 = 4.0 + p2 = -125.0 + p3 = 1.0 + p4 = 0.25 + p5 = -0.5 + return 1 +def error_cases(): + r0 :: bit + r1 :: bool + r2, div, r3, floor_div :: float + r4, r5, r6 :: object + r7, power_imag :: float + r8, r9, r10 :: object 
+ r11, power_imag2 :: float + r12, r13, r14 :: object + r15, power_overflow :: float +L0: + r0 = 0.0 == 0.0 + if r0 goto L1 else goto L2 :: bool +L1: + r1 = raise ZeroDivisionError('float division by zero') + unreachable +L2: + r2 = 2.0 / 0.0 + div = r2 + r3 = CPyFloat_FloorDivide(2.0, 0.0) + floor_div = r3 + r4 = box(float, -2.0) + r5 = box(float, 0.5) + r6 = CPyNumber_Power(r4, r5) + r7 = unbox(float, r6) + power_imag = r7 + r8 = box(float, -2.0) + r9 = box(float, -0.5) + r10 = CPyNumber_Power(r8, r9) + r11 = unbox(float, r10) + power_imag2 = r11 + r12 = box(float, 2.0) + r13 = box(float, 10000.0) + r14 = CPyNumber_Power(r12, r13) + r15 = unbox(float, r14) + power_overflow = r15 + return 1 +def final_floats(): + add1, add2, add3 :: float +L0: + add1 = 2.7 + add2 = 4.5 + add3 = 1.8 + return 1 + +[case testMixedFloatIntConstantFolding] +def bin_ops() -> None: + add = 1 + 0.5 + sub = 1 - 0.5 + mul = 0.5 * 5 + div = 5 / 0.5 + floor_div = 9.5 // 5 +def error_cases() -> None: + div = 2.0 / 0 + floor_div = 2.0 // 0 + power_overflow = 2.0**10000 +[out] +def bin_ops(): + add, sub, mul, div, floor_div :: float +L0: + add = 1.5 + sub = 0.5 + mul = 2.5 + div = 10.0 + floor_div = 1.0 + return 1 +def error_cases(): + r0 :: bit + r1 :: bool + r2, div, r3, floor_div :: float + r4, r5, r6 :: object + r7, power_overflow :: float +L0: + r0 = 0.0 == 0.0 + if r0 goto L1 else goto L2 :: bool +L1: + r1 = raise ZeroDivisionError('float division by zero') + unreachable +L2: + r2 = 2.0 / 0.0 + div = r2 + r3 = CPyFloat_FloorDivide(2.0, 0.0) + floor_div = r3 + r4 = box(float, 2.0) + r5 = box(float, 10000.0) + r6 = CPyNumber_Power(r4, r5) + r7 = unbox(float, r6) + power_overflow = r7 + return 1 + [case testStrConstantFolding] from typing_extensions import Final S: Final = 'z' +N: Final = 2 def f() -> None: x = 'foo' + 'bar' y = 'x' + 'y' + S + mul = "foobar" * 2 + mul2 = N * "foobar" [out] def f(): - r0, x, r1, y :: str + r0, x, r1, y, r2, mul, r3, mul2 :: str L0: r0 = 'foobar' x = r0 r1 = 'xyz' y = r1 + r2 = 'foobarfoobar' + mul = r2 + r3 = 'foobarfoobar' + mul2 = r3 + return 1 + +[case testBytesConstantFolding] +from typing_extensions import Final + +N: Final = 2 + +def f() -> None: + # Unfortunately, mypy doesn't store the bytes value of final refs. 
+ x = b'foo' + b'bar' + mul = b"foobar" * 2 + mul2 = N * b"foobar" +[out] +def f(): + r0, x, r1, mul, r2, mul2 :: bytes +L0: + r0 = b'foobar' + x = r0 + r1 = b'foobarfoobar' + mul = r1 + r2 = b'foobarfoobar' + mul2 = r2 + return 1 + +[case testComplexConstantFolding] +from typing_extensions import Final + +N: Final = 1 +FLOAT_N: Final = 1.5 + +def integral() -> None: + pos = 1+2j + pos_2 = 2j+N + neg = 1-2j + neg_2 = 2j-N +def floating() -> None: + pos = 1.5+2j + pos_2 = 2j+FLOAT_N + neg = 1.5-2j + neg_2 = 2j-FLOAT_N +[out] +def integral(): + r0, pos, r1, pos_2, r2, neg, r3, neg_2 :: object +L0: + r0 = (1+2j) + pos = r0 + r1 = (1+2j) + pos_2 = r1 + r2 = (1-2j) + neg = r2 + r3 = (-1+2j) + neg_2 = r3 + return 1 +def floating(): + r0, pos, r1, pos_2, r2, neg, r3, neg_2 :: object +L0: + r0 = (1.5+2j) + pos = r0 + r1 = (1.5+2j) + pos_2 = r1 + r2 = (1.5-2j) + neg = r2 + r3 = (-1.5+2j) + neg_2 = r3 return 1 diff --git a/mypyc/test-data/irbuild-dict.test b/mypyc/test-data/irbuild-dict.test index 362031b84e76..1a84f3fe3098 100644 --- a/mypyc/test-data/irbuild-dict.test +++ b/mypyc/test-data/irbuild-dict.test @@ -21,7 +21,7 @@ def f(d: Dict[int, bool]) -> None: def f(d): d :: dict r0, r1 :: object - r2 :: int32 + r2 :: i32 r3 :: bit L0: r0 = object 0 @@ -83,14 +83,14 @@ def f(d: Dict[int, int]) -> bool: def f(d): d :: dict r0 :: object - r1 :: int32 + r1 :: i32 r2 :: bit r3 :: bool L0: r0 = object 4 r1 = PyDict_Contains(d, r0) r2 = r1 >= 0 :: signed - r3 = truncate r1: int32 to builtins.bool + r3 = truncate r1: i32 to builtins.bool if r3 goto L1 else goto L2 :: bool L1: return 1 @@ -110,14 +110,14 @@ def f(d: Dict[int, int]) -> bool: def f(d): d :: dict r0 :: object - r1 :: int32 + r1 :: i32 r2 :: bit r3, r4 :: bool L0: r0 = object 4 r1 = PyDict_Contains(d, r0) r2 = r1 >= 0 :: signed - r3 = truncate r1: int32 to builtins.bool + r3 = truncate r1: i32 to builtins.bool r4 = r3 ^ 1 if r4 goto L1 else goto L2 :: bool L1: @@ -134,7 +134,7 @@ def f(a: Dict[int, int], b: Dict[int, int]) -> None: [out] def f(a, b): a, b :: dict - r0 :: int32 + r0 :: i32 r1 :: bit L0: r0 = CPyDict_Update(a, b) @@ -160,7 +160,7 @@ def increment(d): r7 :: object r8, k :: str r9, r10, r11 :: object - r12 :: int32 + r12 :: i32 r13, r14, r15 :: bit L0: r0 = 0 @@ -201,10 +201,10 @@ def f(x, y): r0 :: str r1 :: object r2 :: dict - r3 :: int32 + r3 :: i32 r4 :: bit r5 :: object - r6 :: int32 + r6 :: i32 r7 :: bit L0: r0 = 'z' @@ -252,7 +252,7 @@ def print_dict_methods(d1, d2): r7 :: object r8, v :: int r9 :: object - r10 :: int32 + r10 :: i32 r11 :: bit r12 :: bool r13, r14 :: bit @@ -266,7 +266,7 @@ def print_dict_methods(d1, d2): r22, r23 :: object r24, r25, k :: int r26, r27, r28, r29, r30 :: object - r31 :: int32 + r31 :: i32 r32, r33, r34 :: bit L0: r0 = 0 @@ -286,7 +286,7 @@ L2: r9 = box(int, v) r10 = PyDict_Contains(d2, r9) r11 = r10 >= 0 :: signed - r12 = truncate r10: int32 to builtins.bool + r12 = truncate r10: i32 to builtins.bool if r12 goto L3 else goto L4 :: bool L3: return 1 @@ -345,7 +345,7 @@ def union_of_dicts(d): r12, r13 :: object r14 :: int r15 :: object - r16 :: int32 + r16 :: i32 r17, r18, r19 :: bit L0: r0 = PyDict_New() @@ -393,7 +393,7 @@ def typeddict(d): r9, k :: str v :: object r10 :: str - r11 :: int32 + r11 :: i32 r12 :: bit r13 :: object r14, r15, r16 :: bit diff --git a/mypyc/test-data/irbuild-dunders.test b/mypyc/test-data/irbuild-dunders.test index 3c140d927c0f..b50b6eeae162 100644 --- a/mypyc/test-data/irbuild-dunders.test +++ b/mypyc/test-data/irbuild-dunders.test @@ -103,14 +103,14 @@ L0: def 
h(d): d :: __main__.D r0 :: object - r1 :: int32 + r1 :: i32 r2 :: bit r3, r4 :: bool L0: r0 = d.__contains__(14) r1 = PyObject_IsTrue(r0) r2 = r1 >= 0 :: signed - r3 = truncate r1: int32 to builtins.bool + r3 = truncate r1: i32 to builtins.bool r4 = r3 ^ 1 return r4 diff --git a/mypyc/test-data/irbuild-float.test b/mypyc/test-data/irbuild-float.test index e3a60852574b..35e2eff62b86 100644 --- a/mypyc/test-data/irbuild-float.test +++ b/mypyc/test-data/irbuild-float.test @@ -241,7 +241,7 @@ def f(x: float = 1.5) -> float: [out] def f(x, __bitmap): x :: float - __bitmap, r0 :: uint32 + __bitmap, r0 :: u32 r1 :: bit L0: r0 = __bitmap & 1 diff --git a/mypyc/test-data/irbuild-generics.test b/mypyc/test-data/irbuild-generics.test index fe4a94992717..35920889e596 100644 --- a/mypyc/test-data/irbuild-generics.test +++ b/mypyc/test-data/irbuild-generics.test @@ -132,7 +132,7 @@ def f(x: T, y: T) -> T: [out] def f(x, y): x, y, r0 :: object - r1 :: int32 + r1 :: i32 r2 :: bit r3 :: bool r4 :: object @@ -140,7 +140,7 @@ L0: r0 = PyObject_RichCompare(y, x, 4) r1 = PyObject_IsTrue(r0) r2 = r1 >= 0 :: signed - r3 = truncate r1: int32 to builtins.bool + r3 = truncate r1: i32 to builtins.bool if r3 goto L1 else goto L2 :: bool L1: r4 = y diff --git a/mypyc/test-data/irbuild-glue-methods.test b/mypyc/test-data/irbuild-glue-methods.test index 6d749bf5dd84..3012c79586f2 100644 --- a/mypyc/test-data/irbuild-glue-methods.test +++ b/mypyc/test-data/irbuild-glue-methods.test @@ -343,8 +343,8 @@ L0: return 1 def D.f(self, x, __bitmap): self :: __main__.D - x :: int64 - __bitmap, r0 :: uint32 + x :: i64 + __bitmap, r0 :: u32 r1 :: bit L0: r0 = __bitmap & 1 @@ -371,8 +371,8 @@ class D(C): [out] def C.f(self, x, __bitmap): self :: __main__.C - x :: int64 - __bitmap, r0 :: uint32 + x :: i64 + __bitmap, r0 :: u32 r1 :: bit L0: r0 = __bitmap & 1 @@ -384,10 +384,10 @@ L2: return 1 def D.f(self, x, y, __bitmap): self :: __main__.D - x, y :: int64 - __bitmap, r0 :: uint32 + x, y :: i64 + __bitmap, r0 :: u32 r1 :: bit - r2 :: uint32 + r2 :: u32 r3 :: bit L0: r0 = __bitmap & 1 @@ -405,8 +405,8 @@ L4: return 1 def D.f__C_glue(self, x, __bitmap): self :: __main__.D - x :: int64 - __bitmap :: uint32 + x :: i64 + __bitmap :: u32 r0 :: None L0: r0 = D.f(self, x, 0, __bitmap) @@ -432,6 +432,6 @@ class E: class EE(E): def f(self, x: int) -> None: pass # Line 18 [out] -main:7: error: An argument with type "int64" cannot be given a default value in a method override -main:13: error: Incompatible argument type "int64" (base class has type "int") -main:18: error: Incompatible argument type "int" (base class has type "int64") +main:7: error: An argument with type "i64" cannot be given a default value in a method override +main:13: error: Incompatible argument type "i64" (base class has type "int") +main:18: error: Incompatible argument type "int" (base class has type "i64") diff --git a/mypyc/test-data/irbuild-i16.test b/mypyc/test-data/irbuild-i16.test new file mode 100644 index 000000000000..a03c9df2c6ac --- /dev/null +++ b/mypyc/test-data/irbuild-i16.test @@ -0,0 +1,526 @@ +# Test cases for i16 native ints. Focus on things that are different from i64; no need to +# duplicate all i64 test cases here. 
+ +[case testI16BinaryOp] +from mypy_extensions import i16 + +def add_op(x: i16, y: i16) -> i16: + x = y + x + y = x + 5 + y += x + y += 7 + x = 5 + y + return x +def compare(x: i16, y: i16) -> None: + a = x == y + b = x == -5 + c = x < y + d = x < -5 + e = -5 == x + f = -5 < x +[out] +def add_op(x, y): + x, y, r0, r1, r2, r3, r4 :: i16 +L0: + r0 = y + x + x = r0 + r1 = x + 5 + y = r1 + r2 = y + x + y = r2 + r3 = y + 7 + y = r3 + r4 = 5 + y + x = r4 + return x +def compare(x, y): + x, y :: i16 + r0 :: bit + a :: bool + r1 :: bit + b :: bool + r2 :: bit + c :: bool + r3 :: bit + d :: bool + r4 :: bit + e :: bool + r5 :: bit + f :: bool +L0: + r0 = x == y + a = r0 + r1 = x == -5 + b = r1 + r2 = x < y :: signed + c = r2 + r3 = x < -5 :: signed + d = r3 + r4 = -5 == x + e = r4 + r5 = -5 < x :: signed + f = r5 + return 1 + +[case testI16UnaryOp] +from mypy_extensions import i16 + +def unary(x: i16) -> i16: + y = -x + x = ~y + y = +x + return y +[out] +def unary(x): + x, r0, y, r1 :: i16 +L0: + r0 = 0 - x + y = r0 + r1 = y ^ -1 + x = r1 + y = x + return y + +[case testI16DivisionByConstant] +from mypy_extensions import i16 + +def div_by_constant(x: i16) -> i16: + x = x // 5 + x //= 17 + return x +[out] +def div_by_constant(x): + x, r0, r1 :: i16 + r2, r3, r4 :: bit + r5 :: i16 + r6 :: bit + r7, r8, r9 :: i16 + r10, r11, r12 :: bit + r13 :: i16 + r14 :: bit + r15 :: i16 +L0: + r0 = x / 5 + r1 = r0 + r2 = x < 0 :: signed + r3 = 5 < 0 :: signed + r4 = r2 == r3 + if r4 goto L3 else goto L1 :: bool +L1: + r5 = r1 * 5 + r6 = r5 == x + if r6 goto L3 else goto L2 :: bool +L2: + r7 = r1 - 1 + r1 = r7 +L3: + x = r1 + r8 = x / 17 + r9 = r8 + r10 = x < 0 :: signed + r11 = 17 < 0 :: signed + r12 = r10 == r11 + if r12 goto L6 else goto L4 :: bool +L4: + r13 = r9 * 17 + r14 = r13 == x + if r14 goto L6 else goto L5 :: bool +L5: + r15 = r9 - 1 + r9 = r15 +L6: + x = r9 + return x + +[case testI16ModByConstant] +from mypy_extensions import i16 + +def mod_by_constant(x: i16) -> i16: + x = x % 5 + x %= 17 + return x +[out] +def mod_by_constant(x): + x, r0, r1 :: i16 + r2, r3, r4, r5 :: bit + r6, r7, r8 :: i16 + r9, r10, r11, r12 :: bit + r13 :: i16 +L0: + r0 = x % 5 + r1 = r0 + r2 = x < 0 :: signed + r3 = 5 < 0 :: signed + r4 = r2 == r3 + if r4 goto L3 else goto L1 :: bool +L1: + r5 = r1 == 0 + if r5 goto L3 else goto L2 :: bool +L2: + r6 = r1 + 5 + r1 = r6 +L3: + x = r1 + r7 = x % 17 + r8 = r7 + r9 = x < 0 :: signed + r10 = 17 < 0 :: signed + r11 = r9 == r10 + if r11 goto L6 else goto L4 :: bool +L4: + r12 = r8 == 0 + if r12 goto L6 else goto L5 :: bool +L5: + r13 = r8 + 17 + r8 = r13 +L6: + x = r8 + return x + +[case testI16DivModByVariable] +from mypy_extensions import i16 + +def divmod(x: i16, y: i16) -> i16: + a = x // y + return a % y +[out] +def divmod(x, y): + x, y, r0, a, r1 :: i16 +L0: + r0 = CPyInt16_Divide(x, y) + a = r0 + r1 = CPyInt16_Remainder(a, y) + return r1 + +[case testI16BinaryOperationWithOutOfRangeOperand] +from mypy_extensions import i16 + +def out_of_range(x: i16) -> None: + x + (-32769) + (-32770) + x + x * 32768 + x + 32767 # OK + (-32768) + x # OK +[out] +main:4: error: Value -32769 is out of range for "i16" +main:5: error: Value -32770 is out of range for "i16" +main:6: error: Value 32768 is out of range for "i16" + +[case testI16BoxAndUnbox] +from typing import Any +from mypy_extensions import i16 + +def f(x: Any) -> Any: + y: i16 = x + return y +[out] +def f(x): + x :: object + r0, y :: i16 + r1 :: object +L0: + r0 = unbox(i16, x) + y = r0 + r1 = box(i16, y) + return r1 + +[case 
testI16MixedCompare1] +from mypy_extensions import i16 +def f(x: int, y: i16) -> bool: + return x == y +[out] +def f(x, y): + x :: int + y :: i16 + r0 :: native_int + r1, r2, r3 :: bit + r4 :: native_int + r5, r6 :: i16 + r7 :: bit +L0: + r0 = x & 1 + r1 = r0 == 0 + if r1 goto L1 else goto L4 :: bool +L1: + r2 = x < 65536 :: signed + if r2 goto L2 else goto L4 :: bool +L2: + r3 = x >= -65536 :: signed + if r3 goto L3 else goto L4 :: bool +L3: + r4 = x >> 1 + r5 = truncate r4: native_int to i16 + r6 = r5 + goto L5 +L4: + CPyInt16_Overflow() + unreachable +L5: + r7 = r6 == y + return r7 + +[case testI16MixedCompare2] +from mypy_extensions import i16 +def f(x: i16, y: int) -> bool: + return x == y +[out] +def f(x, y): + x :: i16 + y :: int + r0 :: native_int + r1, r2, r3 :: bit + r4 :: native_int + r5, r6 :: i16 + r7 :: bit +L0: + r0 = y & 1 + r1 = r0 == 0 + if r1 goto L1 else goto L4 :: bool +L1: + r2 = y < 65536 :: signed + if r2 goto L2 else goto L4 :: bool +L2: + r3 = y >= -65536 :: signed + if r3 goto L3 else goto L4 :: bool +L3: + r4 = y >> 1 + r5 = truncate r4: native_int to i16 + r6 = r5 + goto L5 +L4: + CPyInt16_Overflow() + unreachable +L5: + r7 = x == r6 + return r7 + +[case testI16ConvertToInt] +from mypy_extensions import i16 + +def i16_to_int(a: i16) -> int: + return a +[out] +def i16_to_int(a): + a :: i16 + r0 :: native_int + r1 :: int +L0: + r0 = extend signed a: i16 to native_int + r1 = r0 << 1 + return r1 + +[case testI16OperatorAssignmentMixed] +from mypy_extensions import i16 + +def f(a: i16) -> None: + x = 0 + x += a +[out] +def f(a): + a :: i16 + x :: int + r0 :: native_int + r1, r2, r3 :: bit + r4 :: native_int + r5, r6, r7 :: i16 + r8 :: native_int + r9 :: int +L0: + x = 0 + r0 = x & 1 + r1 = r0 == 0 + if r1 goto L1 else goto L4 :: bool +L1: + r2 = x < 65536 :: signed + if r2 goto L2 else goto L4 :: bool +L2: + r3 = x >= -65536 :: signed + if r3 goto L3 else goto L4 :: bool +L3: + r4 = x >> 1 + r5 = truncate r4: native_int to i16 + r6 = r5 + goto L5 +L4: + CPyInt16_Overflow() + unreachable +L5: + r7 = r6 + a + r8 = extend signed r7: i16 to native_int + r9 = r8 << 1 + x = r9 + return 1 + +[case testI16InitializeFromLiteral] +from mypy_extensions import i16, i64 + +def f() -> None: + x: i16 = 0 + y: i16 = -127 + z: i16 = 5 + 7 +[out] +def f(): + x, y, z :: i16 +L0: + x = 0 + y = -127 + z = 12 + return 1 + +[case testI16ExplicitConversionFromNativeInt] +from mypy_extensions import i64, i32, i16 + +def from_i16(x: i16) -> i16: + return i16(x) + +def from_i32(x: i32) -> i16: + return i16(x) + +def from_i64(x: i64) -> i16: + return i16(x) +[out] +def from_i16(x): + x :: i16 +L0: + return x +def from_i32(x): + x :: i32 + r0 :: i16 +L0: + r0 = truncate x: i32 to i16 + return r0 +def from_i64(x): + x :: i64 + r0 :: i16 +L0: + r0 = truncate x: i64 to i16 + return r0 + +[case testI16ExplicitConversionFromInt] +from mypy_extensions import i16 + +def f(x: int) -> i16: + return i16(x) +[out] +def f(x): + x :: int + r0 :: native_int + r1, r2, r3 :: bit + r4 :: native_int + r5, r6 :: i16 +L0: + r0 = x & 1 + r1 = r0 == 0 + if r1 goto L1 else goto L4 :: bool +L1: + r2 = x < 65536 :: signed + if r2 goto L2 else goto L4 :: bool +L2: + r3 = x >= -65536 :: signed + if r3 goto L3 else goto L4 :: bool +L3: + r4 = x >> 1 + r5 = truncate r4: native_int to i16 + r6 = r5 + goto L5 +L4: + CPyInt16_Overflow() + unreachable +L5: + return r6 + +[case testI16ExplicitConversionFromLiteral] +from mypy_extensions import i16 + +def f() -> None: + x = i16(0) + y = i16(11) + z = i16(-3) + a = i16(32767) + b 
= i16(32768) # Truncate + c = i16(-32768) + d = i16(-32769) # Truncate +[out] +def f(): + x, y, z, a, b, c, d :: i16 +L0: + x = 0 + y = 11 + z = -3 + a = 32767 + b = -32768 + c = -32768 + d = 32767 + return 1 + +[case testI16ExplicitConversionFromVariousTypes] +from mypy_extensions import i16 + +def bool_to_i16(b: bool) -> i16: + return i16(b) + +def str_to_i16(s: str) -> i16: + return i16(s) + +class C: + def __int__(self) -> i16: + return 5 + +def instance_to_i16(c: C) -> i16: + return i16(c) + +def float_to_i16(x: float) -> i16: + return i16(x) +[out] +def bool_to_i16(b): + b :: bool + r0 :: i16 +L0: + r0 = extend b: builtins.bool to i16 + return r0 +def str_to_i16(s): + s :: str + r0 :: object + r1 :: i16 +L0: + r0 = CPyLong_FromStr(s) + r1 = unbox(i16, r0) + return r1 +def C.__int__(self): + self :: __main__.C +L0: + return 5 +def instance_to_i16(c): + c :: __main__.C + r0 :: i16 +L0: + r0 = c.__int__() + return r0 +def float_to_i16(x): + x :: float + r0 :: int + r1 :: native_int + r2, r3, r4 :: bit + r5 :: native_int + r6, r7 :: i16 +L0: + r0 = CPyTagged_FromFloat(x) + r1 = r0 & 1 + r2 = r1 == 0 + if r2 goto L1 else goto L4 :: bool +L1: + r3 = r0 < 65536 :: signed + if r3 goto L2 else goto L4 :: bool +L2: + r4 = r0 >= -65536 :: signed + if r4 goto L3 else goto L4 :: bool +L3: + r5 = r0 >> 1 + r6 = truncate r5: native_int to i16 + r7 = r6 + goto L5 +L4: + CPyInt16_Overflow() + unreachable +L5: + return r7 diff --git a/mypyc/test-data/irbuild-i32.test b/mypyc/test-data/irbuild-i32.test index 725e183657b1..7dcb722ec906 100644 --- a/mypyc/test-data/irbuild-i32.test +++ b/mypyc/test-data/irbuild-i32.test @@ -20,7 +20,7 @@ def compare(x: i32, y: i32) -> None: f = -5 < x [out] def add_op(x, y): - x, y, r0, r1, r2, r3, r4 :: int32 + x, y, r0, r1, r2, r3, r4 :: i32 L0: r0 = y + x x = r0 @@ -34,7 +34,7 @@ L0: x = r4 return x def compare(x, y): - x, y :: int32 + x, y :: i32 r0 :: bit a :: bool r1 :: bit @@ -72,7 +72,7 @@ def unary(x: i32) -> i32: return y [out] def unary(x): - x, r0, y, r1 :: int32 + x, r0, y, r1 :: i32 L0: r0 = 0 - x y = r0 @@ -90,15 +90,15 @@ def div_by_constant(x: i32) -> i32: return x [out] def div_by_constant(x): - x, r0, r1 :: int32 + x, r0, r1 :: i32 r2, r3, r4 :: bit - r5 :: int32 + r5 :: i32 r6 :: bit - r7, r8, r9 :: int32 + r7, r8, r9 :: i32 r10, r11, r12 :: bit - r13 :: int32 + r13 :: i32 r14 :: bit - r15 :: int32 + r15 :: i32 L0: r0 = x / 5 r1 = r0 @@ -141,11 +141,11 @@ def mod_by_constant(x: i32) -> i32: return x [out] def mod_by_constant(x): - x, r0, r1 :: int32 + x, r0, r1 :: i32 r2, r3, r4, r5 :: bit - r6, r7, r8 :: int32 + r6, r7, r8 :: i32 r9, r10, r11, r12 :: bit - r13 :: int32 + r13 :: i32 L0: r0 = x % 5 r1 = r0 @@ -185,7 +185,7 @@ def divmod(x: i32, y: i32) -> i32: return a % y [out] def divmod(x, y): - x, y, r0, a, r1 :: int32 + x, y, r0, a, r1 :: i32 L0: r0 = CPyInt32_Divide(x, y) a = r0 @@ -202,12 +202,12 @@ def f(x: Any) -> Any: [out] def f(x): x :: object - r0, y :: int32 + r0, y :: i32 r1 :: object L0: - r0 = unbox(int32, x) + r0 = unbox(i32, x) y = r0 - r1 = box(int32, y) + r1 = box(i32, y) return r1 [case testI32MixedCompare1_64bit] @@ -217,11 +217,11 @@ def f(x: int, y: i32) -> bool: [out] def f(x, y): x :: int - y :: int32 + y :: i32 r0 :: native_int r1, r2, r3 :: bit r4 :: native_int - r5, r6 :: int32 + r5, r6 :: i32 r7 :: bit L0: r0 = x & 1 @@ -235,7 +235,7 @@ L2: if r3 goto L3 else goto L4 :: bool L3: r4 = x >> 1 - r5 = truncate r4: native_int to int32 + r5 = truncate r4: native_int to i32 r6 = r5 goto L5 L4: @@ -251,12 +251,12 @@ def f(x: i32, 
y: int) -> bool: return x == y [out] def f(x, y): - x :: int32 + x :: i32 y :: int r0 :: native_int r1, r2, r3 :: bit r4 :: native_int - r5, r6 :: int32 + r5, r6 :: i32 r7 :: bit L0: r0 = y & 1 @@ -270,7 +270,7 @@ L2: if r3 goto L3 else goto L4 :: bool L3: r4 = y >> 1 - r5 = truncate r4: native_int to int32 + r5 = truncate r4: native_int to i32 r6 = r5 goto L5 L4: @@ -287,13 +287,13 @@ def f(x: int, y: i32) -> bool: [out] def f(x, y): x :: int - y :: int32 + y :: i32 r0 :: native_int r1 :: bit - r2, r3 :: int32 + r2, r3 :: i32 r4 :: ptr r5 :: c_ptr - r6 :: int32 + r6 :: i32 r7 :: bit L0: r0 = x & 1 @@ -320,11 +320,11 @@ def i32_to_int(a: i32) -> int: return a [out] def i32_to_int(a): - a :: int32 + a :: i32 r0 :: native_int r1 :: int L0: - r0 = extend signed a: int32 to native_int + r0 = extend signed a: i32 to native_int r1 = r0 << 1 return r1 @@ -335,7 +335,7 @@ def i32_to_int(a: i32) -> int: return a [out] def i32_to_int(a): - a :: int32 + a :: i32 r0, r1 :: bit r2, r3, r4 :: int L0: @@ -362,12 +362,12 @@ def f(a: i32) -> None: x += a [out] def f(a): - a :: int32 + a :: i32 x :: int r0 :: native_int r1, r2, r3 :: bit r4 :: native_int - r5, r6, r7 :: int32 + r5, r6, r7 :: i32 r8 :: native_int r9 :: int L0: @@ -383,7 +383,7 @@ L2: if r3 goto L3 else goto L4 :: bool L3: r4 = x >> 1 - r5 = truncate r4: native_int to int32 + r5 = truncate r4: native_int to i32 r6 = r5 goto L5 L4: @@ -391,7 +391,7 @@ L4: unreachable L5: r7 = r6 + a - r8 = extend signed r7: int32 to native_int + r8 = extend signed r7: i32 to native_int r9 = r8 << 1 x = r9 return 1 @@ -405,7 +405,7 @@ def f() -> None: z: i32 = 5 + 7 [out] def f(): - x, y, z :: int32 + x, y, z :: i32 L0: x = 0 y = -127 @@ -413,7 +413,10 @@ L0: return 1 [case testI32ExplicitConversionFromNativeInt] -from mypy_extensions import i64, i32 +from mypy_extensions import i64, i32, i16 + +def from_i16(x: i16) -> i32: + return i32(x) def from_i32(x: i32) -> i32: return i32(x) @@ -421,15 +424,21 @@ def from_i32(x: i32) -> i32: def from_i64(x: i64) -> i32: return i32(x) [out] +def from_i16(x): + x :: i16 + r0 :: i32 +L0: + r0 = extend signed x: i16 to i32 + return r0 def from_i32(x): - x :: int32 + x :: i32 L0: return x def from_i64(x): - x :: int64 - r0 :: int32 + x :: i64 + r0 :: i32 L0: - r0 = truncate x: int64 to int32 + r0 = truncate x: i64 to i32 return r0 [case testI32ExplicitConversionFromInt_64bit] @@ -443,7 +452,7 @@ def f(x): r0 :: native_int r1, r2, r3 :: bit r4 :: native_int - r5, r6 :: int32 + r5, r6 :: i32 L0: r0 = x & 1 r1 = r0 == 0 @@ -456,7 +465,7 @@ L2: if r3 goto L3 else goto L4 :: bool L3: r4 = x >> 1 - r5 = truncate r4: native_int to int32 + r5 = truncate r4: native_int to i32 r6 = r5 goto L5 L4: @@ -465,20 +474,22 @@ L4: L5: return r6 -[case testI32ExplicitConversionFromLiteral] +[case testI32ExplicitConversionFromLiteral_64bit] from mypy_extensions import i32 def f() -> None: x = i32(0) y = i32(11) z = i32(-3) + a = i32(2**31) [out] def f(): - x, y, z :: int32 + x, y, z, a :: i32 L0: x = 0 y = 11 z = -3 + a = -2147483648 return 1 [case testI32ExplicitConversionFromVariousTypes_64bit] @@ -502,17 +513,17 @@ def float_to_i32(x: float) -> i32: [out] def bool_to_i32(b): b :: bool - r0 :: int32 + r0 :: i32 L0: - r0 = extend b: builtins.bool to int32 + r0 = extend b: builtins.bool to i32 return r0 def str_to_i32(s): s :: str r0 :: object - r1 :: int32 + r1 :: i32 L0: r0 = CPyLong_FromStr(s) - r1 = unbox(int32, r0) + r1 = unbox(i32, r0) return r1 def C.__int__(self): self :: __main__.C @@ -520,7 +531,7 @@ L0: return 5 def instance_to_i32(c): 
c :: __main__.C - r0 :: int32 + r0 :: i32 L0: r0 = c.__int__() return r0 @@ -530,7 +541,7 @@ def float_to_i32(x): r1 :: native_int r2, r3, r4 :: bit r5 :: native_int - r6, r7 :: int32 + r6, r7 :: i32 L0: r0 = CPyTagged_FromFloat(x) r1 = r0 & 1 @@ -544,7 +555,7 @@ L2: if r4 goto L3 else goto L4 :: bool L3: r5 = r0 >> 1 - r6 = truncate r5: native_int to int32 + r6 = truncate r5: native_int to i32 r7 = r6 goto L5 L4: @@ -564,10 +575,10 @@ def float_to_i32(x): r0 :: int r1 :: native_int r2 :: bit - r3, r4 :: int32 + r3, r4 :: i32 r5 :: ptr r6 :: c_ptr - r7 :: int32 + r7 :: i32 L0: r0 = CPyTagged_FromFloat(x) r1 = r0 & 1 diff --git a/mypyc/test-data/irbuild-i64.test b/mypyc/test-data/irbuild-i64.test index a18171c41d57..07f549c9fcc2 100644 --- a/mypyc/test-data/irbuild-i64.test +++ b/mypyc/test-data/irbuild-i64.test @@ -7,7 +7,7 @@ def f() -> i64: return y [out] def f(): - x, y :: int64 + x, y :: i64 L0: x = 5 y = x @@ -40,7 +40,7 @@ def all_comparisons(x: i64) -> int: return y [out] def min(x, y): - x, y :: int64 + x, y :: i64 r0 :: bit L0: r0 = x < y :: signed @@ -52,7 +52,7 @@ L2: L3: unreachable def all_comparisons(x): - x :: int64 + x :: i64 r0 :: bit y :: int r1, r2, r3, r4, r5 :: bit @@ -110,7 +110,7 @@ def f(x: i64, y: i64) -> i64: return y - z [out] def f(x, y): - x, y, r0, z, r1 :: int64 + x, y, r0, z, r1 :: i64 L0: r0 = x + y z = r0 @@ -125,7 +125,7 @@ def f() -> i64: return -i [out] def f(): - i, r0 :: int64 + i, r0 :: i64 L0: i = -3 r0 = 0 - i @@ -140,7 +140,7 @@ def unary(x: i64) -> i64: return x [out] def unary(x): - x, r0, y :: int64 + x, r0, y :: i64 L0: r0 = x ^ -1 y = r0 @@ -157,12 +157,12 @@ def f(a: Any) -> None: [out] def f(a): a :: object - r0, b :: int64 + r0, b :: i64 r1 :: object L0: - r0 = unbox(int64, a) + r0 = unbox(i64, a) b = r0 - r1 = box(int64, b) + r1 = box(i64, b) a = r1 return 1 @@ -177,20 +177,20 @@ def set(a: List[i64], i: i64, x: i64) -> None: [out] def get(a, i): a :: list - i :: int64 + i :: i64 r0 :: object - r1 :: int64 + r1 :: i64 L0: r0 = CPyList_GetItemInt64(a, i) - r1 = unbox(int64, r0) + r1 = unbox(i64, r0) return r1 def set(a, i, x): a :: list - i, x :: int64 + i, x :: i64 r0 :: object r1 :: bit L0: - r0 = box(int64, x) + r0 = box(i64, x) r1 = CPyList_SetItemInt64(a, i, r0) return 1 @@ -203,7 +203,7 @@ def f() -> i64: return 3 - b [out] def f(): - a, r0, b, r1 :: int64 + a, r0, b, r1 :: i64 L0: a = 1 r0 = a + 2 @@ -222,7 +222,7 @@ def f(a: i64) -> i64: return 3 [out] def f(a): - a :: int64 + a :: i64 r0, r1 :: bit L0: r0 = a < 3 :: signed @@ -257,7 +257,7 @@ def others(a: i64, b: i64) -> i64: return a [out] def add(a): - a, b, r0, r1 :: int64 + a, b, r0, r1 :: i64 L0: b = a r0 = b + 1 @@ -266,7 +266,7 @@ L0: a = r1 return a def others(a, b): - a, b, r0, r1, r2, r3, r4, r5, r6 :: int64 + a, b, r0, r1, r2, r3, r4, r5, r6 :: i64 L0: r0 = a - b a = r0 @@ -307,7 +307,7 @@ def unary(a: i64) -> i64: return ~a [out] def forward(a, b): - a, b, r0, r1, r2, r3, r4 :: int64 + a, b, r0, r1, r2, r3, r4 :: i64 L0: r0 = a & 1 b = r0 @@ -321,7 +321,7 @@ L0: b = r4 return b def reverse(a, b): - a, b, r0, r1, r2, r3, r4 :: int64 + a, b, r0, r1, r2, r3, r4 :: i64 L0: r0 = 1 & a b = r0 @@ -335,7 +335,7 @@ L0: b = r4 return b def unary(a): - a, r0 :: int64 + a, r0 :: i64 L0: r0 = a ^ -1 return r0 @@ -355,11 +355,11 @@ def divide_by_zero(x: i64) -> i64: return x // 0 [out] def constant_divisor(x): - x, r0, r1 :: int64 + x, r0, r1 :: i64 r2, r3, r4 :: bit - r5 :: int64 + r5 :: i64 r6 :: bit - r7 :: int64 + r7 :: i64 L0: r0 = x / 7 r1 = r0 @@ -377,22 +377,22 @@ L2: L3: 
return r1 def variable_divisor(x, y): - x, y, r0 :: int64 + x, y, r0 :: i64 L0: r0 = CPyInt64_Divide(x, y) return r0 def constant_lhs(x): - x, r0 :: int64 + x, r0 :: i64 L0: r0 = CPyInt64_Divide(27, x) return r0 def divide_by_neg_one(x): - x, r0 :: int64 + x, r0 :: i64 L0: r0 = CPyInt64_Divide(x, -1) return r0 def divide_by_zero(x): - x, r0 :: int64 + x, r0 :: i64 L0: r0 = CPyInt64_Divide(x, 0) return r0 @@ -410,9 +410,9 @@ def mod_by_zero(x: i64) -> i64: return x % 0 [out] def constant_divisor(x): - x, r0, r1 :: int64 + x, r0, r1 :: i64 r2, r3, r4, r5 :: bit - r6 :: int64 + r6 :: i64 L0: r0 = x % 7 r1 = r0 @@ -429,17 +429,17 @@ L2: L3: return r1 def variable_divisor(x, y): - x, y, r0 :: int64 + x, y, r0 :: i64 L0: r0 = CPyInt64_Remainder(x, y) return r0 def constant_lhs(x): - x, r0 :: int64 + x, r0 :: i64 L0: r0 = CPyInt64_Remainder(27, x) return r0 def mod_by_zero(x): - x, r0 :: int64 + x, r0 :: i64 L0: r0 = CPyInt64_Remainder(x, 0) return r0 @@ -455,11 +455,11 @@ def by_variable(x: i64, y: i64) -> i64: return x [out] def by_constant(x): - x, r0, r1 :: int64 + x, r0, r1 :: i64 r2, r3, r4 :: bit - r5 :: int64 + r5 :: i64 r6 :: bit - r7 :: int64 + r7 :: i64 L0: r0 = x / 7 r1 = r0 @@ -478,7 +478,7 @@ L3: x = r1 return x def by_variable(x, y): - x, y, r0 :: int64 + x, y, r0 :: i64 L0: r0 = CPyInt64_Divide(x, y) x = r0 @@ -495,9 +495,9 @@ def by_variable(x: i64, y: i64) -> i64: return x [out] def by_constant(x): - x, r0, r1 :: int64 + x, r0, r1 :: i64 r2, r3, r4, r5 :: bit - r6 :: int64 + r6 :: i64 L0: r0 = x % 7 r1 = r0 @@ -515,7 +515,7 @@ L3: x = r1 return x def by_variable(x, y): - x, y, r0 :: int64 + x, y, r0 :: i64 L0: r0 = CPyInt64_Remainder(x, y) x = r0 @@ -532,14 +532,14 @@ def f(x: i64) -> None: g(n) [out] def g(a): - a :: int64 + a :: i64 L0: return 1 def f(x): - x, r0, n :: int64 + x, r0, n :: i64 r1 :: bit r2 :: None - r3 :: int64 + r3 :: i64 L0: r0 = 0 n = r0 @@ -566,10 +566,10 @@ def int_to_i64(a): a :: int r0 :: native_int r1 :: bit - r2, r3 :: int64 + r2, r3 :: i64 r4 :: ptr r5 :: c_ptr - r6 :: int64 + r6 :: i64 L0: r0 = a & 1 r1 = r0 == 0 @@ -594,7 +594,7 @@ def i64_to_int(a: i64) -> int: return a [out] def i64_to_int(a): - a :: int64 + a :: i64 r0, r1 :: bit r2, r3, r4 :: int L0: @@ -620,7 +620,7 @@ def i64_to_int(a: i64) -> int: return a [out] def i64_to_int(a): - a :: int64 + a :: i64 r0, r1 :: bit r2, r3 :: int r4 :: native_int @@ -636,7 +636,7 @@ L2: r3 = r2 goto L4 L3: - r4 = truncate a: int64 to native_int + r4 = truncate a: i64 to native_int r5 = r4 << 1 r3 = r5 L4: @@ -658,23 +658,23 @@ def h() -> i64: return x + y + t[0] [out] def f(x, y): - x, y :: int64 - r0 :: tuple[int64, int64] + x, y :: i64 + r0 :: tuple[i64, i64] L0: r0 = (x, y) return r0 def g(): r0 :: tuple[int, int] - r1 :: tuple[int64, int64] + r1 :: tuple[i64, i64] L0: r0 = (2, 4) r1 = (1, 2) return r1 def h(): - r0 :: tuple[int64, int64] - r1, x, r2, y :: int64 - r3, t :: tuple[int64, int64] - r4, r5, r6 :: int64 + r0 :: tuple[i64, i64] + r1, x, r2, y :: i64 + r3, t :: tuple[i64, i64] + r4, r5, r6 :: i64 L0: r0 = g() r1 = r0[0] @@ -694,14 +694,14 @@ def f(x: i64, y: int) -> i64: return x + y [out] def f(x, y): - x :: int64 + x :: i64 y :: int r0 :: native_int r1 :: bit - r2, r3 :: int64 + r2, r3 :: i64 r4 :: ptr r5 :: c_ptr - r6, r7 :: int64 + r6, r7 :: i64 L0: r0 = y & 1 r1 = r0 == 0 @@ -727,13 +727,13 @@ def f(x: int, y: i64) -> i64: [out] def f(x, y): x :: int - y :: int64 + y :: i64 r0 :: native_int r1 :: bit - r2, r3 :: int64 + r2, r3 :: i64 r4 :: ptr r5 :: c_ptr - r6, r7 :: int64 + r6, r7 :: i64 
L0: r0 = x & 1 r1 = r0 == 0 @@ -760,14 +760,14 @@ def f(y: i64) -> int: return x [out] def f(y): - y :: int64 + y :: i64 x :: int r0 :: native_int r1 :: bit - r2, r3 :: int64 + r2, r3 :: i64 r4 :: ptr r5 :: c_ptr - r6, r7 :: int64 + r6, r7 :: i64 r8, r9 :: bit r10, r11, r12 :: int L0: @@ -812,13 +812,13 @@ def f(y: int) -> i64: [out] def f(y): y :: int - x :: int64 + x :: i64 r0 :: native_int r1 :: bit - r2, r3 :: int64 + r2, r3 :: i64 r4 :: ptr r5 :: c_ptr - r6, r7 :: int64 + r6, r7 :: i64 L0: x = 0 r0 = y & 1 @@ -846,13 +846,13 @@ def f(x: int, y: i64) -> bool: [out] def f(x, y): x :: int - y :: int64 + y :: i64 r0 :: native_int r1 :: bit - r2, r3 :: int64 + r2, r3 :: i64 r4 :: ptr r5 :: c_ptr - r6 :: int64 + r6 :: i64 r7 :: bit L0: r0 = x & 1 @@ -878,14 +878,14 @@ def f(x: i64, y: int) -> bool: return x == y [out] def f(x, y): - x :: int64 + x :: i64 y :: int r0 :: native_int r1 :: bit - r2, r3 :: int64 + r2, r3 :: i64 r4 :: ptr r5 :: c_ptr - r6 :: int64 + r6 :: i64 r7 :: bit L0: r0 = y & 1 @@ -912,20 +912,20 @@ def f(x: int, y: i64) -> bool: [out] def f(x, y): x :: int - y :: int64 + y :: i64 r0 :: native_int r1 :: bit - r2, r3, r4 :: int64 + r2, r3, r4 :: i64 r5 :: ptr r6 :: c_ptr - r7 :: int64 + r7 :: i64 r8 :: bit L0: r0 = x & 1 r1 = r0 == 0 if r1 goto L1 else goto L2 :: bool L1: - r2 = extend signed x: builtins.int to int64 + r2 = extend signed x: builtins.int to i64 r3 = r2 >> 1 r4 = r3 goto L3 @@ -949,7 +949,7 @@ def f(x: i64) -> i64: return 3 [out] def f(x): - x :: int64 + x :: i64 r0, r1 :: bit L0: r0 = x != 0 @@ -975,14 +975,14 @@ def g(x: i64, y: int) -> int: return y [out] def f(x, y): - x :: int64 + x :: i64 y :: int r0 :: native_int r1 :: bit - r2, r3 :: int64 + r2, r3 :: i64 r4 :: ptr r5 :: c_ptr - r6 :: int64 + r6 :: i64 L0: r0 = y & 1 r1 = r0 == 0 @@ -1001,7 +1001,7 @@ L3: x = r3 return x def g(x, y): - x :: int64 + x :: i64 y :: int r0, r1 :: bit r2, r3, r4 :: int @@ -1043,7 +1043,7 @@ class C: [out] def add_simple(c): c :: __main__.C - r0, r1, r2 :: int64 + r0, r1, r2 :: i64 L0: r0 = c.x r1 = c.y @@ -1051,7 +1051,7 @@ L0: return r2 def inplace_add_simple(c): c :: __main__.C - r0, r1, r2 :: int64 + r0, r1, r2 :: i64 r3 :: bool L0: r0 = c.x @@ -1062,9 +1062,9 @@ L0: def add_borrow(d): d :: __main__.D r0 :: __main__.C - r1 :: int64 + r1 :: i64 r2 :: __main__.C - r3, r4 :: int64 + r3, r4 :: i64 L0: r0 = borrow d.c r1 = r0.x @@ -1095,7 +1095,7 @@ class C: [out] def bitwise_simple(c): c :: __main__.C - r0, r1, r2 :: int64 + r0, r1, r2 :: i64 L0: r0 = c.x r1 = c.y @@ -1103,7 +1103,7 @@ L0: return r2 def inplace_bitwide_simple(c): c :: __main__.C - r0, r1, r2 :: int64 + r0, r1, r2 :: i64 r3 :: bool L0: r0 = c.x @@ -1114,9 +1114,9 @@ L0: def bitwise_borrow(d): d :: __main__.D r0 :: __main__.C - r1 :: int64 + r1 :: i64 r2 :: __main__.C - r3, r4 :: int64 + r3, r4 :: i64 L0: r0 = borrow d.c r1 = r0.x @@ -1137,7 +1137,7 @@ class C: s: str [out] def f(n): - n :: int64 + n :: i64 r0 :: __main__.C r1 :: list r2, r3 :: ptr @@ -1170,13 +1170,13 @@ def f(a: List[i64], n: i64) -> bool: [out] def f(a, n): a :: list - n :: int64 + n :: i64 r0 :: object - r1 :: int64 + r1 :: i64 r2 :: bit L0: r0 = CPyList_GetItemInt64Borrow(a, n) - r1 = unbox(int64, r0) + r1 = unbox(i64, r0) r2 = r1 == 0 keep_alive a, n if r2 goto L1 else goto L2 :: bool @@ -1201,11 +1201,11 @@ def g(a: List[i64], y: i64) -> bool: [out] def f(a, y): a :: list - y :: int64 + y :: i64 r0 :: ptr r1 :: native_int r2 :: short_int - r3 :: int64 + r3 :: i64 r4 :: bit L0: r0 = get_element_ptr a ob_size :: PyVarObject @@ -1221,11 
+1221,11 @@ L2: return 0 def g(a, y): a :: list - y :: int64 + y :: i64 r0 :: ptr r1 :: native_int r2 :: short_int - r3 :: int64 + r3 :: i64 r4 :: bit L0: r0 = get_element_ptr a ob_size :: PyVarObject @@ -1248,7 +1248,7 @@ def f(n: i64) -> List[i64]: return [n] * n [out] def f(n): - n :: int64 + n :: i64 r0 :: list r1 :: object r2, r3 :: ptr @@ -1257,7 +1257,7 @@ def f(n): r9 :: list L0: r0 = PyList_New(1) - r1 = box(int64, n) + r1 = box(i64, n) r2 = get_element_ptr r0 ob_item :: PyListObject r3 = load_mem r2 :: ptr* set_mem r3, r1 :: builtins.object* @@ -1297,11 +1297,11 @@ def lt_i64(a: List[i64], n: i64) -> bool: [out] def add_i64(a, n): a :: list - n :: int64 + n :: i64 r0 :: ptr r1 :: native_int r2 :: short_int - r3, r4 :: int64 + r3, r4 :: i64 L0: r0 = get_element_ptr a ob_size :: PyVarObject r1 = load_mem r0 :: native_int* @@ -1312,11 +1312,11 @@ L0: return r4 def add_i64_2(a, n): a :: list - n :: int64 + n :: i64 r0 :: ptr r1 :: native_int r2 :: short_int - r3, r4 :: int64 + r3, r4 :: i64 L0: r0 = get_element_ptr a ob_size :: PyVarObject r1 = load_mem r0 :: native_int* @@ -1327,11 +1327,11 @@ L0: return r4 def eq_i64(a, n): a :: list - n :: int64 + n :: i64 r0 :: ptr r1 :: native_int r2 :: short_int - r3 :: int64 + r3 :: i64 r4 :: bit L0: r0 = get_element_ptr a ob_size :: PyVarObject @@ -1347,11 +1347,11 @@ L2: return 0 def lt_i64(a, n): a :: list - n :: int64 + n :: i64 r0 :: ptr r1 :: native_int r2 :: short_int - r3 :: int64 + r3 :: i64 r4 :: bit L0: r0 = get_element_ptr a ob_size :: PyVarObject @@ -1376,10 +1376,10 @@ def f(x: Optional[i64]) -> i64: return x [out] def f(x): - x :: union[int64, None] + x :: union[i64, None] r0 :: object r1 :: bit - r2 :: int64 + r2 :: i64 L0: r0 = load_address _Py_NoneStruct r1 = x == r0 @@ -1387,7 +1387,7 @@ L0: L1: return 1 L2: - r2 = unbox(int64, x) + r2 = unbox(i64, x) return r2 [case testI64DefaultValueSingle] @@ -1400,10 +1400,10 @@ def g() -> i64: return f(7) + f(8, 9) [out] def f(x, y, __bitmap): - x, y :: int64 - __bitmap, r0 :: uint32 + x, y :: i64 + __bitmap, r0 :: u32 r1 :: bit - r2 :: int64 + r2 :: i64 L0: r0 = __bitmap & 1 r1 = r0 == 0 @@ -1414,7 +1414,7 @@ L2: r2 = x + y return r2 def g(): - r0, r1, r2 :: int64 + r0, r1, r2 :: i64 L0: r0 = f(7, 0, 0) r1 = f(8, 9, 1) @@ -1431,12 +1431,12 @@ def g() -> i64: return f(7) + f(8, 9) + f(1, 2, 3) + f(4, 5, 6, 7) [out] def f(a, b, c, d, __bitmap): - a, b :: int64 + a, b :: i64 c :: int - d :: int64 - __bitmap, r0 :: uint32 + d :: i64 + __bitmap, r0 :: u32 r1 :: bit - r2 :: uint32 + r2 :: u32 r3 :: bit L0: r0 = __bitmap & 1 @@ -1458,9 +1458,9 @@ L6: return 0 def g(): r0 :: int - r1 :: int64 + r1 :: i64 r2 :: int - r3, r4, r5, r6, r7, r8 :: int64 + r3, r4, r5, r6, r7, r8 :: i64 L0: r0 = :: int r1 = f(7, 0, r0, 0, 0) @@ -1486,8 +1486,8 @@ def f(c: C) -> None: [out] def C.m(self, x, __bitmap): self :: __main__.C - x :: int64 - __bitmap, r0 :: uint32 + x :: i64 + __bitmap, r0 :: u32 r1 :: bit L0: r0 = __bitmap & 1 @@ -1506,7 +1506,10 @@ L0: return 1 [case testI64ExplicitConversionFromNativeInt] -from mypy_extensions import i64, i32 +from mypy_extensions import i64, i32, i16 + +def from_i16(x: i16) -> i64: + return i64(x) def from_i32(x: i32) -> i64: return i64(x) @@ -1514,14 +1517,20 @@ def from_i32(x: i32) -> i64: def from_i64(x: i64) -> i64: return i64(x) [out] +def from_i16(x): + x :: i16 + r0 :: i64 +L0: + r0 = extend signed x: i16 to i64 + return r0 def from_i32(x): - x :: int32 - r0 :: int64 + x :: i32 + r0 :: i64 L0: - r0 = extend signed x: int32 to int64 + r0 = extend signed x: i32 to 
i64 return r0 def from_i64(x): - x :: int64 + x :: i64 L0: return x @@ -1535,10 +1544,10 @@ def f(x): x :: int r0 :: native_int r1 :: bit - r2, r3 :: int64 + r2, r3 :: i64 r4 :: ptr r5 :: c_ptr - r6 :: int64 + r6 :: i64 L0: r0 = x & 1 r1 = r0 == 0 @@ -1563,7 +1572,7 @@ def f(x: i64) -> int: return int(x) [out] def f(x): - x :: int64 + x :: i64 r0, r1 :: bit r2, r3, r4 :: int L0: @@ -1591,7 +1600,7 @@ def f() -> None: z = i64(-3) [out] def f(): - x, y, z :: int64 + x, y, z :: i64 L0: x = 0 y = 11 @@ -1606,9 +1615,9 @@ def f() -> None: y = x [out] def f(): - r0, x :: int64 + r0, x :: i64 r1 :: bit - y, r2 :: int64 + y, r2 :: i64 L0: r0 = 0 x = r0 @@ -1633,9 +1642,9 @@ def f() -> None: y = x [out] def f(): - r0, x :: int64 + r0, x :: i64 r1 :: bit - y, r2 :: int64 + y, r2 :: i64 L0: r0 = 0 x = r0 @@ -1662,8 +1671,8 @@ class D(C): [out] def C.f(self, x, __bitmap): self :: __main__.C - x :: int64 - __bitmap, r0 :: uint32 + x :: i64 + __bitmap, r0 :: u32 r1 :: bit L0: r0 = __bitmap & 1 @@ -1675,8 +1684,8 @@ L2: return 1 def D.f(self, x, __bitmap): self :: __main__.D - x :: int64 - __bitmap, r0 :: uint32 + x :: i64 + __bitmap, r0 :: u32 r1 :: bit L0: r0 = __bitmap & 1 @@ -1742,26 +1751,26 @@ def compare_bool_to_i64(n: i64, b: bool) -> bool: return True [out] def add_bool_to_int(n, b): - n :: int64 + n :: i64 b :: bool - r0, r1 :: int64 + r0, r1 :: i64 L0: - r0 = extend b: builtins.bool to int64 + r0 = extend b: builtins.bool to i64 r1 = n + r0 return r1 def compare_bool_to_i64(n, b): - n :: int64 + n :: i64 b :: bool - r0 :: int64 + r0 :: i64 r1 :: bit - r2 :: int64 + r2 :: i64 r3 :: bit L0: - r0 = extend b: builtins.bool to int64 + r0 = extend b: builtins.bool to i64 r1 = n == r0 if r1 goto L1 else goto L2 :: bool L1: - r2 = extend b: builtins.bool to int64 + r2 = extend b: builtins.bool to i64 r3 = r2 != n return r3 L2: @@ -1779,18 +1788,18 @@ def cast_int(x: int) -> i64: [out] def cast_object(o): o :: object - r0 :: int64 + r0 :: i64 L0: - r0 = unbox(int64, o) + r0 = unbox(i64, o) return r0 def cast_int(x): x :: int r0 :: native_int r1 :: bit - r2, r3 :: int64 + r2, r3 :: i64 r4 :: ptr r5 :: c_ptr - r6 :: int64 + r6 :: i64 L0: r0 = x & 1 r1 = r0 == 0 @@ -1819,16 +1828,16 @@ def cast_int(x): x :: int r0 :: native_int r1 :: bit - r2, r3, r4 :: int64 + r2, r3, r4 :: i64 r5 :: ptr r6 :: c_ptr - r7 :: int64 + r7 :: i64 L0: r0 = x & 1 r1 = r0 == 0 if r1 goto L1 else goto L2 :: bool L1: - r2 = extend signed x: builtins.int to int64 + r2 = extend signed x: builtins.int to i64 r3 = r2 >> 1 r4 = r3 goto L3 @@ -1865,25 +1874,25 @@ def float_to_i64(x: float) -> i64: [out] def bool_to_i64(b): b :: bool - r0 :: int64 + r0 :: i64 L0: - r0 = extend b: builtins.bool to int64 + r0 = extend b: builtins.bool to i64 return r0 def str_to_i64(s): s :: str r0 :: object - r1 :: int64 + r1 :: i64 L0: r0 = CPyLong_FromStr(s) - r1 = unbox(int64, r0) + r1 = unbox(i64, r0) return r1 def str_to_i64_with_base(s): s :: str r0 :: object - r1 :: int64 + r1 :: i64 L0: r0 = CPyLong_FromStrWithBase(s, 4) - r1 = unbox(int64, r0) + r1 = unbox(i64, r0) return r1 def C.__int__(self): self :: __main__.C @@ -1891,7 +1900,7 @@ L0: return 5 def instance_to_i64(c): c :: __main__.C - r0 :: int64 + r0 :: i64 L0: r0 = c.__int__() return r0 @@ -1900,10 +1909,10 @@ def float_to_i64(x): r0 :: int r1 :: native_int r2 :: bit - r3, r4 :: int64 + r3, r4 :: i64 r5 :: ptr r6 :: c_ptr - r7 :: int64 + r7 :: i64 L0: r0 = CPyTagged_FromFloat(x) r1 = r0 & 1 @@ -1933,17 +1942,17 @@ def float_to_i64(x): r0 :: int r1 :: native_int r2 :: bit - r3, r4, r5 :: 
int64 + r3, r4, r5 :: i64 r6 :: ptr r7 :: c_ptr - r8 :: int64 + r8 :: i64 L0: r0 = CPyTagged_FromFloat(x) r1 = r0 & 1 r2 = r1 == 0 if r2 goto L1 else goto L2 :: bool L1: - r3 = extend signed r0: builtins.int to int64 + r3 = extend signed r0: builtins.int to i64 r4 = r3 >> 1 r5 = r4 goto L3 @@ -1963,7 +1972,7 @@ def i64_to_float(x: i64) -> float: return float(x) [out] def i64_to_float(x): - x :: int64 + x :: i64 r0, r1 :: bit r2, r3, r4 :: int r5 :: float @@ -1991,7 +2000,7 @@ def i64_to_float(x: i64) -> float: return float(x) [out] def i64_to_float(x): - x :: int64 + x :: i64 r0, r1 :: bit r2, r3 :: int r4 :: native_int @@ -2008,7 +2017,7 @@ L2: r3 = r2 goto L4 L3: - r4 = truncate x: int64 to native_int + r4 = truncate x: i64 to native_int r5 = r4 << 1 r3 = r5 L4: @@ -2033,22 +2042,22 @@ def narrow2(x: Union[C, i64]) -> i64: return x.a [out] def narrow1(x): - x :: union[__main__.C, int64] + x :: union[__main__.C, i64] r0 :: object - r1 :: int32 + r1 :: i32 r2 :: bit r3 :: bool - r4 :: int64 + r4 :: i64 r5 :: __main__.C - r6 :: int64 + r6 :: i64 L0: r0 = load_address PyLong_Type r1 = PyObject_IsInstance(x, r0) r2 = r1 >= 0 :: signed - r3 = truncate r1: int32 to builtins.bool + r3 = truncate r1: i32 to builtins.bool if r3 goto L1 else goto L2 :: bool L1: - r4 = unbox(int64, x) + r4 = unbox(i64, x) return r4 L2: r5 = borrow cast(__main__.C, x) @@ -2056,22 +2065,22 @@ L2: keep_alive x return r6 def narrow2(x): - x :: union[__main__.C, int64] + x :: union[__main__.C, i64] r0 :: object - r1 :: int32 + r1 :: i32 r2 :: bit r3 :: bool - r4 :: int64 + r4 :: i64 r5 :: __main__.C - r6 :: int64 + r6 :: i64 L0: r0 = load_address PyLong_Type r1 = PyObject_IsInstance(x, r0) r2 = r1 >= 0 :: signed - r3 = truncate r1: int32 to builtins.bool + r3 = truncate r1: i32 to builtins.bool if r3 goto L1 else goto L2 :: bool L1: - r4 = unbox(int64, x) + r4 = unbox(i64, x) return r4 L2: r5 = borrow cast(__main__.C, x) @@ -2090,17 +2099,17 @@ def g(n: int) -> None: t: tuple[i64, i64] = (1, n) [out] def f(t): - t :: tuple[int, int64, int] + t :: tuple[int, i64, int] r0 :: int - r1 :: int64 + r1 :: i64 r2 :: int r3 :: native_int r4 :: bit - r5, r6 :: int64 + r5, r6 :: i64 r7 :: ptr r8 :: c_ptr - r9 :: int64 - r10, tt :: tuple[int, int64, int64] + r9 :: i64 + r10, tt :: tuple[int, i64, i64] L0: r0 = t[0] r1 = t[1] @@ -2128,11 +2137,11 @@ def g(n): r1 :: int r2 :: native_int r3 :: bit - r4, r5 :: int64 + r4, r5 :: i64 r6 :: ptr r7 :: c_ptr - r8 :: int64 - r9, t :: tuple[int64, int64] + r8 :: i64 + r9, t :: tuple[i64, i64] L0: r0 = (2, n) r1 = r0[1] diff --git a/mypyc/test-data/irbuild-isinstance.test b/mypyc/test-data/irbuild-isinstance.test index 6bb92d0a947e..78da2e9c1e19 100644 --- a/mypyc/test-data/irbuild-isinstance.test +++ b/mypyc/test-data/irbuild-isinstance.test @@ -5,14 +5,14 @@ def is_int(value: object) -> bool: [out] def is_int(value): value, r0 :: object - r1 :: int32 + r1 :: i32 r2 :: bit r3 :: bool L0: r0 = load_address PyLong_Type r1 = PyObject_IsInstance(value, r0) r2 = r1 >= 0 :: signed - r3 = truncate r1: int32 to builtins.bool + r3 = truncate r1: i32 to builtins.bool return r3 [case testIsinstanceNotBool1] @@ -22,14 +22,14 @@ def is_not_bool(value: object) -> bool: [out] def is_not_bool(value): value, r0 :: object - r1 :: int32 + r1 :: i32 r2 :: bit r3, r4 :: bool L0: r0 = load_address PyBool_Type r1 = PyObject_IsInstance(value, r0) r2 = r1 >= 0 :: signed - r3 = truncate r1: int32 to builtins.bool + r3 = truncate r1: i32 to builtins.bool r4 = r3 ^ 1 return r4 @@ -42,18 +42,18 @@ def 
is_not_bool_and_is_int(value: object) -> bool: [out] def is_not_bool_and_is_int(value): value, r0 :: object - r1 :: int32 + r1 :: i32 r2 :: bit r3, r4 :: bool r5 :: object - r6 :: int32 + r6 :: i32 r7 :: bit r8, r9 :: bool L0: r0 = load_address PyLong_Type r1 = PyObject_IsInstance(value, r0) r2 = r1 >= 0 :: signed - r3 = truncate r1: int32 to builtins.bool + r3 = truncate r1: i32 to builtins.bool if r3 goto L2 else goto L1 :: bool L1: r4 = r3 @@ -62,7 +62,7 @@ L2: r5 = load_address PyBool_Type r6 = PyObject_IsInstance(value, r5) r7 = r6 >= 0 :: signed - r8 = truncate r6: int32 to builtins.bool + r8 = truncate r6: i32 to builtins.bool r9 = r8 ^ 1 r4 = r9 L3: diff --git a/mypyc/test-data/irbuild-lists.test b/mypyc/test-data/irbuild-lists.test index eaeff9432446..80c4fe5fcd5e 100644 --- a/mypyc/test-data/irbuild-lists.test +++ b/mypyc/test-data/irbuild-lists.test @@ -197,7 +197,7 @@ def f(a, x): a :: list x :: int r0 :: object - r1 :: int32 + r1 :: i32 r2 :: bit L0: r0 = box(int, x) @@ -255,7 +255,7 @@ def f(x, y): r1, r2 :: object r3, r4, r5 :: ptr r6, r7, r8 :: object - r9 :: int32 + r9 :: i32 r10 :: bit L0: r0 = PyList_New(2) @@ -283,14 +283,14 @@ def f(x, y): x :: list y :: int r0 :: object - r1 :: int32 + r1 :: i32 r2 :: bit r3 :: bool L0: r0 = box(int, y) r1 = PySequence_Contains(x, r0) r2 = r1 >= 0 :: signed - r3 = truncate r1: int32 to builtins.bool + r3 = truncate r1: i32 to builtins.bool return r3 [case testListInsert] @@ -302,7 +302,7 @@ def f(x, y): x :: list y :: int r0 :: object - r1 :: int32 + r1 :: i32 r2 :: bit L0: r0 = box(int, y) @@ -462,7 +462,7 @@ def nested_union(a: Union[List[str], List[Optional[str]]]) -> None: def narrow(a): a :: union[list, int] r0 :: object - r1 :: int32 + r1 :: i32 r2 :: bit r3 :: bool r4 :: list @@ -474,7 +474,7 @@ L0: r0 = load_address PyList_Type r1 = PyObject_IsInstance(a, r0) r2 = r1 >= 0 :: signed - r3 = truncate r1: int32 to builtins.bool + r3 = truncate r1: i32 to builtins.bool if r3 goto L1 else goto L2 :: bool L1: r4 = borrow cast(list, a) diff --git a/mypyc/test-data/irbuild-match.test b/mypyc/test-data/irbuild-match.test index 2afe3d862f51..a078ae0defdb 100644 --- a/mypyc/test-data/irbuild-match.test +++ b/mypyc/test-data/irbuild-match.test @@ -608,22 +608,22 @@ L0: return 1 def f(x): x, r0 :: object - r1 :: int32 + r1 :: i32 r2 :: bit r3 :: bool r4 :: str r5, r6, r7 :: object - r8 :: int32 + r8 :: i32 r9 :: bit r10 :: bool r11 :: str r12, r13, r14 :: object - r15 :: int32 + r15 :: i32 r16 :: bit r17 :: bool r18 :: str r19, r20, r21 :: object - r22 :: int32 + r22 :: i32 r23 :: bit r24 :: bool r25 :: str @@ -637,7 +637,7 @@ L0: r0 = __main__.Position :: type r1 = PyObject_IsInstance(x, r0) r2 = r1 >= 0 :: signed - r3 = truncate r1: int32 to builtins.bool + r3 = truncate r1: i32 to builtins.bool if r3 goto L1 else goto L5 :: bool L1: r4 = 'x' @@ -646,7 +646,7 @@ L1: r7 = PyObject_RichCompare(r5, r6, 2) r8 = PyObject_IsTrue(r7) r9 = r8 >= 0 :: signed - r10 = truncate r8: int32 to builtins.bool + r10 = truncate r8: i32 to builtins.bool if r10 goto L2 else goto L5 :: bool L2: r11 = 'y' @@ -655,7 +655,7 @@ L2: r14 = PyObject_RichCompare(r12, r13, 2) r15 = PyObject_IsTrue(r14) r16 = r15 >= 0 :: signed - r17 = truncate r15: int32 to builtins.bool + r17 = truncate r15: i32 to builtins.bool if r17 goto L3 else goto L5 :: bool L3: r18 = 'z' @@ -664,7 +664,7 @@ L3: r21 = PyObject_RichCompare(r19, r20, 2) r22 = PyObject_IsTrue(r21) r23 = r22 >= 0 :: signed - r24 = truncate r22: int32 to builtins.bool + r24 = truncate r22: i32 to builtins.bool if r24 
goto L4 else goto L5 :: bool L4: r25 = 'matched' @@ -693,22 +693,22 @@ def f(x): [out] def f(x): x, r0 :: object - r1 :: int32 + r1 :: i32 r2 :: bit r3 :: bool r4 :: str r5, r6, r7 :: object - r8 :: int32 + r8 :: i32 r9 :: bit r10 :: bool r11 :: str r12, r13, r14 :: object - r15 :: int32 + r15 :: i32 r16 :: bit r17 :: bool r18 :: str r19, r20, r21 :: object - r22 :: int32 + r22 :: i32 r23 :: bit r24 :: bool r25 :: str @@ -722,7 +722,7 @@ L0: r0 = __main__.Position :: type r1 = PyObject_IsInstance(x, r0) r2 = r1 >= 0 :: signed - r3 = truncate r1: int32 to builtins.bool + r3 = truncate r1: i32 to builtins.bool if r3 goto L1 else goto L5 :: bool L1: r4 = 'z' @@ -731,7 +731,7 @@ L1: r7 = PyObject_RichCompare(r5, r6, 2) r8 = PyObject_IsTrue(r7) r9 = r8 >= 0 :: signed - r10 = truncate r8: int32 to builtins.bool + r10 = truncate r8: i32 to builtins.bool if r10 goto L2 else goto L5 :: bool L2: r11 = 'y' @@ -740,7 +740,7 @@ L2: r14 = PyObject_RichCompare(r12, r13, 2) r15 = PyObject_IsTrue(r14) r16 = r15 >= 0 :: signed - r17 = truncate r15: int32 to builtins.bool + r17 = truncate r15: i32 to builtins.bool if r17 goto L3 else goto L5 :: bool L3: r18 = 'x' @@ -749,7 +749,7 @@ L3: r21 = PyObject_RichCompare(r19, r20, 2) r22 = PyObject_IsTrue(r21) r23 = r22 >= 0 :: signed - r24 = truncate r22: int32 to builtins.bool + r24 = truncate r22: i32 to builtins.bool if r24 goto L4 else goto L5 :: bool L4: r25 = 'matched' @@ -776,16 +776,16 @@ def f(x): [out] def f(x): x, r0 :: object - r1 :: int32 + r1 :: i32 r2 :: bit r3 :: bool r4 :: str r5, r6, r7 :: object - r8 :: int32 + r8 :: i32 r9 :: bit r10 :: bool r11, r12 :: object - r13 :: int32 + r13 :: i32 r14 :: bit r15 :: bool r16 :: str @@ -799,7 +799,7 @@ L0: r0 = __main__.C :: type r1 = PyObject_IsInstance(x, r0) r2 = r1 >= 0 :: signed - r3 = truncate r1: int32 to builtins.bool + r3 = truncate r1: i32 to builtins.bool if r3 goto L1 else goto L5 :: bool L1: r4 = 'num' @@ -808,14 +808,14 @@ L1: r7 = PyObject_RichCompare(r5, r6, 2) r8 = PyObject_IsTrue(r7) r9 = r8 >= 0 :: signed - r10 = truncate r8: int32 to builtins.bool + r10 = truncate r8: i32 to builtins.bool if r10 goto L4 else goto L2 :: bool L2: r11 = object 2 r12 = PyObject_RichCompare(r5, r11, 2) r13 = PyObject_IsTrue(r12) r14 = r13 >= 0 :: signed - r15 = truncate r13: int32 to builtins.bool + r15 = truncate r13: i32 to builtins.bool if r15 goto L4 else goto L3 :: bool L3: goto L5 @@ -856,18 +856,18 @@ L0: return 1 def f(x): x, r0 :: object - r1 :: int32 + r1 :: i32 r2 :: bit r3 :: bool r4, y :: __main__.C r5 :: str r6, r7, r8 :: object - r9 :: int32 + r9 :: i32 r10 :: bit r11 :: bool r12 :: str r13, r14, r15 :: object - r16 :: int32 + r16 :: i32 r17 :: bit r18 :: bool r19 :: str @@ -881,7 +881,7 @@ L0: r0 = __main__.C :: type r1 = PyObject_IsInstance(x, r0) r2 = r1 >= 0 :: signed - r3 = truncate r1: int32 to builtins.bool + r3 = truncate r1: i32 to builtins.bool if r3 goto L1 else goto L5 :: bool L1: r4 = cast(__main__.C, x) @@ -893,7 +893,7 @@ L2: r8 = PyObject_RichCompare(r6, r7, 2) r9 = PyObject_IsTrue(r8) r10 = r9 >= 0 :: signed - r11 = truncate r9: int32 to builtins.bool + r11 = truncate r9: i32 to builtins.bool if r11 goto L3 else goto L5 :: bool L3: r12 = 'b' @@ -902,7 +902,7 @@ L3: r15 = PyObject_RichCompare(r13, r14, 2) r16 = PyObject_IsTrue(r15) r17 = r16 >= 0 :: signed - r18 = truncate r16: int32 to builtins.bool + r18 = truncate r16: i32 to builtins.bool if r18 goto L4 else goto L5 :: bool L4: r19 = 'matched' @@ -940,7 +940,7 @@ L0: return 1 def f(x): x, r0 :: object - r1 :: int32 + r1 :: 
i32 r2 :: bit r3 :: bool r4 :: str @@ -957,7 +957,7 @@ L0: r0 = __main__.C :: type r1 = PyObject_IsInstance(x, r0) r2 = r1 >= 0 :: signed - r3 = truncate r1: int32 to builtins.bool + r3 = truncate r1: i32 to builtins.bool if r3 goto L1 else goto L3 :: bool L1: r4 = 'x' @@ -986,7 +986,7 @@ def f(x): [out] def f(x): x :: object - r0 :: int32 + r0 :: i32 r1 :: bit r2 :: str r3 :: object @@ -1021,15 +1021,15 @@ def f(x): [out] def f(x): x :: object - r0 :: int32 + r0 :: i32 r1 :: bit r2 :: str - r3 :: int32 + r3 :: i32 r4 :: bit r5 :: object r6 :: str r7 :: object - r8 :: int32 + r8 :: i32 r9 :: bit r10 :: bool r11 :: str @@ -1054,7 +1054,7 @@ L2: r7 = PyObject_RichCompare(r5, r6, 2) r8 = PyObject_IsTrue(r7) r9 = r8 >= 0 :: signed - r10 = truncate r8: int32 to builtins.bool + r10 = truncate r8: i32 to builtins.bool if r10 goto L3 else goto L4 :: bool L3: r11 = 'matched' @@ -1078,7 +1078,7 @@ def f(x): [out] def f(x): x :: object - r0 :: int32 + r0 :: i32 r1 :: bit r2, rest :: dict r3 :: str @@ -1117,19 +1117,19 @@ def f(x): [out] def f(x): x :: object - r0 :: int32 + r0 :: i32 r1 :: bit r2 :: str - r3 :: int32 + r3 :: i32 r4 :: bit r5 :: object r6 :: str r7 :: object - r8 :: int32 + r8 :: i32 r9 :: bit r10 :: bool r11, rest :: dict - r12 :: int32 + r12 :: i32 r13 :: bit r14 :: str r15 :: object @@ -1153,7 +1153,7 @@ L2: r7 = PyObject_RichCompare(r5, r6, 2) r8 = PyObject_IsTrue(r7) r9 = r8 >= 0 :: signed - r10 = truncate r8: int32 to builtins.bool + r10 = truncate r8: i32 to builtins.bool if r10 goto L3 else goto L5 :: bool L3: r11 = CPyDict_FromAny(x) @@ -1182,7 +1182,7 @@ def f(x): [out] def f(x): x :: object - r0 :: int32 + r0 :: i32 r1 :: bit r2 :: native_int r3, r4 :: bit @@ -1224,16 +1224,16 @@ def f(x): [out] def f(x): x :: object - r0 :: int32 + r0 :: i32 r1 :: bit r2 :: native_int r3, r4 :: bit r5, r6, r7 :: object - r8 :: int32 + r8 :: i32 r9 :: bit r10 :: bool r11, r12, r13 :: object - r14 :: int32 + r14 :: i32 r15 :: bit r16 :: bool r17 :: str @@ -1258,7 +1258,7 @@ L2: r7 = PyObject_RichCompare(r5, r6, 2) r8 = PyObject_IsTrue(r7) r9 = r8 >= 0 :: signed - r10 = truncate r8: int32 to builtins.bool + r10 = truncate r8: i32 to builtins.bool if r10 goto L3 else goto L5 :: bool L3: r11 = PySequence_GetItem(x, 1) @@ -1266,7 +1266,7 @@ L3: r13 = PyObject_RichCompare(r11, r12, 2) r14 = PyObject_IsTrue(r13) r15 = r14 >= 0 :: signed - r16 = truncate r14: int32 to builtins.bool + r16 = truncate r14: i32 to builtins.bool if r16 goto L4 else goto L5 :: bool L4: r17 = 'matched' @@ -1290,16 +1290,16 @@ def f(x): [out] def f(x): x :: object - r0 :: int32 + r0 :: i32 r1 :: bit r2 :: native_int r3, r4 :: bit r5, r6, r7 :: object - r8 :: int32 + r8 :: i32 r9 :: bit r10 :: bool r11, r12, r13 :: object - r14 :: int32 + r14 :: i32 r15 :: bit r16 :: bool r17 :: str @@ -1324,7 +1324,7 @@ L2: r7 = PyObject_RichCompare(r5, r6, 2) r8 = PyObject_IsTrue(r7) r9 = r8 >= 0 :: signed - r10 = truncate r8: int32 to builtins.bool + r10 = truncate r8: i32 to builtins.bool if r10 goto L3 else goto L5 :: bool L3: r11 = PySequence_GetItem(x, 1) @@ -1332,7 +1332,7 @@ L3: r13 = PyObject_RichCompare(r11, r12, 2) r14 = PyObject_IsTrue(r13) r15 = r14 >= 0 :: signed - r16 = truncate r14: int32 to builtins.bool + r16 = truncate r14: i32 to builtins.bool if r16 goto L4 else goto L5 :: bool L4: r17 = 'matched' @@ -1356,16 +1356,16 @@ def f(x): [out] def f(x): x :: object - r0 :: int32 + r0 :: i32 r1 :: bit r2 :: native_int r3, r4 :: bit r5, r6, r7 :: object - r8 :: int32 + r8 :: i32 r9 :: bit r10 :: bool r11, r12, r13 :: object - 
r14 :: int32 + r14 :: i32 r15 :: bit r16 :: bool r17 :: native_int @@ -1392,7 +1392,7 @@ L2: r7 = PyObject_RichCompare(r5, r6, 2) r8 = PyObject_IsTrue(r7) r9 = r8 >= 0 :: signed - r10 = truncate r8: int32 to builtins.bool + r10 = truncate r8: i32 to builtins.bool if r10 goto L3 else goto L6 :: bool L3: r11 = PySequence_GetItem(x, 1) @@ -1400,7 +1400,7 @@ L3: r13 = PyObject_RichCompare(r11, r12, 2) r14 = PyObject_IsTrue(r13) r15 = r14 >= 0 :: signed - r16 = truncate r14: int32 to builtins.bool + r16 = truncate r14: i32 to builtins.bool if r16 goto L4 else goto L6 :: bool L4: r17 = r2 - 0 @@ -1428,21 +1428,21 @@ def f(x): [out] def f(x): x :: object - r0 :: int32 + r0 :: i32 r1 :: bit r2 :: native_int r3, r4 :: bit r5 :: object r6 :: str r7 :: object - r8 :: int32 + r8 :: i32 r9 :: bit r10 :: bool r11 :: native_int r12 :: object r13 :: str r14 :: object - r15 :: int32 + r15 :: i32 r16 :: bit r17 :: bool r18 :: native_int @@ -1469,7 +1469,7 @@ L2: r7 = PyObject_RichCompare(r5, r6, 2) r8 = PyObject_IsTrue(r7) r9 = r8 >= 0 :: signed - r10 = truncate r8: int32 to builtins.bool + r10 = truncate r8: i32 to builtins.bool if r10 goto L3 else goto L6 :: bool L3: r11 = r2 - 1 @@ -1478,7 +1478,7 @@ L3: r14 = PyObject_RichCompare(r12, r13, 2) r15 = PyObject_IsTrue(r14) r16 = r15 >= 0 :: signed - r17 = truncate r15: int32 to builtins.bool + r17 = truncate r15: i32 to builtins.bool if r17 goto L4 else goto L6 :: bool L4: r18 = r2 - 1 @@ -1506,18 +1506,18 @@ def f(x): [out] def f(x): x :: object - r0 :: int32 + r0 :: i32 r1 :: bit r2 :: native_int r3, r4 :: bit r5 :: native_int r6, r7, r8 :: object - r9 :: int32 + r9 :: i32 r10 :: bit r11 :: bool r12 :: native_int r13, r14, r15 :: object - r16 :: int32 + r16 :: i32 r17 :: bit r18 :: bool r19 :: native_int @@ -1545,7 +1545,7 @@ L2: r8 = PyObject_RichCompare(r6, r7, 2) r9 = PyObject_IsTrue(r8) r10 = r9 >= 0 :: signed - r11 = truncate r9: int32 to builtins.bool + r11 = truncate r9: i32 to builtins.bool if r11 goto L3 else goto L6 :: bool L3: r12 = r2 - 1 @@ -1554,7 +1554,7 @@ L3: r15 = PyObject_RichCompare(r13, r14, 2) r16 = PyObject_IsTrue(r15) r17 = r16 >= 0 :: signed - r18 = truncate r16: int32 to builtins.bool + r18 = truncate r16: i32 to builtins.bool if r18 goto L4 else goto L6 :: bool L4: r19 = r2 - 2 @@ -1620,7 +1620,7 @@ def f(x): [out] def f(x): x :: object - r0 :: int32 + r0 :: i32 r1 :: bit r2 :: native_int r3, r4 :: bit @@ -1674,7 +1674,7 @@ def f(x: A | int) -> int: def f(x): x :: union[__main__.A, int] r0 :: object - r1 :: int32 + r1 :: i32 r2 :: bit r3 :: bool r4 :: str @@ -1687,7 +1687,7 @@ L0: r0 = __main__.A :: type r1 = PyObject_IsInstance(x, r0) r2 = r1 >= 0 :: signed - r3 = truncate r1: int32 to builtins.bool + r3 = truncate r1: i32 to builtins.bool if r3 goto L1 else goto L3 :: bool L1: r4 = 'a' diff --git a/mypyc/test-data/irbuild-optional.test b/mypyc/test-data/irbuild-optional.test index e98cf1b19e2e..e89018a727da 100644 --- a/mypyc/test-data/irbuild-optional.test +++ b/mypyc/test-data/irbuild-optional.test @@ -91,7 +91,7 @@ def f(x): r0 :: object r1 :: bit r2 :: __main__.A - r3 :: int32 + r3 :: i32 r4 :: bit r5 :: bool L0: @@ -102,7 +102,7 @@ L1: r2 = cast(__main__.A, x) r3 = PyObject_IsTrue(r2) r4 = r3 >= 0 :: signed - r5 = truncate r3: int32 to builtins.bool + r5 = truncate r3: i32 to builtins.bool if r5 goto L2 else goto L3 :: bool L2: return 2 @@ -252,7 +252,7 @@ def f(x: Union[int, A]) -> int: def f(x): x :: union[int, __main__.A] r0 :: object - r1 :: int32 + r1 :: i32 r2 :: bit r3 :: bool r4, r5 :: int @@ -262,7 +262,7 @@ L0: 
r0 = load_address PyLong_Type r1 = PyObject_IsInstance(x, r0) r2 = r1 >= 0 :: signed - r3 = truncate r1: int32 to builtins.bool + r3 = truncate r1: i32 to builtins.bool if r3 goto L1 else goto L2 :: bool L1: r4 = unbox(int, x) @@ -337,7 +337,7 @@ L3: def set(o, s): o :: union[__main__.A, __main__.B] s, r0 :: str - r1 :: int32 + r1 :: i32 r2 :: bit L0: r0 = 'a' diff --git a/mypyc/test-data/irbuild-set.test b/mypyc/test-data/irbuild-set.test index b6c551124769..a56ebe3438fa 100644 --- a/mypyc/test-data/irbuild-set.test +++ b/mypyc/test-data/irbuild-set.test @@ -6,13 +6,13 @@ def f() -> Set[int]: def f(): r0 :: set r1 :: object - r2 :: int32 + r2 :: i32 r3 :: bit r4 :: object - r5 :: int32 + r5 :: i32 r6 :: bit r7 :: object - r8 :: int32 + r8 :: i32 r9 :: bit L0: r0 = PySet_New(0) @@ -90,7 +90,7 @@ def test1(): r14 :: object r15, x, r16 :: int r17 :: object - r18 :: int32 + r18 :: i32 r19 :: bit r20 :: short_int a :: set @@ -138,7 +138,7 @@ def test2(): r2, r3, r4 :: object r5, x, r6 :: int r7 :: object - r8 :: int32 + r8 :: i32 r9, r10 :: bit b :: set L0: @@ -179,7 +179,7 @@ def test3(): r15 :: object r16, x, r17 :: int r18 :: object - r19 :: int32 + r19 :: i32 r20, r21, r22 :: bit c :: set L0: @@ -225,7 +225,7 @@ def test4(): r2 :: bit r3 :: int r4 :: object - r5 :: int32 + r5 :: i32 r6 :: bit r7 :: short_int d :: set @@ -256,7 +256,7 @@ def test5(): r2 :: bit r3 :: int r4 :: object - r5 :: int32 + r5 :: i32 r6 :: bit r7 :: short_int e :: set @@ -331,18 +331,18 @@ def test(): r29 :: bit r30 :: int r31 :: object - r32 :: int32 + r32 :: i32 r33 :: bit r34 :: short_int r35, r36, r37 :: object r38, y, r39 :: int r40 :: object - r41 :: int32 + r41 :: i32 r42, r43 :: bit r44, r45, r46 :: object r47, x, r48 :: int r49 :: object - r50 :: int32 + r50 :: i32 r51, r52 :: bit a :: set L0: @@ -452,13 +452,13 @@ def f() -> int: def f(): r0 :: set r1 :: object - r2 :: int32 + r2 :: i32 r3 :: bit r4 :: object - r5 :: int32 + r5 :: i32 r6 :: bit r7 :: object - r8 :: int32 + r8 :: i32 r9 :: bit r10 :: ptr r11 :: native_int @@ -489,14 +489,14 @@ def f() -> bool: def f(): r0 :: set r1 :: object - r2 :: int32 + r2 :: i32 r3 :: bit r4 :: object - r5 :: int32 + r5 :: i32 r6 :: bit x :: set r7 :: object - r8 :: int32 + r8 :: i32 r9 :: bit r10 :: bool L0: @@ -511,7 +511,7 @@ L0: r7 = object 5 r8 = PySet_Contains(x, r7) r9 = r8 >= 0 :: signed - r10 = truncate r8: int32 to builtins.bool + r10 = truncate r8: i32 to builtins.bool return r10 [case testSetRemove] @@ -542,7 +542,7 @@ def f() -> Set[int]: def f(): r0, x :: set r1 :: object - r2 :: int32 + r2 :: i32 r3 :: bit L0: r0 = PySet_New(0) @@ -562,7 +562,7 @@ def f() -> Set[int]: def f(): r0, x :: set r1 :: object - r2 :: int32 + r2 :: i32 r3 :: bit L0: r0 = PySet_New(0) @@ -581,7 +581,7 @@ def f() -> Set[int]: [out] def f(): r0, x :: set - r1 :: int32 + r1 :: i32 r2 :: bit L0: r0 = PySet_New(0) @@ -612,7 +612,7 @@ def update(s: Set[int], x: List[int]) -> None: def update(s, x): s :: set x :: list - r0 :: int32 + r0 :: i32 r1 :: bit L0: r0 = _PySet_Update(s, x) @@ -627,17 +627,17 @@ def f(x: Set[int], y: Set[int]) -> Set[int]: def f(x, y): x, y, r0 :: set r1 :: object - r2 :: int32 + r2 :: i32 r3 :: bit r4 :: object - r5 :: int32 + r5 :: i32 r6 :: bit - r7 :: int32 + r7 :: i32 r8 :: bit - r9 :: int32 + r9 :: i32 r10 :: bit r11 :: object - r12 :: int32 + r12 :: i32 r13 :: bit L0: r0 = PySet_New(0) @@ -672,14 +672,14 @@ def not_precomputed_nested_set(i: int) -> bool: def precomputed(i): i :: object r0 :: set - r1 :: int32 + r1 :: i32 r2 :: bit r3 :: bool L0: r0 = 
frozenset({(), (None, (27,)), 1, 2.0, 3, 4j, False, b'bar', 'daylily', 'foo'}) r1 = PySet_Contains(r0, i) r2 = r1 >= 0 :: signed - r3 = truncate r1: int32 to builtins.bool + r3 = truncate r1: i32 to builtins.bool return r3 def not_precomputed_non_final_name(i): i :: int @@ -689,10 +689,10 @@ def not_precomputed_non_final_name(i): r3 :: int r4 :: set r5 :: object - r6 :: int32 + r6 :: i32 r7 :: bit r8 :: object - r9 :: int32 + r9 :: i32 r10 :: bit r11 :: bool L0: @@ -707,23 +707,23 @@ L0: r8 = box(int, i) r9 = PySet_Contains(r4, r8) r10 = r9 >= 0 :: signed - r11 = truncate r9: int32 to builtins.bool + r11 = truncate r9: i32 to builtins.bool return r11 def not_precomputed_nested_set(i): i :: int r0 :: set r1 :: object - r2 :: int32 + r2 :: i32 r3 :: bit r4 :: object r5 :: set - r6 :: int32 + r6 :: i32 r7 :: bit r8 :: object - r9 :: int32 + r9 :: i32 r10 :: bit r11 :: object - r12 :: int32 + r12 :: i32 r13 :: bit r14 :: bool L0: @@ -741,7 +741,7 @@ L0: r11 = box(int, i) r12 = PySet_Contains(r5, r11) r13 = r12 >= 0 :: signed - r14 = truncate r12: int32 to builtins.bool + r14 = truncate r12: i32 to builtins.bool return r14 [case testForSetLiteral] @@ -809,7 +809,7 @@ def not_precomputed(): r3 :: int r4 :: set r5 :: object - r6 :: int32 + r6 :: i32 r7 :: bit r8, r9 :: object r10, not_optimized :: int diff --git a/mypyc/test-data/irbuild-singledispatch.test b/mypyc/test-data/irbuild-singledispatch.test index 4e18bbf50d4e..10970a385966 100644 --- a/mypyc/test-data/irbuild-singledispatch.test +++ b/mypyc/test-data/irbuild-singledispatch.test @@ -16,7 +16,7 @@ def f_obj.__init__(__mypyc_self__): __mypyc_self__ :: __main__.f_obj r0, r1 :: dict r2 :: str - r3 :: int32 + r3 :: i32 r4 :: bit L0: r0 = PyDict_New() @@ -39,7 +39,7 @@ def f_obj.__call__(__mypyc_self__, arg): r9 :: object r10 :: dict r11 :: object - r12 :: int32 + r12 :: i32 r13 :: bit r14 :: object r15 :: ptr @@ -148,7 +148,7 @@ def f_obj.__init__(__mypyc_self__): __mypyc_self__ :: __main__.f_obj r0, r1 :: dict r2 :: str - r3 :: int32 + r3 :: i32 r4 :: bit L0: r0 = PyDict_New() @@ -171,7 +171,7 @@ def f_obj.__call__(__mypyc_self__, x): r9 :: object r10 :: dict r11 :: object - r12 :: int32 + r12 :: i32 r13 :: bit r14 :: object r15 :: ptr diff --git a/mypyc/test-data/irbuild-statements.test b/mypyc/test-data/irbuild-statements.test index 090c7ed9f3df..062abd47d163 100644 --- a/mypyc/test-data/irbuild-statements.test +++ b/mypyc/test-data/irbuild-statements.test @@ -651,11 +651,11 @@ def f(l: List[int], t: Tuple[int, ...]) -> None: def f(l, t): l :: list t :: tuple - r0 :: int32 + r0 :: i32 r1 :: bit r2, r3, x :: object r4, y :: int - r5 :: int32 + r5 :: i32 r6 :: bit r7, r8 :: object r9 :: int @@ -701,13 +701,13 @@ L2: return 2 def literal_msg(x): x :: object - r0 :: int32 + r0 :: i32 r1 :: bit r2, r3 :: bool L0: r0 = PyObject_IsTrue(x) r1 = r0 >= 0 :: signed - r2 = truncate r0: int32 to builtins.bool + r2 = truncate r0: i32 to builtins.bool if r2 goto L2 else goto L1 :: bool L1: r3 = raise AssertionError('message') @@ -756,7 +756,7 @@ def delList(): r3, r4, r5 :: ptr l :: list r6 :: object - r7 :: int32 + r7 :: i32 r8 :: bit L0: r0 = PyList_New(2) @@ -779,13 +779,13 @@ def delListMultiple(): r8, r9, r10, r11, r12, r13, r14, r15 :: ptr l :: list r16 :: object - r17 :: int32 + r17 :: i32 r18 :: bit r19 :: object - r20 :: int32 + r20 :: i32 r21 :: bit r22 :: object - r23 :: int32 + r23 :: i32 r24 :: bit L0: r0 = PyList_New(7) @@ -837,7 +837,7 @@ def delDict(): r2, r3 :: object r4, d :: dict r5 :: str - r6 :: int32 + r6 :: i32 r7 :: bit L0: r0 = 
'one' @@ -855,9 +855,9 @@ def delDictMultiple(): r4, r5, r6, r7 :: object r8, d :: dict r9, r10 :: str - r11 :: int32 + r11 :: i32 r12 :: bit - r13 :: int32 + r13 :: i32 r14 :: bit L0: r0 = 'one' @@ -901,7 +901,7 @@ L0: def delAttribute(): r0, dummy :: __main__.Dummy r1 :: str - r2 :: int32 + r2 :: i32 r3 :: bit L0: r0 = Dummy(2, 4) @@ -913,10 +913,10 @@ L0: def delAttributeMultiple(): r0, dummy :: __main__.Dummy r1 :: str - r2 :: int32 + r2 :: i32 r3 :: bit r4 :: str - r5 :: int32 + r5 :: i32 r6 :: bit L0: r0 = Dummy(2, 4) @@ -1029,7 +1029,7 @@ def f(a, b): r6, r7 :: object r8, x :: int r9, y :: bool - r10 :: int32 + r10 :: i32 r11 :: bit r12 :: bool r13 :: short_int @@ -1055,7 +1055,7 @@ L3: y = r9 r10 = PyObject_IsTrue(b) r11 = r10 >= 0 :: signed - r12 = truncate r10: int32 to builtins.bool + r12 = truncate r10: i32 to builtins.bool if r12 goto L4 else goto L5 :: bool L4: x = 2 diff --git a/mypyc/test-data/irbuild-str.test b/mypyc/test-data/irbuild-str.test index 63be7250ebd1..9851e0f4fb24 100644 --- a/mypyc/test-data/irbuild-str.test +++ b/mypyc/test-data/irbuild-str.test @@ -65,7 +65,7 @@ def neq(x: str, y: str) -> bool: [out] def eq(x, y): x, y :: str - r0 :: int32 + r0 :: i32 r1 :: bit r2 :: object r3, r4, r5 :: bit @@ -84,7 +84,7 @@ L3: return r5 def neq(x, y): x, y :: str - r0 :: int32 + r0 :: i32 r1 :: bit r2 :: object r3, r4, r5 :: bit diff --git a/mypyc/test-data/irbuild-try.test b/mypyc/test-data/irbuild-try.test index faf3fa1dbd2f..a5b7b9a55b86 100644 --- a/mypyc/test-data/irbuild-try.test +++ b/mypyc/test-data/irbuild-try.test @@ -337,7 +337,7 @@ def foo(x): r11, r12 :: object r13, r14 :: tuple[object, object, object] r15, r16, r17, r18 :: object - r19 :: int32 + r19 :: i32 r20 :: bit r21 :: bool r22 :: bit @@ -372,7 +372,7 @@ L3: (handler for L2) r18 = PyObject_CallFunctionObjArgs(r3, r0, r15, r16, r17, 0) r19 = PyObject_IsTrue(r18) r20 = r19 >= 0 :: signed - r21 = truncate r19: int32 to builtins.bool + r21 = truncate r19: i32 to builtins.bool if r21 goto L5 else goto L4 :: bool L4: CPy_Reraise() @@ -448,7 +448,7 @@ def foo(x): r9, r10, r11 :: object r12 :: None r13 :: object - r14 :: int32 + r14 :: i32 r15 :: bit r16 :: bool r17 :: bit @@ -478,7 +478,7 @@ L3: (handler for L2) r13 = box(None, r12) r14 = PyObject_IsTrue(r13) r15 = r14 >= 0 :: signed - r16 = truncate r14: int32 to builtins.bool + r16 = truncate r14: i32 to builtins.bool if r16 goto L5 else goto L4 :: bool L4: CPy_Reraise() diff --git a/mypyc/test-data/irbuild-tuple.test b/mypyc/test-data/irbuild-tuple.test index 6a86a6c6781b..a47f3db6a725 100644 --- a/mypyc/test-data/irbuild-tuple.test +++ b/mypyc/test-data/irbuild-tuple.test @@ -103,7 +103,7 @@ def f(x, y): r1, r2 :: object r3, r4, r5 :: ptr r6, r7, r8 :: object - r9 :: int32 + r9 :: i32 r10 :: bit r11 :: tuple L0: diff --git a/mypyc/test-data/irbuild-u8.test b/mypyc/test-data/irbuild-u8.test new file mode 100644 index 000000000000..14f691c9451f --- /dev/null +++ b/mypyc/test-data/irbuild-u8.test @@ -0,0 +1,543 @@ +# Test cases for u8 native ints. Focus on things that are different from i64; no need to +# duplicate all i64 test cases here. 
+ +[case testU8BinaryOp] +from mypy_extensions import u8 + +def add_op(x: u8, y: u8) -> u8: + x = y + x + y = x + 5 + y += x + y += 7 + x = 5 + y + return x +def compare(x: u8, y: u8) -> None: + a = x == y + b = x == 5 + c = x < y + d = x < 5 + e = 5 == x + f = 5 < x +[out] +def add_op(x, y): + x, y, r0, r1, r2, r3, r4 :: u8 +L0: + r0 = y + x + x = r0 + r1 = x + 5 + y = r1 + r2 = y + x + y = r2 + r3 = y + 7 + y = r3 + r4 = 5 + y + x = r4 + return x +def compare(x, y): + x, y :: u8 + r0 :: bit + a :: bool + r1 :: bit + b :: bool + r2 :: bit + c :: bool + r3 :: bit + d :: bool + r4 :: bit + e :: bool + r5 :: bit + f :: bool +L0: + r0 = x == y + a = r0 + r1 = x == 5 + b = r1 + r2 = x < y :: unsigned + c = r2 + r3 = x < 5 :: unsigned + d = r3 + r4 = 5 == x + e = r4 + r5 = 5 < x :: unsigned + f = r5 + return 1 + +[case testU8UnaryOp] +from mypy_extensions import u8 + +def unary(x: u8) -> u8: + y = -x + x = ~y + y = +x + return y +[out] +def unary(x): + x, r0, y, r1 :: u8 +L0: + r0 = 0 - x + y = r0 + r1 = y ^ 255 + x = r1 + y = x + return y + +[case testU8DivisionByConstant] +from mypy_extensions import u8 + +def div_by_constant(x: u8) -> u8: + x = x // 5 + x //= 17 + return x +[out] +def div_by_constant(x): + x, r0, r1 :: u8 +L0: + r0 = x / 5 + x = r0 + r1 = x / 17 + x = r1 + return x + +[case testU8ModByConstant] +from mypy_extensions import u8 + +def mod_by_constant(x: u8) -> u8: + x = x % 5 + x %= 17 + return x +[out] +def mod_by_constant(x): + x, r0, r1 :: u8 +L0: + r0 = x % 5 + x = r0 + r1 = x % 17 + x = r1 + return x + +[case testU8DivModByVariable] +from mypy_extensions import u8 + +def divmod(x: u8, y: u8) -> u8: + a = x // y + return a % y +[out] +def divmod(x, y): + x, y :: u8 + r0 :: bit + r1 :: bool + r2, a :: u8 + r3 :: bit + r4 :: bool + r5 :: u8 +L0: + r0 = y == 0 + if r0 goto L1 else goto L2 :: bool +L1: + r1 = raise ZeroDivisionError('integer division or modulo by zero') + unreachable +L2: + r2 = x / y + a = r2 + r3 = y == 0 + if r3 goto L3 else goto L4 :: bool +L3: + r4 = raise ZeroDivisionError('integer division or modulo by zero') + unreachable +L4: + r5 = a % y + return r5 + +[case testU8BinaryOperationWithOutOfRangeOperand] +from mypy_extensions import u8 + +def out_of_range(x: u8) -> None: + x + (-1) + (-2) + x + x * 256 + -1 < x + x > -5 + x == 1000 + x + 255 # OK + 255 + x # OK +[out] +main:4: error: Value -1 is out of range for "u8" +main:5: error: Value -2 is out of range for "u8" +main:6: error: Value 256 is out of range for "u8" +main:7: error: Value -1 is out of range for "u8" +main:8: error: Value -5 is out of range for "u8" +main:9: error: Value 1000 is out of range for "u8" + +[case testU8DetectMoreOutOfRangeLiterals] +from mypy_extensions import u8 + +def out_of_range() -> None: + a: u8 = 256 + b: u8 = -1 + f(256) + # The following are ok + c: u8 = 0 + d: u8 = 255 + f(0) + f(255) + +def f(x: u8) -> None: pass +[out] +main:4: error: Value 256 is out of range for "u8" +main:5: error: Value -1 is out of range for "u8" +main:6: error: Value 256 is out of range for "u8" + +[case testU8BoxAndUnbox] +from typing import Any +from mypy_extensions import u8 + +def f(x: Any) -> Any: + y: u8 = x + return y +[out] +def f(x): + x :: object + r0, y :: u8 + r1 :: object +L0: + r0 = unbox(u8, x) + y = r0 + r1 = box(u8, y) + return r1 + +[case testU8MixedCompare1] +from mypy_extensions import u8 +def f(x: int, y: u8) -> bool: + return x == y +[out] +def f(x, y): + x :: int + y :: u8 + r0 :: native_int + r1, r2, r3 :: bit + r4 :: native_int + r5, r6 :: u8 + r7 :: bit +L0: + r0 = 
x & 1 + r1 = r0 == 0 + if r1 goto L1 else goto L4 :: bool +L1: + r2 = x < 512 :: signed + if r2 goto L2 else goto L4 :: bool +L2: + r3 = x >= 0 :: signed + if r3 goto L3 else goto L4 :: bool +L3: + r4 = x >> 1 + r5 = truncate r4: native_int to u8 + r6 = r5 + goto L5 +L4: + CPyUInt8_Overflow() + unreachable +L5: + r7 = r6 == y + return r7 + +[case testU8MixedCompare2] +from mypy_extensions import u8 +def f(x: u8, y: int) -> bool: + return x == y +[out] +def f(x, y): + x :: u8 + y :: int + r0 :: native_int + r1, r2, r3 :: bit + r4 :: native_int + r5, r6 :: u8 + r7 :: bit +L0: + r0 = y & 1 + r1 = r0 == 0 + if r1 goto L1 else goto L4 :: bool +L1: + r2 = y < 512 :: signed + if r2 goto L2 else goto L4 :: bool +L2: + r3 = y >= 0 :: signed + if r3 goto L3 else goto L4 :: bool +L3: + r4 = y >> 1 + r5 = truncate r4: native_int to u8 + r6 = r5 + goto L5 +L4: + CPyUInt8_Overflow() + unreachable +L5: + r7 = x == r6 + return r7 + +[case testU8ConvertToInt] +from mypy_extensions import u8 + +def u8_to_int(a: u8) -> int: + return a +[out] +def u8_to_int(a): + a :: u8 + r0 :: native_int + r1 :: int +L0: + r0 = extend a: u8 to native_int + r1 = r0 << 1 + return r1 + +[case testU8OperatorAssignmentMixed] +from mypy_extensions import u8 + +def f(a: u8) -> None: + x = 0 + x += a +[out] +def f(a): + a :: u8 + x :: int + r0 :: native_int + r1, r2, r3 :: bit + r4 :: native_int + r5, r6, r7 :: u8 + r8 :: native_int + r9 :: int +L0: + x = 0 + r0 = x & 1 + r1 = r0 == 0 + if r1 goto L1 else goto L4 :: bool +L1: + r2 = x < 512 :: signed + if r2 goto L2 else goto L4 :: bool +L2: + r3 = x >= 0 :: signed + if r3 goto L3 else goto L4 :: bool +L3: + r4 = x >> 1 + r5 = truncate r4: native_int to u8 + r6 = r5 + goto L5 +L4: + CPyUInt8_Overflow() + unreachable +L5: + r7 = r6 + a + r8 = extend r7: u8 to native_int + r9 = r8 << 1 + x = r9 + return 1 + +[case testU8InitializeFromLiteral] +from mypy_extensions import u8, i64 + +def f() -> None: + x: u8 = 0 + y: u8 = 255 + z: u8 = 5 + 7 +[out] +def f(): + x, y, z :: u8 +L0: + x = 0 + y = 255 + z = 12 + return 1 + +[case testU8ExplicitConversionFromNativeInt] +from mypy_extensions import i64, i32, i16, u8 + +def from_u8(x: u8) -> u8: + return u8(x) + +def from_i16(x: i16) -> u8: + return u8(x) + +def from_i32(x: i32) -> u8: + return u8(x) + +def from_i64(x: i64) -> u8: + return u8(x) +[out] +def from_u8(x): + x :: u8 +L0: + return x +def from_i16(x): + x :: i16 + r0 :: u8 +L0: + r0 = truncate x: i16 to u8 + return r0 +def from_i32(x): + x :: i32 + r0 :: u8 +L0: + r0 = truncate x: i32 to u8 + return r0 +def from_i64(x): + x :: i64 + r0 :: u8 +L0: + r0 = truncate x: i64 to u8 + return r0 + +[case testU8ExplicitConversionToNativeInt] +from mypy_extensions import i64, i32, i16, u8 + +def to_i16(x: u8) -> i16: + return i16(x) + +def to_i32(x: u8) -> i32: + return i32(x) + +def to_i64(x: u8) -> i64: + return i64(x) +[out] +def to_i16(x): + x :: u8 + r0 :: i16 +L0: + r0 = extend x: u8 to i16 + return r0 +def to_i32(x): + x :: u8 + r0 :: i32 +L0: + r0 = extend x: u8 to i32 + return r0 +def to_i64(x): + x :: u8 + r0 :: i64 +L0: + r0 = extend x: u8 to i64 + return r0 + +[case testU8ExplicitConversionFromInt] +from mypy_extensions import u8 + +def f(x: int) -> u8: + return u8(x) +[out] +def f(x): + x :: int + r0 :: native_int + r1, r2, r3 :: bit + r4 :: native_int + r5, r6 :: u8 +L0: + r0 = x & 1 + r1 = r0 == 0 + if r1 goto L1 else goto L4 :: bool +L1: + r2 = x < 512 :: signed + if r2 goto L2 else goto L4 :: bool +L2: + r3 = x >= 0 :: signed + if r3 goto L3 else goto L4 :: bool +L3: + r4 = x 
>> 1 + r5 = truncate r4: native_int to u8 + r6 = r5 + goto L5 +L4: + CPyUInt8_Overflow() + unreachable +L5: + return r6 + +[case testU8ExplicitConversionFromLiteral] +from mypy_extensions import u8 + +def f() -> None: + x = u8(0) + y = u8(11) + z = u8(-3) # Truncate + zz = u8(258) # Truncate + a = u8(255) +[out] +def f(): + x, y, z, zz, a :: u8 +L0: + x = 0 + y = 11 + z = 253 + zz = 2 + a = 255 + return 1 + +[case testU8ExplicitConversionFromVariousTypes] +from mypy_extensions import u8 + +def bool_to_u8(b: bool) -> u8: + return u8(b) + +def str_to_u8(s: str) -> u8: + return u8(s) + +class C: + def __int__(self) -> u8: + return 5 + +def instance_to_u8(c: C) -> u8: + return u8(c) + +def float_to_u8(x: float) -> u8: + return u8(x) +[out] +def bool_to_u8(b): + b :: bool + r0 :: u8 +L0: + r0 = extend b: builtins.bool to u8 + return r0 +def str_to_u8(s): + s :: str + r0 :: object + r1 :: u8 +L0: + r0 = CPyLong_FromStr(s) + r1 = unbox(u8, r0) + return r1 +def C.__int__(self): + self :: __main__.C +L0: + return 5 +def instance_to_u8(c): + c :: __main__.C + r0 :: u8 +L0: + r0 = c.__int__() + return r0 +def float_to_u8(x): + x :: float + r0 :: int + r1 :: native_int + r2, r3, r4 :: bit + r5 :: native_int + r6, r7 :: u8 +L0: + r0 = CPyTagged_FromFloat(x) + r1 = r0 & 1 + r2 = r1 == 0 + if r2 goto L1 else goto L4 :: bool +L1: + r3 = r0 < 512 :: signed + if r3 goto L2 else goto L4 :: bool +L2: + r4 = r0 >= 0 :: signed + if r4 goto L3 else goto L4 :: bool +L3: + r5 = r0 >> 1 + r6 = truncate r5: native_int to u8 + r7 = r6 + goto L5 +L4: + CPyUInt8_Overflow() + unreachable +L5: + return r7 diff --git a/mypyc/test-data/irbuild-unreachable.test b/mypyc/test-data/irbuild-unreachable.test index 2c164491a5a1..1c024a249bf1 100644 --- a/mypyc/test-data/irbuild-unreachable.test +++ b/mypyc/test-data/irbuild-unreachable.test @@ -11,7 +11,7 @@ def f(): r1 :: str r2 :: object r3, r4 :: str - r5 :: int32 + r5 :: i32 r6 :: bit r7 :: object r8, r9, r10 :: bit @@ -68,7 +68,7 @@ def f(): r1 :: str r2 :: object r3, r4 :: str - r5 :: int32 + r5 :: i32 r6 :: bit r7 :: object r8, r9, r10 :: bit diff --git a/mypyc/test-data/refcount.test b/mypyc/test-data/refcount.test index 372956a00cab..3db4caa39566 100644 --- a/mypyc/test-data/refcount.test +++ b/mypyc/test-data/refcount.test @@ -702,7 +702,7 @@ def f(a, x): a :: list x :: int r0 :: object - r1 :: int32 + r1 :: i32 r2 :: bit L0: inc_ref x :: int @@ -1504,10 +1504,10 @@ def f(x): x, r0 :: int r1 :: native_int r2 :: bit - r3, r4 :: int64 + r3, r4 :: i64 r5 :: ptr r6 :: c_ptr - r7 :: int64 + r7 :: i64 L0: r0 = CPyTagged_Add(x, 2) r1 = r0 & 1 diff --git a/mypyc/test-data/run-classes.test b/mypyc/test-data/run-classes.test index 268e07f6bde4..59617714f7e7 100644 --- a/mypyc/test-data/run-classes.test +++ b/mypyc/test-data/run-classes.test @@ -1370,9 +1370,8 @@ except TypeError as e: [case testMetaclass] from meta import Meta -import six -class Nothing1(metaclass=Meta): +class Nothing(metaclass=Meta): pass def ident(x): return x @@ -1381,15 +1380,7 @@ def ident(x): return x class Test: pass -class Nothing2(six.with_metaclass(Meta, Test)): - pass - -@six.add_metaclass(Meta) -class Nothing3: - pass - [file meta.py] -from typing import Any class Meta(type): def __new__(mcs, name, bases, dct): dct['X'] = 10 @@ -1397,10 +1388,8 @@ class Meta(type): [file driver.py] -from native import Nothing1, Nothing2, Nothing3 -assert Nothing1.X == 10 -assert Nothing2.X == 10 -assert Nothing3.X == 10 +from native import Nothing +assert Nothing.X == 10 [case testPickling] from mypy_extensions 
import trait, mypyc_attr diff --git a/mypyc/test-data/run-i16.test b/mypyc/test-data/run-i16.test new file mode 100644 index 000000000000..fbb0c15220bc --- /dev/null +++ b/mypyc/test-data/run-i16.test @@ -0,0 +1,338 @@ +[case testI16BasicOps] +from typing import Any, Tuple + +from mypy_extensions import i16, i32, i64 + +from testutil import assertRaises + +def test_box_and_unbox() -> None: + values = (list(range(-2**15, -2**15 + 100)) + + list(range(-1000, 1000)) + + list(range(2**15 - 100, 2**15))) + for i in values: + o: Any = i + x: i16 = o + o2: Any = x + assert o == o2 + assert x == i + with assertRaises(OverflowError, "int too large to convert to i16"): + o = 2**15 + x2: i16 = o + with assertRaises(OverflowError, "int too large to convert to i16"): + o = -2**15 - 1 + x3: i16 = o + +def div_by_7(x: i16) -> i16: + return x // 7 +def div_by_neg_7(x: i16) -> i16: + return x // -7 + +def div(x: i16, y: i16) -> i16: + return x // y + +def test_divide_by_constant() -> None: + for i in range(-1000, 1000): + assert div_by_7(i) == i // 7 + for i in range(-2**15, -2**15 + 1000): + assert div_by_7(i) == i // 7 + for i in range(2**15 - 1000, 2**15): + assert div_by_7(i) == i // 7 + +def test_divide_by_negative_constant() -> None: + for i in range(-1000, 1000): + assert div_by_neg_7(i) == i // -7 + for i in range(-2**15, -2**15 + 1000): + assert div_by_neg_7(i) == i // -7 + for i in range(2**15 - 1000, 2**15): + assert div_by_neg_7(i) == i // -7 + +def test_divide_by_variable() -> None: + values = (list(range(-50, 50)) + + list(range(-2**15, -2**15 + 10)) + + list(range(2**15 - 10, 2**15))) + for x in values: + for y in values: + if y != 0: + if x // y == 2**15: + with assertRaises(OverflowError, "integer division overflow"): + div(x, y) + else: + assert div(x, y) == x // y + else: + with assertRaises(ZeroDivisionError, "integer division or modulo by zero"): + div(x, y) + +def mod_by_7(x: i16) -> i16: + return x % 7 + +def mod_by_neg_7(x: i16) -> i16: + return x // -7 + +def mod(x: i16, y: i16) -> i16: + return x % y + +def test_mod_by_constant() -> None: + for i in range(-1000, 1000): + assert mod_by_7(i) == i % 7 + for i in range(-2**15, -2**15 + 1000): + assert mod_by_7(i) == i % 7 + for i in range(2**15 - 1000, 2**15): + assert mod_by_7(i) == i % 7 + +def test_mod_by_negative_constant() -> None: + for i in range(-1000, 1000): + assert mod_by_neg_7(i) == i // -7 + for i in range(-2**15, -2**15 + 1000): + assert mod_by_neg_7(i) == i // -7 + for i in range(2**15 - 1000, 2**15): + assert mod_by_neg_7(i) == i // -7 + +def test_mod_by_variable() -> None: + values = (list(range(-50, 50)) + + list(range(-2**15, -2**15 + 10)) + + list(range(2**15 - 10, 2**15))) + for x in values: + for y in values: + if y != 0: + assert mod(x, y) == x % y + else: + with assertRaises(ZeroDivisionError, "integer division or modulo by zero"): + mod(x, y) + +def test_simple_arithmetic_ops() -> None: + zero: i16 = int() + one: i16 = zero + 1 + two: i16 = one + 1 + neg_one: i16 = -one + assert one + one == 2 + assert one + two == 3 + assert one + neg_one == 0 + assert one - one == 0 + assert one - two == -1 + assert one * one == 1 + assert one * two == 2 + assert two * two == 4 + assert two * neg_one == -2 + assert neg_one * one == -1 + assert neg_one * neg_one == 1 + assert two * 0 == 0 + assert 0 * two == 0 + assert -one == -1 + assert -two == -2 + assert -neg_one == 1 + assert -zero == 0 + +def test_bitwise_ops() -> None: + x: i16 = 13855 + int() + y: i16 = 367 + int() + z: i16 = -11091 + int() + zero: i16 = int() + one: 
i16 = zero + 1 + two: i16 = zero + 2 + neg_one: i16 = -one + + assert x & y == 15 + assert x & z == 5133 + assert z & z == z + assert x & zero == 0 + + assert x | y == 14207 + assert x | z == -2369 + assert z | z == z + assert x | 0 == x + + assert x ^ y == 14192 + assert x ^ z == -7502 + assert z ^ z == 0 + assert z ^ 0 == z + + assert x << one == 27710 + assert x << two == -10116 + assert z << two == 21172 + assert z << 0 == z + + assert x >> one == 6927 + assert x >> two == 3463 + assert z >> two == -2773 + assert z >> 0 == z + + assert ~x == -13856 + assert ~z == 11090 + assert ~zero == -1 + assert ~neg_one == 0 + +def eq(x: i16, y: i16) -> bool: + return x == y + +def test_eq() -> None: + assert eq(int(), int()) + assert eq(5 + int(), 5 + int()) + assert eq(-5 + int(), -5 + int()) + assert not eq(int(), 1 + int()) + assert not eq(5 + int(), 6 + int()) + assert not eq(-5 + int(), -6 + int()) + assert not eq(-5 + int(), 5 + int()) + +def test_comparisons() -> None: + one: i16 = 1 + int() + one2: i16 = 1 + int() + two: i16 = 2 + int() + assert one < two + assert not (one < one2) + assert not (two < one) + assert two > one + assert not (one > one2) + assert not (one > two) + assert one <= two + assert one <= one2 + assert not (two <= one) + assert two >= one + assert one >= one2 + assert not (one >= two) + assert one == one2 + assert not (one == two) + assert one != two + assert not (one != one2) + +def test_mixed_comparisons() -> None: + i16_3: i16 = int() + 3 + int_5 = int() + 5 + assert i16_3 < int_5 + assert int_5 > i16_3 + b = i16_3 > int_5 + assert not b + + int_largest = int() + (1 << 15) - 1 + assert int_largest > i16_3 + int_smallest = int() - (1 << 15) + assert i16_3 > int_smallest + + int_too_big = int() + (1 << 15) + int_too_small = int() - (1 << 15) - 1 + with assertRaises(OverflowError): + assert i16_3 < int_too_big + with assertRaises(OverflowError): + assert int_too_big < i16_3 + with assertRaises(OverflowError): + assert i16_3 > int_too_small + with assertRaises(OverflowError): + assert int_too_small < i16_3 + +def test_mixed_arithmetic_and_bitwise_ops() -> None: + i16_3: i16 = int() + 3 + int_5 = int() + 5 + assert i16_3 + int_5 == 8 + assert int_5 - i16_3 == 2 + assert i16_3 << int_5 == 96 + assert int_5 << i16_3 == 40 + assert i16_3 ^ int_5 == 6 + assert int_5 | i16_3 == 7 + + int_largest = int() + (1 << 15) - 1 + assert int_largest - i16_3 == 32764 + int_smallest = int() - (1 << 15) + assert int_smallest + i16_3 == -32765 + + int_too_big = int() + (1 << 15) + int_too_small = int() - (1 << 15) - 1 + with assertRaises(OverflowError): + assert i16_3 & int_too_big + with assertRaises(OverflowError): + assert int_too_small & i16_3 + +def test_coerce_to_and_from_int() -> None: + for shift in range(0, 16): + for sign in 1, -1: + for delta in range(-5, 5): + n = sign * (1 << shift) + delta + if -(1 << 15) <= n < (1 << 15): + x: i16 = n + m: int = x + assert m == n + +def test_explicit_conversion_to_i16() -> None: + x = i16(5) + assert x == 5 + y = int() - 113 + x = i16(y) + assert x == -113 + n64: i64 = 1733 + x = i16(n64) + assert x == 1733 + n32: i32 = -1733 + x = i16(n32) + assert x == -1733 + z = i16(x) + assert z == -1733 + +def test_explicit_conversion_overflow() -> None: + max_i16 = int() + 2**15 - 1 + x = i16(max_i16) + assert x == 2**15 - 1 + assert int(x) == max_i16 + + min_i16 = int() - 2**15 + y = i16(min_i16) + assert y == -2**15 + assert int(y) == min_i16 + + too_big = int() + 2**15 + with assertRaises(OverflowError): + x = i16(too_big) + + too_small = int() - 
2**15 - 1 + with assertRaises(OverflowError): + x = i16(too_small) + +def test_i16_from_large_small_literal() -> None: + x = i16(2**15 - 1) + assert x == 2**15 - 1 + x = i16(-2**15) + assert x == -2**15 + +def test_i16_truncate_from_i64() -> None: + large = i64(2**32 + 65536 + 157 + int()) + x = i16(large) + assert x == 157 + small = i64(-2**32 - 65536 - 157 + int()) + x = i16(small) + assert x == -157 + large2 = i64(2**15 + int()) + x = i16(large2) + assert x == -2**15 + small2 = i64(-2**15 - 1 - int()) + x = i16(small2) + assert x == 2**15 - 1 + +def test_i16_truncate_from_i32() -> None: + large = i32(2**16 + 2**30 + 5 + int()) + assert i16(large) == 5 + small = i32(-2**16 - 2**30 - 1 + int()) + assert i16(small) == -1 + +def from_float(x: float) -> i16: + return i16(x) + +def test_explicit_conversion_from_float() -> None: + assert from_float(0.0) == 0 + assert from_float(1.456) == 1 + assert from_float(-1234.567) == -1234 + assert from_float(2**15 - 1) == 2**15 - 1 + assert from_float(-2**15) == -2**15 + # The error message could be better, but this is acceptable + with assertRaises(OverflowError, "int too large to convert to i16"): + assert from_float(float(2**15)) + with assertRaises(OverflowError, "int too large to convert to i16"): + # One ulp below the lowest valid i64 value + from_float(float(-2**15 - 1)) + +def test_tuple_i16() -> None: + a: i16 = 1 + b: i16 = 2 + t = (a, b) + a, b = t + assert a == 1 + assert b == 2 + x: Any = t + tt: Tuple[i16, i16] = x + assert tt == (1, 2) diff --git a/mypyc/test-data/run-i32.test b/mypyc/test-data/run-i32.test index af99fb79d35e..bb1fa43bb9fd 100644 --- a/mypyc/test-data/run-i32.test +++ b/mypyc/test-data/run-i32.test @@ -1,7 +1,7 @@ [case testI32BasicOps] from typing import Any, Tuple -from mypy_extensions import i32, i64 +from mypy_extensions import i16, i32, i64 from testutil import assertRaises @@ -259,11 +259,15 @@ def test_explicit_conversion_to_i32() -> None: n64: i64 = 1733 x = i32(n64) assert x == 1733 - n32 = -1733 + n32: i32 = -1733 x = i32(n32) assert x == -1733 z = i32(x) assert z == -1733 + a: i16 = int() + 19764 + assert i32(a) == 19764 + a = int() - 1 + assert i32(a) == -1 def test_explicit_conversion_overflow() -> None: max_i32 = int() + 2**31 - 1 diff --git a/mypyc/test-data/run-i64.test b/mypyc/test-data/run-i64.test index bcde39fed5ff..1a82ac3e2dd1 100644 --- a/mypyc/test-data/run-i64.test +++ b/mypyc/test-data/run-i64.test @@ -1,7 +1,7 @@ [case testI64BasicOps] from typing import List, Any, Tuple, Union -from mypy_extensions import i64, i32 +from mypy_extensions import i64, i32, i16 from testutil import assertRaises @@ -282,6 +282,10 @@ def test_explicit_conversion_to_i64() -> None: assert x == -1733 z = i64(x) assert z == -1733 + a: i16 = int() + 19764 + assert i64(a) == 19764 + a = int() - 1 + assert i64(a) == -1 def test_explicit_conversion_overflow() -> None: max_i64 = int() + 2**63 - 1 @@ -1500,3 +1504,16 @@ def test_implement_trait_attribute() -> None: a.y = 8 assert a.x == 7 assert a.y == 8 + +class DunderErr: + def __contains__(self, i: i64) -> bool: + raise IndexError() + +def test_dunder_arg_check() -> None: + o: Any = DunderErr() + with assertRaises(TypeError): + 'x' in o + with assertRaises(TypeError): + 2**63 in o + with assertRaises(IndexError): + 1 in o diff --git a/mypyc/test-data/run-misc.test b/mypyc/test-data/run-misc.test index fd0eb5022236..c40e0fc55f0e 100644 --- a/mypyc/test-data/run-misc.test +++ b/mypyc/test-data/run-misc.test @@ -1108,25 +1108,14 @@ assert not C # make the initial import fail 
assert False -class C: - def __init__(self): - self.x = 1 - self.y = 2 -def test() -> None: - a = C() [file driver.py] # load native, cause PyInit to be run, create the module but don't finish initializing the globals -try: - import native -except: - pass -try: - # try accessing those globals that were never properly initialized - import native - native.test() -# should fail with AssertionError due to `assert False` in other function -except AssertionError: - pass +for _ in range(2): + try: + import native + raise RuntimeError('exception expected') + except AssertionError: + pass [case testRepeatedUnderscoreFunctions] def _(arg): pass diff --git a/mypyc/test-data/run-tuples.test b/mypyc/test-data/run-tuples.test index f6c92b9c720f..0851c15e57fd 100644 --- a/mypyc/test-data/run-tuples.test +++ b/mypyc/test-data/run-tuples.test @@ -98,6 +98,7 @@ assert f(Sub(3, 2)) == 3 -- Ref: https://github.com/mypyc/mypyc/issues/924 [case testNamedTupleClassSyntax] from typing import Dict, List, NamedTuple, Optional, Tuple, Union +from typing_extensions import final class FuncIR: pass @@ -121,6 +122,11 @@ class Record(NamedTuple): # Ref: https://github.com/mypyc/mypyc/issues/938 class ClassIR: pass +# Ref: https://github.com/mypyc/mypyc/issues/927 +@final +class Inextensible(NamedTuple): + x: int + [file driver.py] from typing import ForwardRef, Optional from native import ClassIR, FuncIR, Record diff --git a/mypyc/test-data/run-u8.test b/mypyc/test-data/run-u8.test new file mode 100644 index 000000000000..cddb031e3352 --- /dev/null +++ b/mypyc/test-data/run-u8.test @@ -0,0 +1,303 @@ +[case testU8BasicOps] +from typing import Any, Tuple + +from mypy_extensions import u8, i16, i32, i64 +from typing_extensions import Final + +from testutil import assertRaises + +ERROR: Final = 239 + +def test_box_and_unbox() -> None: + for i in range(0, 256): + o: Any = i + x: u8 = o + o2: Any = x + assert o == o2 + assert x == i + with assertRaises(OverflowError, "int too large or small to convert to u8"): + o = 256 + x2: u8 = o + with assertRaises(OverflowError, "int too large or small to convert to u8"): + o = -1 + x3: u8 = o + +def div_by_7(x: u8) -> u8: + return x // 7 + +def div(x: u8, y: u8) -> u8: + return x // y + +def test_divide_by_constant() -> None: + for i in range(0, 256): + assert div_by_7(i) == i // 7 + +def test_divide_by_variable() -> None: + for x in range(0, 256): + for y in range(0, 256): + if y != 0: + assert div(x, y) == x // y + else: + with assertRaises(ZeroDivisionError, "integer division or modulo by zero"): + div(x, y) + +def mod_by_7(x: u8) -> u8: + return x % 7 + +def mod(x: u8, y: u8) -> u8: + return x % y + +def test_mod_by_constant() -> None: + for i in range(0, 256): + assert mod_by_7(i) == i % 7 + +def test_mod_by_variable() -> None: + for x in range(0, 256): + for y in range(0, 256): + if y != 0: + assert mod(x, y) == x % y + else: + with assertRaises(ZeroDivisionError, "integer division or modulo by zero"): + mod(x, y) + +def test_simple_arithmetic_ops() -> None: + zero: u8 = int() + one: u8 = zero + 1 + two: u8 = one + 1 + neg_one: u8 = -one + assert neg_one == 255 + assert one + one == 2 + assert one + two == 3 + assert one + neg_one == 0 + assert one - one == 0 + assert one - two == 255 + assert one * one == 1 + assert one * two == 2 + assert two * two == 4 + assert two * neg_one == 254 + assert neg_one * one == 255 + assert neg_one * neg_one == 1 + assert two * 0 == 0 + assert 0 * two == 0 + assert -one == 255 + assert -two == 254 + assert -neg_one == 1 + assert -zero == 0 + +def 
test_bitwise_ops() -> None: + x: u8 = 184 + int() + y: u8 = 79 + int() + z: u8 = 113 + int() + zero: u8 = int() + one: u8 = zero + 1 + two: u8 = zero + 2 + neg_one: u8 = -one + + assert x & y == 8 + assert x & z == 48 + assert z & z == z + assert x & zero == 0 + + assert x | y == 255 + assert x | z == 249 + assert z | z == z + assert x | 0 == x + + assert x ^ y == 247 + assert x ^ z == 201 + assert z ^ z == 0 + assert z ^ 0 == z + + assert x << one == 112 + assert x << two == 224 + assert z << two == 196 + assert z << 0 == z + + assert x >> one == 92 + assert x >> two == 46 + assert z >> two == 28 + assert z >> 0 == z + + for i in range(256): + t: u8 = i + assert ~t == (~(i + int()) & 0xff) + +def eq(x: u8, y: u8) -> bool: + return x == y + +def test_eq() -> None: + assert eq(int(), int()) + assert eq(5 + int(), 5 + int()) + assert not eq(int(), 1 + int()) + assert not eq(5 + int(), 6 + int()) + +def test_comparisons() -> None: + one: u8 = 1 + int() + one2: u8 = 1 + int() + two: u8 = 2 + int() + assert one < two + assert not (one < one2) + assert not (two < one) + assert two > one + assert not (one > one2) + assert not (one > two) + assert one <= two + assert one <= one2 + assert not (two <= one) + assert two >= one + assert one >= one2 + assert not (one >= two) + assert one == one2 + assert not (one == two) + assert one != two + assert not (one != one2) + +def test_mixed_comparisons() -> None: + u8_3: u8 = int() + 3 + int_5 = int() + 5 + assert u8_3 < int_5 + assert int_5 > u8_3 + b = u8_3 > int_5 + assert not b + + int_largest = int() + 255 + assert int_largest > u8_3 + int_smallest = int() + assert u8_3 > int_smallest + + int_too_big = int() + 256 + int_too_small = int() -1 + with assertRaises(OverflowError): + assert u8_3 < int_too_big + with assertRaises(OverflowError): + assert int_too_big < u8_3 + with assertRaises(OverflowError): + assert u8_3 > int_too_small + with assertRaises(OverflowError): + assert int_too_small < u8_3 + +def test_mixed_arithmetic_and_bitwise_ops() -> None: + u8_3: u8 = int() + 3 + int_5 = int() + 5 + assert u8_3 + int_5 == 8 + assert int_5 - u8_3 == 2 + assert u8_3 << int_5 == 96 + assert int_5 << u8_3 == 40 + assert u8_3 ^ int_5 == 6 + assert int_5 | u8_3 == 7 + + int_largest = int() + 255 + assert int_largest - u8_3 == 252 + int_smallest = int() + assert int_smallest + u8_3 == 3 + + int_too_big = int() + 256 + int_too_small = int() - 1 + with assertRaises(OverflowError): + assert u8_3 & int_too_big + with assertRaises(OverflowError): + assert int_too_small & u8_3 + +def test_coerce_to_and_from_int() -> None: + for n in range(0, 256): + x: u8 = n + m: int = x + assert m == n + +def test_explicit_conversion_to_u8() -> None: + x = u8(5) + assert x == 5 + y = int() + ERROR + x = u8(y) + assert x == ERROR + n64: i64 = 233 + x = u8(n64) + assert x == 233 + n32: i32 = 234 + x = u8(n32) + assert x == 234 + z = u8(x) + assert z == 234 + n16: i16 = 231 + x = u8(n16) + assert x == 231 + +def test_explicit_conversion_overflow() -> None: + max_u8 = int() + 255 + x = u8(max_u8) + assert x == 255 + assert int(x) == max_u8 + + min_u8 = int() + y = u8(min_u8) + assert y == 0 + assert int(y) == min_u8 + + too_big = int() + 256 + with assertRaises(OverflowError): + x = u8(too_big) + + too_small = int() - 1 + with assertRaises(OverflowError): + x = u8(too_small) + +def test_u8_from_large_small_literal() -> None: + x = u8(255) # XXX u8(2**15 - 1) + assert x == 255 + x = u8(0) + assert x == 0 + +def test_u8_truncate_from_i64() -> None: + large = i64(2**32 + 256 + 157 + int()) + 
x = u8(large) + assert x == 157 + small = i64(-2**32 - 256 - 157 + int()) + x = u8(small) + assert x == 256 - 157 + large2 = i64(2**8 + int()) + x = u8(large2) + assert x == 0 + small2 = i64(-2**8 - 1 - int()) + x = u8(small2) + assert x == 255 + +def test_u8_truncate_from_i32() -> None: + large = i32(2**16 + 2**8 + 5 + int()) + assert u8(large) == 5 + small = i32(-2**16 - 2**8 - 1 + int()) + assert u8(small) == 255 + +def from_float(x: float) -> u8: + return u8(x) + +def test_explicit_conversion_from_float() -> None: + assert from_float(0.0) == 0 + assert from_float(1.456) == 1 + assert from_float(234.567) == 234 + assert from_float(255) == 255 + assert from_float(0) == 0 + assert from_float(-0.999) == 0 + # The error message could be better, but this is acceptable + with assertRaises(OverflowError, "int too large or small to convert to u8"): + assert from_float(float(256)) + with assertRaises(OverflowError, "int too large or small to convert to u8"): + # One ulp below the lowest valid i64 value + from_float(float(-1.0)) + +def test_tuple_u8() -> None: + a: u8 = 1 + b: u8 = 2 + t = (a, b) + a, b = t + assert a == 1 + assert b == 2 + x: Any = t + tt: Tuple[u8, u8] = x + assert tt == (1, 2) + +def test_convert_u8_to_native_int() -> None: + for i in range(256): + x: u8 = i + assert i16(x) == i + assert i32(x) == i + assert i64(x) == i diff --git a/mypyc/test/test_emit.py b/mypyc/test/test_emit.py index 54bf4eef3c74..e4ace3ec01f0 100644 --- a/mypyc/test/test_emit.py +++ b/mypyc/test/test_emit.py @@ -4,7 +4,7 @@ from mypyc.codegen.emit import Emitter, EmitterContext from mypyc.ir.ops import BasicBlock, Register, Value -from mypyc.ir.rtypes import int_rprimitive +from mypyc.ir.rtypes import RTuple, bool_rprimitive, int_rprimitive, str_rprimitive from mypyc.namegen import NameGenerator @@ -49,3 +49,21 @@ def test_emit_line(self) -> None: CPyStatics[1]; /* [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29] */\n""" ) + + def test_emit_undefined_value_for_simple_type(self) -> None: + emitter = Emitter(self.context, {}) + assert emitter.c_undefined_value(int_rprimitive) == "CPY_INT_TAG" + assert emitter.c_undefined_value(str_rprimitive) == "NULL" + assert emitter.c_undefined_value(bool_rprimitive) == "2" + + def test_emit_undefined_value_for_tuple(self) -> None: + emitter = Emitter(self.context, {}) + assert ( + emitter.c_undefined_value(RTuple([str_rprimitive, int_rprimitive, bool_rprimitive])) + == "(tuple_T3OIC) { NULL, CPY_INT_TAG, 2 }" + ) + assert emitter.c_undefined_value(RTuple([str_rprimitive])) == "(tuple_T1O) { NULL }" + assert ( + emitter.c_undefined_value(RTuple([RTuple([str_rprimitive]), bool_rprimitive])) + == "(tuple_T2T1OC) { { NULL }, 2 }" + ) diff --git a/mypyc/test/test_irbuild.py b/mypyc/test/test_irbuild.py index fe347c855661..5b3f678d8f17 100644 --- a/mypyc/test/test_irbuild.py +++ b/mypyc/test/test_irbuild.py @@ -42,6 +42,8 @@ "irbuild-strip-asserts.test", "irbuild-i64.test", "irbuild-i32.test", + "irbuild-i16.test", + "irbuild-u8.test", "irbuild-vectorcall.test", "irbuild-unreachable.test", "irbuild-isinstance.test", diff --git a/mypyc/test/test_ircheck.py b/mypyc/test/test_ircheck.py index 008963642272..7f7063cdc5e6 100644 --- a/mypyc/test/test_ircheck.py +++ b/mypyc/test/test_ircheck.py @@ -117,7 +117,7 @@ def test_invalid_return_type(self) -> None: blocks=[self.basic_block([ret])], ) assert_has_error( - fn, FnError(source=ret, desc="Cannot coerce source type int32 to dest type int64") + fn, FnError(source=ret, 
desc="Cannot coerce source type i32 to dest type i64") ) def test_invalid_assign(self) -> None: @@ -130,7 +130,7 @@ def test_invalid_assign(self) -> None: blocks=[self.basic_block([assign, ret])], ) assert_has_error( - fn, FnError(source=assign, desc="Cannot coerce source type int32 to dest type int64") + fn, FnError(source=assign, desc="Cannot coerce source type i32 to dest type i64") ) def test_can_coerce_to(self) -> None: diff --git a/mypyc/test/test_run.py b/mypyc/test/test_run.py index dc054ac9002f..df9d44eab73f 100644 --- a/mypyc/test/test_run.py +++ b/mypyc/test/test_run.py @@ -41,6 +41,8 @@ "run-integers.test", "run-i64.test", "run-i32.test", + "run-i16.test", + "run-u8.test", "run-floats.test", "run-math.test", "run-bools.test", @@ -63,12 +65,10 @@ "run-dunders.test", "run-singledispatch.test", "run-attrs.test", + "run-python37.test", + "run-python38.test", ] -files.append("run-python37.test") -if sys.version_info >= (3, 8): - files.append("run-python38.test") - if sys.version_info >= (3, 10): files.append("run-match.test") diff --git a/mypyc/test/test_struct.py b/mypyc/test/test_struct.py index 2b0298cadeda..82990e6afd82 100644 --- a/mypyc/test/test_struct.py +++ b/mypyc/test/test_struct.py @@ -55,8 +55,8 @@ def test_struct_str(self) -> None: "b:}>" ) r1 = RStruct("Bar", ["c"], [int32_rprimitive]) - assert str(r1) == "Bar{c:int32}" - assert repr(r1) == "}>" + assert str(r1) == "Bar{c:i32}" + assert repr(r1) == "}>" r2 = RStruct("Baz", [], []) assert str(r2) == "Baz{}" assert repr(r2) == "" diff --git a/mypyc/test/test_typeops.py b/mypyc/test/test_typeops.py index 0d9860d88ffe..ff2c05ad983e 100644 --- a/mypyc/test/test_typeops.py +++ b/mypyc/test/test_typeops.py @@ -8,6 +8,7 @@ RUnion, bit_rprimitive, bool_rprimitive, + int16_rprimitive, int32_rprimitive, int64_rprimitive, int_rprimitive, @@ -18,31 +19,44 @@ from mypyc.rt_subtype import is_runtime_subtype from mypyc.subtype import is_subtype +native_int_types = [int64_rprimitive, int32_rprimitive, int16_rprimitive] + class TestSubtype(unittest.TestCase): def test_bit(self) -> None: assert is_subtype(bit_rprimitive, bool_rprimitive) assert is_subtype(bit_rprimitive, int_rprimitive) assert is_subtype(bit_rprimitive, short_int_rprimitive) - assert is_subtype(bit_rprimitive, int64_rprimitive) - assert is_subtype(bit_rprimitive, int32_rprimitive) + for rt in native_int_types: + assert is_subtype(bit_rprimitive, rt) def test_bool(self) -> None: assert not is_subtype(bool_rprimitive, bit_rprimitive) assert is_subtype(bool_rprimitive, int_rprimitive) assert is_subtype(bool_rprimitive, short_int_rprimitive) - assert is_subtype(bool_rprimitive, int64_rprimitive) - assert is_subtype(bool_rprimitive, int32_rprimitive) + for rt in native_int_types: + assert is_subtype(bool_rprimitive, rt) def test_int64(self) -> None: + assert is_subtype(int64_rprimitive, int64_rprimitive) assert is_subtype(int64_rprimitive, int_rprimitive) assert not is_subtype(int64_rprimitive, short_int_rprimitive) assert not is_subtype(int64_rprimitive, int32_rprimitive) + assert not is_subtype(int64_rprimitive, int16_rprimitive) def test_int32(self) -> None: + assert is_subtype(int32_rprimitive, int32_rprimitive) assert is_subtype(int32_rprimitive, int_rprimitive) assert not is_subtype(int32_rprimitive, short_int_rprimitive) assert not is_subtype(int32_rprimitive, int64_rprimitive) + assert not is_subtype(int32_rprimitive, int16_rprimitive) + + def test_int16(self) -> None: + assert is_subtype(int16_rprimitive, int16_rprimitive) + assert is_subtype(int16_rprimitive, 
int_rprimitive) + assert not is_subtype(int16_rprimitive, short_int_rprimitive) + assert not is_subtype(int16_rprimitive, int64_rprimitive) + assert not is_subtype(int16_rprimitive, int32_rprimitive) class TestRuntimeSubtype(unittest.TestCase): diff --git a/mypyc/test/testutil.py b/mypyc/test/testutil.py index 796811a6363c..6446af3427af 100644 --- a/mypyc/test/testutil.py +++ b/mypyc/test/testutil.py @@ -102,7 +102,7 @@ def build_ir_for_single_file2( # By default generate IR compatible with the earliest supported Python C API. # If a test needs more recent API features, this should be overridden. - compiler_options = compiler_options or CompilerOptions(capi_version=(3, 5)) + compiler_options = compiler_options or CompilerOptions(capi_version=(3, 7)) options = Options() options.show_traceback = True options.hide_error_codes = True @@ -272,7 +272,7 @@ def infer_ir_build_options_from_test_name(name: str) -> CompilerOptions | None: return None if "_32bit" in name and not IS_32_BIT_PLATFORM: return None - options = CompilerOptions(strip_asserts="StripAssert" in name, capi_version=(3, 5)) + options = CompilerOptions(strip_asserts="StripAssert" in name, capi_version=(3, 7)) # A suffix like _python3.8 is used to set the target C API version. m = re.search(r"_python([3-9]+)_([0-9]+)(_|\b)", name) if m: diff --git a/pyproject.toml b/pyproject.toml index 3d100dff5101..67201acb9b94 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,20 +6,18 @@ requires = [ "setuptools >= 40.6.2", "wheel >= 0.30.0", # the following is from mypy-requirements.txt - "typing_extensions>=3.10", + "typing_extensions>=4.1.0", "mypy_extensions>=1.0.0", - "typed_ast>=1.4.0,<2; python_version<'3.8'", "tomli>=1.1.0; python_version<'3.11'", # the following is from build-requirements.txt "types-psutil", "types-setuptools", - "types-typed-ast>=1.5.8.5,<1.6.0", ] build-backend = "setuptools.build_meta" [tool.black] line-length = 99 -target-version = ["py37", "py38", "py39", "py310", "py311"] +target-version = ["py38", "py39", "py310", "py311"] skip-magic-trailing-comma = true force-exclude = ''' ^/mypy/typeshed| @@ -27,15 +25,99 @@ force-exclude = ''' ^/test-data ''' -[tool.isort] -py_version = 37 -profile = "black" -line_length = 99 -combine_as_imports = true -skip_gitignore = true -extra_standard_library = ["typing_extensions"] -skip_glob = [ - "mypy/typeshed/*", - "mypyc/test-data/*", - "test-data/*", +[tool.ruff] +line-length = 99 +target-version = "py38" +fix = true + +select = [ + "E", # pycodestyle (error) + "F", # pyflakes + "B", # flake8-bugbear + "I", # isort + "RUF100", # Unused noqa comments + "PGH004" # blanket noqa comments +] + +ignore = [ + "B006", # use of mutable defaults in function signatures + "B007", # Loop control variable not used within the loop body. + "B011", # Don't use assert False + "B023", # Function definition does not bind loop variable + "E203", # conflicts with black + "E402", # module level import not at top of file + "E501", # conflicts with black + "E731", # Do not assign a `lambda` expression, use a `def` + "E741", # Ambiguous variable name +] + +unfixable = [ + "F841", # unused variable. 
ruff keeps the call, but mostly we want to get rid of it all + "F601", # automatic fix might obscure issue + "F602", # automatic fix might obscure issue + "B018", # automatic fix might obscure issue +] + +extend-exclude = [ + "@*", + # Sphinx configuration is irrelevant + "docs/source/conf.py", + "mypyc/doc/conf.py", + # tests have more relaxed styling requirements + # fixtures have their own .pyi-specific configuration + "test-data/*", + "mypyc/test-data/*", + # typeshed has its own .pyi-specific configuration + "mypy/typeshed/*", +] + +[tool.ruff.isort] +combine-as-imports = true +extra-standard-library = ["typing_extensions"] + +[tool.check-manifest] +ignore = ["**/.readthedocs.yaml"] + +[tool.pytest.ini_options] +minversion = "6.0.0" +testpaths = ["mypy/test", "mypyc/test"] +python_files = 'test*.py' + +# Where do the test cases come from? We provide our own collection +# logic by implementing `pytest_pycollect_makeitem` in mypy.test.data; +# the test files import that module, and pytest sees the magic name +# and invokes it at the relevant moment. See +# https://doc.pytest.org/en/latest/how-to/writing_plugins.html#collection-hooks + +# Both our plugin and unittest provide their own collection logic, +# So we can disable the default python collector by giving it empty +# patterns to search for. +# Note that unittest requires that no "Test*" classes exist. +python_classes = [] +python_functions = [] + +# always run in parallel (requires pytest-xdist, see test-requirements.txt) +# and enable strict mode: require all markers +# to be defined and raise on invalid config values +addopts = "-nauto --strict-markers --strict-config" + +# treat xpasses as test failures so they get converted to regular tests as soon as possible +xfail_strict = true + +[tool.coverage.run] +branch = true +source = "mypy" +parallel = true + +[tool.coverage.report] +show_missing = true +skip_covered = true +omit = 'mypy/test/*' +exclude_lines = [ + '\#\s*pragma: no cover', + '^\s*raise AssertionError\b', + '^\s*raise NotImplementedError\b', + '^\s*return NotImplemented\b', + '^\s*raise$', + '''^if __name__ == ['"]__main__['"]:$''', ] diff --git a/pytest.ini b/pytest.ini deleted file mode 100644 index a123b0f11328..000000000000 --- a/pytest.ini +++ /dev/null @@ -1,27 +0,0 @@ -[pytest] -minversion = 6.0.0 - -testpaths = mypy/test mypyc/test - -python_files = test*.py - -# Where do the test cases come from? We provide our own collection -# logic by implementing `pytest_pycollect_makeitem` in mypy.test.data; -# the test files import that module, and pytest sees the magic name -# and invokes it at the relevant moment. See -# https://doc.pytest.org/en/latest/how-to/writing_plugins.html#collection-hooks - -# Both our plugin and unittest provide their own collection logic, -# So we can disable the default python collector by giving it empty -# patterns to search for. -# Note that unittest requires that no "Test*" classes exist. -python_classes = -python_functions = - -# always run in parallel (requires pytest-xdist, see test-requirements.txt) -# and enable strict mode: require all markers -# to be defined and raise on invalid config values -addopts = -nauto --strict-markers --strict-config - -# treat xpasses as test failures so they get converted to regular tests as soon as possible -xfail_strict = true diff --git a/runtests.py b/runtests.py index 66fade81ffab..80ef8d814ee1 100755 --- a/runtests.py +++ b/runtests.py @@ -48,7 +48,17 @@ # time to run. 
cmds = { # Self type check - "self": [executable, "-m", "mypy", "--config-file", "mypy_self_check.ini", "-p", "mypy"], + "self": [ + executable, + "-m", + "mypy", + "--config-file", + "mypy_self_check.ini", + "-p", + "mypy", + "-p", + "mypyc", + ], # Lint "lint": ["pre-commit", "run", "--all-files"], # Fast test cases only (this is the bulk of the test suite) diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 511f794474e7..000000000000 --- a/setup.cfg +++ /dev/null @@ -1,59 +0,0 @@ -[flake8] -max-line-length = 99 -noqa-require-code = True -# typeshed and unit test fixtures have .pyi-specific flake8 configuration -exclude = - # from .gitignore: directories, and file patterns that intersect with *.py - build, - bin, - lib, - include, - @*, - env, - docs/build, - out, - .venv, - .mypy_cache, - .git, - .cache, - # Sphinx configuration is irrelevant - docs/source/conf.py, - mypyc/doc/conf.py, - # tests have more relaxed styling requirements - # fixtures have their own .pyi-specific configuration - test-data/*, - mypyc/test-data/*, - # typeshed has its own .pyi-specific configuration - mypy/typeshed/*, - .tox - .eggs - .Python - -# Things to ignore: -# E203: conflicts with black -# E501: conflicts with black -# W601: has_key() deprecated (false positives) -# E402: module level import not at top of file -# B006: use of mutable defaults in function signatures -# B007: Loop control variable not used within the loop body. -# B011: Don't use assert False -# B023: Function definition does not bind loop variable -# E741: Ambiguous variable name -extend-ignore = E203,E501,W601,E402,B006,B007,B011,B023,E741 - -[coverage:run] -branch = true -source = mypy -parallel = true - -[coverage:report] -show_missing = true -skip_covered = True -omit = mypy/test/* -exclude_lines = - \#\s*pragma: no cover - ^\s*raise AssertionError\b - ^\s*raise NotImplementedError\b - ^\s*return NotImplemented\b - ^\s*raise$ - ^if __name__ == ['"]__main__['"]:$ diff --git a/setup.py b/setup.py index 061bb9ddf5b5..bbb655ea4537 100644 --- a/setup.py +++ b/setup.py @@ -8,8 +8,8 @@ import sys from typing import TYPE_CHECKING, Any -if sys.version_info < (3, 7, 0): - sys.stderr.write("ERROR: You need Python 3.7 or later to use mypy.\n") +if sys.version_info < (3, 8, 0): + sys.stderr.write("ERROR: You need Python 3.8 or later to use mypy.\n") exit(1) # we'll import stuff from the source tree, let's ensure is on the sys path @@ -186,7 +186,6 @@ def run(self): "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", @@ -221,19 +220,18 @@ def run(self): cmdclass=cmdclass, # When changing this, also update mypy-requirements.txt. install_requires=[ - "typed_ast >= 1.4.0, < 2; python_version<'3.8'", - "typing_extensions>=3.10", + "typing_extensions>=4.1.0", "mypy_extensions >= 1.0.0", "tomli>=1.1.0; python_version<'3.11'", ], # Same here. 
extras_require={ "dmypy": "psutil >= 4.0", - "python2": "typed_ast >= 1.4.0, < 2", + "python2": "", "reports": "lxml", "install-types": "pip", }, - python_requires=">=3.7", + python_requires=">=3.8", include_package_data=True, project_urls={ "News": "https://mypy-lang.org/news.html", diff --git a/test-data/.flake8 b/test-data/.flake8 deleted file mode 100644 index df2f9caf8c94..000000000000 --- a/test-data/.flake8 +++ /dev/null @@ -1,22 +0,0 @@ -# Some PEP8 deviations are considered irrelevant to stub files: -# (error counts as of 2016-12-19) -# 17381 E704 multiple statements on one line (def) -# 11840 E301 expected 1 blank line -# 7467 E302 expected 2 blank lines -# 1772 E501 line too long -# 1487 F401 imported but unused -# 1248 E701 multiple statements on one line (colon) -# 427 F811 redefinition -# 356 E305 expected 2 blank lines - -# Nice-to-haves ignored for now -# 152 E128 continuation line under-indented for visual indent -# 43 E127 continuation line over-indented for visual indent - -[flake8] -ignore = F401, F811, E127, E128, E301, E302, E305, E501, E701, E704, B303 -# We are checking with Python 3 but many of the stubs are Python 2 stubs. -# A nice future improvement would be to provide separate .flake8 -# configurations for Python 2 and Python 3 files. -builtins = StandardError,apply,basestring,buffer,cmp,coerce,execfile,file,intern,long,raw_input,reduce,reload,unichr,unicode,xrange -exclude = .venv*,@* diff --git a/test-data/packages/typedpkg-stubs/pyproject.toml b/test-data/packages/typedpkg-stubs/pyproject.toml new file mode 100644 index 000000000000..125816151ef8 --- /dev/null +++ b/test-data/packages/typedpkg-stubs/pyproject.toml @@ -0,0 +1,11 @@ +[project] +name = 'typedpkg-stubs' +version = '0.1' +description = 'test' + +[tool.hatch.build] +include = ["**/*.pyi"] + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" diff --git a/test-data/packages/typedpkg-stubs/setup.py b/test-data/packages/typedpkg-stubs/setup.py deleted file mode 100644 index 4948dc6a01df..000000000000 --- a/test-data/packages/typedpkg-stubs/setup.py +++ /dev/null @@ -1,13 +0,0 @@ -""" -This setup file installs packages to test mypy's PEP 561 implementation -""" - -from setuptools import setup - -setup( - name='typedpkg-stubs', - author="The mypy team", - version='0.1', - package_data={'typedpkg-stubs': ['sample.pyi', '__init__.pyi', 'py.typed']}, - packages=['typedpkg-stubs'], -) diff --git a/test-data/packages/typedpkg/pyproject.toml b/test-data/packages/typedpkg/pyproject.toml new file mode 100644 index 000000000000..5269c94320e1 --- /dev/null +++ b/test-data/packages/typedpkg/pyproject.toml @@ -0,0 +1,8 @@ +[project] +name = 'typedpkg' +version = '0.1' +description = 'test' + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" diff --git a/test-data/packages/typedpkg/setup.py b/test-data/packages/typedpkg/setup.py deleted file mode 100644 index 11bcfb11a104..000000000000 --- a/test-data/packages/typedpkg/setup.py +++ /dev/null @@ -1,15 +0,0 @@ -""" -This setup file installs packages to test mypy's PEP 561 implementation -""" - -from setuptools import setup - -setup( - name='typedpkg', - author="The mypy team", - version='0.1', - package_data={'typedpkg': ['py.typed']}, - packages=['typedpkg', 'typedpkg.pkg'], - include_package_data=True, - zip_safe=False, -) diff --git a/test-data/packages/typedpkg_ns_a/pyproject.toml b/test-data/packages/typedpkg_ns_a/pyproject.toml new file mode 100644 index 000000000000..cc464af75b17 --- /dev/null +++ 
b/test-data/packages/typedpkg_ns_a/pyproject.toml @@ -0,0 +1,11 @@ +[project] +name = 'typedpkg_namespace.alpha' +version = '0.1' +description = 'test' + +[tool.hatch.build] +include = ["**/*.py", "**/*.pyi", "**/py.typed"] + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" diff --git a/test-data/packages/typedpkg_ns_a/setup.py b/test-data/packages/typedpkg_ns_a/setup.py deleted file mode 100644 index 3dab731cada9..000000000000 --- a/test-data/packages/typedpkg_ns_a/setup.py +++ /dev/null @@ -1,10 +0,0 @@ -from setuptools import setup - -setup( - name='typedpkg_namespace.alpha', - version='1.0.0', - namespace_packages=['typedpkg_ns'], - zip_safe=False, - package_data={'typedpkg_ns.a': ['py.typed']}, - packages=['typedpkg_ns.a'], -) diff --git a/test-data/packages/typedpkg_ns_b-stubs/pyproject.toml b/test-data/packages/typedpkg_ns_b-stubs/pyproject.toml new file mode 100644 index 000000000000..d5275d1ed8b3 --- /dev/null +++ b/test-data/packages/typedpkg_ns_b-stubs/pyproject.toml @@ -0,0 +1,11 @@ +[project] +name = 'typedpkg_ns-stubs' +version = '0.1' +description = 'test' + +[tool.hatch.build] +include = ["**/*.pyi"] + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" diff --git a/test-data/packages/typedpkg_ns_b-stubs/setup.py b/test-data/packages/typedpkg_ns_b-stubs/setup.py deleted file mode 100644 index a5d7df83eeea..000000000000 --- a/test-data/packages/typedpkg_ns_b-stubs/setup.py +++ /dev/null @@ -1,14 +0,0 @@ -""" -This setup file installs packages to test mypy's PEP 561 implementation -""" - -from distutils.core import setup - -setup( - name='typedpkg_ns_b-stubs', - author="The mypy team", - version='0.1', - namespace_packages=['typedpkg_ns-stubs'], - package_data={'typedpkg_ns-stubs.b': ['__init__.pyi', 'bbb.pyi']}, - packages=['typedpkg_ns-stubs.b'], -) diff --git a/test-data/packages/typedpkg_ns_b/pyproject.toml b/test-data/packages/typedpkg_ns_b/pyproject.toml new file mode 100644 index 000000000000..8567af11152e --- /dev/null +++ b/test-data/packages/typedpkg_ns_b/pyproject.toml @@ -0,0 +1,8 @@ +[project] +name = 'typedpkg_namespace.beta' +version = '0.1' +description = 'test' + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" diff --git a/test-data/packages/typedpkg_ns_b/setup.py b/test-data/packages/typedpkg_ns_b/setup.py deleted file mode 100644 index 4f0d0d954a73..000000000000 --- a/test-data/packages/typedpkg_ns_b/setup.py +++ /dev/null @@ -1,10 +0,0 @@ -from setuptools import setup - -setup( - name='typedpkg_namespace.beta', - version='1.0.0', - namespace_packages=['typedpkg_ns'], - zip_safe=False, - package_data={'typedpkg_ns.b': []}, - packages=['typedpkg_ns.b'], -) diff --git a/test-data/pybind11_mypy_demo/src/main.cpp b/test-data/pybind11_mypy_demo/src/main.cpp index ff0f93bf7017..00e5b2f4e871 100644 --- a/test-data/pybind11_mypy_demo/src/main.cpp +++ b/test-data/pybind11_mypy_demo/src/main.cpp @@ -119,8 +119,8 @@ void bind_basics(py::module& basics) { using namespace basics; // Functions - basics.def("answer", &answer); - basics.def("sum", &sum); + basics.def("answer", &answer, "answer docstring, with end quote\""); // tests explicit docstrings + basics.def("sum", &sum, "multiline docstring test, edge case quotes \"\"\"'''"); basics.def("midpoint", &midpoint, py::arg("left"), py::arg("right")); basics.def("weighted_midpoint", weighted_midpoint, py::arg("left"), py::arg("right"), py::arg("alpha")=0.5); diff --git 
a/test-data/pybind11_mypy_demo/stubgen-include-docs/pybind11_mypy_demo/__init__.pyi b/test-data/pybind11_mypy_demo/stubgen-include-docs/pybind11_mypy_demo/__init__.pyi new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/test-data/pybind11_mypy_demo/stubgen-include-docs/pybind11_mypy_demo/basics.pyi b/test-data/pybind11_mypy_demo/stubgen-include-docs/pybind11_mypy_demo/basics.pyi new file mode 100644 index 000000000000..676d7f6d3f15 --- /dev/null +++ b/test-data/pybind11_mypy_demo/stubgen-include-docs/pybind11_mypy_demo/basics.pyi @@ -0,0 +1,112 @@ +from typing import ClassVar + +from typing import overload +PI: float + +class Point: + class AngleUnit: + __members__: ClassVar[dict] = ... # read-only + __entries: ClassVar[dict] = ... + degree: ClassVar[Point.AngleUnit] = ... + radian: ClassVar[Point.AngleUnit] = ... + def __init__(self, value: int) -> None: + """__init__(self: pybind11_mypy_demo.basics.Point.AngleUnit, value: int) -> None""" + def __eq__(self, other: object) -> bool: + """__eq__(self: object, other: object) -> bool""" + def __getstate__(self) -> int: + """__getstate__(self: object) -> int""" + def __hash__(self) -> int: + """__hash__(self: object) -> int""" + def __index__(self) -> int: + """__index__(self: pybind11_mypy_demo.basics.Point.AngleUnit) -> int""" + def __int__(self) -> int: + """__int__(self: pybind11_mypy_demo.basics.Point.AngleUnit) -> int""" + def __ne__(self, other: object) -> bool: + """__ne__(self: object, other: object) -> bool""" + def __setstate__(self, state: int) -> None: + """__setstate__(self: pybind11_mypy_demo.basics.Point.AngleUnit, state: int) -> None""" + @property + def name(self) -> str: ... + @property + def value(self) -> int: ... + + class LengthUnit: + __members__: ClassVar[dict] = ... # read-only + __entries: ClassVar[dict] = ... + inch: ClassVar[Point.LengthUnit] = ... + mm: ClassVar[Point.LengthUnit] = ... + pixel: ClassVar[Point.LengthUnit] = ... + def __init__(self, value: int) -> None: + """__init__(self: pybind11_mypy_demo.basics.Point.LengthUnit, value: int) -> None""" + def __eq__(self, other: object) -> bool: + """__eq__(self: object, other: object) -> bool""" + def __getstate__(self) -> int: + """__getstate__(self: object) -> int""" + def __hash__(self) -> int: + """__hash__(self: object) -> int""" + def __index__(self) -> int: + """__index__(self: pybind11_mypy_demo.basics.Point.LengthUnit) -> int""" + def __int__(self) -> int: + """__int__(self: pybind11_mypy_demo.basics.Point.LengthUnit) -> int""" + def __ne__(self, other: object) -> bool: + """__ne__(self: object, other: object) -> bool""" + def __setstate__(self, state: int) -> None: + """__setstate__(self: pybind11_mypy_demo.basics.Point.LengthUnit, state: int) -> None""" + @property + def name(self) -> str: ... + @property + def value(self) -> int: ... + angle_unit: ClassVar[Point.AngleUnit] = ... + length_unit: ClassVar[Point.LengthUnit] = ... + x_axis: ClassVar[Point] = ... # read-only + y_axis: ClassVar[Point] = ... # read-only + origin: ClassVar[Point] = ... + x: float + y: float + @overload + def __init__(self) -> None: + """__init__(*args, **kwargs) + Overloaded function. + + 1. __init__(self: pybind11_mypy_demo.basics.Point) -> None + + 2. __init__(self: pybind11_mypy_demo.basics.Point, x: float, y: float) -> None""" + @overload + def __init__(self, x: float, y: float) -> None: + """__init__(*args, **kwargs) + Overloaded function. + + 1. __init__(self: pybind11_mypy_demo.basics.Point) -> None + + 2. 
__init__(self: pybind11_mypy_demo.basics.Point, x: float, y: float) -> None""" + @overload + def distance_to(self, x: float, y: float) -> float: + """distance_to(*args, **kwargs) + Overloaded function. + + 1. distance_to(self: pybind11_mypy_demo.basics.Point, x: float, y: float) -> float + + 2. distance_to(self: pybind11_mypy_demo.basics.Point, other: pybind11_mypy_demo.basics.Point) -> float""" + @overload + def distance_to(self, other: Point) -> float: + """distance_to(*args, **kwargs) + Overloaded function. + + 1. distance_to(self: pybind11_mypy_demo.basics.Point, x: float, y: float) -> float + + 2. distance_to(self: pybind11_mypy_demo.basics.Point, other: pybind11_mypy_demo.basics.Point) -> float""" + @property + def length(self) -> float: ... + +def answer() -> int: + '''answer() -> int + + answer docstring, with end quote"''' +def midpoint(left: float, right: float) -> float: + """midpoint(left: float, right: float) -> float""" +def sum(arg0: int, arg1: int) -> int: + '''sum(arg0: int, arg1: int) -> int + + multiline docstring test, edge case quotes """\'\'\'''' +def weighted_midpoint(left: float, right: float, alpha: float = ...) -> float: + """weighted_midpoint(left: float, right: float, alpha: float = 0.5) -> float""" diff --git a/test-data/unit/README.md b/test-data/unit/README.md index 97680c949bef..5a9416603541 100644 --- a/test-data/unit/README.md +++ b/test-data/unit/README.md @@ -12,7 +12,7 @@ feature you added. If you added a new `check-*.test` file, it will be autodiscov Add the test in this format anywhere in the file: [case testNewSyntaxBasics] - # flags: --python-version 3.6 + # flags: --python-version 3.10 x: int x = 5 y: int = 5 @@ -159,7 +159,7 @@ To run mypy on itself: To run the linter: - flake8 + ruff . You can also run all of the above tests using `runtests.py` (this includes type checking mypy and linting): diff --git a/test-data/unit/check-abstract.test b/test-data/unit/check-abstract.test index 8a13e5cb5760..299074050baa 100644 --- a/test-data/unit/check-abstract.test +++ b/test-data/unit/check-abstract.test @@ -9,11 +9,11 @@ from abc import abstractmethod, ABCMeta -i = None # type: I -j = None # type: J -a = None # type: A -b = None # type: B -c = None # type: C +i: I +j: J +a: A +b: B +c: C def f(): i, j, a, b, c # Prevent redefinition @@ -44,10 +44,10 @@ class C(I): pass from abc import abstractmethod, ABCMeta -i = None # type: I -j = None # type: J -a = None # type: A -o = None # type: object +i: I +j: J +a: A +o: object def f(): i, j, a, o # Prevent redefinition @@ -73,9 +73,9 @@ class A(J): pass [case testInheritingAbstractClassInSubclass] from abc import abstractmethod, ABCMeta -i = None # type: I -a = None # type: A -b = None # type: B +i: I +a: A +b: B if int(): i = a # E: Incompatible types in assignment (expression has type "A", variable has type "I") @@ -106,8 +106,8 @@ class I(metaclass=ABCMeta): @abstractmethod def f(self): pass -o = None # type: object -t = None # type: type +o: object +t: type o = I t = I @@ -122,8 +122,10 @@ class I(metaclass=ABCMeta): class A(I): pass class B: pass -i, a, b = None, None, None # type: (I, A, B) -o = None # type: object +i: I +a: A +b: B +o: object if int(): a = cast(I, o) # E: Incompatible types in assignment (expression has type "I", variable has type "A") @@ -196,6 +198,24 @@ x: Type[B] f(x) # OK [out] +[case testAbstractTypeInADict] +from typing import Dict, Type +from abc import abstractmethod + +class Class: + @abstractmethod + def method(self) -> None: + pass + +my_dict_init: Dict[int, Type[Class]] = {0: 
Class} # E: Only concrete class can be given where "Tuple[int, Type[Class]]" is expected + +class Child(Class): + def method(self) -> None: ... + +other_dict_init: Dict[int, Type[Class]] = {0: Child} # ok +[builtins fixtures/dict.pyi] +[out] + [case testInstantiationAbstractsInTypeForAliases] from typing import Type from abc import abstractmethod @@ -220,6 +240,7 @@ f(GoodAlias) [out] [case testInstantiationAbstractsInTypeForVariables] +# flags: --no-strict-optional from typing import Type from abc import abstractmethod @@ -399,7 +420,9 @@ class I(metaclass=ABCMeta): @abstractmethod def f(self, a: int) -> str: pass -i, a, b = None, None, None # type: (I, int, str) +i: I +a: int +b: str if int(): a = i.f(a) # E: Incompatible types in assignment (expression has type "str", variable has type "int") @@ -419,7 +442,9 @@ class J(metaclass=ABCMeta): def f(self, a: int) -> str: pass class I(J): pass -i, a, b = None, None, None # type: (I, int, str) +i: I +a: int +b: str if int(): a = i.f(1) # E: Incompatible types in assignment (expression has type "str", variable has type "int") @@ -505,7 +530,7 @@ class B(metaclass=ABCMeta): @abstractmethod def g(self) -> None: pass class C(A, B): pass -x = None # type: C +x: C x.f() x.g() x.f(x) # E: Too many arguments for "f" of "A" @@ -735,7 +760,45 @@ class A(metaclass=ABCMeta): def x(self) -> int: pass @x.setter def x(self, x: int) -> None: pass -[out] + +[case testReadWriteDeleteAbstractProperty] +# flags: --no-strict-optional +from abc import ABC, abstractmethod +class Abstract(ABC): + @property + @abstractmethod + def prop(self) -> str: ... + + @prop.setter + @abstractmethod + def prop(self, code: str) -> None: ... + + @prop.deleter + @abstractmethod + def prop(self) -> None: ... + +class Good(Abstract): + @property + def prop(self) -> str: ... + @prop.setter + def prop(self, code: str) -> None: ... + @prop.deleter + def prop(self) -> None: ... + +class Bad1(Abstract): + @property # E: Read-only property cannot override read-write property + def prop(self) -> str: ... + +class ThisShouldProbablyError(Abstract): + @property + def prop(self) -> str: ... + @prop.setter + def prop(self, code: str) -> None: ... 
+ +a = Good() +reveal_type(a.prop) # N: Revealed type is "builtins.str" +a.prop = 123 # E: Incompatible types in assignment (expression has type "int", variable has type "str") +[builtins fixtures/property.pyi] [case testInstantiateClassWithReadOnlyAbstractProperty] from abc import abstractproperty, ABCMeta @@ -767,7 +830,7 @@ b = B() b.x() # E: "int" not callable [builtins fixtures/property.pyi] -[case testImplementReradWriteAbstractPropertyViaProperty] +[case testImplementReadWriteAbstractPropertyViaProperty] from abc import abstractproperty, ABCMeta class A(metaclass=ABCMeta): @abstractproperty @@ -816,6 +879,7 @@ main:8: error: Cannot instantiate abstract class "B" with abstract attribute "x" main:9: error: "int" has no attribute "y" [case testSuperWithAbstractProperty] +# flags: --no-strict-optional from abc import abstractproperty, ABCMeta class A(metaclass=ABCMeta): @abstractproperty @@ -1061,7 +1125,6 @@ b.y = 1 -- ----------------------------------------------- [case testEmptyBodyProhibitedFunction] -# flags: --strict-optional from typing import overload, Union def func1(x: str) -> int: pass # E: Missing return statement @@ -1084,7 +1147,6 @@ def func5(x: Union[int, str]) -> Union[int, str]: # E: Missing return statement """Some function.""" [case testEmptyBodyProhibitedMethodNonAbstract] -# flags: --strict-optional from typing import overload, Union class A: @@ -1119,7 +1181,6 @@ class C: [builtins fixtures/classmethod.pyi] [case testEmptyBodyProhibitedPropertyNonAbstract] -# flags: --strict-optional class A: @property def x(self) -> int: ... # E: Missing return statement @@ -1148,7 +1209,6 @@ class C: [builtins fixtures/property.pyi] [case testEmptyBodyNoteABCMeta] -# flags: --strict-optional from abc import ABC class A(ABC): @@ -1157,7 +1217,6 @@ class A(ABC): ... [case testEmptyBodyAllowedFunctionStub] -# flags: --strict-optional import stub [file stub.pyi] from typing import overload, Union @@ -1168,7 +1227,6 @@ def func3(x: str) -> int: """Some function.""" [case testEmptyBodyAllowedMethodNonAbstractStub] -# flags: --strict-optional import stub [file stub.pyi] from typing import overload, Union @@ -1190,7 +1248,6 @@ class B: [builtins fixtures/classmethod.pyi] [case testEmptyBodyAllowedPropertyNonAbstractStub] -# flags: --strict-optional import stub [file stub.pyi] class A: @@ -1221,7 +1278,6 @@ class C: [builtins fixtures/property.pyi] [case testEmptyBodyAllowedMethodAbstract] -# flags: --strict-optional from typing import overload, Union from abc import abstractmethod @@ -1269,7 +1325,6 @@ class C: [builtins fixtures/classmethod.pyi] [case testEmptyBodyAllowedPropertyAbstract] -# flags: --strict-optional from abc import abstractmethod class A: @property @@ -1308,7 +1363,6 @@ class C: [builtins fixtures/property.pyi] [case testEmptyBodyImplicitlyAbstractProtocol] -# flags: --strict-optional from typing import Protocol, overload, Union class P1(Protocol): @@ -1349,7 +1403,6 @@ C3() [builtins fixtures/classmethod.pyi] [case testEmptyBodyImplicitlyAbstractProtocolProperty] -# flags: --strict-optional from typing import Protocol class P1(Protocol): @@ -1379,7 +1432,6 @@ C2() [builtins fixtures/property.pyi] [case testEmptyBodyImplicitlyAbstractProtocolStub] -# flags: --strict-optional from stub import P1, P2, P3, P4 class B1(P1): ... 
@@ -1415,7 +1467,6 @@ class P4(Protocol): [builtins fixtures/classmethod.pyi] [case testEmptyBodyUnsafeAbstractSuper] -# flags: --strict-optional from stub import StubProto, StubAbstract from typing import Protocol from abc import abstractmethod @@ -1464,7 +1515,6 @@ class StubAbstract: def meth(self) -> int: ... [case testEmptyBodyUnsafeAbstractSuperProperty] -# flags: --strict-optional from stub import StubProto, StubAbstract from typing import Protocol from abc import abstractmethod @@ -1522,7 +1572,6 @@ class StubAbstract: [builtins fixtures/property.pyi] [case testEmptyBodyUnsafeAbstractSuperOverloads] -# flags: --strict-optional from stub import StubProto from typing import Protocol, overload, Union @@ -1607,7 +1656,6 @@ class SubAbstract(Abstract): return super().meth() [case testEmptyBodyNoSuperWarningOptionalReturn] -# flags: --strict-optional from typing import Protocol, Optional from abc import abstractmethod @@ -1625,7 +1673,6 @@ class SubAbstract(Abstract): return super().meth() [case testEmptyBodyTypeCheckingOnly] -# flags: --strict-optional from typing import TYPE_CHECKING class C: diff --git a/test-data/unit/check-async-await.test b/test-data/unit/check-async-await.test index 83a66ef4a815..653025a0bb24 100644 --- a/test-data/unit/check-async-await.test +++ b/test-data/unit/check-async-await.test @@ -183,7 +183,6 @@ async def f() -> None: [typing fixtures/typing-async.pyi] [case testAsyncForComprehension] -# flags: --python-version 3.6 from typing import Generic, Iterable, TypeVar, AsyncIterator, Tuple T = TypeVar('T') @@ -223,7 +222,6 @@ async def generatorexp(obj: Iterable[int]): [typing fixtures/typing-async.pyi] [case testAsyncForComprehensionErrors] -# flags: --python-version 3.6 from typing import Generic, Iterable, TypeVar, AsyncIterator, Tuple T = TypeVar('T') @@ -240,16 +238,10 @@ class asyncify(Generic[T], AsyncIterator[T]): raise StopAsyncIteration async def wrong_iterable(obj: Iterable[int]): - [i async for i in obj] - [i for i in asyncify(obj)] - {i: i async for i in obj} - {i: i for i in asyncify(obj)} - -[out] -main:18: error: "Iterable[int]" has no attribute "__aiter__" (not async iterable) -main:19: error: "asyncify[int]" has no attribute "__iter__"; maybe "__aiter__"? (not iterable) -main:20: error: "Iterable[int]" has no attribute "__aiter__" (not async iterable) -main:21: error: "asyncify[int]" has no attribute "__iter__"; maybe "__aiter__"? (not iterable) + [i async for i in obj] # E: "Iterable[int]" has no attribute "__aiter__" (not async iterable) + [i for i in asyncify(obj)] # E: "asyncify[int]" has no attribute "__iter__"; maybe "__aiter__"? (not iterable) + {i: i async for i in obj} # E: "Iterable[int]" has no attribute "__aiter__" (not async iterable) + {i: i for i in asyncify(obj)} # E: "asyncify[int]" has no attribute "__iter__"; maybe "__aiter__"? 
(not iterable) [builtins fixtures/async_await.pyi] [typing fixtures/typing-async.pyi] @@ -291,7 +283,7 @@ async def f() -> None: [typing fixtures/typing-async.pyi] [case testAsyncWithErrorBadAenter2] - +# flags: --no-strict-optional class C: def __aenter__(self) -> None: pass async def __aexit__(self, x, y, z) -> None: pass @@ -313,7 +305,7 @@ async def f() -> None: [typing fixtures/typing-async.pyi] [case testAsyncWithErrorBadAexit2] - +# flags: --no-strict-optional class C: async def __aenter__(self) -> int: pass def __aexit__(self, x, y, z) -> None: pass @@ -340,17 +332,6 @@ async def f() -> None: [builtins fixtures/async_await.pyi] [typing fixtures/typing-async.pyi] -[case testNoYieldInAsyncDef] -# flags: --python-version 3.5 - -async def f(): - yield None # E: "yield" in async function -async def g(): - yield # E: "yield" in async function -async def h(): - x = yield # E: "yield" in async function -[builtins fixtures/async_await.pyi] - [case testNoYieldFromInAsyncDef] async def f(): @@ -422,7 +403,6 @@ def f() -> Generator[int, str, int]: -- --------------------------------------------------------------------- [case testAsyncGenerator] -# flags: --python-version 3.6 from typing import AsyncGenerator, Generator async def f() -> int: @@ -450,7 +430,6 @@ async def wrong_return() -> Generator[int, None, None]: # E: The return type of [typing fixtures/typing-async.pyi] [case testAsyncGeneratorReturnIterator] -# flags: --python-version 3.6 from typing import AsyncIterator async def gen() -> AsyncIterator[int]: @@ -466,7 +445,6 @@ async def use_gen() -> None: [typing fixtures/typing-async.pyi] [case testAsyncGeneratorManualIter] -# flags: --python-version 3.6 from typing import AsyncGenerator async def genfunc() -> AsyncGenerator[int, None]: @@ -484,7 +462,6 @@ async def user() -> None: [typing fixtures/typing-async.pyi] [case testAsyncGeneratorAsend] -# flags: --python-version 3.6 from typing import AsyncGenerator async def f() -> None: @@ -498,14 +475,13 @@ async def gen() -> AsyncGenerator[int, str]: async def h() -> None: g = gen() - await g.asend(()) # E: Argument 1 to "asend" of "AsyncGenerator" has incompatible type "Tuple[]"; expected "str" + await g.asend(()) # E: Argument 1 to "asend" of "AsyncGenerator" has incompatible type "Tuple[()]"; expected "str" reveal_type(await g.asend('hello')) # N: Revealed type is "builtins.int" [builtins fixtures/dict.pyi] [typing fixtures/typing-async.pyi] [case testAsyncGeneratorAthrow] -# flags: --python-version 3.6 from typing import AsyncGenerator async def gen() -> AsyncGenerator[str, int]: @@ -524,7 +500,6 @@ async def h() -> None: [typing fixtures/typing-async.pyi] [case testAsyncGeneratorNoSyncIteration] -# flags: --python-version 3.6 from typing import AsyncGenerator async def gen() -> AsyncGenerator[int, None]: @@ -532,17 +507,13 @@ async def gen() -> AsyncGenerator[int, None]: yield i def h() -> None: - for i in gen(): + for i in gen(): # E: "AsyncGenerator[int, None]" has no attribute "__iter__"; maybe "__aiter__"? (not iterable) pass [builtins fixtures/dict.pyi] [typing fixtures/typing-async.pyi] -[out] -main:9: error: "AsyncGenerator[int, None]" has no attribute "__iter__"; maybe "__aiter__"? 
(not iterable) - [case testAsyncGeneratorNoYieldFrom] -# flags: --python-version 3.6 from typing import AsyncGenerator async def f() -> AsyncGenerator[int, None]: @@ -555,7 +526,6 @@ async def gen() -> AsyncGenerator[int, None]: [typing fixtures/typing-async.pyi] [case testAsyncGeneratorNoReturnWithValue] -# flags: --python-version 3.6 from typing import AsyncGenerator async def return_int() -> AsyncGenerator[int, None]: @@ -945,17 +915,21 @@ async def bar(x: Union[A, B]) -> None: [typing fixtures/typing-async.pyi] [case testAsyncIteratorWithIgnoredErrors] -from m import L +import m -async def func(l: L) -> None: +async def func(l: m.L) -> None: reveal_type(l.get_iterator) # N: Revealed type is "def () -> typing.AsyncIterator[builtins.str]" reveal_type(l.get_iterator2) # N: Revealed type is "def () -> typing.AsyncIterator[builtins.str]" async for i in l.get_iterator(): reveal_type(i) # N: Revealed type is "builtins.str" + reveal_type(m.get_generator) # N: Revealed type is "def () -> typing.AsyncGenerator[builtins.int, None]" + async for i2 in m.get_generator(): + reveal_type(i2) # N: Revealed type is "builtins.int" + [file m.py] # mypy: ignore-errors=True -from typing import AsyncIterator +from typing import AsyncIterator, AsyncGenerator class L: async def some_func(self, i: int) -> str: @@ -968,6 +942,9 @@ class L: if self: a = (yield 'x') +async def get_generator() -> AsyncGenerator[int, None]: + yield 1 + [builtins fixtures/async_await.pyi] [typing fixtures/typing-async.pyi] @@ -997,7 +974,7 @@ crasher = [await foo(x) for x in [1, 2, 3]] # E: "await" outside function [top def bad() -> None: # These are always critical / syntax issues: - y = [await foo(x) for x in [1, 2, 3]] # E: "await" outside coroutine ("async def") + y = [await foo(x) for x in [1, 2, 3]] # E: "await" outside coroutine ("async def") [await-not-async] async def good() -> None: y = [await foo(x) for x in [1, 2, 3]] # OK [builtins fixtures/async_await.pyi] diff --git a/test-data/unit/check-basic.test b/test-data/unit/check-basic.test index e10e69267c5a..61a7160ce4f4 100644 --- a/test-data/unit/check-basic.test +++ b/test-data/unit/check-basic.test @@ -2,8 +2,8 @@ [out] [case testAssignmentAndVarDef] -a = None # type: A -b = None # type: B +a: A +b: B if int(): a = a if int(): @@ -17,14 +17,14 @@ class A: class B: def __init__(self): pass -x = None # type: A +x: A x = A() if int(): x = B() # E: Incompatible types in assignment (expression has type "B", variable has type "A") [case testInheritInitFromObject] class A(object): pass class B(object): pass -x = None # type: A +x: A if int(): x = A() if int(): @@ -32,8 +32,8 @@ if int(): [case testImplicitInheritInitFromObject] class A: pass class B: pass -x = None # type: A -o = None # type: object +x: A +o: object if int(): x = o # E: Incompatible types in assignment (expression has type "object", variable has type "A") if int(): @@ -59,7 +59,7 @@ x = B() # type: A y = A() # type: B # E: Incompatible types in assignment (expression has type "A", variable has type "B") [case testDeclaredVariableInParentheses] -(x) = None # type: int +(x) = 2 # type: int if int(): x = '' # E: Incompatible types in assignment (expression has type "str", variable has type "int") if int(): @@ -135,8 +135,8 @@ main:6: error: Missing positional arguments "baz", "bas" in call to "foo" [case testLocalVariables] def f() -> None: - x = None # type: A - y = None # type: B + x: A + y: B if int(): x = x x = y # E: Incompatible types in assignment (expression has type "B", variable has type "A") @@ -229,12 
+229,12 @@ reveal_type(__annotations__) # N: Revealed type is "builtins.dict[builtins.str, [case testLocalVariableShadowing] class A: pass class B: pass -a = None # type: A +a: A if int(): a = B() # E: Incompatible types in assignment (expression has type "B", variable has type "A") a = A() def f() -> None: - a = None # type: B + a: B if int(): a = A() # E: Incompatible types in assignment (expression has type "A", variable has type "B") a = B() @@ -242,8 +242,8 @@ a = B() # E: Incompatible types in assignment (expression has type "B", va a = A() [case testGlobalDefinedInBlockWithType] class A: pass -while A: - a = None # type: A +while 1: + a: A if int(): a = A() a = object() # E: Incompatible types in assignment (expression has type "object", variable has type "A") @@ -385,7 +385,6 @@ y = x # E: Incompatible types in assignment (expression has type "Dict[str, int] [builtins fixtures/dict.pyi] [case testDistinctTypes] -# flags: --strict-optional import b [file a.py] diff --git a/test-data/unit/check-bound.test b/test-data/unit/check-bound.test index eb97bde32e1f..1c713fd77c38 100644 --- a/test-data/unit/check-bound.test +++ b/test-data/unit/check-bound.test @@ -37,15 +37,16 @@ T = TypeVar('T', bound=A) class G(Generic[T]): def __init__(self, x: T) -> None: pass -v = None # type: G[A] -w = None # type: G[B] -x = None # type: G[str] # E: Type argument "str" of "G" must be a subtype of "A" +v: G[A] +w: G[B] +x: G[str] # E: Type argument "str" of "G" must be a subtype of "A" y = G('a') # E: Value of type variable "T" of "G" cannot be "str" z = G(A()) z = G(B()) [case testBoundVoid] +# flags: --no-strict-optional from typing import TypeVar, Generic T = TypeVar('T', bound=int) class C(Generic[T]): @@ -70,10 +71,11 @@ def g(): pass f(g()) C(g()) -z = None # type: C +z: C [case testBoundHigherOrderWithVoid] +# flags: --no-strict-optional from typing import TypeVar, Callable class A: pass T = TypeVar('T', bound=A) diff --git a/test-data/unit/check-callable.test b/test-data/unit/check-callable.test index 7d25eb271f53..07c42de74bb3 100644 --- a/test-data/unit/check-callable.test +++ b/test-data/unit/check-callable.test @@ -587,3 +587,14 @@ class C(B): def f(self, x: int) -> C: ... class B: ... [builtins fixtures/classmethod.pyi] + +[case testCallableUnionCallback] +from typing import Union, Callable, TypeVar + +TA = TypeVar("TA", bound="A") +class A: + def __call__(self: TA, other: Union[Callable, TA]) -> TA: ... 
+a: A +a() # E: Missing positional argument "other" in call to "__call__" of "A" +a(a) +a(lambda: None) diff --git a/test-data/unit/check-class-namedtuple.test b/test-data/unit/check-class-namedtuple.test index 8ae7f6555f9d..a095f212b900 100644 --- a/test-data/unit/check-class-namedtuple.test +++ b/test-data/unit/check-class-namedtuple.test @@ -1,13 +1,4 @@ -[case testNewNamedTupleOldPythonVersion] -# flags: --python-version 3.5 -from typing import NamedTuple - -class E(NamedTuple): # E: NamedTuple class syntax is only supported in Python 3.6 - pass -[builtins fixtures/tuple.pyi] - [case testNewNamedTupleNoUnderscoreFields] -# flags: --python-version 3.6 from typing import NamedTuple class X(NamedTuple): @@ -17,7 +8,6 @@ class X(NamedTuple): [builtins fixtures/tuple.pyi] [case testNewNamedTupleAccessingAttributes] -# flags: --python-version 3.6 from typing import NamedTuple class X(NamedTuple): @@ -31,7 +21,6 @@ x.z # E: "X" has no attribute "z" [builtins fixtures/tuple.pyi] [case testNewNamedTupleAttributesAreReadOnly] -# flags: --python-version 3.6 from typing import NamedTuple class X(NamedTuple): @@ -47,7 +36,6 @@ a.x = 5 # E: Property "x" defined in "X" is read-only [builtins fixtures/tuple.pyi] [case testNewNamedTupleCreateWithPositionalArguments] -# flags: --python-version 3.6 from typing import NamedTuple class X(NamedTuple): @@ -62,7 +50,6 @@ x = X(1, '2', 3) # E: Too many arguments for "X" [builtins fixtures/tuple.pyi] [case testNewNamedTupleShouldBeSingleBase] -# flags: --python-version 3.6 from typing import NamedTuple class A: ... @@ -71,7 +58,6 @@ class X(NamedTuple, A): # E: NamedTuple should be a single base [builtins fixtures/tuple.pyi] [case testCreateNewNamedTupleWithKeywordArguments] -# flags: --python-version 3.6 from typing import NamedTuple class X(NamedTuple): @@ -85,7 +71,6 @@ x = X(y='x') # E: Missing positional argument "x" in call to "X" [builtins fixtures/tuple.pyi] [case testNewNamedTupleCreateAndUseAsTuple] -# flags: --python-version 3.6 from typing import NamedTuple class X(NamedTuple): @@ -98,7 +83,6 @@ a, b, c = x # E: Need more than 2 values to unpack (3 expected) [builtins fixtures/tuple.pyi] [case testNewNamedTupleWithItemTypes] -# flags: --python-version 3.6 from typing import NamedTuple class N(NamedTuple): @@ -116,7 +100,6 @@ if int(): [builtins fixtures/tuple.pyi] [case testNewNamedTupleConstructorArgumentTypes] -# flags: --python-version 3.6 from typing import NamedTuple class N(NamedTuple): @@ -130,7 +113,6 @@ N(b='x', a=1) [builtins fixtures/tuple.pyi] [case testNewNamedTupleAsBaseClass] -# flags: --python-version 3.6 from typing import NamedTuple class N(NamedTuple): @@ -151,7 +133,6 @@ if int(): [builtins fixtures/tuple.pyi] [case testNewNamedTupleSelfTypeWithNamedTupleAsBase] -# flags: --python-version 3.6 from typing import NamedTuple class A(NamedTuple): @@ -172,7 +153,6 @@ class B(A): [out] [case testNewNamedTupleTypeReferenceToClassDerivedFrom] -# flags: --python-version 3.6 from typing import NamedTuple class A(NamedTuple): @@ -194,7 +174,6 @@ class B(A): [builtins fixtures/tuple.pyi] [case testNewNamedTupleSubtyping] -# flags: --python-version 3.6 from typing import NamedTuple, Tuple class A(NamedTuple): @@ -222,7 +201,6 @@ if int(): [builtins fixtures/tuple.pyi] [case testNewNamedTupleSimpleTypeInference] -# flags: --python-version 3.6 from typing import NamedTuple, Tuple class A(NamedTuple): @@ -239,7 +217,6 @@ a = (1,) # E: Incompatible types in assignment (expression has type "Tuple[int] [builtins fixtures/list.pyi] [case 
testNewNamedTupleMissingClassAttribute] -# flags: --python-version 3.6 from typing import NamedTuple class MyNamedTuple(NamedTuple): @@ -250,7 +227,6 @@ MyNamedTuple.x # E: "Type[MyNamedTuple]" has no attribute "x" [builtins fixtures/tuple.pyi] [case testNewNamedTupleEmptyItems] -# flags: --python-version 3.6 from typing import NamedTuple class A(NamedTuple): @@ -258,7 +234,6 @@ class A(NamedTuple): [builtins fixtures/tuple.pyi] [case testNewNamedTupleForwardRef] -# flags: --python-version 3.6 from typing import NamedTuple class A(NamedTuple): @@ -271,7 +246,6 @@ a = A(1) # E: Argument 1 to "A" has incompatible type "int"; expected "B" [builtins fixtures/tuple.pyi] [case testNewNamedTupleProperty36] -# flags: --python-version 3.6 from typing import NamedTuple class A(NamedTuple): @@ -288,7 +262,6 @@ C(2).b [builtins fixtures/property.pyi] [case testNewNamedTupleAsDict] -# flags: --python-version 3.6 from typing import NamedTuple, Any class X(NamedTuple): @@ -301,7 +274,6 @@ reveal_type(x._asdict()) # N: Revealed type is "builtins.dict[builtins.str, Any [builtins fixtures/dict.pyi] [case testNewNamedTupleReplaceTyped] -# flags: --python-version 3.6 from typing import NamedTuple class X(NamedTuple): @@ -315,7 +287,6 @@ x._replace(y=5) # E: Argument "y" to "_replace" of "X" has incompatible type "i [builtins fixtures/tuple.pyi] [case testNewNamedTupleFields] -# flags: --python-version 3.6 from typing import NamedTuple class X(NamedTuple): @@ -325,12 +296,14 @@ class X(NamedTuple): reveal_type(X._fields) # N: Revealed type is "Tuple[builtins.str, builtins.str]" reveal_type(X._field_types) # N: Revealed type is "builtins.dict[builtins.str, Any]" reveal_type(X._field_defaults) # N: Revealed type is "builtins.dict[builtins.str, Any]" -reveal_type(X.__annotations__) # N: Revealed type is "builtins.dict[builtins.str, Any]" + +# In typeshed's stub for builtins.pyi, __annotations__ is `dict[str, Any]`, +# but it's inferred as `Mapping[str, object]` here due to the fixture we're using +reveal_type(X.__annotations__) # N: Revealed type is "typing.Mapping[builtins.str, builtins.object]" [builtins fixtures/dict.pyi] [case testNewNamedTupleUnit] -# flags: --python-version 3.6 from typing import NamedTuple class X(NamedTuple): @@ -342,7 +315,6 @@ x._fields[0] # E: Tuple index out of range [builtins fixtures/tuple.pyi] [case testNewNamedTupleJoinNamedTuple] -# flags: --python-version 3.6 from typing import NamedTuple class X(NamedTuple): @@ -357,7 +329,6 @@ reveal_type([X(3, 'b'), Y(1, 'a')]) # N: Revealed type is "builtins.list[Tuple[ [builtins fixtures/list.pyi] [case testNewNamedTupleJoinTuple] -# flags: --python-version 3.6 from typing import NamedTuple class X(NamedTuple): @@ -370,7 +341,6 @@ reveal_type([X(1, 'a'), (3, 'b')]) # N: Revealed type is "builtins.list[Tuple[b [builtins fixtures/list.pyi] [case testNewNamedTupleWithTooManyArguments] -# flags: --python-version 3.6 from typing import NamedTuple class X(NamedTuple): @@ -380,25 +350,17 @@ class X(NamedTuple): [builtins fixtures/tuple.pyi] [case testNewNamedTupleWithInvalidItems2] -# flags: --python-version 3.6 import typing class X(typing.NamedTuple): x: int - y = 1 - x.x: int + y = 1 # E: Invalid statement in NamedTuple definition; expected "field_name: field_type [= default]" + x.x: int # E: Invalid statement in NamedTuple definition; expected "field_name: field_type [= default]" z: str = 'z' - aa: int - -[out] -main:6: error: Invalid statement in NamedTuple definition; expected "field_name: field_type [= default]" -main:7: error: Invalid 
statement in NamedTuple definition; expected "field_name: field_type [= default]" -main:9: error: Non-default NamedTuple fields cannot follow default fields - + aa: int # E: Non-default NamedTuple fields cannot follow default fields [builtins fixtures/list.pyi] [case testNewNamedTupleWithoutTypesSpecified] -# flags: --python-version 3.6 from typing import NamedTuple class X(NamedTuple): @@ -407,7 +369,6 @@ class X(NamedTuple): [builtins fixtures/tuple.pyi] [case testTypeUsingTypeCNamedTuple] -# flags: --python-version 3.6 from typing import NamedTuple, Type class N(NamedTuple): @@ -415,13 +376,10 @@ class N(NamedTuple): y: str def f(a: Type[N]): - a() + a() # E: Missing positional arguments "x", "y" in call to "N" [builtins fixtures/list.pyi] -[out] -main:9: error: Missing positional arguments "x", "y" in call to "N" [case testNewNamedTupleWithDefaults] -# flags: --python-version 3.6 from typing import List, NamedTuple, Optional class X(NamedTuple): @@ -461,7 +419,6 @@ UserDefined(1) # E: Argument 1 to "UserDefined" has incompatible type "int"; ex [builtins fixtures/list.pyi] [case testNewNamedTupleWithDefaultsStrictOptional] -# flags: --strict-optional --python-version 3.6 from typing import List, NamedTuple, Optional class HasNone(NamedTuple): @@ -480,7 +437,6 @@ class CannotBeNone(NamedTuple): [builtins fixtures/list.pyi] [case testNewNamedTupleWrongType] -# flags: --python-version 3.6 from typing import NamedTuple class X(NamedTuple): @@ -489,7 +445,6 @@ class X(NamedTuple): [builtins fixtures/tuple.pyi] [case testNewNamedTupleErrorInDefault] -# flags: --python-version 3.6 from typing import NamedTuple class X(NamedTuple): @@ -497,7 +452,6 @@ class X(NamedTuple): [builtins fixtures/tuple.pyi] [case testNewNamedTupleInheritance] -# flags: --python-version 3.6 from typing import NamedTuple class X(NamedTuple): diff --git a/test-data/unit/check-classes.test b/test-data/unit/check-classes.test index c2eddbc597a0..04b51bb603c5 100644 --- a/test-data/unit/check-classes.test +++ b/test-data/unit/check-classes.test @@ -8,8 +8,8 @@ class A: class B: def bar(self, x: 'B', y: A) -> None: pass -a = None # type: A -b = None # type: B +a: A +b: B a.foo(B()) # E: Argument 1 to "foo" of "A" has incompatible type "B"; expected "A" a.bar(B(), A()) # E: "A" has no attribute "bar" @@ -23,7 +23,7 @@ class A: def bar(self, x: 'B') -> None: pass class B(A): pass -a = None # type: A +a: A a.foo(A()) a.foo(B()) a.bar(A()) # E: Argument 1 to "bar" of "A" has incompatible type "A"; expected "B" @@ -34,7 +34,7 @@ class A: def foo(self, x: 'B') -> None: pass class B(A): pass -a = None # type: B +a: B a.foo(A()) # Fail a.foo(B()) @@ -46,7 +46,7 @@ main:6: error: Argument 1 to "foo" of "A" has incompatible type "A"; expected "B class A: def foo(self, x: 'A') -> None: pass -a = None # type: A +a: A a.foo() # Fail a.foo(object(), A()) # Fail [out] @@ -103,7 +103,8 @@ main:5: error: "A" has no attribute "g" import typing class A: def f(self): pass -A().f = None # E: Cannot assign to a method +A().f = None # E: Cannot assign to a method \ + # E: Incompatible types in assignment (expression has type "None", variable has type "Callable[[], Any]") [case testOverrideAttributeWithMethod] @@ -202,8 +203,8 @@ class A: self.a = aa self.b = bb class B: pass -a = None # type: A -b = None # type: B +a: A +b: B a.a = b # Fail a.b = a # Fail b.a # Fail @@ -217,8 +218,8 @@ main:11: error: "B" has no attribute "a" [case testExplicitAttributeInBody] class A: - x = None # type: A -a = None # type: A + x: A +a: A a.x = object() # E: 
Incompatible types in assignment (expression has type "object", variable has type "A") a.x = A() @@ -458,9 +459,10 @@ class A: def g(self) -> 'A': pass class B(A): def f(self) -> A: pass # Fail - def g(self) -> None: pass + def g(self) -> None: pass # Fail [out] main:6: error: Return type "A" of "f" incompatible with return type "None" in supertype "A" +main:7: error: Return type "None" of "g" incompatible with return type "A" in supertype "A" [case testOverride__new__WithDifferentSignature] class A: @@ -907,7 +909,7 @@ b = __init__() # type: B # E: Incompatible types in assignment (expression has t from typing import Any, cast class A: def __init__(self, a: 'A') -> None: pass -a = None # type: A +a: A a.__init__(a) # E: Accessing "__init__" on an instance is unsound, since instance.__init__ could be from an incompatible subclass (cast(Any, a)).__init__(a) @@ -933,7 +935,6 @@ if int(): b = D2() [case testConstructorJoinsWithCustomMetaclass] -# flags: --strict-optional from typing import TypeVar import abc @@ -1017,13 +1018,14 @@ class A: A.f(A()) A.f(object()) # E: Argument 1 to "f" of "A" has incompatible type "object"; expected "A" A.f() # E: Missing positional argument "self" in call to "f" of "A" -A.f(None, None) # E: Too many arguments for "f" of "A" +A.f(None, None) # E: Too many arguments for "f" of "A" \ + # E: Argument 1 to "f" of "A" has incompatible type "None"; expected "A" [case testAccessAttributeViaClass] import typing class B: pass class A: - x = None # type: A + x: A a = A.x # type: A b = A.x # type: B # E: Incompatible types in assignment (expression has type "A", variable has type "B") @@ -1060,7 +1062,7 @@ A.f() # E: Missing positional argument "self" in call to "f" of "A" import typing class B: pass class A: - x = None # type: B + x: B A.x = B() A.x = object() # E: Incompatible types in assignment (expression has type "object", variable has type "B") @@ -1077,8 +1079,8 @@ A.x = A() # E: Incompatible types in assignment (expression has type "A", vari class B: pass class A: def __init__(self, b: B) -> None: pass -a = None # type: A -b = None # type: B +a: A +b: B A.__init__(a, b) A.__init__(b, b) # E: Argument 1 to "__init__" of "A" has incompatible type "B"; expected "A" A.__init__(a, a) # E: Argument 2 to "__init__" of "A" has incompatible type "A"; expected "B" @@ -1087,13 +1089,15 @@ A.__init__(a, a) # E: Argument 2 to "__init__" of "A" has incompatible type "A"; import typing class A: def f(self): pass -A.f = None # E: Cannot assign to a method +A.f = None # E: Cannot assign to a method \ + # E: Incompatible types in assignment (expression has type "None", variable has type "Callable[[A], Any]") [case testAssignToNestedClassViaClass] import typing class A: class B: pass -A.B = None # E: Cannot assign to a type +A.B = None # E: Cannot assign to a type \ + # E: Incompatible types in assignment (expression has type "None", variable has type "Type[B]") [targets __main__] [case testAccessingClassAttributeWithTypeInferenceIssue] @@ -1139,7 +1143,7 @@ A[int, int].x # E: Access to generic instance variables via class is ambiguous def f() -> None: class A: def g(self) -> None: pass - a = None # type: A + a: A a.g() a.g(a) # E: Too many arguments for "g" of "A" [targets __main__, __main__.f] @@ -1158,7 +1162,7 @@ def test() -> None: reveal_type(x) # N: Revealed type is "T`-1" reveal_type(x.returns_int()) # N: Revealed type is "builtins.int" return foo - reveal_type(Foo.bar) # N: Revealed type is "def [T <: __main__.Foo@5] (self: __main__.Foo@5, foo: T`-1) -> T`-1" + 
reveal_type(Foo.bar) # N: Revealed type is "def [T <: __main__.Foo@5] (self: __main__.Foo@5, foo: T`1) -> T`1" [case testGenericClassWithInvalidTypevarUseWithinFunction] from typing import TypeVar @@ -1200,7 +1204,7 @@ class A: def f() -> None: class A: pass - a = None # type: A + a: A if int(): a = A() a = object() # E: Incompatible types in assignment (expression has type "object", variable has type "A") @@ -1209,7 +1213,7 @@ def f() -> None: [case testExternalReferenceToClassWithinClass] class A: class B: pass -b = None # type: A.B +b: A.B if int(): b = A.B() if int(): @@ -1256,19 +1260,19 @@ reveal_type(Foo().Meta.name) # N: Revealed type is "builtins.str" class A: def __init__(self): - self.x = None # type: int # N: By default the bodies of untyped functions are not checked, consider using --check-untyped-defs -a = None # type: A + self.x: int # N: By default the bodies of untyped functions are not checked, consider using --check-untyped-defs +a: A a.x = 1 a.x = '' # E: Incompatible types in assignment (expression has type "str", variable has type "int") [case testAccessAttributeDeclaredInInitBeforeDeclaration] -a = None # type: A +a: A a.x = 1 a.x = '' # E: Incompatible types in assignment (expression has type "str", variable has type "int") class A: def __init__(self): - self.x = None # type: int # N: By default the bodies of untyped functions are not checked, consider using --check-untyped-defs + self.x: int # N: By default the bodies of untyped functions are not checked, consider using --check-untyped-defs -- Special cases @@ -1624,7 +1628,6 @@ a = A() reveal_type(a.f) # N: Revealed type is "__main__.D" [case testAccessingDescriptorFromClass] -# flags: --strict-optional from d import D, Base class A(Base): f = D() @@ -1642,7 +1645,6 @@ class D: [builtins fixtures/bool.pyi] [case testAccessingDescriptorFromClassWrongBase] -# flags: --strict-optional from d import D, Base class A: f = D() @@ -1659,13 +1661,13 @@ class D: def __get__(self, inst: Base, own: Type[Base]) -> str: pass [builtins fixtures/bool.pyi] [out] -main:5: error: Argument 2 to "__get__" of "D" has incompatible type "Type[A]"; expected "Type[Base]" -main:5: note: Revealed type is "d.D" -main:6: error: No overload variant of "__get__" of "D" matches argument types "A", "Type[A]" -main:6: note: Possible overload variants: -main:6: note: def __get__(self, inst: None, own: Type[Base]) -> D -main:6: note: def __get__(self, inst: Base, own: Type[Base]) -> str -main:6: note: Revealed type is "Any" +main:4: error: Argument 2 to "__get__" of "D" has incompatible type "Type[A]"; expected "Type[Base]" +main:4: note: Revealed type is "d.D" +main:5: error: No overload variant of "__get__" of "D" matches argument types "A", "Type[A]" +main:5: note: Possible overload variants: +main:5: note: def __get__(self, inst: None, own: Type[Base]) -> D +main:5: note: def __get__(self, inst: Base, own: Type[Base]) -> str +main:5: note: Revealed type is "Any" [case testAccessingGenericNonDataDescriptor] from typing import TypeVar, Type, Generic, Any @@ -1697,7 +1699,6 @@ a.g = '' a.g = 1 # E: Incompatible types in assignment (expression has type "int", variable has type "str") [case testAccessingGenericDescriptorFromClass] -# flags: --strict-optional from d import D class A: f = D(10) # type: D[A, int] @@ -1719,7 +1720,6 @@ class D(Generic[T, V]): [builtins fixtures/bool.pyi] [case testAccessingGenericDescriptorFromInferredClass] -# flags: --strict-optional from typing import Type from d import D class A: @@ -1740,11 +1740,10 @@ class 
D(Generic[T, V]): def __get__(self, inst: T, own: Type[T]) -> V: pass [builtins fixtures/bool.pyi] [out] -main:8: note: Revealed type is "d.D[__main__.A, builtins.int]" -main:9: note: Revealed type is "d.D[__main__.A, builtins.str]" +main:7: note: Revealed type is "d.D[__main__.A, builtins.int]" +main:8: note: Revealed type is "d.D[__main__.A, builtins.str]" [case testAccessingGenericDescriptorFromClassBadOverload] -# flags: --strict-optional from d import D class A: f = D(10) # type: D[A, int] @@ -1761,11 +1760,11 @@ class D(Generic[T, V]): def __get__(self, inst: T, own: Type[T]) -> V: pass [builtins fixtures/bool.pyi] [out] -main:5: error: No overload variant of "__get__" of "D" matches argument types "None", "Type[A]" -main:5: note: Possible overload variants: -main:5: note: def __get__(self, inst: None, own: None) -> D[A, int] -main:5: note: def __get__(self, inst: A, own: Type[A]) -> int -main:5: note: Revealed type is "Any" +main:4: error: No overload variant of "__get__" of "D" matches argument types "None", "Type[A]" +main:4: note: Possible overload variants: +main:4: note: def __get__(self, inst: None, own: None) -> D[A, int] +main:4: note: def __get__(self, inst: A, own: Type[A]) -> int +main:4: note: Revealed type is "Any" [case testAccessingNonDataDescriptorSubclass] from typing import Any @@ -1960,8 +1959,8 @@ from typing import _promote class A: pass @_promote(A) class B: pass -a = None # type: A -b = None # type: B +a: A +b: B if int(): b = a # E: Incompatible types in assignment (expression has type "A", variable has type "B") a = b @@ -1974,8 +1973,8 @@ class A: pass class B: pass @_promote(B) class C: pass -a = None # type: A -c = None # type: C +a: A +c: C if int(): c = a # E: Incompatible types in assignment (expression has type "A", variable has type "C") a = c @@ -2019,7 +2018,7 @@ tmp/foo.pyi:5: note: @overload tmp/foo.pyi:5: note: def __add__(self, int, /) -> int tmp/foo.pyi:5: note: @overload tmp/foo.pyi:5: note: def __add__(self, str, /) -> str -tmp/foo.pyi:5: note: Overloaded operator methods cannot have wider argument types in overrides +tmp/foo.pyi:5: note: Overloaded operator methods can't have wider argument types in overrides [case testOperatorMethodOverrideWideningArgumentType] import typing @@ -2138,7 +2137,7 @@ tmp/foo.pyi:8: note: @overload tmp/foo.pyi:8: note: def __add__(self, str, /) -> A tmp/foo.pyi:8: note: @overload tmp/foo.pyi:8: note: def __add__(self, type, /) -> A -tmp/foo.pyi:8: note: Overloaded operator methods cannot have wider argument types in overrides +tmp/foo.pyi:8: note: Overloaded operator methods can't have wider argument types in overrides [case testOverloadedOperatorMethodOverrideWithSwitchedItemOrder] from foo import * @@ -2734,8 +2733,8 @@ main:8: note: This violates the Liskov substitution principle main:8: note: See https://mypy.readthedocs.io/en/stable/common_issues.html#incompatible-overrides [case testGetattribute] - -a, b = None, None # type: A, B +a: A +b: B class A: def __getattribute__(self, x: str) -> A: return A() @@ -2790,8 +2789,8 @@ main:4: error: Invalid signature "Callable[[B, A], B]" for "__getattribute__" main:6: error: Invalid signature "Callable[[C, str, str], C]" for "__getattribute__" [case testGetattr] - -a, b = None, None # type: A, B +a: A +b: B class A: def __getattr__(self, x: str) -> A: return A() @@ -3072,8 +3071,9 @@ C(bar='') # E: Unexpected keyword argument "bar" for "C" [builtins fixtures/__new__.pyi] [case testClassWith__new__AndCompatibilityWithType] +from typing import Optional class C: - def 
__new__(cls, foo: int = None) -> 'C': + def __new__(cls, foo: Optional[int] = None) -> 'C': obj = object.__new__(cls) return obj def f(x: type) -> None: pass @@ -3596,15 +3596,15 @@ main:7: note: Revealed type is "builtins.list[Type[__main__.B]]" [case testTypeEquivalentTypeAny] from typing import Type, Any -a = None # type: Type[Any] +a: Type[Any] b = a # type: type -x = None # type: type +x: type y = x # type: Type[Any] class C: ... -p = None # type: type +p: type q = p # type: Type[C] [builtins fixtures/list.pyi] @@ -3614,9 +3614,9 @@ q = p # type: Type[C] from typing import Type, Any, TypeVar, Generic class C: ... -x = None # type: type -y = None # type: Type[Any] -z = None # type: Type[C] +x: type +y: Type[Any] +z: Type[C] lst = [x, y, z] reveal_type(lst) # N: Revealed type is "builtins.list[builtins.type]" @@ -4540,6 +4540,39 @@ def f(TA: Type[A]): reveal_type(TA) # N: Revealed type is "Type[__main__.A]" reveal_type(TA.x) # N: Revealed type is "builtins.int" +[case testMetaclassConflictingInstanceVars] +from typing import ClassVar + +class Meta(type): + foo: int + bar: int + eggs: ClassVar[int] = 42 + spam: ClassVar[int] = 42 + +class Foo(metaclass=Meta): + foo: str + bar: ClassVar[str] = 'bar' + eggs: str + spam: ClassVar[str] = 'spam' + +reveal_type(Foo.foo) # N: Revealed type is "builtins.int" +reveal_type(Foo.bar) # N: Revealed type is "builtins.str" +reveal_type(Foo.eggs) # N: Revealed type is "builtins.int" +reveal_type(Foo.spam) # N: Revealed type is "builtins.str" + +class MetaSub(Meta): ... + +class Bar(metaclass=MetaSub): + foo: str + bar: ClassVar[str] = 'bar' + eggs: str + spam: ClassVar[str] = 'spam' + +reveal_type(Bar.foo) # N: Revealed type is "builtins.int" +reveal_type(Bar.bar) # N: Revealed type is "builtins.str" +reveal_type(Bar.eggs) # N: Revealed type is "builtins.int" +reveal_type(Bar.spam) # N: Revealed type is "builtins.str" + [case testSubclassMetaclass] class M1(type): x = 0 @@ -4961,7 +4994,7 @@ a[0]() # E: "int" not callable [file m.py] from typing import Tuple -a = None # type: A +a: A class A(Tuple[int, str]): pass [builtins fixtures/tuple.pyi] @@ -5069,6 +5102,7 @@ Foos = NewType('Foos', List[Foo]) # type: ignore def frob(foos: Dict[Any, Foos]) -> None: foo = foos.get(1) + assert foo dict(foo) [builtins fixtures/dict.pyi] [out] @@ -5083,6 +5117,7 @@ x: C class C: def frob(self, foos: Dict[Any, Foos]) -> None: foo = foos.get(1) + assert foo dict(foo) reveal_type(x.frob) # N: Revealed type is "def (foos: builtins.dict[Any, __main__.Foos])" @@ -6443,7 +6478,6 @@ def deco(f: Callable[..., T]) -> Callable[..., Tuple[T, int]]: ... [out] [case testOptionalDescriptorsBinder] -# flags: --strict-optional from typing import Type, TypeVar, Optional T = TypeVar('T') @@ -6657,7 +6691,6 @@ class C(Generic[T]): [builtins fixtures/isinstancelist.pyi] [case testIsInstanceTypeSubclass] -# flags: --strict-optional from typing import Type, Optional class Base: ... class One(Base): @@ -7684,10 +7717,14 @@ class D: def __new__(cls) -> NoReturn: ... def __init__(self) -> NoReturn: ... 
-reveal_type(A()) # N: Revealed type is "" -reveal_type(B()) # N: Revealed type is "" -reveal_type(C()) # N: Revealed type is "" -reveal_type(D()) # N: Revealed type is "" +if object(): + reveal_type(A()) # N: Revealed type is "" +if object(): + reveal_type(B()) # N: Revealed type is "" +if object(): + reveal_type(C()) # N: Revealed type is "" +if object(): + reveal_type(D()) # N: Revealed type is "" [case testOverloadedNewAndInitNoReturn] from typing import NoReturn, overload @@ -7726,13 +7763,20 @@ class D: def __init__(self, a: int) -> None: ... def __init__(self, a: int = ...) -> None: ... -reveal_type(A()) # N: Revealed type is "" +if object(): + reveal_type(A()) # N: Revealed type is "" reveal_type(A(1)) # N: Revealed type is "__main__.A" -reveal_type(B()) # N: Revealed type is "" + +if object(): + reveal_type(B()) # N: Revealed type is "" reveal_type(B(1)) # N: Revealed type is "__main__.B" -reveal_type(C()) # N: Revealed type is "" + +if object(): + reveal_type(C()) # N: Revealed type is "" reveal_type(C(1)) # N: Revealed type is "__main__.C" -reveal_type(D()) # N: Revealed type is "" + +if object(): + reveal_type(D()) # N: Revealed type is "" reveal_type(D(1)) # N: Revealed type is "__main__.D" [case testClassScopeImportWithWrapperAndError] diff --git a/test-data/unit/check-columns.test b/test-data/unit/check-columns.test index 7d1114ceab7a..44524b9df943 100644 --- a/test-data/unit/check-columns.test +++ b/test-data/unit/check-columns.test @@ -27,7 +27,6 @@ A().f(1, 1) # E:10: Argument 2 to "f" of "A" has incompatible type "int"; expect (A().f(1, 'hello', 'hi')) # E:2: Too many arguments for "f" of "A" [case testColumnsInvalidArgumentType] -# flags: --strict-optional def f(x: int, y: str) -> None: ... def g(*x: int) -> None: pass def h(**x: int) -> None: pass @@ -386,10 +385,6 @@ f(g( )) x[0] [out] -main:2:10:2:10: error: Incompatible types in assignment (expression has type "str", variable has type "int") -main:6:3:6:3: error: Argument 1 to "f" has incompatible type "int"; expected "str" -main:8:1:8:1: error: Value of type "int" is not indexable -[out version>=3.8] main:2:10:2:17: error: Incompatible types in assignment (expression has type "str", variable has type "int") main:6:3:7:1: error: Argument 1 to "f" has incompatible type "int"; expected "str" main:8:1:8:4: error: Value of type "int" is not indexable diff --git a/test-data/unit/check-custom-plugin.test b/test-data/unit/check-custom-plugin.test index c81de675d808..9a0668f98c21 100644 --- a/test-data/unit/check-custom-plugin.test +++ b/test-data/unit/check-custom-plugin.test @@ -802,7 +802,7 @@ else: plugins=/test-data/unit/plugins/union_method.py [case testGetMethodHooksOnUnionsStrictOptional] -# flags: --config-file tmp/mypy.ini --strict-optional +# flags: --config-file tmp/mypy.ini from typing import Union class Foo: @@ -887,7 +887,10 @@ plugins=/test-data/unit/plugins/descriptor.py # flags: --config-file tmp/mypy.ini def dynamic_signature(arg1: str) -> str: ... 
-reveal_type(dynamic_signature(1)) # N: Revealed type is "builtins.int" +a: int = 1 +reveal_type(dynamic_signature(a)) # N: Revealed type is "builtins.int" +b: bytes = b'foo' +reveal_type(dynamic_signature(b)) # N: Revealed type is "builtins.bytes" [file mypy.ini] \[mypy] plugins=/test-data/unit/plugins/function_sig_hook.py diff --git a/test-data/unit/check-dataclass-transform.test b/test-data/unit/check-dataclass-transform.test index be6b46d70846..9029582ece82 100644 --- a/test-data/unit/check-dataclass-transform.test +++ b/test-data/unit/check-dataclass-transform.test @@ -840,6 +840,24 @@ reveal_type(bar.base) # N: Revealed type is "builtins.int" [typing fixtures/typing-full.pyi] [builtins fixtures/dataclasses.pyi] +[case testDataclassTransformReplace] +from dataclasses import replace +from typing import dataclass_transform, Type + +@dataclass_transform() +def my_dataclass(cls: Type) -> Type: + return cls + +@my_dataclass +class Person: + name: str + +p = Person('John') +y = replace(p, name='Bob') # E: Argument 1 to "replace" has incompatible type "Person"; expected a dataclass + +[typing fixtures/typing-full.pyi] +[builtins fixtures/dataclasses.pyi] + [case testDataclassTransformSimpleDescriptor] # flags: --python-version 3.11 @@ -1001,18 +1019,19 @@ class Desc: def __get__(self, instance: object, owner: Any) -> str: ... def __get__(self, instance, owner): ... - def __set__(self, instance: Any, value: bytes) -> None: ... + def __set__(self, instance: Any, value: bytes | None) -> None: ... @my_dataclass class C: x: Desc c = C(x=b'x') -C(x=1) # E: Argument "x" to "C" has incompatible type "int"; expected "bytes" +c = C(x=None) +C(x=1) # E: Argument "x" to "C" has incompatible type "int"; expected "Optional[bytes]" reveal_type(c.x) # N: Revealed type is "builtins.str" reveal_type(C.x) # N: Revealed type is "builtins.int" c.x = b'x' -c.x = 1 # E: Incompatible types in assignment (expression has type "int", variable has type "bytes") +c.x = 1 # E: Incompatible types in assignment (expression has type "int", variable has type "Optional[bytes]") [typing fixtures/typing-full.pyi] [builtins fixtures/dataclasses.pyi] @@ -1051,5 +1070,6 @@ class Desc2: class C: x: Desc # E: Unsupported signature for "__set__" in "Desc" y: Desc2 # E: Unsupported "__set__" in "Desc2" + [typing fixtures/typing-full.pyi] [builtins fixtures/dataclasses.pyi] diff --git a/test-data/unit/check-dataclasses.test b/test-data/unit/check-dataclasses.test index 914e1c2e0602..7881dfbcf1bb 100644 --- a/test-data/unit/check-dataclasses.test +++ b/test-data/unit/check-dataclasses.test @@ -744,6 +744,17 @@ s: str = a.bar() # E: Incompatible types in assignment (expression has type "in [builtins fixtures/dataclasses.pyi] +[case testDataclassGenericCovariant] +from dataclasses import dataclass +from typing import Generic, TypeVar + +T_co = TypeVar("T_co", covariant=True) + +@dataclass +class MyDataclass(Generic[T_co]): + a: T_co + +[builtins fixtures/dataclasses.pyi] [case testDataclassUntypedGenericInheritance] # flags: --python-version 3.7 @@ -1116,7 +1127,6 @@ class Foo: [case testNoComplainFieldNoneStrict] # flags: --python-version 3.7 -# flags: --strict-optional from dataclasses import dataclass, field from typing import Optional @@ -1253,7 +1263,7 @@ class Deferred: pass [builtins fixtures/dataclasses.pyi] [case testDeferredDataclassInitSignatureSubclass] -# flags: --strict-optional --python-version 3.7 +# flags: --python-version 3.7 from dataclasses import dataclass from typing import Optional @@ -1547,6 +1557,35 @@ class 
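The descriptor tweak just above (letting __set__ accept "bytes | None") relies on the general rule that, for a descriptor-typed dataclass field, writes, including the argument of the synthesized __init__, are checked against the value parameter of __set__, while reads take the return type of __get__. A hedged sketch of that rule using the stdlib @dataclass decorator (the test above exercises it through a dataclass_transform wrapper; names below are invented):

from dataclasses import dataclass
from typing import Any, Optional


class Stored:
    def __get__(self, instance: object, owner: Any) -> str:
        return "decoded"

    def __set__(self, instance: Any, value: Optional[bytes]) -> None:
        ...


@dataclass
class Record:
    payload: Stored


r = Record(payload=b"raw")  # OK: __init__ accepts what __set__ accepts
r = Record(payload=None)    # OK: Optional[bytes] includes None
r.payload = b"more"         # OK: plain assignment is checked the same way
text: str = r.payload       # OK: attribute reads use __get__'s return type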
Other: [builtins fixtures/dataclasses.pyi] +[case testDataclassWithSlotsRuntimeAttr] +# flags: --python-version 3.10 +from dataclasses import dataclass + +@dataclass(slots=True) +class Some: + x: int + y: str + z: bool + +reveal_type(Some.__slots__) # N: Revealed type is "Tuple[builtins.str, builtins.str, builtins.str]" + +@dataclass(slots=True) +class Other: + x: int + y: str + +reveal_type(Other.__slots__) # N: Revealed type is "Tuple[builtins.str, builtins.str]" + + +@dataclass +class NoSlots: + x: int + y: str + +NoSlots.__slots__ # E: "Type[NoSlots]" has no attribute "__slots__" +[builtins fixtures/dataclasses.pyi] + + [case testSlotsDefinitionWithTwoPasses1] # flags: --python-version 3.10 # https://github.com/python/mypy/issues/11821 @@ -1705,7 +1744,7 @@ reveal_type(Child2[int, A]([A()], [1]).b) # N: Revealed type is "builtins.list[ [builtins fixtures/dataclasses.pyi] [case testDataclassInheritOptionalType] -# flags: --python-version 3.7 --strict-optional +# flags: --python-version 3.7 from dataclasses import dataclass from typing import Any, Callable, Generic, TypeVar, List, Optional @@ -1853,6 +1892,11 @@ class Two: bar: int t: Two reveal_type(t.__match_args__) # N: Revealed type is "Tuple[Literal['bar']]" +@dataclass +class Empty: + ... +e: Empty +reveal_type(e.__match_args__) # N: Revealed type is "Tuple[()]" [builtins fixtures/dataclasses.pyi] [case testDataclassWithoutMatchArgs] @@ -1939,7 +1983,6 @@ B = List[C] [builtins fixtures/dataclasses.pyi] [case testDataclassSelfType] -# flags: --strict-optional from dataclasses import dataclass from typing import Self, TypeVar, Generic, Optional @@ -2006,7 +2049,6 @@ e: Element[Bar] reveal_type(e.elements) # N: Revealed type is "typing.Sequence[__main__.Element[__main__.Bar]]" [builtins fixtures/dataclasses.pyi] - [case testIfConditionsInDefinition] # flags: --python-version 3.11 --always-true TRUTH from dataclasses import dataclass @@ -2040,7 +2082,372 @@ Foo( present_4=4, present_5=5, ) + +[builtins fixtures/dataclasses.pyi] + +[case testReplace] +from dataclasses import dataclass, replace, InitVar +from typing import ClassVar + +@dataclass +class A: + x: int + q: InitVar[int] + q2: InitVar[int] = 0 + c: ClassVar[int] + + +a = A(x=42, q=7) +a2 = replace(a) # E: Missing named argument "q" for "replace" of "A" +a2 = replace(a, q=42) +a2 = replace(a, x=42, q=42) +a2 = replace(a, x=42, q=42, c=7) # E: Unexpected keyword argument "c" for "replace" of "A" +a2 = replace(a, x='42', q=42) # E: Argument "x" to "replace" of "A" has incompatible type "str"; expected "int" +a2 = replace(a, q='42') # E: Argument "q" to "replace" of "A" has incompatible type "str"; expected "int" +reveal_type(a2) # N: Revealed type is "__main__.A" + +[case testReplaceUnion] +from typing import Generic, Union, TypeVar +from dataclasses import dataclass, replace, InitVar + +T = TypeVar('T') + +@dataclass +class A(Generic[T]): + x: T # exercises meet(T=int, int) = int + y: bool # exercises meet(bool, int) = bool + z: str # exercises meet(str, bytes) = + w: dict # exercises meet(dict, ) = + init_var: InitVar[int] # exercises (non-optional, optional) = non-optional + +@dataclass +class B: + x: int + y: int + z: bytes + init_var: int + + +a_or_b: Union[A[int], B] +_ = replace(a_or_b, x=42, y=True, init_var=42) +_ = replace(a_or_b, x=42, y=True) # E: Missing named argument "init_var" for "replace" of "Union[A[int], B]" +_ = replace(a_or_b, x=42, y=True, z='42', init_var=42) # E: Argument "z" to "replace" of "Union[A[int], B]" has incompatible type "str"; expected +_ = 
replace(a_or_b, x=42, y=True, w={}, init_var=42) # E: Argument "w" to "replace" of "Union[A[int], B]" has incompatible type "Dict[, ]"; expected +_ = replace(a_or_b, y=42, init_var=42) # E: Argument "y" to "replace" of "Union[A[int], B]" has incompatible type "int"; expected "bool" + +[builtins fixtures/dataclasses.pyi] + +[case testReplaceUnionOfTypeVar] +from typing import Generic, Union, TypeVar +from dataclasses import dataclass, replace + +@dataclass +class A: + x: int + y: int + z: str + w: dict + +class B: + pass + +TA = TypeVar('TA', bound=A) +TB = TypeVar('TB', bound=B) + +def f(b_or_t: Union[TA, TB, int]) -> None: + a2 = replace(b_or_t) # E: Argument 1 to "replace" has type "Union[TA, TB, int]" whose item "TB" is not bound to a dataclass # E: Argument 1 to "replace" has incompatible type "Union[TA, TB, int]" whose item "int" is not a dataclass + +[case testReplaceTypeVarBoundNotDataclass] +from dataclasses import dataclass, replace +from typing import Union, TypeVar + +TInt = TypeVar('TInt', bound=int) +TAny = TypeVar('TAny') +TNone = TypeVar('TNone', bound=None) +TUnion = TypeVar('TUnion', bound=Union[str, int]) + +def f1(t: TInt) -> None: + _ = replace(t, x=42) # E: Argument 1 to "replace" has a variable type "TInt" not bound to a dataclass + +def f2(t: TAny) -> TAny: + return replace(t, x='spam') # E: Argument 1 to "replace" has a variable type "TAny" not bound to a dataclass + +def f3(t: TNone) -> TNone: + return replace(t, x='spam') # E: Argument 1 to "replace" has a variable type "TNone" not bound to a dataclass + +def f4(t: TUnion) -> TUnion: + return replace(t, x='spam') # E: Argument 1 to "replace" has incompatible type "TUnion" whose item "str" is not a dataclass # E: Argument 1 to "replace" has incompatible type "TUnion" whose item "int" is not a dataclass + +[case testReplaceTypeVarBound] +from dataclasses import dataclass, replace +from typing import TypeVar + +@dataclass +class A: + x: int + +@dataclass +class B(A): + pass + +TA = TypeVar('TA', bound=A) + +def f(t: TA) -> TA: + t2 = replace(t, x=42) + reveal_type(t2) # N: Revealed type is "TA`-1" + _ = replace(t, x='42') # E: Argument "x" to "replace" of "TA" has incompatible type "str"; expected "int" + return t2 + +f(A(x=42)) +f(B(x=42)) + +[case testReplaceAny] +from dataclasses import replace +from typing import Any + +a: Any +a2 = replace(a) +reveal_type(a2) # N: Revealed type is "Any" + +[case testReplaceNotDataclass] +from dataclasses import replace + +replace(5) # E: Argument 1 to "replace" has incompatible type "int"; expected a dataclass + +class C: + pass + +replace(C()) # E: Argument 1 to "replace" has incompatible type "C"; expected a dataclass + +replace(None) # E: Argument 1 to "replace" has incompatible type "None"; expected a dataclass + +[case testReplaceGeneric] +from dataclasses import dataclass, replace, InitVar +from typing import ClassVar, Generic, TypeVar + +T = TypeVar('T') + +@dataclass +class A(Generic[T]): + x: T + +a = A(x=42) +reveal_type(a) # N: Revealed type is "__main__.A[builtins.int]" +a2 = replace(a, x=42) +reveal_type(a2) # N: Revealed type is "__main__.A[builtins.int]" +a2 = replace(a, x='42') # E: Argument "x" to "replace" of "A[int]" has incompatible type "str"; expected "int" +reveal_type(a2) # N: Revealed type is "__main__.A[builtins.int]" + +[case testPostInitCorrectSignature] +from typing import Any, Generic, TypeVar, Callable, Self +from dataclasses import dataclass, InitVar + +@dataclass +class Test1: + x: int + def __post_init__(self) -> None: ... 
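The testReplace* cases above pin down how mypy special-cases dataclasses.replace: keyword names and types come from the dataclass fields, an InitVar without a default has to be supplied on every call, ClassVars and unknown names are rejected, and the first argument must be a dataclass instance. A condensed, hedged illustration; the names are invented and the error comments paraphrase the messages the cases above expect:

from dataclasses import InitVar, dataclass, replace
from typing import ClassVar


@dataclass
class Config:
    host: str
    retries: InitVar[int]         # no default, so replace() must always re-supply it
    debug: InitVar[bool] = False  # defaulted InitVar, optional in replace()
    version: ClassVar[int] = 1    # ClassVar, not an init field, rejected below

    def __post_init__(self, retries: int, debug: bool) -> None:
        self.attempts = retries + (1 if debug else 0)


c = Config("localhost", retries=3)
c2 = replace(c, host="example.org", retries=5)
c3 = replace(c, host=8080, retries=5)  # E: Argument "host" to "replace" of "Config" has incompatible type "int"; expected "str"
c4 = replace(c, host="example.org")    # E: Missing named argument "retries" for "replace" of "Config"
c5 = replace(c, version=2, retries=5)  # E: Unexpected keyword argument "version" for "replace" of "Config"
replace("plain string")                # E: Argument 1 to "replace" has incompatible type "str"; expected a dataclass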
+ +@dataclass +class Test2: + x: int + y: InitVar[int] + z: str + def __post_init__(self, y: int) -> None: ... + +@dataclass +class Test3: + x: InitVar[int] + y: InitVar[str] + def __post_init__(self, x: int, y: str) -> None: ... + +@dataclass +class Test4: + x: int + y: InitVar[str] + z: InitVar[bool] = True + def __post_init__(self, y: str, z: bool) -> None: ... + +@dataclass +class Test5: + y: InitVar[str] = 'a' + z: InitVar[bool] = True + def __post_init__(self, y: str = 'a', z: bool = True) -> None: ... + +F = TypeVar('F', bound=Callable[..., Any]) +def identity(f: F) -> F: return f + +@dataclass +class Test6: + y: InitVar[str] + @identity # decorated method works + def __post_init__(self, y: str) -> None: ... + +T = TypeVar('T') + +@dataclass +class Test7(Generic[T]): + t: InitVar[T] + def __post_init__(self, t: T) -> None: ... + +@dataclass +class Test8: + s: InitVar[Self] + def __post_init__(self, s: Self) -> None: ... +[builtins fixtures/dataclasses.pyi] + +[case testPostInitSubclassing] +from dataclasses import dataclass, InitVar + +@dataclass +class Base: + a: str + x: InitVar[int] + def __post_init__(self, x: int) -> None: ... + +@dataclass +class Child(Base): + b: str + y: InitVar[str] + def __post_init__(self, x: int, y: str) -> None: ... + +@dataclass +class GrandChild(Child): + c: int + z: InitVar[str] = "a" + def __post_init__(self, x: int, y: str, z: str) -> None: ... +[builtins fixtures/dataclasses.pyi] + +[case testPostInitNotADataclassCheck] +from dataclasses import dataclass, InitVar + +class Regular: + __post_init__ = 1 # can be whatever + +class Base: + x: InitVar[int] + def __post_init__(self) -> None: ... # can be whatever + +@dataclass +class Child(Base): + y: InitVar[str] + def __post_init__(self, y: str) -> None: ... +[builtins fixtures/dataclasses.pyi] + +[case testPostInitMissingParam] +from dataclasses import dataclass, InitVar + +@dataclass +class Child: + y: InitVar[str] + def __post_init__(self) -> None: ... +[builtins fixtures/dataclasses.pyi] +[out] +main:6: error: Signature of "__post_init__" incompatible with supertype "dataclass" +main:6: note: Superclass: +main:6: note: def __post_init__(self: Child, y: str) -> None +main:6: note: Subclass: +main:6: note: def __post_init__(self: Child) -> None + +[case testPostInitWrongTypeAndName] +from dataclasses import dataclass, InitVar + +@dataclass +class Test1: + y: InitVar[str] + def __post_init__(self, x: int) -> None: ... # E: Argument 2 of "__post_init__" is incompatible with supertype "dataclass"; supertype defines the argument type as "str" + +@dataclass +class Test2: + y: InitVar[str] = 'a' + def __post_init__(self, x: int) -> None: ... # E: Argument 2 of "__post_init__" is incompatible with supertype "dataclass"; supertype defines the argument type as "str" +[builtins fixtures/dataclasses.pyi] + +[case testPostInitExtraParam] +from dataclasses import dataclass, InitVar + +@dataclass +class Child: + y: InitVar[str] + def __post_init__(self, y: str, z: int) -> None: ... +[builtins fixtures/dataclasses.pyi] +[out] +main:6: error: Signature of "__post_init__" incompatible with supertype "dataclass" +main:6: note: Superclass: +main:6: note: def __post_init__(self: Child, y: str) -> None +main:6: note: Subclass: +main:6: note: def __post_init__(self: Child, y: str, z: int) -> None + +[case testPostInitReturnType] +from dataclasses import dataclass, InitVar + +@dataclass +class Child: + y: InitVar[str] + def __post_init__(self, y: str) -> int: ... 
# E: Return type "int" of "__post_init__" incompatible with return type "None" in supertype "dataclass" +[builtins fixtures/dataclasses.pyi] + +[case testPostInitDecoratedMethodError] +from dataclasses import dataclass, InitVar +from typing import Any, Callable, TypeVar + +F = TypeVar('F', bound=Callable[..., Any]) +def identity(f: F) -> F: return f + +@dataclass +class Klass: + y: InitVar[str] + @identity + def __post_init__(self) -> None: ... +[builtins fixtures/dataclasses.pyi] +[out] +main:11: error: Signature of "__post_init__" incompatible with supertype "dataclass" +main:11: note: Superclass: +main:11: note: def __post_init__(self: Klass, y: str) -> None +main:11: note: Subclass: +main:11: note: def __post_init__(self: Klass) -> None + +[case testPostInitIsNotAFunction] +from dataclasses import dataclass, InitVar + +@dataclass +class Test: + y: InitVar[str] + __post_init__ = 1 # E: "__post_init__" method must be an instance method +[builtins fixtures/dataclasses.pyi] + +[case testPostInitClassMethod] +from dataclasses import dataclass, InitVar + +@dataclass +class Test: + y: InitVar[str] + @classmethod + def __post_init__(cls) -> None: ... [builtins fixtures/dataclasses.pyi] +[out] +main:7: error: Signature of "__post_init__" incompatible with supertype "dataclass" +main:7: note: Superclass: +main:7: note: def __post_init__(self: Test, y: str) -> None +main:7: note: Subclass: +main:7: note: @classmethod +main:7: note: def __post_init__(cls: Type[Test]) -> None + +[case testPostInitStaticMethod] +from dataclasses import dataclass, InitVar + +@dataclass +class Test: + y: InitVar[str] + @staticmethod + def __post_init__() -> None: ... +[builtins fixtures/dataclasses.pyi] +[out] +main:7: error: Signature of "__post_init__" incompatible with supertype "dataclass" +main:7: note: Superclass: +main:7: note: def __post_init__(self: Test, y: str) -> None +main:7: note: Subclass: +main:7: note: @staticmethod +main:7: note: def __post_init__() -> None [case testProtocolNoCrash] from typing import Protocol, Union, ClassVar @@ -2054,3 +2461,15 @@ class Test(Protocol): def reset(self) -> None: self.x = DEFAULT [builtins fixtures/dataclasses.pyi] + +[case testProtocolNoCrashOnJoining] +from dataclasses import dataclass +from typing import Protocol + +@dataclass +class MyDataclass(Protocol): ... 
+ +a: MyDataclass +b = [a, a] # trigger joining the types + +[builtins fixtures/dataclasses.pyi] diff --git a/test-data/unit/check-dynamic-typing.test b/test-data/unit/check-dynamic-typing.test index dd4cc1579639..0dc05a7a0ea1 100644 --- a/test-data/unit/check-dynamic-typing.test +++ b/test-data/unit/check-dynamic-typing.test @@ -4,8 +4,8 @@ [case testAssignmentWithDynamic] from typing import Any -d = None # type: Any -a = None # type: A +d: Any +a: A if int(): a = d # Everything ok @@ -20,8 +20,9 @@ class A: pass [case testMultipleAssignmentWithDynamic] from typing import Any -d = None # type: Any -a, b = None, None # type: (A, B) +d: Any +a: A +b: B if int(): d, a = b, b # E: Incompatible types in assignment (expression has type "B", variable has type "A") @@ -51,7 +52,8 @@ from typing import Any def f(x: Any) -> 'A': pass -a, b = None, None # type: (A, B) +a: A +b: B if int(): b = f(a) # E: Incompatible types in assignment (expression has type "A", variable has type "B") @@ -75,7 +77,8 @@ from typing import Any def f(x: 'A') -> Any: pass -a, b = None, None # type: (A, B) +a: A +b: B a = f(b) # E: Argument 1 to "f" has incompatible type "B"; expected "A" @@ -88,10 +91,10 @@ class B: pass [case testBinaryOperationsWithDynamicLeftOperand] from typing import Any -d = None # type: Any -a = None # type: A -c = None # type: C -b = None # type: bool +d: Any +a: A +c: C +b: bool n = 0 d in a # E: Unsupported right operand type for in ("A") @@ -151,10 +154,10 @@ class dict: pass [case testBinaryOperationsWithDynamicAsRightOperand] from typing import Any -d = None # type: Any -a = None # type: A -c = None # type: C -b = None # type: bool +d: Any +a: A +c: C +b: bool n = 0 a and d @@ -224,9 +227,9 @@ class dict: pass [case testDynamicWithUnaryExpressions] from typing import Any -d = None # type: Any -a = None # type: A -b = None # type: bool +d: Any +a: A +b: bool if int(): a = not d # E: Incompatible types in assignment (expression has type "bool", variable has type "A") if int(): @@ -238,8 +241,8 @@ class A: pass [case testDynamicWithMemberAccess] from typing import Any -d = None # type: Any -a = None # type: A +d: Any +a: A if int(): a = d.foo(a()) # E: "A" not callable @@ -256,8 +259,8 @@ class A: pass [case testIndexingWithDynamic] from typing import Any -d = None # type: Any -a = None # type: A +d: Any +a: A if int(): a = d[a()] # E: "A" not callable @@ -270,10 +273,10 @@ d[a], d[a] = a, a class A: pass -[case testTupleExpressionsWithDynamci] +[case testTupleExpressionsWithDynamic] from typing import Tuple, Any -t2 = None # type: Tuple[A, A] -d = None # type: Any +t2: Tuple[A, A] +d: Any if int(): t2 = (d, d, d) # E: Incompatible types in assignment (expression has type "Tuple[Any, Any, Any]", variable has type "Tuple[A, A]") @@ -289,9 +292,9 @@ class A: pass class B: pass def f() -> None: pass -d = None # type: Any -a = None # type: A -b = None # type: B +d: Any +a: A +b: B if int(): b = cast(A, d) # E: Incompatible types in assignment (expression has type "A", variable has type "B") if int(): @@ -309,8 +312,8 @@ def g(a: 'A') -> None: class A: pass class B: pass -d = None # type: Any -t = None # type: Tuple[A, A] +d: Any +t: Tuple[A, A] # TODO: callable types, overloaded functions d = None # All ok @@ -362,10 +365,10 @@ class A: pass [case testImplicitGlobalFunctionSignature] from typing import Any, Callable -x = None # type: Any -a = None # type: A -g = None # type: Callable[[], None] -h = None # type: Callable[[A], None] +x: Any +a: A +g: Callable[[], None] +h: Callable[[A], None] def 
f(x): pass @@ -384,10 +387,10 @@ class A: pass [case testImplicitGlobalFunctionSignatureWithDifferentArgCounts] from typing import Callable -g0 = None # type: Callable[[], None] -g1 = None # type: Callable[[A], None] -g2 = None # type: Callable[[A, A], None] -a = None # type: A +g0: Callable[[], None] +g1: Callable[[A], None] +g2: Callable[[A, A], None] +a: A def f0(): pass def f2(x, y): pass @@ -415,16 +418,17 @@ from typing import Callable class A: pass class B: pass -a, b = None, None # type: (A, B) +a: A +b: B def f01(x = b): pass def f13(x, y = b, z = b): pass -g0 = None # type: Callable[[], None] -g1 = None # type: Callable[[A], None] -g2 = None # type: Callable[[A, A], None] -g3 = None # type: Callable[[A, A, A], None] -g4 = None # type: Callable[[A, A, A, A], None] +g0: Callable[[], None] +g1: Callable[[A], None] +g2: Callable[[A, A], None] +g3: Callable[[A, A, A], None] +g4: Callable[[A, A, A, A], None] f01(a, a) # E: Too many arguments for "f01" f13() # E: Missing positional argument "x" in call to "f13" @@ -456,7 +460,7 @@ if int(): [builtins fixtures/tuple.pyi] [case testSkipTypeCheckingWithImplicitSignature] -a = None # type: A +a: A def f(): a() def g(x): @@ -469,7 +473,7 @@ class A: pass [builtins fixtures/bool.pyi] [case testSkipTypeCheckingWithImplicitSignatureAndDefaultArgs] -a = None # type: A +a: A def f(x=a()): a() def g(x, y=a, z=a()): @@ -478,10 +482,10 @@ class A: pass [case testImplicitMethodSignature] from typing import Callable -g0 = None # type: Callable[[], None] -g1 = None # type: Callable[[A], None] -g2 = None # type: Callable[[A, A], None] -a = None # type: A +g0: Callable[[], None] +g1: Callable[[A], None] +g2: Callable[[A, A], None] +a: A if int(): g0 = a.f # E: Incompatible types in assignment (expression has type "Callable[[Any], Any]", variable has type "Callable[[], None]") @@ -502,7 +506,7 @@ if int(): [case testSkipTypeCheckingImplicitMethod] -a = None # type: A +a: A class A: def f(self): a() @@ -511,9 +515,9 @@ class A: [case testImplicitInheritedMethod] from typing import Callable -g0 = None # type: Callable[[], None] -g1 = None # type: Callable[[A], None] -a = None # type: A +g0: Callable[[], None] +g1: Callable[[A], None] +a: A if int(): g0 = a.f # E: Incompatible types in assignment (expression has type "Callable[[Any], Any]", variable has type "Callable[[], None]") @@ -559,9 +563,9 @@ from typing import Callable class A: def __init__(self, a, b): pass -f1 = None # type: Callable[[A], A] -f2 = None # type: Callable[[A, A], A] -a = None # type: A +f1: Callable[[A], A] +f2: Callable[[A, A], A] +a: A A(a) # E: Missing positional argument "b" in call to "A" if int(): @@ -576,7 +580,7 @@ class A: pass class B: def __init__(self): pass -t = None # type: type +t: type t = A t = B -- Type compatibility @@ -585,11 +589,11 @@ t = B [case testTupleTypeCompatibility] from typing import Any, Tuple -t1 = None # type: Tuple[Any, A] -t2 = None # type: Tuple[A, Any] -t3 = None # type: Tuple[Any, Any] -t4 = None # type: Tuple[A, A] -t5 = None # type: Tuple[Any, Any, Any] +t1: Tuple[Any, A] +t2: Tuple[A, Any] +t3: Tuple[Any, Any] +t4: Tuple[A, A] +t5: Tuple[Any, Any, Any] def f(): t1, t2, t3, t4, t5 # Prevent redefinition @@ -614,11 +618,11 @@ class A: pass [builtins fixtures/tuple.pyi] [case testFunctionTypeCompatibilityAndReturnTypes] -from typing import Any, Callable -f1 = None # type: Callable[[], Any] -f11 = None # type: Callable[[], Any] -f2 = None # type: Callable[[], A] -f3 = None # type: Callable[[], None] +from typing import Any, Callable, Optional +f1: 
Callable[[], Any] +f11: Callable[[], Any] +f2: Callable[[], Optional[A]] +f3: Callable[[], None] f2 = f3 @@ -631,9 +635,9 @@ class A: pass [case testFunctionTypeCompatibilityAndArgumentTypes] from typing import Any, Callable -f1 = None # type: Callable[[A, Any], None] -f2 = None # type: Callable[[Any, A], None] -f3 = None # type: Callable[[A, A], None] +f1: Callable[[A, Any], None] +f2: Callable[[Any, A], None] +f3: Callable[[A, A], None] f1 = f1 f1 = f2 @@ -651,8 +655,8 @@ class A: pass [case testFunctionTypeCompatibilityAndArgumentCounts] from typing import Any, Callable -f1 = None # type: Callable[[Any], None] -f2 = None # type: Callable[[Any, Any], None] +f1: Callable[[Any], None] +f2: Callable[[Any, Any], None] if int(): f1 = f2 # E: Incompatible types in assignment (expression has type "Callable[[Any, Any], None]", variable has type "Callable[[Any], None]") @@ -664,7 +668,8 @@ if int(): [case testOverridingMethodWithDynamicTypes] from typing import Any -a, b = None, None # type: (A, B) +a: A +b: B b.f(b) # E: Argument 1 to "f" of "B" has incompatible type "B"; expected "A" a = a.f(b) @@ -682,8 +687,8 @@ class A(B): [builtins fixtures/tuple.pyi] [case testOverridingMethodWithImplicitDynamicTypes] - -a, b = None, None # type: (A, B) +a: A +b: B b.f(b) # E: Argument 1 to "f" of "B" has incompatible type "B"; expected "A" a = a.f(b) diff --git a/test-data/unit/check-enum.test b/test-data/unit/check-enum.test index b62ed3d94210..6779ae266454 100644 --- a/test-data/unit/check-enum.test +++ b/test-data/unit/check-enum.test @@ -297,7 +297,7 @@ f(E.X) from enum import IntEnum class E(IntEnum): a = 1 -x = None # type: int +x: int reveal_type(E(x)) [out] main:5: note: Revealed type is "__main__.E" @@ -306,7 +306,7 @@ main:5: note: Revealed type is "__main__.E" from enum import IntEnum class E(IntEnum): a = 1 -s = None # type: str +s: str reveal_type(E[s]) [out] main:5: note: Revealed type is "__main__.E" @@ -953,7 +953,6 @@ else: [builtins fixtures/bool.pyi] [case testEnumReachabilityWithNone] -# flags: --strict-optional from enum import Enum from typing import Optional @@ -1016,7 +1015,6 @@ reveal_type(x3) # N: Revealed type is "Union[__main__.Foo, __main__.Bar]" [builtins fixtures/bool.pyi] [case testEnumReachabilityPEP484ExampleWithFinal] -# flags: --strict-optional from typing import Union from typing_extensions import Final from enum import Enum @@ -1063,7 +1061,6 @@ def process(response: Union[str, Reason] = '') -> str: [case testEnumReachabilityPEP484ExampleSingleton] -# flags: --strict-optional from typing import Union from typing_extensions import Final from enum import Enum @@ -1088,7 +1085,6 @@ def func(x: Union[int, None, Empty] = _empty) -> int: [builtins fixtures/primitives.pyi] [case testEnumReachabilityPEP484ExampleSingletonWithMethod] -# flags: --strict-optional from typing import Union from typing_extensions import Final from enum import Enum diff --git a/test-data/unit/check-errorcodes.test b/test-data/unit/check-errorcodes.test index 1e7dc9364855..796e1c1ea98e 100644 --- a/test-data/unit/check-errorcodes.test +++ b/test-data/unit/check-errorcodes.test @@ -183,7 +183,7 @@ from defusedxml import xyz # type: ignore[import] [case testErrorCodeBadIgnore] import nostub # type: ignore xyz # E: Invalid "type: ignore" comment [syntax] \ - # E: Cannot find implementation or library stub for module named "nostub" [import] \ + # E: Cannot find implementation or library stub for module named "nostub" [import-not-found] \ # N: See 
https://mypy.readthedocs.io/en/stable/running_mypy.html#missing-imports import nostub # type: ignore[ # E: Invalid "type: ignore" comment [syntax] import nostub # type: ignore[foo # E: Invalid "type: ignore" comment [syntax] @@ -211,7 +211,7 @@ def f(x, # type: int # type: ignore[ pass [out] main:2: error: Invalid "type: ignore" comment [syntax] -main:2: error: Cannot find implementation or library stub for module named "nostub" [import] +main:2: error: Cannot find implementation or library stub for module named "nostub" [import-not-found] main:2: note: See https://mypy.readthedocs.io/en/stable/running_mypy.html#missing-imports main:3: error: Invalid "type: ignore" comment [syntax] main:4: error: Invalid "type: ignore" comment [syntax] @@ -522,12 +522,12 @@ if int() is str(): # E: Non-overlapping identity check (left operand type: "int [builtins fixtures/primitives.pyi] [case testErrorCodeMissingModule] -from defusedxml import xyz # E: Cannot find implementation or library stub for module named "defusedxml" [import] -from nonexistent import foobar # E: Cannot find implementation or library stub for module named "nonexistent" [import] -import nonexistent2 # E: Cannot find implementation or library stub for module named "nonexistent2" [import] -from nonexistent3 import * # E: Cannot find implementation or library stub for module named "nonexistent3" [import] +from defusedxml import xyz # E: Cannot find implementation or library stub for module named "defusedxml" [import-not-found] +from nonexistent import foobar # E: Cannot find implementation or library stub for module named "nonexistent" [import-not-found] +import nonexistent2 # E: Cannot find implementation or library stub for module named "nonexistent2" [import-not-found] +from nonexistent3 import * # E: Cannot find implementation or library stub for module named "nonexistent3" [import-not-found] from pkg import bad # E: Module "pkg" has no attribute "bad" [attr-defined] -from pkg.bad2 import bad3 # E: Cannot find implementation or library stub for module named "pkg.bad2" [import] \ +from pkg.bad2 import bad3 # E: Cannot find implementation or library stub for module named "pkg.bad2" [import-not-found] \ # N: See https://mypy.readthedocs.io/en/stable/running_mypy.html#missing-imports [file pkg/__init__.py] @@ -732,7 +732,6 @@ class InvalidReturn: [builtins fixtures/bool.pyi] [case testErrorCodeOverloadedOperatorMethod] -# flags: --strict-optional from typing import Optional, overload class A: @@ -758,7 +757,6 @@ class C: x - C() # type: ignore[operator] [case testErrorCodeMultiLineBinaryOperatorOperand] -# flags: --strict-optional from typing import Optional class C: pass @@ -897,7 +895,6 @@ if any_or_object: [builtins fixtures/list.pyi] [case testTruthyFunctions] -# flags: --strict-optional def f(): pass if f: # E: Function "f" could always be true in boolean context [truthy-function] @@ -907,7 +904,7 @@ if not f: # E: Function "f" could always be true in boolean context [truthy-fu conditional_result = 'foo' if f else 'bar' # E: Function "f" could always be true in boolean context [truthy-function] [case testTruthyIterable] -# flags: --strict-optional --enable-error-code truthy-iterable +# flags: --enable-error-code truthy-iterable from typing import Iterable def func(var: Iterable[str]) -> None: if var: # E: "var" has type "Iterable[str]" which can always be true in boolean context. Consider using "Collection[str]" instead. 
[truthy-iterable] @@ -995,7 +992,6 @@ var: int = "" # E: Incompatible types in assignment (expression has type "str", show_error_codes = True [case testErrorCodeUnsafeSuper_no_empty] -# flags: --strict-optional from abc import abstractmethod class Base: @@ -1008,7 +1004,6 @@ class Sub(Base): [builtins fixtures/exception.pyi] [case testDedicatedErrorCodeForEmpty_no_empty] -# flags: --strict-optional from typing import Optional def foo() -> int: ... # E: Missing return statement [empty-body] def bar() -> None: ... diff --git a/test-data/unit/check-expressions.test b/test-data/unit/check-expressions.test index 1fa551f6a2e4..c213255997f8 100644 --- a/test-data/unit/check-expressions.test +++ b/test-data/unit/check-expressions.test @@ -13,11 +13,12 @@ [case testNoneAsRvalue] import typing -a = None # type: A +a: A class A: pass [out] [case testNoneAsArgument] +# flags: --no-strict-optional import typing def f(x: 'A', y: 'B') -> None: pass f(None, None) @@ -32,7 +33,7 @@ class B(A): pass [case testIntLiteral] a = 0 -b = None # type: A +b: A if int(): b = 1 # E: Incompatible types in assignment (expression has type "int", variable has type "A") if int(): @@ -42,7 +43,7 @@ class A: [case testStrLiteral] a = '' -b = None # type: A +b: A if int(): b = 'x' # E: Incompatible types in assignment (expression has type "str", variable has type "A") if int(): @@ -56,7 +57,7 @@ class A: [case testFloatLiteral] a = 0.0 -b = None # type: A +b: A if str(): b = 1.1 # E: Incompatible types in assignment (expression has type "float", variable has type "A") if str(): @@ -67,7 +68,7 @@ class A: [case testComplexLiteral] a = 0.0j -b = None # type: A +b: A if str(): b = 1.1j # E: Incompatible types in assignment (expression has type "complex", variable has type "A") if str(): @@ -77,7 +78,8 @@ class A: [builtins fixtures/dict.pyi] [case testBytesLiteral] -b, a = None, None # type: (bytes, A) +b: bytes +a: A if str(): b = b'foo' if str(): @@ -90,10 +92,10 @@ class A: pass [builtins fixtures/dict.pyi] [case testUnicodeLiteralInPython3] -s = None # type: str +s: str if int(): s = u'foo' -b = None # type: bytes +b: bytes if int(): b = u'foo' # E: Incompatible types in assignment (expression has type "str", variable has type "bytes") [builtins fixtures/primitives.pyi] @@ -104,7 +106,9 @@ if int(): [case testAdd] -a, b, c = None, None, None # type: (A, B, C) +a: A +b: B +c: C if int(): c = a + c # E: Unsupported operand types for + ("A" and "C") if int(): @@ -124,7 +128,9 @@ class C: [builtins fixtures/tuple.pyi] [case testSub] -a, b, c = None, None, None # type: (A, B, C) +a: A +b: B +c: C if int(): c = a - c # E: Unsupported operand types for - ("A" and "C") if int(): @@ -144,7 +150,9 @@ class C: [builtins fixtures/tuple.pyi] [case testMul] -a, b, c = None, None, None # type: (A, B, C) +a: A +b: B +c: C if int(): c = a * c # E: Unsupported operand types for * ("A" and "C") if int(): @@ -164,7 +172,9 @@ class C: [builtins fixtures/tuple.pyi] [case testMatMul] -a, b, c = None, None, None # type: (A, B, C) +a: A +b: B +c: C if int(): c = a @ c # E: Unsupported operand types for @ ("A" and "C") if int(): @@ -184,7 +194,9 @@ class C: [builtins fixtures/tuple.pyi] [case testDiv] -a, b, c = None, None, None # type: (A, B, C) +a: A +b: B +c: C if int(): c = a / c # E: Unsupported operand types for / ("A" and "C") a = a / b # E: Incompatible types in assignment (expression has type "C", variable has type "A") @@ -203,7 +215,9 @@ class C: [builtins fixtures/tuple.pyi] [case testIntDiv] -a, b, c = None, None, None # type: (A, B, C) 
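Earlier in these check-errorcodes changes, the broad "import" error code is replaced by the more specific "import-not-found" for modules that cannot be located at all. A hedged fragment showing the narrower code in an ignore comment (the module name is invented):

# Without the ignore, mypy reports roughly:
#   error: Cannot find implementation or library stub for module named
#   "package_that_is_not_installed"  [import-not-found]
import package_that_is_not_installed  # type: ignore[import-not-found]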
+a: A +b: B +c: C if int(): c = a // c # E: Unsupported operand types for // ("A" and "C") a = a // b # E: Incompatible types in assignment (expression has type "C", variable has type "A") @@ -222,7 +236,9 @@ class C: [builtins fixtures/tuple.pyi] [case testMod] -a, b, c = None, None, None # type: (A, B, C) +a: A +b: B +c: C if int(): c = a % c # E: Unsupported operand types for % ("A" and "C") if int(): @@ -242,7 +258,9 @@ class C: [builtins fixtures/tuple.pyi] [case testPow] -a, b, c = None, None, None # type: (A, B, C) +a: A +b: B +c: C if int(): c = a ** c # E: Unsupported operand types for ** ("A" and "C") if int(): @@ -262,8 +280,8 @@ class C: [builtins fixtures/tuple.pyi] [case testMiscBinaryOperators] - -a, b = None, None # type: (A, B) +a: A +b: B b = a & a # Fail b = a | b # Fail b = a ^ a # Fail @@ -291,7 +309,8 @@ main:6: error: Unsupported operand types for << ("A" and "B") main:7: error: Unsupported operand types for >> ("A" and "A") [case testBooleanAndOr] -a, b = None, None # type: (A, bool) +a: A +b: bool if int(): b = b and b if int(): @@ -310,8 +329,8 @@ class A: pass [case testRestrictedTypeAnd] -b = None # type: bool -i = None # type: str +b: bool +i: str j = not b and i if j: reveal_type(j) # N: Revealed type is "builtins.str" @@ -319,8 +338,8 @@ if j: [case testRestrictedTypeOr] -b = None # type: bool -i = None # type: str +b: bool +i: str j = b or i if not j: reveal_type(j) # N: Revealed type is "builtins.str" @@ -343,7 +362,9 @@ def f(a: List[str], b: bool) -> bool: [builtins fixtures/list.pyi] [case testNonBooleanOr] -c, d, b = None, None, None # type: (C, D, bool) +c: C +d: D +b: bool if int(): c = c or c if int(): @@ -362,7 +383,11 @@ class D(C): pass [case testInOperator] from typing import Iterator, Iterable, Any -a, b, c, d, e = None, None, None, None, None # type: (A, B, bool, D, Any) +a: A +b: B +c: bool +d: D +e: Any if int(): c = c in a # E: Unsupported operand types for in ("bool" and "A") if int(): @@ -389,7 +414,11 @@ class D(Iterable[A]): [case testNotInOperator] from typing import Iterator, Iterable, Any -a, b, c, d, e = None, None, None, None, None # type: (A, B, bool, D, Any) +a: A +b: B +c: bool +d: D +e: Any if int(): c = c not in a # E: Unsupported operand types for in ("bool" and "A") if int(): @@ -415,7 +444,9 @@ class D(Iterable[A]): [builtins fixtures/bool.pyi] [case testNonBooleanContainsReturnValue] -a, b, c = None, None, None # type: (A, bool, str) +a: A +b: bool +c: str if int(): b = a not in a if int(): @@ -434,8 +465,8 @@ a = 1 in ([1] + ['x']) # E: List item 0 has incompatible type "str"; expected " [builtins fixtures/list.pyi] [case testEq] - -a, b = None, None # type: (A, bool) +a: A +b: bool if int(): a = a == b # E: Incompatible types in assignment (expression has type "bool", variable has type "A") if int(): @@ -451,7 +482,9 @@ class A: [builtins fixtures/bool.pyi] [case testLtAndGt] -a, b, bo = None, None, None # type: (A, B, bool) +a: A +b: B +bo: bool if int(): a = a < b # E: Incompatible types in assignment (expression has type "bool", variable has type "A") if int(): @@ -470,7 +503,9 @@ class B: [builtins fixtures/bool.pyi] [case cmpIgnoredPy3] -a, b, bo = None, None, None # type: (A, B, bool) +a: A +b: B +bo: bool bo = a <= b # E: Unsupported left operand type for <= ("A") class A: @@ -480,7 +515,9 @@ class B: [builtins fixtures/bool.pyi] [case testLeAndGe] -a, b, bo = None, None, None # type: (A, B, bool) +a: A +b: B +bo: bool if int(): a = a <= b # E: Incompatible types in assignment (expression has type "bool", variable 
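Most of the churn in these expression and dynamic-typing tests is one mechanical change: "x = None  # type: T" initializers become bare annotations, because with strict optional checking now the default, None is no longer assignable to an arbitrary type T. A hedged before-and-after sketch:

from typing import Optional

class A: ...

a = None  # type: A    # E: Incompatible types in assignment (expression has type "None", variable has type "A")
b: A                   # OK: declares the type without assigning a value
c: Optional[A] = None  # OK: keeps the None initializer by widening the declared type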
has type "A") if int(): @@ -499,8 +536,9 @@ class B: [builtins fixtures/bool.pyi] [case testChainedComp] - -a, b, bo = None, None, None # type: (A, B, bool) +a: A +b: B +bo: bool a < a < b < b # Fail a < b < b < b a < a > a < b # Fail @@ -513,13 +551,15 @@ class B: def __gt__(self, o: 'B') -> bool: pass [builtins fixtures/bool.pyi] [out] -main:3: error: Unsupported operand types for < ("A" and "A") -main:5: error: Unsupported operand types for < ("A" and "A") -main:5: error: Unsupported operand types for > ("A" and "A") +main:4: error: Unsupported operand types for < ("A" and "A") +main:6: error: Unsupported operand types for < ("A" and "A") +main:6: error: Unsupported operand types for > ("A" and "A") [case testChainedCompBoolRes] -a, b, bo = None, None, None # type: (A, B, bool) +a: A +b: B +bo: bool if int(): bo = a < b < b if int(): @@ -535,8 +575,12 @@ class B: [case testChainedCompResTyp] -x, y = None, None # type: (X, Y) -a, b, p, bo = None, None, None, None # type: (A, B, P, bool) +x: X +y: Y +a: A +b: B +p: P +bo: bool if int(): b = y == y == y if int(): @@ -566,7 +610,8 @@ class Y: [case testIs] -a, b = None, None # type: (A, bool) +a: A +b: bool if int(): a = a is b # E: Incompatible types in assignment (expression has type "bool", variable has type "A") if int(): @@ -579,7 +624,8 @@ class A: pass [builtins fixtures/bool.pyi] [case testIsNot] -a, b = None, None # type: (A, bool) +a: A +b: bool if int(): a = a is not b # E: Incompatible types in assignment (expression has type "bool", variable has type "A") if int(): @@ -604,8 +650,8 @@ class A: def __add__(self, x: int) -> int: pass class B: def __radd__(self, x: A) -> str: pass -s = None # type: str -n = None # type: int +s: str +n: int if int(): n = A() + 1 if int(): @@ -618,8 +664,8 @@ class A: def __add__(self, x: 'A') -> object: pass class B: def __radd__(self, x: A) -> str: pass -s = None # type: str -n = None # type: int +s: str +n: int if int(): s = A() + B() n = A() + B() # E: Incompatible types in assignment (expression has type "str", variable has type "int") @@ -632,7 +678,7 @@ class A: def __add__(self, x: N) -> int: pass class B: def __radd__(self, x: N) -> str: pass -s = None # type: str +s: str s = A() + B() # E: Unsupported operand types for + ("A" and "B") [case testBinaryOperatorWithAnyRightOperand] @@ -647,8 +693,8 @@ class A: def __lt__(self, x: C) -> int: pass # E: Signatures of "__lt__" of "A" and "__gt__" of "C" are unsafely overlapping class B: def __gt__(self, x: A) -> str: pass -s = None # type: str -n = None # type: int +s: str +n: int if int(): n = A() < C() s = A() < B() @@ -743,8 +789,8 @@ divmod('foo', d) # E: Unsupported operand types for divmod ("str" and "Decimal" [case testUnaryMinus] - -a, b = None, None # type: (A, B) +a: A +b: B if int(): a = -a # E: Incompatible types in assignment (expression has type "B", variable has type "A") if int(): @@ -760,7 +806,8 @@ class B: [builtins fixtures/tuple.pyi] [case testUnaryPlus] -a, b = None, None # type: (A, B) +a: A +b: B if int(): a = +a # E: Incompatible types in assignment (expression has type "B", variable has type "A") if int(): @@ -776,7 +823,8 @@ class B: [builtins fixtures/tuple.pyi] [case testUnaryNot] -a, b = None, None # type: (A, bool) +a: A +b: bool if int(): a = not b # E: Incompatible types in assignment (expression has type "bool", variable has type "A") if int(): @@ -788,7 +836,8 @@ class A: [builtins fixtures/bool.pyi] [case testUnaryBitwiseNeg] -a, b = None, None # type: (A, B) +a: A +b: B if int(): a = ~a # E: Incompatible types 
in assignment (expression has type "B", variable has type "A") if int(): @@ -809,8 +858,9 @@ class B: [case testIndexing] - -a, b, c = None, None, None # type: (A, B, C) +a: A +b: B +c: C if int(): c = a[c] # E: Invalid index type "C" for "A"; expected type "B" if int(): @@ -828,8 +878,9 @@ class C: pass [builtins fixtures/tuple.pyi] [case testIndexingAsLvalue] - -a, b, c = None, None, None # type: (A, B, C) +a: A +b: B +c: C a[c] = c # Fail a[b] = a # Fail b[a] = c # Fail @@ -844,16 +895,17 @@ class C: pass [builtins fixtures/tuple.pyi] [out] -main:3: error: Invalid index type "C" for "A"; expected type "B" -main:4: error: Incompatible types in assignment (expression has type "A", target has type "C") -main:5: error: Unsupported target for indexed assignment ("B") +main:4: error: Invalid index type "C" for "A"; expected type "B" +main:5: error: Incompatible types in assignment (expression has type "A", target has type "C") +main:6: error: Unsupported target for indexed assignment ("B") [case testOverloadedIndexing] from foo import * [file foo.pyi] from typing import overload - -a, b, c = None, None, None # type: (A, B, C) +a: A +b: B +c: C a[b] a[c] a[1] # E: No overload variant of "__getitem__" of "A" matches argument type "int" \ @@ -861,7 +913,8 @@ a[1] # E: No overload variant of "__getitem__" of "A" matches argument type "in # N: def __getitem__(self, B, /) -> int \ # N: def __getitem__(self, C, /) -> str -i, s = None, None # type: (int, str) +i: int +s: str if int(): i = a[b] if int(): @@ -893,7 +946,9 @@ from typing import cast, Any class A: pass class B: pass class C(A): pass -a, b, c = None, None, None # type: (A, B, C) +a: A +b: B +c: C if int(): a = cast(A, a()) # E: "A" not callable @@ -916,7 +971,8 @@ if int(): [case testAnyCast] from typing import cast, Any -a, b = None, None # type: (A, B) +a: A +b: B a = cast(Any, a()) # Fail a = cast(Any, b) b = cast(Any, a) @@ -924,7 +980,7 @@ class A: pass class B: pass [builtins fixtures/tuple.pyi] [out] -main:3: error: "A" not callable +main:4: error: "A" not callable -- assert_type() @@ -989,6 +1045,23 @@ def reduce_it(s: Scalar) -> Scalar: assert_type(reduce_it(True), Scalar) [builtins fixtures/tuple.pyi] +[case testAssertTypeWithDeferredNodes] +from typing import Callable, TypeVar, assert_type + +T = TypeVar("T") + +def dec(f: Callable[[], T]) -> Callable[[], T]: + return f + +def func() -> None: + some = _inner_func() + assert_type(some, int) + +@dec +def _inner_func() -> int: + return 1 +[builtins fixtures/tuple.pyi] + -- None return type -- ---------------- @@ -1003,7 +1076,8 @@ class A: def __call__(self) -> None: pass -a, o = None, None # type: (A, object) +a: A +o: object if int(): a = f() # E: "f" does not return a value if int(): @@ -1040,7 +1114,7 @@ def f() -> None: pass class A: def __add__(self, x: 'A') -> 'A': pass -a = None # type: A +a: A [f()] # E: "f" does not return a value f() + a # E: "f" does not return a value a + f() # E: "f" does not return a value @@ -1058,7 +1132,8 @@ class A: def __add__(self, x: 'A') -> 'A': pass -a, b = None, None # type: (A, bool) +a: A +b: bool f() in a # E: "f" does not return a value # E: Unsupported right operand type for in ("A") a < f() # E: "f" does not return a value f() <= a # E: "f" does not return a value @@ -1075,7 +1150,8 @@ b or f() # E: "f" does not return a value [case testGetSlice] -a, b = None, None # type: (A, B) +a: A +b: B if int(): a = a[1:2] # E: Incompatible types in assignment (expression has type "B", variable has type "A") if int(): @@ -1101,7 +1177,7 @@ 
class B: pass [case testSlicingWithInvalidBase] -a = None # type: A +a: A a[1:2] # E: Invalid index type "slice" for "A"; expected type "int" a[:] # E: Invalid index type "slice" for "A"; expected type "int" class A: @@ -1110,7 +1186,7 @@ class A: [case testSlicingWithNonindexable] -o = None # type: object +o: object o[1:2] # E: Value of type "object" is not indexable o[:] # E: Value of type "object" is not indexable [builtins fixtures/slice.pyi] @@ -1143,7 +1219,7 @@ class SupportsIndex(Protocol): [case testNoneSliceBounds] from typing import Any -a = None # type: Any +a: Any a[None:1] a[1:None] a[None:] @@ -1151,9 +1227,8 @@ a[:None] [builtins fixtures/slice.pyi] [case testNoneSliceBoundsWithStrictOptional] -# flags: --strict-optional from typing import Any -a = None # type: Any +a: Any a[None:1] a[1:None] a[None:] @@ -1182,6 +1257,7 @@ def void() -> None: x = lambda: void() # type: typing.Callable[[], None] [case testNoCrashOnLambdaGenerator] +# flags: --no-strict-optional from typing import Iterator, Callable # These should not crash @@ -1211,7 +1287,7 @@ def f() -> None: [case testSimpleListComprehension] from typing import List -a = None # type: List[A] +a: List[A] a = [x for x in a] b = [x for x in a] # type: List[B] # E: List comprehension has incompatible type List[A]; expected List[B] class A: pass @@ -1220,7 +1296,7 @@ class B: pass [case testSimpleListComprehensionNestedTuples] from typing import List, Tuple -l = None # type: List[Tuple[A, Tuple[A, B]]] +l: List[Tuple[A, Tuple[A, B]]] a = [a2 for a1, (a2, b1) in l] # type: List[A] b = [a2 for a1, (a2, b1) in l] # type: List[B] # E: List comprehension has incompatible type List[A]; expected List[B] class A: pass @@ -1229,7 +1305,7 @@ class B: pass [case testSimpleListComprehensionNestedTuples2] from typing import List, Tuple -l = None # type: List[Tuple[int, Tuple[int, str]]] +l: List[Tuple[int, Tuple[int, str]]] a = [f(d) for d, (i, s) in l] b = [f(s) for d, (i, s) in l] # E: Argument 1 to "f" has incompatible type "str"; expected "int" @@ -1252,14 +1328,14 @@ def f(a: A) -> B: pass [case testErrorInListComprehensionCondition] from typing import List -a = None # type: List[A] +a: List[A] a = [x for x in a if x()] # E: "A" not callable class A: pass [builtins fixtures/for.pyi] [case testTypeInferenceOfListComprehension] from typing import List -a = None # type: List[A] +a: List[A] o = [x for x in a] # type: List[object] class A: pass [builtins fixtures/for.pyi] @@ -1267,7 +1343,7 @@ class A: pass [case testSimpleListComprehensionInClassBody] from typing import List class A: - a = None # type: List[A] + a: List[A] a = [x for x in a] b = [x for x in a] # type: List[B] # E: List comprehension has incompatible type List[A]; expected List[B] class B: pass @@ -1281,7 +1357,7 @@ class B: pass [case testSimpleSetComprehension] from typing import Set -a = None # type: Set[A] +a: Set[A] a = {x for x in a} b = {x for x in a} # type: Set[B] # E: Set comprehension has incompatible type Set[A]; expected Set[B] class A: pass @@ -1295,8 +1371,8 @@ class B: pass [case testSimpleDictionaryComprehension] from typing import Dict, List, Tuple -abd = None # type: Dict[A, B] -abl = None # type: List[Tuple[A, B]] +abd: Dict[A, B] +abl: List[Tuple[A, B]] abd = {a: b for a, b in abl} x = {a: b for a, b in abl} # type: Dict[B, A] y = {a: b for a, b in abl} # type: A @@ -1312,7 +1388,7 @@ main:6: error: Incompatible types in assignment (expression has type "Dict[A, B] [case testDictionaryComprehensionWithNonDirectMapping] from typing import Dict, List, 
Tuple abd: Dict[A, B] -abl = None # type: List[Tuple[A, B]] +abl: List[Tuple[A, B]] abd = {a: f(b) for a, b in abl} class A: pass class B: pass @@ -1332,10 +1408,10 @@ main:4: error: Argument 1 to "f" has incompatible type "B"; expected "A" from typing import Iterator # The implementation is mostly identical to list comprehensions, so only a few # test cases is ok. -a = None # type: Iterator[int] +a: Iterator[int] if int(): a = (x for x in a) -b = None # type: Iterator[str] +b: Iterator[str] if int(): b = (x for x in a) # E: Generator has incompatible item type "int"; expected "str" [builtins fixtures/for.pyi] @@ -1344,7 +1420,7 @@ if int(): from typing import Callable, Iterator, List a = [] # type: List[Callable[[], str]] -b = None # type: Iterator[Callable[[], int]] +b: Iterator[Callable[[], int]] if int(): b = (x for x in a) # E: Generator has incompatible item type "Callable[[], str]"; expected "Callable[[], int]" [builtins fixtures/list.pyi] @@ -1441,14 +1517,14 @@ class A: def __add__(self, a: 'A') -> 'A': pass def f() -> None: pass -a = None # type: A +a: A None + a # E: Unsupported left operand type for + ("None") f + a # E: Unsupported left operand type for + ("Callable[[], None]") a + f # E: Unsupported operand types for + ("A" and "Callable[[], None]") cast(A, f) [case testOperatorMethodWithInvalidArgCount] -a = None # type: A +a: A a + a # Fail class A: @@ -1462,7 +1538,7 @@ from typing import Any class A: def __init__(self, _add: Any) -> None: self.__add__ = _add -a = None # type: A +a: A a + a [out] @@ -1471,15 +1547,16 @@ a + a class A: def f(self, x: int) -> str: pass __add__ = f -s = None # type: str +s: str s = A() + 1 A() + (A() + 1) [out] main:7: error: Argument 1 has incompatible type "str"; expected "int" [case testIndexedLvalueWithSubtypes] - -a, b, c = None, None, None # type: (A, B, C) +a: A +b: B +c: C a[c] = c a[b] = c a[c] = b @@ -1501,7 +1578,7 @@ class C(B): [case testEllipsis] -a = None # type: A +a: A if str(): a = ... # E: Incompatible types in assignment (expression has type "ellipsis", variable has type "A") b = ... @@ -1988,7 +2065,7 @@ x is 42 [typing fixtures/typing-full.pyi] [case testStrictEqualityStrictOptional] -# flags: --strict-equality --strict-optional +# flags: --strict-equality x: str if x is not None: # OK even with strict-optional @@ -2004,7 +2081,7 @@ if x is not None: # OK without strict-optional [builtins fixtures/bool.pyi] [case testStrictEqualityEqNoOptionalOverlap] -# flags: --strict-equality --strict-optional +# flags: --strict-equality from typing import Optional x: Optional[str] diff --git a/test-data/unit/check-fastparse.test b/test-data/unit/check-fastparse.test index 2e4473c2716b..534967b1edbf 100644 --- a/test-data/unit/check-fastparse.test +++ b/test-data/unit/check-fastparse.test @@ -31,7 +31,6 @@ def f(x): # E: Invalid type comment or annotation pass [case testFastParseInvalidTypes3] -# flags: --python-version 3.6 # All of these should not crash from typing import Callable, Tuple, Iterable @@ -228,8 +227,8 @@ def g(): # E: Type signature has too many arguments assert 1, 2 assert (1, 2) # E: Assertion is always true, perhaps remove parentheses? assert (1, 2), 3 # E: Assertion is always true, perhaps remove parentheses? -assert () assert (1,) # E: Assertion is always true, perhaps remove parentheses? 
+assert () [builtins fixtures/tuple.pyi] [case testFastParseAssertMessage] diff --git a/test-data/unit/check-flags.test b/test-data/unit/check-flags.test index 39fcef1db673..96f78d81dd16 100644 --- a/test-data/unit/check-flags.test +++ b/test-data/unit/check-flags.test @@ -427,7 +427,7 @@ async def h() -> NoReturn: # E: Implicit return in function which does not retu [typing fixtures/typing-async.pyi] [case testNoWarnNoReturn] -# flags: --no-warn-no-return --strict-optional +# flags: --no-warn-no-return import typing def implicit_optional_return(arg) -> typing.Optional[str]: @@ -1733,7 +1733,7 @@ def h() -> List[Any]: # E: Explicit "Any" is not allowed [builtins fixtures/list.pyi] [case testDisallowAnyExplicitVarDeclaration] -# flags: --python-version 3.6 --disallow-any-explicit +# flags: --disallow-any-explicit from typing import Any v: Any = '' # E: Explicit "Any" is not allowed w = '' # type: Any # E: Explicit "Any" is not allowed @@ -1741,7 +1741,7 @@ class X: y = '' # type: Any # E: Explicit "Any" is not allowed [case testDisallowAnyExplicitGenericVarDeclaration] -# flags: --python-version 3.6 --disallow-any-explicit +# flags: --disallow-any-explicit from typing import Any, List v: List[Any] = [] # E: Explicit "Any" is not allowed [builtins fixtures/list.pyi] @@ -1836,7 +1836,7 @@ N = TypedDict('N', {'x': str, 'y': List}) # no error [builtins fixtures/dict.pyi] [case testDisallowAnyGenericsTupleNoTypeParams] -# flags: --python-version 3.6 --disallow-any-generics +# flags: --disallow-any-generics from typing import Tuple def f(s: Tuple) -> None: pass # E: Missing type parameters for generic type "Tuple" @@ -1877,7 +1877,7 @@ def g(l: L[str]) -> None: pass # no error [builtins fixtures/list.pyi] [case testDisallowAnyGenericsGenericAlias] -# flags: --python-version 3.6 --disallow-any-generics +# flags: --disallow-any-generics from typing import TypeVar, Tuple T = TypeVar('T') @@ -1892,7 +1892,7 @@ x: A = ('a', 'b', 1) # E: Missing type parameters for generic type "A" [builtins fixtures/tuple.pyi] [case testDisallowAnyGenericsPlainList] -# flags: --python-version 3.6 --disallow-any-generics +# flags: --disallow-any-generics from typing import List def f(l: List) -> None: pass # E: Missing type parameters for generic type "List" @@ -1905,7 +1905,7 @@ y: List = [] # E: Missing type parameters for generic type "List" [builtins fixtures/list.pyi] [case testDisallowAnyGenericsCustomGenericClass] -# flags: --python-version 3.6 --disallow-any-generics +# flags: --disallow-any-generics from typing import Generic, TypeVar, Any T = TypeVar('T') @@ -2077,6 +2077,61 @@ y = 1 f(reveal_type(y)) # E: Call to untyped function "f" in typed context \ # N: Revealed type is "builtins.int" +[case testDisallowUntypedCallsAllowListFlags] +# flags: --disallow-untyped-calls --untyped-calls-exclude=foo --untyped-calls-exclude=bar.A +from foo import test_foo +from bar import A, B +from baz import test_baz +from foobar import bad + +test_foo(42) # OK +test_baz(42) # E: Call to untyped function "test_baz" in typed context +bad(42) # E: Call to untyped function "bad" in typed context + +a: A +b: B +a.meth() # OK +b.meth() # E: Call to untyped function "meth" in typed context +[file foo.py] +def test_foo(x): pass +[file foobar.py] +def bad(x): pass +[file bar.py] +class A: + def meth(self): pass +class B: + def meth(self): pass +[file baz.py] +def test_baz(x): pass + +[case testDisallowUntypedCallsAllowListConfig] +# flags: --config-file tmp/mypy.ini +from foo import test_foo +from bar import A, B +from baz import 
test_baz + +test_foo(42) # OK +test_baz(42) # E: Call to untyped function "test_baz" in typed context + +a: A +b: B +a.meth() # OK +b.meth() # E: Call to untyped function "meth" in typed context +[file foo.py] +def test_foo(x): pass +[file bar.py] +class A: + def meth(self): pass +class B: + def meth(self): pass +[file baz.py] +def test_baz(x): pass + +[file mypy.ini] +\[mypy] +disallow_untyped_calls = True +untyped_calls_exclude = foo, bar.A + [case testPerModuleErrorCodes] # flags: --config-file tmp/mypy.ini import tests.foo @@ -2174,3 +2229,39 @@ def f(x: bytes, y: bytearray, z: memoryview) -> None: x in y x in z [builtins fixtures/primitives.pyi] + +[case testNoCrashFollowImportsForStubs] +# flags: --config-file tmp/mypy.ini +{**{"x": "y"}} + +[file mypy.ini] +\[mypy] +follow_imports = skip +follow_imports_for_stubs = true +[builtins fixtures/dict.pyi] + +[case testReturnAnyLambda] +# flags: --warn-return-any +from typing import Any, Callable + +def cb(f: Callable[[int], int]) -> None: ... +a: Any +cb(lambda x: a) # OK + +fn = lambda x: a +cb(fn) + +[case testShowErrorCodeLinks] +# flags: --show-error-codes --show-error-code-links + +x: int = "" # E: Incompatible types in assignment (expression has type "str", variable has type "int") [assignment] +list(1) # E: No overload variant of "list" matches argument type "int" [call-overload] \ + # N: Possible overload variants: \ + # N: def [T] __init__(self) -> List[T] \ + # N: def [T] __init__(self, x: Iterable[T]) -> List[T] \ + # N: See https://mypy.rtfd.io/en/stable/_refs.html#code-call-overload for more info +list(2) # E: No overload variant of "list" matches argument type "int" [call-overload] \ + # N: Possible overload variants: \ + # N: def [T] __init__(self) -> List[T] \ + # N: def [T] __init__(self, x: Iterable[T]) -> List[T] +[builtins fixtures/list.pyi] diff --git a/test-data/unit/check-formatting.test b/test-data/unit/check-formatting.test index 588b2c11714e..75651124b76f 100644 --- a/test-data/unit/check-formatting.test +++ b/test-data/unit/check-formatting.test @@ -4,7 +4,10 @@ [case testStringInterpolationType] from typing import Tuple -i, f, s, t = None, None, None, None # type: (int, float, str, Tuple[int]) +i: int +f: float +s: str +t: Tuple[int] '%d' % i '%f' % f '%s' % s @@ -21,7 +24,9 @@ i, f, s, t = None, None, None, None # type: (int, float, str, Tuple[int]) [case testStringInterpolationSAcceptsAnyType] from typing import Any -i, o, s = None, None, None # type: (int, object, str) +i: int +o: object +s: str '%s %s %s' % (i, o, s) [builtins fixtures/primitives.pyi] @@ -98,7 +103,6 @@ a = None # type: Any [typing fixtures/typing-medium.pyi] [case testStringInterpolationC] -# flags: --python-version 3.6 '%c' % 1 '%c' % 1.0 # E: "%c" requires int or char (expression has type "float") '%c' % 's' @@ -139,8 +143,10 @@ class BytesThing: def __getitem__(self, __key: bytes) -> str: ... 
-a = None # type: Any -ds, do, di = None, None, None # type: Dict[str, int], Dict[object, int], Dict[int, int] +a: Any +ds: Dict[str, int] +do: Dict[object, int] +di: Dict[int, int] '%(a)' % 1 # E: Format requires a mapping (expression has type "int", expected type for mapping is "SupportsKeysAndGetItem[str, Any]") '%()d' % a '%()d' % ds @@ -225,18 +231,12 @@ t5: Iterable[str] = ('A', 'B') -- Bytes interpolation -- -------------------- - -[case testBytesInterpolationBefore35] -# flags: --python-version 3.4 -b'%b' % 1 # E: Unsupported left operand type for % ("bytes") - [case testBytesInterpolation] b'%b' % 1 # E: Incompatible types in string interpolation (expression has type "int", placeholder has type "bytes") b'%b' % b'1' b'%a' % 3 [case testBytesInterpolationC] -# flags: --python-version 3.6 b'%c' % 1 b'%c' % 1.0 # E: "%c" requires an integer in range(256) or a single byte (expression has type "float") b'%c' % 's' # E: "%c" requires an integer in range(256) or a single byte (expression has type "str") @@ -484,6 +484,23 @@ class D(bytes): '{}'.format(D()) [builtins fixtures/primitives.pyi] +[case testNoSpuriousFormattingErrorsDuringFailedOverlodMatch] +from typing import overload, Callable + +@overload +def sub(pattern: str, repl: Callable[[str], str]) -> str: ... +@overload +def sub(pattern: bytes, repl: Callable[[bytes], bytes]) -> bytes: ... +def sub(pattern: object, repl: object) -> object: + pass + +def better_snakecase(text: str) -> str: + # Mypy used to emit a spurious error here + # warning about interpolating bytes into an f-string: + text = sub(r"([A-Z])([A-Z]+)([A-Z](?:[^A-Z]|$))", lambda match: f"{match}") + return text +[builtins fixtures/primitives.pyi] + [case testFormatCallFinal] from typing_extensions import Final diff --git a/test-data/unit/check-functions.test b/test-data/unit/check-functions.test index b5d540b105e3..f49541420cc0 100644 --- a/test-data/unit/check-functions.test +++ b/test-data/unit/check-functions.test @@ -10,8 +10,9 @@ [case testCallingVariableWithFunctionType] from typing import Callable -f = None # type: Callable[[A], B] -a, b = None, None # type: (A, B) +f: Callable[[A], B] +a: A +b: B if int(): a = f(a) # E: Incompatible types in assignment (expression has type "B", variable has type "A") if int(): @@ -82,9 +83,9 @@ from typing import Callable class A: pass class B(A): pass -f = None # type: Callable[[B], A] -g = None # type: Callable[[A], A] # subtype of f -h = None # type: Callable[[B], B] # subtype of f +f: Callable[[B], A] +g: Callable[[A], A] # subtype of f +h: Callable[[B], B] # subtype of f if int(): g = h # E: Incompatible types in assignment (expression has type "Callable[[B], B]", variable has type "Callable[[A], A]") if int(): @@ -132,7 +133,7 @@ ff = g from typing import Callable def f(a: int, b: str) -> None: pass -f_nonames = None # type: Callable[[int, str], None] +f_nonames: Callable[[int, str], None] def g(a: int, b: str = "") -> None: pass def h(aa: int, b: str = "") -> None: pass @@ -160,7 +161,7 @@ if int(): from typing import Any, Callable def everything(*args: Any, **kwargs: Any) -> None: pass -everywhere = None # type: Callable[..., None] +everywhere: Callable[..., None] def specific_1(a: int, b: str) -> None: pass def specific_2(a: int, *, b: str) -> None: pass @@ -238,6 +239,7 @@ if int(): gg = f # E: Incompatible types in assignment (expression has type "Callable[[int, str], None]", variable has type "Callable[[Arg(int, 'a'), Arg(str, 'b')], None]") [case testFunctionTypeCompatibilityWithOtherTypes] +# flags: 
--no-strict-optional from typing import Callable f = None # type: Callable[[], None] a, o = None, None # type: (A, object) @@ -272,8 +274,8 @@ def g(x: int) -> Tuple[()]: [case testFunctionSubtypingWithVoid] from typing import Callable -f = None # type: Callable[[], None] -g = None # type: Callable[[], object] +f: Callable[[], None] +g: Callable[[], object] if int(): f = g # E: Incompatible types in assignment (expression has type "Callable[[], object]", variable has type "Callable[[], None]") if int(): @@ -286,9 +288,9 @@ if int(): [case testFunctionSubtypingWithMultipleArgs] from typing import Callable -f = None # type: Callable[[A, A], None] -g = None # type: Callable[[A, B], None] -h = None # type: Callable[[B, B], None] +f: Callable[[A, A], None] +g: Callable[[A, B], None] +h: Callable[[B, B], None] if int(): f = g # E: Incompatible types in assignment (expression has type "Callable[[A, B], None]", variable has type "Callable[[A, A], None]") if int(): @@ -313,9 +315,9 @@ class B(A): pass [case testFunctionTypesWithDifferentArgumentCounts] from typing import Callable -f = None # type: Callable[[], None] -g = None # type: Callable[[A], None] -h = None # type: Callable[[A, A], None] +f: Callable[[], None] +g: Callable[[A], None] +h: Callable[[A, A], None] if int(): f = g # E: Incompatible types in assignment (expression has type "Callable[[A], None]", variable has type "Callable[[], None]") @@ -342,8 +344,8 @@ class A: def f() -> None: pass -t = None # type: type -a = None # type: A +t: type +a: A if int(): a = A # E: Incompatible types in assignment (expression has type "Type[A]", variable has type "A") @@ -356,9 +358,9 @@ if int(): from foo import * [file foo.pyi] from typing import Callable, overload -f = None # type: Callable[[AA], A] -g = None # type: Callable[[B], B] -h = None # type: Callable[[A], AA] +f: Callable[[AA], A] +g: Callable[[B], B] +h: Callable[[A], AA] if int(): h = i # E: Incompatible types in assignment (expression has type overloaded function, variable has type "Callable[[A], AA]") @@ -395,11 +397,13 @@ def j(x: A) -> AA: from foo import * [file foo.pyi] from typing import Callable, overload -g1 = None # type: Callable[[A], A] -g2 = None # type: Callable[[B], B] -g3 = None # type: Callable[[C], C] -g4 = None # type: Callable[[A], B] -a, b, c = None, None, None # type: (A, B, C) +g1: Callable[[A], A] +g2: Callable[[B], B] +g3: Callable[[C], C] +g4: Callable[[A], B] +a: A +b: B +c: C if int(): b = f(a) # E: Incompatible types in assignment (expression has type "A", variable has type "B") @@ -448,15 +452,15 @@ f([D]) # E: List item 0 has incompatible type "Type[D]"; expected "Callable[[An [case testSubtypingTypeTypeAsCallable] from typing import Callable, Type class A: pass -x = None # type: Callable[..., A] -y = None # type: Type[A] +x: Callable[..., A] +y: Type[A] x = y [case testSubtypingCallableAsTypeType] from typing import Callable, Type class A: pass -x = None # type: Callable[..., A] -y = None # type: Type[A] +x: Callable[..., A] +y: Type[A] if int(): y = x # E: Incompatible types in assignment (expression has type "Callable[..., A]", variable has type "Type[A]") @@ -573,11 +577,11 @@ A().f('') # E: Argument 1 to "f" of "A" has incompatible type "str"; expected "i [case testMethodAsDataAttribute] from typing import Any, Callable, ClassVar class B: pass -x = None # type: Any +x: Any class A: f = x # type: ClassVar[Callable[[A], None]] g = x # type: ClassVar[Callable[[A, B], None]] -a = None # type: A +a: A a.f() a.g(B()) a.f(a) # E: Too many arguments @@ 
-586,21 +590,21 @@ a.g() # E: Too few arguments [case testMethodWithInvalidMethodAsDataAttribute] from typing import Any, Callable, ClassVar class B: pass -x = None # type: Any +x: Any class A: f = x # type: ClassVar[Callable[[], None]] g = x # type: ClassVar[Callable[[B], None]] -a = None # type: A +a: A a.f() # E: Attribute function "f" with type "Callable[[], None]" does not accept self argument a.g() # E: Invalid self argument "A" to attribute function "g" with type "Callable[[B], None]" [case testMethodWithDynamicallyTypedMethodAsDataAttribute] from typing import Any, Callable, ClassVar class B: pass -x = None # type: Any +x: Any class A: f = x # type: ClassVar[Callable[[Any], Any]] -a = None # type: A +a: A a.f() a.f(a) # E: Too many arguments @@ -627,7 +631,7 @@ class A: @overload def f(self, b: B) -> None: pass g = f -a = None # type: A +a: A a.g() a.g(B()) a.g(a) # E: No overload variant matches argument type "A" \ @@ -640,7 +644,7 @@ a.g(a) # E: No overload variant matches argument type "A" \ class A: def f(self, x): pass g = f -a = None # type: A +a: A a.g(object()) a.g(a, a) # E: Too many arguments a.g() # E: Too few arguments @@ -652,7 +656,7 @@ class B: pass class A(Generic[t]): def f(self, x: t) -> None: pass g = f -a = None # type: A[B] +a: A[B] a.g(B()) a.g(a) # E: Argument 1 has incompatible type "A[B]"; expected "B" @@ -661,11 +665,11 @@ from typing import Any, TypeVar, Generic, Callable, ClassVar t = TypeVar('t') class B: pass class C: pass -x = None # type: Any +x: Any class A(Generic[t]): f = x # type: ClassVar[Callable[[A[B]], None]] -ab = None # type: A[B] -ac = None # type: A[C] +ab: A[B] +ac: A[C] ab.f() ac.f() # E: Invalid self argument "A[C]" to attribute function "f" with type "Callable[[A[B]], None]" @@ -674,21 +678,21 @@ from typing import Any, TypeVar, Generic, Callable, ClassVar t = TypeVar('t') class B: pass class C: pass -x = None # type: Any +x: Any class A(Generic[t]): f = x # type: ClassVar[Callable[[A], None]] -ab = None # type: A[B] -ac = None # type: A[C] +ab: A[B] +ac: A[C] ab.f() ac.f() [case testCallableDataAttribute] from typing import Callable, ClassVar class A: - g = None # type: ClassVar[Callable[[A], None]] + g: ClassVar[Callable[[A], None]] def __init__(self, f: Callable[[], None]) -> None: self.f = f -a = A(None) +a = A(lambda: None) a.f() a.g() a.f(a) # E: Too many arguments @@ -895,7 +899,7 @@ def dec(x) -> Callable[[Any], None]: pass class A: @dec def f(self, a, b, c): pass -a = None # type: A +a: A a.f() a.f(None) # E: Too many arguments for "f" of "A" @@ -1945,9 +1949,9 @@ def a(f: Callable[[VarArg(int)], int]): from typing import Callable from mypy_extensions import Arg, DefaultArg -int_str_fun = None # type: Callable[[int, str], str] -int_opt_str_fun = None # type: Callable[[int, DefaultArg(str, None)], str] -int_named_str_fun = None # type: Callable[[int, Arg(str, 's')], str] +int_str_fun: Callable[[int, str], str] +int_opt_str_fun: Callable[[int, DefaultArg(str, None)], str] +int_named_str_fun: Callable[[int, Arg(str, 's')], str] def isf(ii: int, ss: str) -> str: return ss @@ -2140,6 +2144,7 @@ main:8: error: Cannot use a covariant type variable as a parameter from typing import TypeVar, Generic, Callable [case testRejectContravariantReturnType] +# flags: --no-strict-optional from typing import TypeVar, Generic t = TypeVar('t', contravariant=True) @@ -2148,9 +2153,10 @@ class A(Generic[t]): return None [builtins fixtures/bool.pyi] [out] -main:5: error: Cannot use a contravariant type variable as return type +main:6: error: Cannot 
use a contravariant type variable as return type [case testAcceptCovariantReturnType] +# flags: --no-strict-optional from typing import TypeVar, Generic t = TypeVar('t', covariant=True) @@ -2158,6 +2164,7 @@ class A(Generic[t]): def foo(self) -> t: return None [builtins fixtures/bool.pyi] + [case testAcceptContravariantArgument] from typing import TypeVar, Generic @@ -2323,7 +2330,7 @@ T = TypeVar('T') def deco() -> Callable[[T], T]: pass reveal_type(deco) # N: Revealed type is "def () -> def [T] (T`-1) -> T`-1" f = deco() -reveal_type(f) # N: Revealed type is "def [T] (T`-1) -> T`-1" +reveal_type(f) # N: Revealed type is "def [T] (T`1) -> T`1" i = f(3) reveal_type(i) # N: Revealed type is "builtins.int" @@ -2336,7 +2343,7 @@ U = TypeVar('U') def deco(x: U) -> Callable[[T, U], T]: pass reveal_type(deco) # N: Revealed type is "def [U] (x: U`-1) -> def [T] (T`-2, U`-1) -> T`-2" f = deco("foo") -reveal_type(f) # N: Revealed type is "def [T] (T`-2, builtins.str) -> T`-2" +reveal_type(f) # N: Revealed type is "def [T] (T`1, builtins.str) -> T`1" i = f(3, "eggs") reveal_type(i) # N: Revealed type is "builtins.int" @@ -2347,9 +2354,9 @@ T = TypeVar('T') R = TypeVar('R') def deco() -> Callable[[T], Callable[[T, R], R]]: pass f = deco() -reveal_type(f) # N: Revealed type is "def [T] (T`-1) -> def [R] (T`-1, R`-2) -> R`-2" +reveal_type(f) # N: Revealed type is "def [T] (T`2) -> def [R] (T`2, R`1) -> R`1" g = f(3) -reveal_type(g) # N: Revealed type is "def [R] (builtins.int, R`-2) -> R`-2" +reveal_type(g) # N: Revealed type is "def [R] (builtins.int, R`3) -> R`3" s = g(4, "foo") reveal_type(s) # N: Revealed type is "builtins.str" @@ -2538,7 +2545,6 @@ reveal_type(bar(None)) # N: Revealed type is "None" [out] [case testNoComplainOverloadNoneStrict] -# flags: --strict-optional from typing import overload, Optional @overload def bar(x: None) -> None: @@ -2567,7 +2573,6 @@ xx: Optional[int] = X(x_in) [out] [case testNoComplainInferredNoneStrict] -# flags: --strict-optional from typing import TypeVar, Optional T = TypeVar('T') def X(val: T) -> T: ... 
@@ -2752,8 +2757,7 @@ class E(D): pass class F(E): @override def f(self, x: int) -> str: pass -[typing fixtures/typing-full.pyi] -[builtins fixtures/tuple.pyi] +[typing fixtures/typing-override.pyi] [case explicitOverrideStaticmethod] # flags: --python-version 3.12 @@ -2785,8 +2789,8 @@ class D(A): def f(x: str) -> str: pass # E: Argument 1 of "f" is incompatible with supertype "A"; supertype defines the argument type as "int" \ # N: This violates the Liskov substitution principle \ # N: See https://mypy.readthedocs.io/en/stable/common_issues.html#incompatible-overrides -[typing fixtures/typing-full.pyi] -[builtins fixtures/callable.pyi] +[typing fixtures/typing-override.pyi] +[builtins fixtures/staticmethod.pyi] [case explicitOverrideClassmethod] # flags: --python-version 3.12 @@ -2818,8 +2822,8 @@ class D(A): def f(cls, x: str) -> str: pass # E: Argument 1 of "f" is incompatible with supertype "A"; supertype defines the argument type as "int" \ # N: This violates the Liskov substitution principle \ # N: See https://mypy.readthedocs.io/en/stable/common_issues.html#incompatible-overrides -[typing fixtures/typing-full.pyi] -[builtins fixtures/callable.pyi] +[typing fixtures/typing-override.pyi] +[builtins fixtures/classmethod.pyi] [case explicitOverrideProperty] # flags: --python-version 3.12 @@ -2853,8 +2857,8 @@ class D(A): # N: str \ # N: Subclass: \ # N: int +[typing fixtures/typing-override.pyi] [builtins fixtures/property.pyi] -[typing fixtures/typing-full.pyi] [case explicitOverrideSettableProperty] # flags: --python-version 3.12 @@ -2891,8 +2895,8 @@ class D(A): @f.setter def f(self, value: int) -> None: pass +[typing fixtures/typing-override.pyi] [builtins fixtures/property.pyi] -[typing fixtures/typing-full.pyi] [case invalidExplicitOverride] # flags: --python-version 3.12 @@ -2907,8 +2911,7 @@ class A: pass def g() -> None: @override # E: "override" used with a non-method def h(b: bool) -> int: pass -[typing fixtures/typing-full.pyi] -[builtins fixtures/tuple.pyi] +[typing fixtures/typing-override.pyi] [case explicitOverrideSpecialMethods] # flags: --python-version 3.12 @@ -2924,8 +2927,7 @@ class B(A): class C: @override def __init__(self, a: int) -> None: pass -[typing fixtures/typing-full.pyi] -[builtins fixtures/tuple.pyi] +[typing fixtures/typing-override.pyi] [case explicitOverrideFromExtensions] from typing_extensions import override @@ -2936,7 +2938,6 @@ class A: class B(A): @override def f2(self, x: int) -> str: pass # E: Method "f2" is marked as an override, but no base method was found with this name -[typing fixtures/typing-full.pyi] [builtins fixtures/tuple.pyi] [case explicitOverrideOverloads] @@ -2953,8 +2954,7 @@ class B(A): def f2(self, x: str) -> str: pass @override def f2(self, x: int | str) -> str: pass -[typing fixtures/typing-full.pyi] -[builtins fixtures/tuple.pyi] +[typing fixtures/typing-override.pyi] [case explicitOverrideNotOnOverloadsImplementation] # flags: --python-version 3.12 @@ -2978,8 +2978,7 @@ class C(A): @overload def f(self, y: str) -> str: pass def f(self, y: int | str) -> str: pass -[typing fixtures/typing-full.pyi] -[builtins fixtures/tuple.pyi] +[typing fixtures/typing-override.pyi] [case explicitOverrideOnMultipleOverloads] # flags: --python-version 3.12 @@ -3005,5 +3004,157 @@ class C(A): def f(self, y: str) -> str: pass @override def f(self, y: int | str) -> str: pass -[typing fixtures/typing-full.pyi] +[typing fixtures/typing-override.pyi] + +[case explicitOverrideCyclicDependency] +# flags: --python-version 3.12 +import b +[file 
a.py] +from typing import override +import b +import c + +class A(b.B): + @override # This is fine + @c.deco + def meth(self) -> int: ... +[file b.py] +import a +import c + +class B: + @c.deco + def meth(self) -> int: ... +[file c.py] +from typing import TypeVar, Tuple, Callable +T = TypeVar('T') +def deco(f: Callable[..., T]) -> Callable[..., Tuple[T, int]]: ... [builtins fixtures/tuple.pyi] +[typing fixtures/typing-override.pyi] + +[case requireExplicitOverrideMethod] +# flags: --enable-error-code explicit-override --python-version 3.12 +from typing import override + +class A: + def f(self, x: int) -> str: pass + +class B(A): + @override + def f(self, y: int) -> str: pass + +class C(A): + def f(self, y: int) -> str: pass # E: Method "f" is not using @override but is overriding a method in class "__main__.A" + +class D(B): + def f(self, y: int) -> str: pass # E: Method "f" is not using @override but is overriding a method in class "__main__.B" +[typing fixtures/typing-override.pyi] + +[case requireExplicitOverrideSpecialMethod] +# flags: --enable-error-code explicit-override --python-version 3.12 +from typing import Callable, Self, TypeVar, override, overload + +T = TypeVar('T') +def some_decorator(f: Callable[..., T]) -> Callable[..., T]: ... + +# Don't require override decorator for __init__ and __new__ +# See: https://github.com/python/typing/issues/1376 +class A: + def __init__(self) -> None: pass + def __new__(cls) -> Self: pass + +class B(A): + def __init__(self) -> None: pass + def __new__(cls) -> Self: pass + +class C(A): + @some_decorator + def __init__(self) -> None: pass + + @some_decorator + def __new__(cls) -> Self: pass + +class D(A): + @overload + def __init__(self, x: int) -> None: ... + @overload + def __init__(self, x: str) -> None: ... + def __init__(self, x): pass + + @overload + def __new__(cls, x: int) -> Self: pass + @overload + def __new__(cls, x: str) -> Self: pass + def __new__(cls, x): pass +[typing fixtures/typing-override.pyi] + +[case requireExplicitOverrideProperty] +# flags: --enable-error-code explicit-override --python-version 3.12 +from typing import override + +class A: + @property + def prop(self) -> int: pass + +class B(A): + @override + @property + def prop(self) -> int: pass + +class C(A): + @property + def prop(self) -> int: pass # E: Method "prop" is not using @override but is overriding a method in class "__main__.A" +[typing fixtures/typing-override.pyi] +[builtins fixtures/property.pyi] + +[case requireExplicitOverrideOverload] +# flags: --enable-error-code explicit-override --python-version 3.12 +from typing import overload, override + +class A: + @overload + def f(self, x: int) -> str: ... + @overload + def f(self, x: str) -> str: ... + def f(self, x): pass + +class B(A): + @overload + def f(self, y: int) -> str: ... + @overload + def f(self, y: str) -> str: ... + @override + def f(self, y): pass + +class C(A): + @overload + @override + def f(self, y: int) -> str: ... + @overload + def f(self, y: str) -> str: ... + def f(self, y): pass + +class D(A): + @overload + def f(self, y: int) -> str: ... + @overload + def f(self, y: str) -> str: ... 
+ def f(self, y): pass # E: Method "f" is not using @override but is overriding a method in class "__main__.A" +[typing fixtures/typing-override.pyi] + +[case requireExplicitOverrideMultipleInheritance] +# flags: --enable-error-code explicit-override --python-version 3.12 +from typing import override + +class A: + def f(self, x: int) -> str: pass +class B: + def f(self, y: int) -> str: pass + +class C(A, B): + @override + def f(self, z: int) -> str: pass + +class D(A, B): + def f(self, z: int) -> str: pass # E: Method "f" is not using @override but is overriding a method in class "__main__.A" +[typing fixtures/typing-override.pyi] diff --git a/test-data/unit/check-generic-alias.test b/test-data/unit/check-generic-alias.test index 574a57607d11..8c90b5adba34 100644 --- a/test-data/unit/check-generic-alias.test +++ b/test-data/unit/check-generic-alias.test @@ -200,7 +200,6 @@ t23: collections.abc.ValuesView[str] [case testGenericBuiltinTupleTyping] -# flags: --python-version 3.6 from typing import Tuple t01: Tuple = () @@ -248,7 +247,6 @@ reveal_type(tuple[int, ...]()) # N: Revealed type is "builtins.tuple[builtins.i [builtins fixtures/tuple.pyi] [case testTypeAliasWithBuiltinTupleInStub] -# flags: --python-version 3.6 import m reveal_type(m.a) # N: Revealed type is "builtins.tuple[builtins.int, ...]" reveal_type(m.b) # N: Revealed type is "Tuple[builtins.int, builtins.str]" @@ -261,7 +259,6 @@ b: B [builtins fixtures/tuple.pyi] [case testTypeAliasWithBuiltinListInStub] -# flags: --python-version 3.6 import m reveal_type(m.a) # N: Revealed type is "builtins.list[builtins.int]" reveal_type(m.b) # N: Revealed type is "builtins.list[builtins.list[builtins.int]]" @@ -280,7 +277,6 @@ d: type[str] [case testTypeAliasWithBuiltinListAliasInStub] -# flags: --python-version 3.6 import m reveal_type(m.a()[0]) # N: Revealed type is "builtins.int" diff --git a/test-data/unit/check-generic-subtyping.test b/test-data/unit/check-generic-subtyping.test index a34e054fd827..11c92d07021a 100644 --- a/test-data/unit/check-generic-subtyping.test +++ b/test-data/unit/check-generic-subtyping.test @@ -9,9 +9,9 @@ [case testSubtypingAndInheritingNonGenericTypeFromGenericType] from typing import TypeVar, Generic T = TypeVar('T') -ac = None # type: A[C] -ad = None # type: A[D] -b = None # type: B +ac: A[C] +ad: A[D] +b: B if int(): b = ad # E: Incompatible types in assignment (expression has type "A[D]", variable has type "B") @@ -31,9 +31,9 @@ class D: pass [case testSubtypingAndInheritingGenericTypeFromNonGenericType] from typing import TypeVar, Generic T = TypeVar('T') -a = None # type: A -bc = None # type: B[C] -bd = None # type: B[D] +a: A +bc: B[C] +bd: B[D] if int(): bc = bd # E: Incompatible types in assignment (expression has type "B[D]", variable has type "B[C]") @@ -56,10 +56,10 @@ class D: pass from typing import TypeVar, Generic T = TypeVar('T') S = TypeVar('S') -ac = None # type: A[C] -ad = None # type: A[D] -bcc = None # type: B[C, C] -bdc = None # type: B[D, C] +ac: A[C] +ad: A[D] +bcc: B[C, C] +bdc: B[D, C] if int(): ad = bcc # E: Incompatible types in assignment (expression has type "B[C, C]", variable has type "A[D]") @@ -86,12 +86,12 @@ T = TypeVar('T') S = TypeVar('S') X = TypeVar('X') Y = TypeVar('Y') -ae = None # type: A[A[E]] -af = None # type: A[A[F]] +ae: A[A[E]] +af: A[A[F]] -cef = None # type: C[E, F] -cff = None # type: C[F, F] -cfe = None # type: C[F, E] +cef: C[E, F] +cff: C[F, F] +cfe: C[F, E] if int(): ae = cef # E: Incompatible types in assignment (expression has type "C[E, F]", 
variable has type "A[A[E]]") @@ -125,8 +125,9 @@ class C: pass from typing import TypeVar, Generic T = TypeVar('T') S = TypeVar('S') -b = None # type: B[C, D] -c, d = None, None # type: (C, D) +b: B[C, D] +c: C +d: D b.f(c) # E: Argument 1 to "f" of "A" has incompatible type "C"; expected "D" b.f(d) @@ -142,7 +143,9 @@ class D: pass [case testAccessingMethodInheritedFromGenericTypeInNonGenericType] from typing import TypeVar, Generic T = TypeVar('T') -b, c, d = None, None, None # type: (B, C, D) +b: B +c: C +d: D b.f(c) # E: Argument 1 to "f" of "A" has incompatible type "C"; expected "D" b.f(d) @@ -163,8 +166,9 @@ class A(Generic[T]): def __init__(self, a: T) -> None: self.a = a -b = None # type: B[C, D] -c, d = None, None # type: (C, D) +b: B[C, D] +c: C +d: D b.a = c # E: Incompatible types in assignment (expression has type "C", variable has type "D") b.a = d @@ -311,9 +315,9 @@ main:14: note: def [T1 <: str, S] f(self, x: T1, y: S) -> None [case testInheritanceFromGenericWithImplicitDynamicAndSubtyping] from typing import TypeVar, Generic T = TypeVar('T') -a = None # type: A -bc = None # type: B[C] -bd = None # type: B[D] +a: A +bc: B[C] +bd: B[D] if int(): a = bc # E: Incompatible types in assignment (expression has type "B[C]", variable has type "A") @@ -337,9 +341,9 @@ class B(Generic[T]): class A(B): pass class C: pass -a = None # type: A -c = None # type: C -bc = None # type: B[C] +a: A +c: C +bc: B[C] a.x = c # E: Incompatible types in assignment (expression has type "C", variable has type "B[Any]") a.f(c) # E: Argument 1 to "f" of "B" has incompatible type "C"; expected "B[Any]" @@ -350,9 +354,9 @@ a.f(bc) [case testInheritanceFromGenericWithImplicitDynamic] from typing import TypeVar, Generic T = TypeVar('T') -a = None # type: A -c = None # type: C -bc = None # type: B[C] +a: A +c: C +bc: B[C] class B(Generic[T]): def f(self, a: 'B[T]') -> None: pass @@ -458,10 +462,10 @@ from typing import TypeVar, Generic from abc import abstractmethod T = TypeVar('T') S = TypeVar('S') -acd = None # type: A[C, D] -adc = None # type: A[D, C] -ic = None # type: I[C] -id = None # type: I[D] +acd: A[C, D] +adc: A[D, C] +ic: I[C] +id: I[D] if int(): ic = acd # E: Incompatible types in assignment (expression has type "A[C, D]", variable has type "I[C]") @@ -482,8 +486,11 @@ class D: pass [case testSubtypingWithTypeImplementingGenericABCViaInheritance] from typing import TypeVar, Generic S = TypeVar('S') -a, b = None, None # type: (A, B) -ic, id, ie = None, None, None # type: (I[C], I[D], I[E]) +a: A +b: B +ic: I[C] +id: I[D] +ie: I[E] class I(Generic[S]): pass class B(I[C]): pass @@ -523,7 +530,9 @@ main:5: error: Class "B" has base "I" duplicated inconsistently from typing import TypeVar, Generic from abc import abstractmethod, ABCMeta t = TypeVar('t') -a, i, j = None, None, None # type: (A[object], I[object], J[object]) +a: A[object] +i: I[object] +j: J[object] (ii, jj) = (i, j) if int(): ii = a @@ -573,8 +582,9 @@ class D: pass from typing import Any, TypeVar, Generic from abc import abstractmethod T = TypeVar('T') -a = None # type: A -ic, id = None, None # type: (I[C], I[D]) +a: A +ic: I[C] +id: I[D] if int(): id = a # E: Incompatible types in assignment (expression has type "A", variable has type "I[D]") @@ -625,9 +635,9 @@ class D: pass from typing import Any, TypeVar, Generic from abc import abstractmethod T = TypeVar('T') -a = None # type: Any -ic = None # type: I[C] -id = None # type: I[D] +a: Any +ic: I[C] +id: I[D] ic = a id = a @@ -645,9 +655,9 @@ class D: pass from typing import Any, 
TypeVar, Generic from abc import abstractmethod T = TypeVar('T') -a = None # type: Any -ic = None # type: I[C] -id = None # type: I[D] +a: Any +ic: I[C] +id: I[D] ic = a id = a @@ -666,9 +676,9 @@ class D: pass from typing import Any, TypeVar, Generic from abc import abstractmethod T = TypeVar('T') -a = None # type: Any -jc = None # type: J[C] -jd = None # type: J[D] +a: Any +jc: J[C] +jd: J[D] jc = a jd = a @@ -700,8 +710,9 @@ class I(Generic[T]): class A: pass class B: pass -a, b = None, None # type: (A, B) -ia = None # type: I[A] +a: A +b: B +ia: I[A] ia.f(b) # E: Argument 1 to "f" of "I" has incompatible type "B"; expected "A" ia.f(a) @@ -717,8 +728,9 @@ class J(Generic[T]): class I(J[T], Generic[T]): pass class A: pass class B: pass -a, b = None, None # type: (A, B) -ia = None # type: I[A] +a: A +b: B +ia: I[A] ia.f(b) # E: Argument 1 to "f" of "J" has incompatible type "B"; expected "A" ia.f(a) @@ -731,7 +743,8 @@ ia.f(a) [case testMultipleAssignmentAndGenericSubtyping] from typing import Iterable -n, s = None, None # type: int, str +n: int +s: str class Nums(Iterable[int]): def __iter__(self): pass def __next__(self): pass @@ -754,9 +767,9 @@ class A: pass class B(A): pass class C(B): pass -a = None # type: G[A] -b = None # type: G[B] -c = None # type: G[C] +a: G[A] +b: G[B] +c: G[C] if int(): b = a # E: Incompatible types in assignment (expression has type "G[A]", variable has type "G[B]") @@ -773,9 +786,9 @@ class A: pass class B(A): pass class C(B): pass -a = None # type: G[A] -b = None # type: G[B] -c = None # type: G[C] +a: G[A] +b: G[B] +c: G[C] if int(): b = a @@ -792,9 +805,9 @@ class A: pass class B(A): pass class C(B): pass -a = None # type: G[A] -b = None # type: G[B] -c = None # type: G[C] +a: G[A] +b: G[B] +c: G[C] if int(): b = a # E: Incompatible types in assignment (expression has type "G[A]", variable has type "G[B]") diff --git a/test-data/unit/check-generics.test b/test-data/unit/check-generics.test index 06b80be85096..93674c0c2d5c 100644 --- a/test-data/unit/check-generics.test +++ b/test-data/unit/check-generics.test @@ -5,7 +5,9 @@ [case testGenericMethodReturnType] from typing import TypeVar, Generic T = TypeVar('T') -a, b, c = None, None, None # type: (A[B], B, C) +a: A[B] +b: B +c: C if int(): c = a.f() # E: Incompatible types in assignment (expression has type "B", variable has type "C") b = a.f() @@ -24,9 +26,9 @@ T = TypeVar('T') class A(Generic[T]): def f(self, a: T) -> None: pass -a = None # type: A[B] -b = None # type: B -c = None # type: C +a: A[B] +b: B +c: C a.f(c) # E: Argument 1 to "f" of "A" has incompatible type "C"; expected "B" a.f(b) @@ -40,7 +42,9 @@ class A(Generic[T]): def __init__(self, v: T) -> None: self.v = v -a, b, c = None, None, None # type: (A[B], B, C) +a: A[B] +b: B +c: C a.v = c # Fail a.v = b @@ -48,27 +52,31 @@ class B: pass class C: pass [builtins fixtures/tuple.pyi] [out] -main:8: error: Incompatible types in assignment (expression has type "C", variable has type "B") +main:10: error: Incompatible types in assignment (expression has type "C", variable has type "B") [case testGenericMemberVariable2] from typing import TypeVar, Generic T = TypeVar('T') -a, b, c = None, None, None # type: (A[B], B, C) +a: A[B] +b: B +c: C a.v = c # Fail a.v = b class A(Generic[T]): - v = None # type: T + v: T class B: pass class C: pass [builtins fixtures/tuple.pyi] [out] -main:4: error: Incompatible types in assignment (expression has type "C", variable has type "B") +main:6: error: Incompatible types in assignment (expression has type "C", 
variable has type "B") [case testSimpleGenericSubtyping] from typing import TypeVar, Generic T = TypeVar('T') -b, bb, c = None, None, None # type: (A[B], A[B], A[C]) +b: A[B] +bb: A[B] +c: A[C] if int(): c = b # E: Incompatible types in assignment (expression has type "A[B]", variable has type "A[C]") b = c # E: Incompatible types in assignment (expression has type "A[C]", variable has type "A[B]") @@ -86,7 +94,9 @@ class C(B): pass [case testGenericTypeCompatibilityWithAny] from typing import Any, TypeVar, Generic T = TypeVar('T') -b, c, d = None, None, None # type: (A[B], A[C], A[Any]) +b: A[B] +c: A[C] +d: A[Any] b = d c = d @@ -102,9 +112,9 @@ class C(B): pass [case testTypeVariableAsTypeArgument] from typing import TypeVar, Generic T = TypeVar('T') -a = None # type: A[B] -b = None # type: A[B] -c = None # type: A[C] +a: A[B] +b: A[B] +c: A[C] a.v = c # E: Incompatible types in assignment (expression has type "A[C]", variable has type "A[B]") if int(): @@ -123,9 +133,9 @@ class C: pass from typing import TypeVar, Generic S = TypeVar('S') T = TypeVar('T') -a = None # type: A[B, C] -s = None # type: B -t = None # type: C +a: A[B, C] +s: B +t: C if int(): t = a.s # E: Incompatible types in assignment (expression has type "B", variable has type "C") @@ -136,8 +146,8 @@ if int(): t = a.t class A(Generic[S, T]): - s = None # type: S - t = None # type: T + s: S + t: T class B: pass class C: pass @@ -145,9 +155,9 @@ class C: pass from typing import TypeVar, Generic S = TypeVar('S') T = TypeVar('T') -a = None # type: A[B, C] -s = None # type: B -t = None # type: C +a: A[B, C] +s: B +t: C a.f(s, s) # Fail a.f(t, t) # Fail @@ -165,9 +175,9 @@ main:9: error: Argument 1 to "f" of "A" has incompatible type "C"; expected "B" from typing import TypeVar, Generic S = TypeVar('S') T = TypeVar('T') -bc = None # type: A[B, C] -bb = None # type: A[B, B] -cb = None # type: A[C, B] +bc: A[B, C] +bb: A[B, B] +cb: A[C, B] if int(): bb = bc # E: Incompatible types in assignment (expression has type "A[B, C]", variable has type "A[B, B]") @@ -180,8 +190,8 @@ if int(): bc = bc class A(Generic[S, T]): - s = None # type: S - t = None # type: T + s: S + t: T class B: pass class C(B):pass @@ -195,7 +205,7 @@ class C(B):pass from typing import TypeVar, Generic T = TypeVar('T') class A(Generic[T]): - a = None # type: T + a: T def f(self, b: T) -> T: self.f(x) # Fail @@ -203,7 +213,7 @@ class A(Generic[T]): self.a = self.f(self.a) return self.a c = self # type: A[T] -x = None # type: B +x: B class B: pass [out] main:7: error: Argument 1 to "f" of "A" has incompatible type "B"; expected "T" @@ -215,8 +225,8 @@ S = TypeVar('S') T = TypeVar('T') class A(Generic[S, T]): def f(self) -> None: - s = None # type: S - t = None # type: T + s: S + t: T if int(): s = t # E: Incompatible types in assignment (expression has type "T", variable has type "S") t = s # E: Incompatible types in assignment (expression has type "S", variable has type "T") @@ -230,6 +240,7 @@ class B: pass [out] [case testCompatibilityOfNoneWithTypeVar] +# flags: --no-strict-optional from typing import TypeVar, Generic T = TypeVar('T') class A(Generic[T]): @@ -239,6 +250,7 @@ class A(Generic[T]): [out] [case testCompatibilityOfTypeVarWithObject] +# flags: --no-strict-optional from typing import TypeVar, Generic T = TypeVar('T') class A(Generic[T]): @@ -261,9 +273,9 @@ class A(Generic[T]): from typing import TypeVar, Generic S = TypeVar('S') T = TypeVar('T') -a = None # type: A[B, C] -b = None # type: B -c = None # type: C +a: A[B, C] +b: B +c: C if int(): b = 
a + b # E: Incompatible types in assignment (expression has type "C", variable has type "B") @@ -286,9 +298,9 @@ class C: pass [case testOperatorAssignmentWithIndexLvalue1] from typing import TypeVar, Generic T = TypeVar('T') -b = None # type: B -c = None # type: C -ac = None # type: A[C] +b: B +c: C +ac: A[C] ac[b] += b # Fail ac[c] += c # Fail @@ -309,9 +321,9 @@ main:8: error: Invalid index type "C" for "A[C]"; expected type "B" [case testOperatorAssignmentWithIndexLvalue2] from typing import TypeVar, Generic T = TypeVar('T') -b = None # type: B -c = None # type: C -ac = None # type: A[C] +b: B +c: C +ac: A[C] ac[b] += c # Fail ac[c] += c # Fail @@ -337,10 +349,10 @@ main:9: error: Invalid index type "B" for "A[C]"; expected type "C" [case testNestedGenericTypes] from typing import TypeVar, Generic T = TypeVar('T') -aab = None # type: A[A[B]] -aac = None # type: A[A[C]] -ab = None # type: A[B] -ac = None # type: A[C] +aab: A[A[B]] +aac: A[A[C]] +ab: A[B] +ac: A[C] if int(): ac = aab.x # E: Incompatible types in assignment (expression has type "A[B]", variable has type "A[C]") @@ -353,8 +365,8 @@ ab.y = aab ac.y = aac class A(Generic[T]): - x = None # type: T - y = None # type: A[A[T]] + x: T + y: A[A[T]] class B: pass @@ -377,12 +389,12 @@ def f(s: S, t: T) -> p[T, A]: a = t # type: S # E: Incompatible types in assignment (expression has type "T", variable has type "S") if int(): s = t # E: Incompatible types in assignment (expression has type "T", variable has type "S") - p_s_a = None # type: p[S, A] + p_s_a: p[S, A] if s: return p_s_a # E: Incompatible return value type (got "p[S, A]", expected "p[T, A]") b = t # type: T c = s # type: S - p_t_a = None # type: p[T, A] + p_t_a: p[T, A] return p_t_a [out] @@ -396,16 +408,16 @@ class A(Generic[T]): def f(self, s: S, t: T) -> p[S, T]: if int(): s = t # E: Incompatible types in assignment (expression has type "T", variable has type "S") - p_s_s = None # type: p[S, S] + p_s_s: p[S, S] if s: return p_s_s # E: Incompatible return value type (got "p[S, S]", expected "p[S, T]") - p_t_t = None # type: p[T, T] + p_t_t: p[T, T] if t: return p_t_t # E: Incompatible return value type (got "p[T, T]", expected "p[S, T]") if 1: t = t s = s - p_s_t = None # type: p[S, T] + p_s_t: p[S, T] return p_s_t [out] @@ -442,7 +454,7 @@ A[int, str, int]() # E: Type application has too many types (2 expected) [out] [case testInvalidTypeApplicationType] -a = None # type: A +a: A class A: pass a[A]() # E: Value of type "A" is not indexable A[A]() # E: The type "Type[A]" is not generic and not indexable @@ -546,7 +558,7 @@ IntIntNode = Node[int, int] SameNode = Node[T, T] def output_bad() -> IntNode[str]: - return Node(1, 1) # Eroor - bad return type, see out + return Node(1, 1) # Error - bad return type, see out def input(x: IntNode[str]) -> None: pass @@ -576,7 +588,7 @@ reveal_type(y) # N: Revealed type is "__main__.Node[builtins.str, builtins.str]" def wrap(x: T) -> IntNode[T]: return Node(1, x) -z = None # type: str +z: str reveal_type(wrap(z)) # N: Revealed type is "__main__.Node[builtins.int, builtins.str]" [out] @@ -584,7 +596,7 @@ main:13: error: Argument 2 to "Node" has incompatible type "int"; expected "str" -- Error formatting is a bit different (and probably better) with new analyzer [case testGenericTypeAliasesWrongAliases] -# flags: --show-column-numbers --python-version 3.6 --no-strict-optional +# flags: --show-column-numbers --no-strict-optional from typing import TypeVar, Generic, List, Callable, Tuple, Union T = TypeVar('T') S = TypeVar('S') @@ 
-686,7 +698,7 @@ def output() -> IntNode[str]: return Node(1, 'x') x = output() # type: IntNode # This is OK (implicit Any) -y = None # type: IntNode +y: IntNode y.x = 1 y.x = 'x' # E: Incompatible types in assignment (expression has type "str", variable has type "int") y.y = 1 # Both are OK (implicit Any) @@ -707,7 +719,7 @@ class Node(Generic[T]): return self.x ListedNode = Node[List[T]] -l = None # type: ListedNode[int] +l: ListedNode[int] l.x.append(1) l.meth().append(1) reveal_type(l.meth()) # N: Revealed type is "builtins.list[builtins.int]" @@ -848,7 +860,7 @@ def use_cb(arg: T, cb: C2[T]) -> Node[T]: return cb(arg, arg) use_cb(1, 1) # E: Argument 2 to "use_cb" has incompatible type "int"; expected "Callable[[int, int], Node[int]]" -my_cb = None # type: C2[int] +my_cb: C2[int] use_cb('x', my_cb) # E: Argument 2 to "use_cb" has incompatible type "Callable[[int, int], Node[int]]"; expected "Callable[[str, str], Node[str]]" reveal_type(use_cb(1, my_cb)) # N: Revealed type is "__main__.Node[builtins.int]" [builtins fixtures/tuple.pyi] @@ -884,7 +896,7 @@ from typing import TypeVar from a import Node, TupledNode T = TypeVar('T') -n = None # type: TupledNode[int] +n: TupledNode[int] n.x = 1 n.y = (1, 1) n.y = 'x' # E: Incompatible types in assignment (expression has type "str", variable has type "Tuple[int, int]") @@ -1282,9 +1294,9 @@ from typing import List class A: pass class B: pass class B2(B): pass -a = None # type: A -b = None # type: B -b2 = None # type: B2 +a: A +b: B +b2: B2 list_a = [a] list_b = [b] @@ -1313,8 +1325,8 @@ e, f = list_a # type: (A, object) [case testMultipleAssignmentWithListAndIndexing] from typing import List -a = None # type: List[A] -b = None # type: List[int] +a: List[A] +b: List[int] a[1], b[1] = a # E: Incompatible types in assignment (expression has type "A", target has type "int") a[1], a[2] = a @@ -1335,8 +1347,8 @@ class dict: pass [case testMultipleAssignmentWithIterable] from typing import Iterable, TypeVar -a = None # type: int -b = None # type: str +a: int +b: str T = TypeVar('T') def f(x: T) -> Iterable[T]: pass @@ -1380,7 +1392,7 @@ X = TypeVar('X') Y = TypeVar('Y') Z = TypeVar('Z') class OO: pass -a = None # type: A[object, object, object, object, object, object, object, object, object, object, object, object, object, object, object, object, object, object, object, object, object, object, object, object, object] +a: A[object, object, object, object, object, object, object, object, object, object, object, object, object, object, object, object, object, object, object, object, object, object, object, object, object] def f(a: OO) -> None: pass @@ -1393,7 +1405,7 @@ class A(Generic[B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W from typing import TypeVar, Generic S = TypeVar('S') T = TypeVar('T') -a = None # type: A[object, B] +a: A[object, B] def f(a: 'B') -> None: pass f(a) # E: Argument 1 to "f" has incompatible type "A[object, B]"; expected "B" @@ -1405,7 +1417,7 @@ class B: pass from typing import Callable, TypeVar, Generic S = TypeVar('S') T = TypeVar('T') -a = None # type: A[object, Callable[[], None]] +a: A[object, Callable[[], None]] def f(a: 'B') -> None: pass f(a) # E: Argument 1 to "f" has incompatible type "A[object, Callable[[], None]]"; expected "B" @@ -1424,7 +1436,8 @@ from foo import * from typing import overload, List class A: pass class B: pass -a, b = None, None # type: (A, B) +a: A +b: B @overload def f(a: List[A]) -> A: pass @@ -1452,7 +1465,8 @@ def f(a: B) -> B: pass @overload def f(a: List[T]) -> T: pass 
-a, b = None, None # type: (A, B) +a: A +b: B if int(): b = f([a]) # E: Incompatible types in assignment (expression has type "A", variable has type "B") @@ -2309,7 +2323,6 @@ class B(A): [builtins fixtures/classmethod.pyi] [case testSubclassingGenericSelfClassMethodOptional] -# flags: --strict-optional from typing import TypeVar, Type, Optional AT = TypeVar('AT', bound='A') @@ -2700,6 +2713,7 @@ reveal_type(func(1)) # N: Revealed type is "builtins.int" [builtins fixtures/tuple.pyi] [case testGenericLambdaGenericMethodNoCrash] +# flags: --new-type-inference from typing import TypeVar, Union, Callable, Generic S = TypeVar("S") @@ -2710,7 +2724,7 @@ def f(x: Callable[[G[T]], int]) -> T: ... class G(Generic[T]): def g(self, x: S) -> Union[S, T]: ... -f(lambda x: x.g(0)) # E: Cannot infer type argument 1 of "f" +f(lambda x: x.g(0)) # E: Incompatible return value type (got "Union[int, T]", expected "int") [case testDictStarInference] class B: ... @@ -2733,3 +2747,658 @@ dict1: Any dict2 = {"a": C1(), **{x: C2() for x in dict1}} reveal_type(dict2) # N: Revealed type is "builtins.dict[Any, __main__.B]" [builtins fixtures/dict.pyi] + +-- Type inference for generic decorators applied to generic callables +-- ------------------------------------------------------------------ + +[case testInferenceAgainstGenericCallable] +# flags: --new-type-inference +from typing import TypeVar, Callable, List + +X = TypeVar('X') +T = TypeVar('T') + +def foo(x: Callable[[int], X]) -> List[X]: + ... +def bar(x: Callable[[X], int]) -> List[X]: + ... + +def id(x: T) -> T: + ... +reveal_type(foo(id)) # N: Revealed type is "builtins.list[builtins.int]" +reveal_type(bar(id)) # N: Revealed type is "builtins.list[builtins.int]" +[builtins fixtures/list.pyi] + +[case testInferenceAgainstGenericCallableNoLeak] +# flags: --new-type-inference +from typing import TypeVar, Callable + +T = TypeVar('T') + +def f(x: Callable[..., T]) -> T: + return x() + +def tpl(x: T) -> T: + return x + +# This is valid because of "..." +reveal_type(f(tpl)) # N: Revealed type is "Any" +[out] + +[case testInferenceAgainstGenericCallableChain] +# flags: --new-type-inference +from typing import TypeVar, Callable, List + +X = TypeVar('X') +T = TypeVar('T') + +def chain(f: Callable[[X], T], g: Callable[[T], int]) -> Callable[[X], int]: ... +def id(x: T) -> T: + ... +reveal_type(chain(id, id)) # N: Revealed type is "def (builtins.int) -> builtins.int" +[builtins fixtures/list.pyi] + +[case testInferenceAgainstGenericCallableGeneric] +# flags: --new-type-inference +from typing import TypeVar, Callable, List + +S = TypeVar('S') +T = TypeVar('T') +U = TypeVar('U') + +def dec(f: Callable[[S], T]) -> Callable[[S], List[T]]: + ... +def id(x: U) -> U: + ... +reveal_type(dec(id)) # N: Revealed type is "def [S] (S`1) -> builtins.list[S`1]" + +@dec +def same(x: U) -> U: + ... +reveal_type(same) # N: Revealed type is "def [S] (S`3) -> builtins.list[S`3]" +reveal_type(same(42)) # N: Revealed type is "builtins.list[builtins.int]" +[builtins fixtures/list.pyi] + +[case testInferenceAgainstGenericCallableGenericReverse] +# flags: --new-type-inference +from typing import TypeVar, Callable, List + +S = TypeVar('S') +T = TypeVar('T') +U = TypeVar('U') + +def dec(f: Callable[[S], List[T]]) -> Callable[[S], T]: + ... +def id(x: U) -> U: + ... +reveal_type(dec(id)) # N: Revealed type is "def [T] (builtins.list[T`2]) -> T`2" + +@dec +def same(x: U) -> U: + ... 
+reveal_type(same) # N: Revealed type is "def [T] (builtins.list[T`4]) -> T`4" +reveal_type(same([42])) # N: Revealed type is "builtins.int" +[builtins fixtures/list.pyi] + +[case testInferenceAgainstGenericCallableGenericArg] +# flags: --new-type-inference +from typing import TypeVar, Callable, List + +S = TypeVar('S') +T = TypeVar('T') +U = TypeVar('U') + +def dec(f: Callable[[S], T]) -> Callable[[S], T]: + ... +def test(x: U) -> List[U]: + ... +reveal_type(dec(test)) # N: Revealed type is "def [S] (S`1) -> builtins.list[S`1]" + +@dec +def single(x: U) -> List[U]: + ... +reveal_type(single) # N: Revealed type is "def [S] (S`3) -> builtins.list[S`3]" +reveal_type(single(42)) # N: Revealed type is "builtins.list[builtins.int]" +[builtins fixtures/list.pyi] + +[case testInferenceAgainstGenericCallableGenericChain] +# flags: --new-type-inference +from typing import TypeVar, Callable, List + +S = TypeVar('S') +T = TypeVar('T') +U = TypeVar('U') + +def comb(f: Callable[[T], S], g: Callable[[S], U]) -> Callable[[T], U]: ... +def id(x: U) -> U: + ... +reveal_type(comb(id, id)) # N: Revealed type is "def [T] (T`1) -> T`1" +[builtins fixtures/list.pyi] + +[case testInferenceAgainstGenericCallableGenericNonLinear] +# flags: --new-type-inference +from typing import TypeVar, Callable, List + +S = TypeVar('S') +T = TypeVar('T') +U = TypeVar('U') + +def mix(fs: List[Callable[[S], T]]) -> Callable[[S], List[T]]: + def inner(x: S) -> List[T]: + return [f(x) for f in fs] + return inner + +# Errors caused by arg *name* mismatch are truly cryptic, but this is a known issue :/ +def id(__x: U) -> U: + ... +fs = [id, id, id] +reveal_type(mix(fs)) # N: Revealed type is "def [S] (S`3) -> builtins.list[S`3]" +reveal_type(mix([id, id, id])) # N: Revealed type is "def [S] (S`5) -> builtins.list[S`5]" +[builtins fixtures/list.pyi] + +[case testInferenceAgainstGenericCurry] +# flags: --new-type-inference +from typing import Callable, List, TypeVar + +S = TypeVar("S") +T = TypeVar("T") +U = TypeVar("U") +V = TypeVar("V") + +def dec1(f: Callable[[T], S]) -> Callable[[], Callable[[T], S]]: ... +def dec2(f: Callable[[T, U], S]) -> Callable[[U], Callable[[T], S]]: ... + +def test1(x: V) -> V: ... +def test2(x: V, y: V) -> V: ... + +reveal_type(dec1(test1)) # N: Revealed type is "def () -> def [T] (T`1) -> T`1" +reveal_type(dec2(test2)) # N: Revealed type is "def [T] (T`3) -> def (T`3) -> T`3" +[builtins fixtures/list.pyi] + +[case testInferenceAgainstGenericCallableNewVariable] +# flags: --new-type-inference +from typing import TypeVar, Callable, List + +S = TypeVar('S') +T = TypeVar('T') +U = TypeVar('U') + +def dec(f: Callable[[S], T]) -> Callable[[S], T]: + ... +def test(x: List[U]) -> List[U]: + ... +reveal_type(dec(test)) # N: Revealed type is "def [U] (builtins.list[U`-1]) -> builtins.list[U`-1]" +[builtins fixtures/list.pyi] + +[case testInferenceAgainstGenericCallableGenericAlias] +# flags: --new-type-inference +from typing import TypeVar, Callable, List + +S = TypeVar('S') +T = TypeVar('T') +U = TypeVar('U') + +A = Callable[[S], T] +B = Callable[[S], List[T]] + +def dec(f: A[S, T]) -> B[S, T]: + ... +def id(x: U) -> U: + ... +reveal_type(dec(id)) # N: Revealed type is "def [S] (S`1) -> builtins.list[S`1]" +[builtins fixtures/list.pyi] + +[case testInferenceAgainstGenericCallableGenericProtocol] +# flags: --new-type-inference +from typing import TypeVar, Protocol, Generic, Optional + +T = TypeVar('T') + +class F(Protocol[T]): + def __call__(self, __x: T) -> T: ... + +def lift(f: F[T]) -> F[Optional[T]]: ... 
+def g(x: T) -> T: + return x + +reveal_type(lift(g)) # N: Revealed type is "def [T] (Union[T`1, None]) -> Union[T`1, None]" +[builtins fixtures/list.pyi] + +[case testInferenceAgainstGenericSplitOrder] +# flags: --new-type-inference +from typing import TypeVar, Callable, List + +S = TypeVar('S') +T = TypeVar('T') +U = TypeVar('U') + +def dec(f: Callable[[T], S], g: Callable[[T], int]) -> Callable[[T], List[S]]: ... +def id(x: U) -> U: + ... + +reveal_type(dec(id, id)) # N: Revealed type is "def (builtins.int) -> builtins.list[builtins.int]" +[builtins fixtures/list.pyi] + +[case testInferenceAgainstGenericSplitOrderGeneric] +# flags: --new-type-inference +from typing import TypeVar, Callable, Tuple + +S = TypeVar('S') +T = TypeVar('T') +U = TypeVar('U') +V = TypeVar('V') + +def dec(f: Callable[[T], S], g: Callable[[T], U]) -> Callable[[T], Tuple[S, U]]: ... +def id(x: V) -> V: + ... + +reveal_type(dec(id, id)) # N: Revealed type is "def [T] (T`1) -> Tuple[T`1, T`1]" +[builtins fixtures/tuple.pyi] + +[case testInferenceAgainstGenericEllipsisSelfSpecialCase] +# flags: --new-type-inference +from typing import Self, Callable, TypeVar + +T = TypeVar("T") +def dec(f: Callable[..., T]) -> Callable[..., T]: ... + +class C: + @dec + def test(self) -> Self: ... + +c: C +reveal_type(c.test()) # N: Revealed type is "__main__.C" + +[case testInferenceAgainstGenericBoundsAndValues] +# flags: --new-type-inference +from typing import TypeVar, Callable, List + +class B: ... +class C(B): ... + +S = TypeVar('S') +T = TypeVar('T') +UB = TypeVar('UB', bound=B) +UC = TypeVar('UC', bound=C) +V = TypeVar('V', int, str) + +def dec1(f: Callable[[S], T]) -> Callable[[S], List[T]]: + ... +def dec2(f: Callable[[UC], T]) -> Callable[[UC], List[T]]: + ... +def id1(x: UB) -> UB: + ... +def id2(x: V) -> V: + ... + +reveal_type(dec1(id1)) # N: Revealed type is "def [S <: __main__.B] (S`1) -> builtins.list[S`1]" +reveal_type(dec1(id2)) # N: Revealed type is "def [S in (builtins.int, builtins.str)] (S`3) -> builtins.list[S`3]" +reveal_type(dec2(id1)) # N: Revealed type is "def [UC <: __main__.C] (UC`5) -> builtins.list[UC`5]" +reveal_type(dec2(id2)) # N: Revealed type is "def () -> builtins.list[]" \ + # E: Argument 1 to "dec2" has incompatible type "Callable[[V], V]"; expected "Callable[[], ]" + +[case testInferenceAgainstGenericLambdas] +# flags: --new-type-inference +from typing import TypeVar, Callable, List + +S = TypeVar('S') +T = TypeVar('T') + +def dec1(f: Callable[[T], T]) -> Callable[[T], List[T]]: + ... +def dec2(f: Callable[[S], T]) -> Callable[[S], List[T]]: + ... +def dec3(f: Callable[[List[S]], T]) -> Callable[[S], T]: + def g(x: S) -> T: + return f([x]) + return g +def dec4(f: Callable[[S], List[T]]) -> Callable[[S], T]: + ... 
+def dec5(f: Callable[[int], T]) -> Callable[[int], List[T]]: + def g(x: int) -> List[T]: + return [f(x)] * x + return g + +reveal_type(dec1(lambda x: x)) # N: Revealed type is "def [T] (T`3) -> builtins.list[T`3]" +reveal_type(dec2(lambda x: x)) # N: Revealed type is "def [S] (S`4) -> builtins.list[S`4]" +reveal_type(dec3(lambda x: x[0])) # N: Revealed type is "def [S] (S`6) -> S`6" +reveal_type(dec4(lambda x: [x])) # N: Revealed type is "def [S] (S`9) -> S`9" +reveal_type(dec1(lambda x: 1)) # N: Revealed type is "def (builtins.int) -> builtins.list[builtins.int]" +reveal_type(dec5(lambda x: x)) # N: Revealed type is "def (builtins.int) -> builtins.list[builtins.int]" +reveal_type(dec3(lambda x: x)) # N: Revealed type is "def [S] (S`16) -> builtins.list[S`16]" +dec4(lambda x: x) # E: Incompatible return value type (got "S", expected "List[object]") +[builtins fixtures/list.pyi] + +[case testInferenceAgainstGenericParamSpecBasicInList] +# flags: --new-type-inference +from typing import TypeVar, Callable, List, Tuple +from typing_extensions import ParamSpec + +T = TypeVar('T') +P = ParamSpec('P') +U = TypeVar('U') +V = TypeVar('V') + +def dec(f: Callable[P, T]) -> Callable[P, List[T]]: ... +def id(x: U) -> U: ... +def either(x: U, y: U) -> U: ... +def pair(x: U, y: V) -> Tuple[U, V]: ... +reveal_type(dec(id)) # N: Revealed type is "def [T] (x: T`2) -> builtins.list[T`2]" +reveal_type(dec(either)) # N: Revealed type is "def [T] (x: T`4, y: T`4) -> builtins.list[T`4]" +reveal_type(dec(pair)) # N: Revealed type is "def [U, V] (x: U`-1, y: V`-2) -> builtins.list[Tuple[U`-1, V`-2]]" +[builtins fixtures/list.pyi] + +[case testInferenceAgainstGenericParamSpecBasicDeList] +# flags: --new-type-inference +from typing import TypeVar, Callable, List, Tuple +from typing_extensions import ParamSpec + +T = TypeVar('T') +P = ParamSpec('P') +U = TypeVar('U') +V = TypeVar('V') + +def dec(f: Callable[P, List[T]]) -> Callable[P, T]: ... +def id(x: U) -> U: ... +def either(x: U, y: U) -> U: ... +reveal_type(dec(id)) # N: Revealed type is "def [T] (x: builtins.list[T`2]) -> T`2" +reveal_type(dec(either)) # N: Revealed type is "def [T] (x: builtins.list[T`4], y: builtins.list[T`4]) -> T`4" +[builtins fixtures/list.pyi] + +[case testInferenceAgainstGenericParamSpecPopOff] +# flags: --new-type-inference +from typing import TypeVar, Callable, List, Tuple +from typing_extensions import ParamSpec, Concatenate + +T = TypeVar('T') +S = TypeVar('S') +P = ParamSpec('P') +U = TypeVar('U') +V = TypeVar('V') + +def dec(f: Callable[Concatenate[T, P], S]) -> Callable[P, Callable[[T], S]]: ... +def id(x: U) -> U: ... +def either(x: U, y: U) -> U: ... +def pair(x: U, y: V) -> Tuple[U, V]: ... +reveal_type(dec(id)) # N: Revealed type is "def () -> def [T] (T`1) -> T`1" +reveal_type(dec(either)) # N: Revealed type is "def [T] (y: T`4) -> def (T`4) -> T`4" +reveal_type(dec(pair)) # N: Revealed type is "def [V] (y: V`-2) -> def [T] (T`7) -> Tuple[T`7, V`-2]" +reveal_type(dec(dec)) # N: Revealed type is "def () -> def [T, P, S] (def (T`-1, *P.args, **P.kwargs) -> S`-3) -> def (*P.args, **P.kwargs) -> def (T`-1) -> S`-3" +[builtins fixtures/list.pyi] + +[case testInferenceAgainstGenericParamSpecPopOn] +# flags: --new-type-inference +from typing import TypeVar, Callable, List, Tuple +from typing_extensions import ParamSpec, Concatenate + +T = TypeVar('T') +S = TypeVar('S') +P = ParamSpec('P') +U = TypeVar('U') +V = TypeVar('V') + +def dec(f: Callable[P, Callable[[T], S]]) -> Callable[Concatenate[T, P], S]: ... 
+def id() -> Callable[[U], U]: ... +def either(x: U) -> Callable[[U], U]: ... +def pair(x: U) -> Callable[[V], Tuple[V, U]]: ... +reveal_type(dec(id)) # N: Revealed type is "def [T] (T`2) -> T`2" +reveal_type(dec(either)) # N: Revealed type is "def [T] (T`5, x: T`5) -> T`5" +reveal_type(dec(pair)) # N: Revealed type is "def [T, U] (T`8, x: U`-1) -> Tuple[T`8, U`-1]" +# This is counter-intuitive but looks correct, dec matches itself only if P can be empty +reveal_type(dec(dec)) # N: Revealed type is "def [T, S] (T`11, f: def () -> def (T`11) -> S`12) -> S`12" +[builtins fixtures/list.pyi] + +[case testInferenceAgainstGenericParamSpecVsParamSpec] +# flags: --new-type-inference +from typing import TypeVar, Callable, List, Tuple, Generic +from typing_extensions import ParamSpec, Concatenate + +T = TypeVar('T') +P = ParamSpec('P') +Q = ParamSpec('Q') + +class Foo(Generic[P]): ... +class Bar(Generic[P, T]): ... + +def dec(f: Callable[P, T]) -> Callable[P, List[T]]: ... +def f(*args: Q.args, **kwargs: Q.kwargs) -> Foo[Q]: ... +reveal_type(dec(f)) # N: Revealed type is "def [P] (*P.args, **P.kwargs) -> builtins.list[__main__.Foo[P`1]]" +g: Callable[Concatenate[int, Q], Foo[Q]] +reveal_type(dec(g)) # N: Revealed type is "def [Q] (builtins.int, *Q.args, **Q.kwargs) -> builtins.list[__main__.Foo[Q`-1]]" +h: Callable[Concatenate[T, Q], Bar[Q, T]] +reveal_type(dec(h)) # N: Revealed type is "def [T, Q] (T`-1, *Q.args, **Q.kwargs) -> builtins.list[__main__.Bar[Q`-2, T`-1]]" +[builtins fixtures/list.pyi] + +[case testInferenceAgainstGenericParamSpecVsParamSpecConcatenate] +# flags: --new-type-inference +from typing import TypeVar, Callable, List, Tuple, Generic +from typing_extensions import ParamSpec, Concatenate + +T = TypeVar('T') +P = ParamSpec('P') +Q = ParamSpec('Q') + +class Foo(Generic[P]): ... + +def dec(f: Callable[P, int]) -> Callable[P, Foo[P]]: ... +h: Callable[Concatenate[T, Q], int] +g: Callable[Concatenate[T, Q], int] +h = g +reveal_type(dec(h)) # N: Revealed type is "def [T, Q] (T`-1, *Q.args, **Q.kwargs) -> __main__.Foo[[T`-1, **Q`-2]]" +[builtins fixtures/list.pyi] + +[case testInferenceAgainstGenericParamSpecSecondary] +# flags: --new-type-inference +from typing import TypeVar, Callable, List, Tuple, Generic +from typing_extensions import ParamSpec, Concatenate + +T = TypeVar('T') +P = ParamSpec('P') +Q = ParamSpec('Q') + +class Foo(Generic[P]): ... + +def dec(f: Callable[P, Foo[P]]) -> Callable[P, Foo[P]]: ... +g: Callable[[T], Foo[[int]]] +reveal_type(dec(g)) # N: Revealed type is "def (builtins.int) -> __main__.Foo[[builtins.int]]" +h: Callable[Q, Foo[[int]]] +reveal_type(dec(g)) # N: Revealed type is "def (builtins.int) -> __main__.Foo[[builtins.int]]" +[builtins fixtures/list.pyi] + +[case testInferenceAgainstGenericParamSpecSecondOrder] +# flags: --new-type-inference +from typing import TypeVar, Callable +from typing_extensions import ParamSpec, Concatenate + +T = TypeVar('T') +S = TypeVar('S') +P = ParamSpec('P') +Q = ParamSpec('Q') +U = TypeVar('U') +W = ParamSpec('W') + +def transform( + dec: Callable[[Callable[P, T]], Callable[Q, S]] +) -> Callable[[Callable[Concatenate[int, P], T]], Callable[Concatenate[int, Q], S]]: ... + +def dec(f: Callable[W, U]) -> Callable[W, U]: ... +def dec2(f: Callable[Concatenate[str, W], U]) -> Callable[Concatenate[bytes, W], U]: ... 
+reveal_type(transform(dec)) # N: Revealed type is "def [P, T] (def (builtins.int, *P.args, **P.kwargs) -> T`2) -> def (builtins.int, *P.args, **P.kwargs) -> T`2" +reveal_type(transform(dec2)) # N: Revealed type is "def [W, T] (def (builtins.int, builtins.str, *W.args, **W.kwargs) -> T`6) -> def (builtins.int, builtins.bytes, *W.args, **W.kwargs) -> T`6" +[builtins fixtures/tuple.pyi] + +[case testNoAccidentalVariableClashInNestedGeneric] +# flags: --new-type-inference +from typing import TypeVar, Callable, Generic, Tuple + +T = TypeVar('T') +S = TypeVar('S') +U = TypeVar('U') + +def pipe(x: T, f1: Callable[[T], S], f2: Callable[[S], U]) -> U: ... +def and_then(a: T) -> Callable[[S], Tuple[S, T]]: ... + +def apply(a: S, b: T) -> None: + v1 = and_then(b) + v2: Callable[[Tuple[S, T]], None] + return pipe(a, v1, v2) +[builtins fixtures/tuple.pyi] + +[case testInferenceAgainstGenericParamSpecSpuriousBoundsNotUsed] +# flags: --new-type-inference +from typing import TypeVar, Callable, Generic +from typing_extensions import ParamSpec, Concatenate + +Q = ParamSpec("Q") +class Foo(Generic[Q]): ... + +T1 = TypeVar("T1", bound=Foo[...]) +T2 = TypeVar("T2", bound=Foo[...]) +P = ParamSpec("P") +def pop_off(fn: Callable[Concatenate[T1, P], T2]) -> Callable[P, Callable[[T1], T2]]: + ... + +@pop_off +def test(command: Foo[Q]) -> Foo[Q]: ... +reveal_type(test) # N: Revealed type is "def () -> def [Q] (__main__.Foo[Q`-1]) -> __main__.Foo[Q`-1]" +[builtins fixtures/tuple.pyi] + +[case testInferenceAgainstGenericVariadicBasicInList] +# flags: --new-type-inference +from typing import Tuple, TypeVar, List, Callable +from typing_extensions import Unpack, TypeVarTuple + +T = TypeVar("T") +Ts = TypeVarTuple("Ts") +def dec(f: Callable[[Unpack[Ts]], T]) -> Callable[[Unpack[Ts]], List[T]]: ... + +U = TypeVar("U") +V = TypeVar("V") +def id(x: U) -> U: ... +def either(x: U, y: U) -> U: ... +def pair(x: U, y: V) -> Tuple[U, V]: ... + +reveal_type(dec(id)) # N: Revealed type is "def [T] (T`2) -> builtins.list[T`2]" +reveal_type(dec(either)) # N: Revealed type is "def [T] (T`4, T`4) -> builtins.list[T`4]" +reveal_type(dec(pair)) # N: Revealed type is "def [U, V] (U`-1, V`-2) -> builtins.list[Tuple[U`-1, V`-2]]" +[builtins fixtures/tuple.pyi] + +[case testInferenceAgainstGenericVariadicBasicDeList] +# flags: --new-type-inference +from typing import Tuple, TypeVar, List, Callable +from typing_extensions import Unpack, TypeVarTuple + +T = TypeVar("T") +Ts = TypeVarTuple("Ts") +def dec(f: Callable[[Unpack[Ts]], List[T]]) -> Callable[[Unpack[Ts]], T]: ... + +U = TypeVar("U") +V = TypeVar("V") +def id(x: U) -> U: ... +def either(x: U, y: U) -> U: ... + +reveal_type(dec(id)) # N: Revealed type is "def [T] (builtins.list[T`2]) -> T`2" +reveal_type(dec(either)) # N: Revealed type is "def [T] (builtins.list[T`4], builtins.list[T`4]) -> T`4" +[builtins fixtures/tuple.pyi] + +[case testInferenceAgainstGenericVariadicPopOff] +# flags: --new-type-inference +from typing import TypeVar, Callable, List, Tuple +from typing_extensions import Unpack, TypeVarTuple + +T = TypeVar("T") +S = TypeVar("S") +Ts = TypeVarTuple("Ts") +def dec(f: Callable[[T, Unpack[Ts]], S]) -> Callable[[Unpack[Ts]], Callable[[T], S]]: ... + +U = TypeVar("U") +V = TypeVar("V") +def id(x: U) -> U: ... +def either(x: U, y: U) -> U: ... +def pair(x: U, y: V) -> Tuple[U, V]: ... 
+ +reveal_type(dec(id)) # N: Revealed type is "def () -> def [T] (T`1) -> T`1" +reveal_type(dec(either)) # N: Revealed type is "def [T] (T`4) -> def (T`4) -> T`4" +reveal_type(dec(pair)) # N: Revealed type is "def [V] (V`-2) -> def [T] (T`7) -> Tuple[T`7, V`-2]" +reveal_type(dec(dec)) # N: Revealed type is "def () -> def [T, Ts, S] (def (T`-1, *Unpack[Ts`-2]) -> S`-3) -> def (*Unpack[Ts`-2]) -> def (T`-1) -> S`-3" +[builtins fixtures/list.pyi] + +[case testInferenceAgainstGenericVariadicPopOn] +# flags: --new-type-inference +from typing import TypeVar, Callable, List, Tuple +from typing_extensions import Unpack, TypeVarTuple + +T = TypeVar("T") +S = TypeVar("S") +Ts = TypeVarTuple("Ts") +def dec(f: Callable[[Unpack[Ts]], Callable[[T], S]]) -> Callable[[T, Unpack[Ts]], S]: ... + +U = TypeVar("U") +V = TypeVar("V") +def id() -> Callable[[U], U]: ... +def either(x: U) -> Callable[[U], U]: ... +def pair(x: U) -> Callable[[V], Tuple[V, U]]: ... + +reveal_type(dec(id)) # N: Revealed type is "def [T] (T`2) -> T`2" +reveal_type(dec(either)) # N: Revealed type is "def [T] (T`5, T`5) -> T`5" +reveal_type(dec(pair)) # N: Revealed type is "def [T, U] (T`8, U`-1) -> Tuple[T`8, U`-1]" +# This is counter-intuitive but looks correct, dec matches itself only if Ts is empty +reveal_type(dec(dec)) # N: Revealed type is "def [T, S] (T`11, def () -> def (T`11) -> S`12) -> S`12" +[builtins fixtures/list.pyi] + +[case testInferenceAgainstGenericVariadicVsVariadic] +# flags: --new-type-inference +from typing import TypeVar, Callable, List, Generic +from typing_extensions import Unpack, TypeVarTuple + +T = TypeVar("T") +S = TypeVar("S") +Ts = TypeVarTuple("Ts") +Us = TypeVarTuple("Us") + +class Foo(Generic[Unpack[Ts]]): ... +class Bar(Generic[Unpack[Ts], T]): ... + +def dec(f: Callable[[Unpack[Ts]], T]) -> Callable[[Unpack[Ts]], List[T]]: ... +def f(*args: Unpack[Us]) -> Foo[Unpack[Us]]: ... +reveal_type(dec(f)) # N: Revealed type is "def [Ts] (*Unpack[Ts`1]) -> builtins.list[__main__.Foo[Unpack[Ts`1]]]" +g: Callable[[Unpack[Us]], Foo[Unpack[Us]]] +reveal_type(dec(g)) # N: Revealed type is "def [Ts] (*Unpack[Ts`3]) -> builtins.list[__main__.Foo[Unpack[Ts`3]]]" +[builtins fixtures/list.pyi] + +[case testInferenceAgainstGenericVariadicVsVariadicConcatenate] +# flags: --new-type-inference +from typing import TypeVar, Callable, Generic +from typing_extensions import Unpack, TypeVarTuple + +T = TypeVar("T") +S = TypeVar("S") +Ts = TypeVarTuple("Ts") +Us = TypeVarTuple("Us") + +class Foo(Generic[Unpack[Ts]]): ... + +def dec(f: Callable[[Unpack[Ts]], int]) -> Callable[[Unpack[Ts]], Foo[Unpack[Ts]]]: ... +h: Callable[[T, Unpack[Us]], int] +g: Callable[[T, Unpack[Us]], int] +h = g +reveal_type(dec(h)) # N: Revealed type is "def [T, Us] (T`-1, *Unpack[Us`-2]) -> __main__.Foo[T`-1, Unpack[Us`-2]]" +[builtins fixtures/list.pyi] + +[case testInferenceAgainstGenericVariadicSecondary] +# flags: --new-type-inference +from typing import TypeVar, Callable, Generic +from typing_extensions import Unpack, TypeVarTuple + +T = TypeVar("T") +Ts = TypeVarTuple("Ts") +Us = TypeVarTuple("Us") + +class Foo(Generic[Unpack[Ts]]): ... + +def dec(f: Callable[[Unpack[Ts]], Foo[Unpack[Ts]]]) -> Callable[[Unpack[Ts]], Foo[Unpack[Ts]]]: ... 
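The variadic cases above mirror the ParamSpec ones but use TypeVarTuple, so the decorator is generic over the tuple of positional argument types rather than over a full parameter list. A small sketch of that pattern follows; it is not part of the patch, the names are illustrative, and it assumes a typing_extensions version that provides TypeVarTuple and Unpack.

from typing import Callable, List, Tuple, TypeVar
from typing_extensions import TypeVarTuple, Unpack

T = TypeVar("T")
Ts = TypeVarTuple("Ts")

def listify(f: Callable[[Unpack[Ts]], T]) -> Callable[[Unpack[Ts]], List[T]]:
    # Generic only over the positional argument types; unlike the ParamSpec
    # version, keyword arguments are not captured.
    def wrapper(*args: Unpack[Ts]) -> List[T]:
        return [f(*args)]
    return wrapper

def pair(x: int, y: str) -> Tuple[int, str]:
    return (x, y)

wrapped = listify(pair)
# A checker supporting this should infer roughly (int, str) -> List[Tuple[int, str]].
print(wrapped(1, "a"))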
+g: Callable[[T], Foo[int]] +reveal_type(dec(g)) # N: Revealed type is "def (builtins.int) -> __main__.Foo[builtins.int]" +h: Callable[[Unpack[Us]], Foo[int]] +reveal_type(dec(g)) # N: Revealed type is "def (builtins.int) -> __main__.Foo[builtins.int]" +[builtins fixtures/list.pyi] diff --git a/test-data/unit/check-incremental.test b/test-data/unit/check-incremental.test index 661afca807f4..fcab0545b982 100644 --- a/test-data/unit/check-incremental.test +++ b/test-data/unit/check-incremental.test @@ -1734,12 +1734,12 @@ class R: pass [file r/s.py] from . import m R = m.R -a = None # type: R +a: R [file r/s.py.2] from . import m R = m.R -a = None # type: R +a: R [case testIncrementalBaseClassAttributeConflict] class A: pass @@ -2603,7 +2603,7 @@ def output() -> IntNode[str]: return Node(1, 'x') x = output() # type: IntNode -y = None # type: IntNode +y: IntNode y.x = 1 y.y = 1 y.y = 'x' @@ -2625,7 +2625,7 @@ def output() -> IntNode[str]: return Node(1, 'x') x = output() # type: IntNode -y = None # type: IntNode +y: IntNode y.x = 1 y.y = 1 y.y = 'x' @@ -2901,7 +2901,6 @@ tmp/main.py:2: error: Expression has type "Any" tmp/main.py:2: error: Expression has type "Any" [case testIncrementalStrictOptional] -# flags: --strict-optional import a 1 + a.foo() [file a.py] @@ -2911,8 +2910,8 @@ from typing import Optional def foo() -> Optional[int]: return 0 [out1] [out2] -main:3: error: Unsupported operand types for + ("int" and "None") -main:3: note: Right operand is of type "Optional[int]" +main:2: error: Unsupported operand types for + ("int" and "None") +main:2: note: Right operand is of type "Optional[int]" [case testAttrsIncrementalSubclassingCached] from a import A @@ -3036,10 +3035,10 @@ main:15: error: Unsupported left operand type for >= ("NoCmp") [case testAttrsIncrementalDunder] from a import A reveal_type(A) # N: Revealed type is "def (a: builtins.int) -> a.A" -reveal_type(A.__lt__) # N: Revealed type is "def [_AT] (self: _AT`-1, other: _AT`-1) -> builtins.bool" -reveal_type(A.__le__) # N: Revealed type is "def [_AT] (self: _AT`-1, other: _AT`-1) -> builtins.bool" -reveal_type(A.__gt__) # N: Revealed type is "def [_AT] (self: _AT`-1, other: _AT`-1) -> builtins.bool" -reveal_type(A.__ge__) # N: Revealed type is "def [_AT] (self: _AT`-1, other: _AT`-1) -> builtins.bool" +reveal_type(A.__lt__) # N: Revealed type is "def [_AT] (self: _AT`3, other: _AT`3) -> builtins.bool" +reveal_type(A.__le__) # N: Revealed type is "def [_AT] (self: _AT`4, other: _AT`4) -> builtins.bool" +reveal_type(A.__gt__) # N: Revealed type is "def [_AT] (self: _AT`5, other: _AT`5) -> builtins.bool" +reveal_type(A.__ge__) # N: Revealed type is "def [_AT] (self: _AT`6, other: _AT`6) -> builtins.bool" A(1) < A(2) A(1) <= A(2) @@ -3073,10 +3072,10 @@ class A: [stale] [out2] main:2: note: Revealed type is "def (a: builtins.int) -> a.A" -main:3: note: Revealed type is "def [_AT] (self: _AT`-1, other: _AT`-1) -> builtins.bool" -main:4: note: Revealed type is "def [_AT] (self: _AT`-1, other: _AT`-1) -> builtins.bool" -main:5: note: Revealed type is "def [_AT] (self: _AT`-1, other: _AT`-1) -> builtins.bool" -main:6: note: Revealed type is "def [_AT] (self: _AT`-1, other: _AT`-1) -> builtins.bool" +main:3: note: Revealed type is "def [_AT] (self: _AT`1, other: _AT`1) -> builtins.bool" +main:4: note: Revealed type is "def [_AT] (self: _AT`2, other: _AT`2) -> builtins.bool" +main:5: note: Revealed type is "def [_AT] (self: _AT`3, other: _AT`3) -> builtins.bool" +main:6: note: Revealed type is "def [_AT] (self: _AT`4, other: _AT`4) 
-> builtins.bool" main:15: error: Unsupported operand types for < ("A" and "int") main:16: error: Unsupported operand types for <= ("A" and "int") main:17: error: Unsupported operand types for > ("A" and "int") @@ -3457,7 +3456,6 @@ main:2: error: Cannot find implementation or library stub for module named "a" main:2: note: See https://mypy.readthedocs.io/en/stable/running_mypy.html#missing-imports [case testIncrementalInheritanceAddAnnotation] -# flags: --strict-optional import a [file a.py] import b @@ -3740,7 +3738,7 @@ import b [file b.py] -- This is a heinous hack, but we simulate having a invalid cache by clobbering -- the proto deps file with something with mtime mismatches. -[file ../.mypy_cache/3.7/@deps.meta.json.2] +[file ../.mypy_cache/3.8/@deps.meta.json.2] {"snapshot": {"__main__": "a7c958b001a45bd6a2a320f4e53c4c16", "a": "d41d8cd98f00b204e9800998ecf8427e", "b": "d41d8cd98f00b204e9800998ecf8427e", "builtins": "c532c89da517a4b779bcf7a964478d67"}, "deps_meta": {"@root": {"path": "@root.deps.json", "mtime": 0}, "__main__": {"path": "__main__.deps.json", "mtime": 0}, "a": {"path": "a.deps.json", "mtime": 0}, "b": {"path": "b.deps.json", "mtime": 0}, "builtins": {"path": "builtins.deps.json", "mtime": 0}}} [file ../.mypy_cache/.gitignore] # Another hack to not trigger a .gitignore creation failure "false positive" @@ -3775,7 +3773,7 @@ import b [file b.py] -- This is a heinous hack, but we simulate having a invalid cache by deleting -- the proto deps file. -[delete ../.mypy_cache/3.7/@deps.meta.json.2] +[delete ../.mypy_cache/3.8/@deps.meta.json.2] [file b.py.2] # uh -- Every file should get reloaded, since the cache was invalidated @@ -3965,10 +3963,10 @@ class A: tmp/b.py:3: note: Revealed type is "def (a: builtins.int) -> a.A" tmp/b.py:4: note: Revealed type is "def (builtins.object, builtins.object) -> builtins.bool" tmp/b.py:5: note: Revealed type is "def (builtins.object, builtins.object) -> builtins.bool" -tmp/b.py:6: note: Revealed type is "def [_DT] (self: _DT`-1, other: _DT`-1) -> builtins.bool" -tmp/b.py:7: note: Revealed type is "def [_DT] (self: _DT`-1, other: _DT`-1) -> builtins.bool" -tmp/b.py:8: note: Revealed type is "def [_DT] (self: _DT`-1, other: _DT`-1) -> builtins.bool" -tmp/b.py:9: note: Revealed type is "def [_DT] (self: _DT`-1, other: _DT`-1) -> builtins.bool" +tmp/b.py:6: note: Revealed type is "def [_DT] (self: _DT`1, other: _DT`1) -> builtins.bool" +tmp/b.py:7: note: Revealed type is "def [_DT] (self: _DT`2, other: _DT`2) -> builtins.bool" +tmp/b.py:8: note: Revealed type is "def [_DT] (self: _DT`3, other: _DT`3) -> builtins.bool" +tmp/b.py:9: note: Revealed type is "def [_DT] (self: _DT`4, other: _DT`4) -> builtins.bool" tmp/b.py:18: error: Unsupported operand types for < ("A" and "int") tmp/b.py:19: error: Unsupported operand types for <= ("A" and "int") tmp/b.py:20: error: Unsupported operand types for > ("A" and "int") @@ -5413,7 +5411,8 @@ reveal_type(z) [out] tmp/c.py:2: note: Revealed type is "a." [out2] -tmp/c.py:2: note: Revealed type is "a.A" +tmp/b.py:2: error: Cannot determine type of "y" +tmp/c.py:2: note: Revealed type is "Any" [case testIsInstanceAdHocIntersectionIncrementalUnreachaableToIntersection] import c @@ -5444,7 +5443,8 @@ from b import z reveal_type(z) [builtins fixtures/isinstance.pyi] [out] -tmp/c.py:2: note: Revealed type is "a.A" +tmp/b.py:2: error: Cannot determine type of "y" +tmp/c.py:2: note: Revealed type is "Any" [out2] tmp/c.py:2: note: Revealed type is "a." 
@@ -5506,7 +5506,6 @@ class Foo: class C: pass [case testIncrementalNestedNamedTuple] -# flags: --python-version 3.6 import a [file a.py] @@ -5756,7 +5755,6 @@ class C: [builtins fixtures/tuple.pyi] [case testNamedTupleUpdateNonRecursiveToRecursiveCoarse] -# flags: --strict-optional import c [file a.py] from b import M @@ -5799,7 +5797,6 @@ tmp/c.py:5: error: Incompatible types in assignment (expression has type "Option tmp/c.py:7: note: Revealed type is "Tuple[Union[Tuple[Union[..., None], builtins.int, fallback=b.M], None], builtins.int, fallback=a.N]" [case testTupleTypeUpdateNonRecursiveToRecursiveCoarse] -# flags: --strict-optional import c [file a.py] from b import M @@ -5832,7 +5829,6 @@ tmp/c.py:4: note: Revealed type is "Tuple[Union[Tuple[Union[..., None], builtins tmp/c.py:5: error: Incompatible types in assignment (expression has type "Optional[N]", variable has type "int") [case testTypeAliasUpdateNonRecursiveToRecursiveCoarse] -# flags: --strict-optional import c [file a.py] from b import M @@ -5865,7 +5861,6 @@ tmp/c.py:4: note: Revealed type is "Tuple[Union[Tuple[Union[..., None], builtins tmp/c.py:5: error: Incompatible types in assignment (expression has type "Optional[N]", variable has type "int") [case testTypedDictUpdateNonRecursiveToRecursiveCoarse] -# flags: --strict-optional import c [file a.py] from b import M @@ -6060,7 +6055,6 @@ tmp/m.py:9: note: Got: tmp/m.py:9: note: def update() -> str [case testAbstractBodyTurnsEmptyCoarse] -# flags: --strict-optional from b import Base class Sub(Base): @@ -6080,7 +6074,7 @@ class Base: def meth(self) -> int: ... [out] [out2] -main:6: error: Call to abstract method "meth" of "Base" with trivial body via super() is unsafe +main:5: error: Call to abstract method "meth" of "Base" with trivial body via super() is unsafe [case testNoCrashDoubleReexportFunctionEmpty] import m @@ -6331,7 +6325,7 @@ reveal_type(D.meth) reveal_type(D().meth) [out] [out2] -tmp/m.py:4: note: Revealed type is "def [Self <: lib.C] (self: Self`0, other: Self`0) -> Self`0" +tmp/m.py:4: note: Revealed type is "def [Self <: lib.C] (self: Self`1, other: Self`1) -> Self`1" tmp/m.py:5: note: Revealed type is "def (other: m.D) -> m.D" [case testIncrementalNestedGenericCallableCrash] diff --git a/test-data/unit/check-inference-context.test b/test-data/unit/check-inference-context.test index 625ab091a6a9..5f25b007dd47 100644 --- a/test-data/unit/check-inference-context.test +++ b/test-data/unit/check-inference-context.test @@ -13,9 +13,9 @@ def f() -> 'A[T]': pass class A(Generic[T]): pass class B: pass -ab = None # type: A[B] -ao = None # type: A[object] -b = None # type: B +ab: A[B] +ao: A[object] +b: B if int(): ao = f() @@ -28,9 +28,9 @@ from typing import TypeVar, Generic T = TypeVar('T') class A(Generic[T]): pass class B: pass -ab = None # type: A[B] -ao = None # type: A[object] -b = None # type: B +ab: A[B] +ao: A[object] +b: B if int(): ao = A() @@ -48,11 +48,11 @@ class A(Generic[T]): pass class B: pass class C: pass -b = None # type: B -c = None # type: C -ab = None # type: A[B] -ao = None # type: A[object] -ac = None # type: A[C] +b: B +c: C +ab: A[B] +ao: A[object] +ac: A[C] if int(): ac = f(b) # E: Argument 1 to "f" has incompatible type "B"; expected "C" @@ -77,10 +77,10 @@ if int(): from typing import TypeVar, Generic T = TypeVar('T') def g() -> None: - ao = None # type: A[object] - ab = None # type: A[B] - o = None # type: object - b = None # type: B + ao: A[object] + ab: A[B] + o: object + b: B x = f(o) if int(): @@ -111,9 +111,9 @@ class 
A(Generic[T]): pass from typing import TypeVar, Generic T = TypeVar('T') def g() -> None: - ao = None # type: A[object] - ab = None # type: A[B] - b = None # type: B + ao: A[object] + ab: A[B] + b: B x, y = f(b), f(b) if int(): ao = x # E: Incompatible types in assignment (expression has type "A[B]", variable has type "A[object]") @@ -130,9 +130,9 @@ class B: pass from typing import TypeVar, List, Generic T = TypeVar('T') def h() -> None: - ao = None # type: A[object] - ab = None # type: A[B] - b = None # type: B + ao: A[object] + ab: A[B] + b: B x, y = g(f(b)) if int(): ao = x # E: Incompatible types in assignment (expression has type "A[B]", variable has type "A[object]") @@ -162,10 +162,10 @@ def f(a: T) -> 'Tuple[A[T], A[T]]': pass class A(Generic[T]): pass class B: pass -b = None # type: B -o = None # type: object -ab = None # type: A[B] -ao = None # type: A[object] +b: B +o: object +ab: A[B] +ao: A[object] if int(): ab, ao = f(b) # E: Incompatible types in assignment (expression has type "A[B]", variable has type "A[object]") @@ -192,10 +192,10 @@ def h(a: S, b: T) -> 'Tuple[A[S], A[S], A[T], A[T]]': pass class A(Generic[T]): pass class B: pass -b = None # type: B -o = None # type: object -ab = None # type: A[B] -ao = None # type: A[object] +b: B +o: object +ab: A[B] +ao: A[object] if int(): ao, ao, ab = f(b, b) # E: Incompatible types in assignment (expression has type "A[B]", variable has type "A[object]") @@ -229,12 +229,12 @@ class A(Generic[T]): pass class B: pass class C(B): pass -ac = None # type: A[C] -ab = None # type: A[B] -ao = None # type: A[object] -b = None # type: B -c = None # type: C -o = None # type: object +ac: A[C] +ab: A[B] +ao: A[object] +b: B +c: C +o: object if int(): ab = f(b, o) # E: Argument 2 to "f" has incompatible type "object"; expected "B" @@ -266,11 +266,11 @@ def f(a: T) -> 'A[T]': pass class A(Generic[T]): pass class B: pass -aab = None # type: A[A[B]] -aao = None # type: A[A[object]] -ao = None # type: A[object] -b = None # type: B -o = None # type: object +aab: A[A[B]] +aao: A[A[object]] +ao: A[object] +b: B +o: object if int(): aab = f(f(o)) # E: Argument 1 to "f" has incompatible type "object"; expected "B" @@ -279,6 +279,7 @@ if int(): aab = f(f(b)) aao = f(f(b)) ao = f(f(b)) + [case testNestedGenericFunctionCall2] from typing import TypeVar, Generic T = TypeVar('T') @@ -289,10 +290,10 @@ def g(a: T) -> 'A[T]': pass class A(Generic[T]): pass class B: pass -ab = None # type: A[B] -ao = None # type: A[object] -b = None # type: B -o = None # type: object +ab: A[B] +ao: A[object] +b: B +o: object if int(): ab = f(g(o)) # E: Argument 1 to "g" has incompatible type "object"; expected "B" @@ -300,6 +301,7 @@ if int(): if int(): ab = f(g(b)) ao = f(g(b)) + [case testNestedGenericFunctionCall3] from typing import TypeVar, Generic T = TypeVar('T') @@ -310,10 +312,10 @@ def g(a: T) -> 'A[T]': pass class A(Generic[T]): pass class B: pass -ab = None # type: A[B] -ao = None # type: A[object] -b = None # type: B -o = None # type: object +ab: A[B] +ao: A[object] +b: B +o: object if int(): ab = f(g(o), g(b)) # E: Argument 1 to "g" has incompatible type "object"; expected "B" @@ -334,9 +336,9 @@ if int(): [case testMethodCallWithContextInference] from typing import TypeVar, Generic T = TypeVar('T') -o = None # type: object -b = None # type: B -c = None # type: C +o: object +b: B +c: C def f(a: T) -> 'A[T]': pass class A(Generic[T]): @@ -344,9 +346,9 @@ class A(Generic[T]): class B: pass class C(B): pass -ao = None # type: A[object] -ab = None # type: A[B] -ac = 
None # type: A[C] +ao: A[object] +ab: A[B] +ac: A[C] ab.g(f(o)) # E: Argument 1 to "f" has incompatible type "object"; expected "B" if int(): @@ -365,9 +367,9 @@ ab.g(f(c)) [case testEmptyListExpression] from typing import List -aa = None # type: List[A] -ao = None # type: List[object] -a = None # type: A +aa: List[A] +ao: List[object] +a: A def f(): a, aa, ao # Prevent redefinition a = [] # E: Incompatible types in assignment (expression has type "List[]", variable has type "A") @@ -379,15 +381,15 @@ class A: pass [builtins fixtures/list.pyi] [case testSingleItemListExpressions] -from typing import List -aa = None # type: List[A] -ab = None # type: List[B] -ao = None # type: List[object] -a = None # type: A -b = None # type: B +from typing import List, Optional +aa: List[Optional[A]] +ab: List[B] +ao: List[object] +a: A +b: B def f(): aa, ab, ao # Prevent redefinition -aa = [b] # E: List item 0 has incompatible type "B"; expected "A" +aa = [b] # E: List item 0 has incompatible type "B"; expected "Optional[A]" ab = [a] # E: List item 0 has incompatible type "A"; expected "B" aa = [a] @@ -402,11 +404,11 @@ class B: pass [case testMultiItemListExpressions] from typing import List -aa = None # type: List[A] -ab = None # type: List[B] -ao = None # type: List[object] -a = None # type: A -b = None # type: B +aa: List[A] +ab: List[B] +ao: List[object] +a: A +b: B def f(): ab, aa, ao # Prevent redefinition ab = [b, a] # E: List item 1 has incompatible type "A"; expected "B" @@ -433,6 +435,7 @@ class B: pass [out] [case testNestedListExpressions] +# flags: --no-strict-optional from typing import List aao = None # type: List[List[object]] aab = None # type: List[List[B]] @@ -596,17 +599,17 @@ y = C() # type: Any [case testInferLambdaArgumentTypeUsingContext] from typing import Callable -f = None # type: Callable[[B], A] +f: Callable[[B], A] if int(): f = lambda x: x.o f = lambda x: x.x # E: "B" has no attribute "x" class A: pass class B: - o = None # type: A + o: A [case testInferLambdaReturnTypeUsingContext] from typing import List, Callable -f = None # type: Callable[[], List[A]] +f: Callable[[], List[A]] if int(): f = lambda: [] f = lambda: [B()] # E: List item 0 has incompatible type "B"; expected "A" @@ -622,8 +625,10 @@ reveal_type((lambda x, y: x + y)(1, 2)) # N: Revealed type is "builtins.int" reveal_type((lambda s, i: s)(i=0, s='x')) # N: Revealed type is "Literal['x']?" reveal_type((lambda s, i: i)(i=0, s='x')) # N: Revealed type is "Literal[0]?" 
reveal_type((lambda x, s, i: x)(1.0, i=0, s='x')) # N: Revealed type is "builtins.float" -(lambda x, s, i: x)() # E: Too few arguments -(lambda: 0)(1) # E: Too many arguments +if object(): + (lambda x, s, i: x)() # E: Too few arguments +if object(): + (lambda: 0)(1) # E: Too many arguments -- varargs are not handled, but it should not crash reveal_type((lambda *k, s, i: i)(type, i=0, s='x')) # N: Revealed type is "Any" reveal_type((lambda s, *k, i: i)(i=0, s='x')) # N: Revealed type is "Any" @@ -680,6 +685,7 @@ def foo(arg: Callable[..., T]) -> None: pass foo(lambda: 1) [case testLambdaNoneInContext] +# flags: --no-strict-optional from typing import Callable def f(x: Callable[[], None]) -> None: pass def g(x: Callable[[], int]) -> None: pass @@ -687,14 +693,15 @@ f(lambda: None) g(lambda: None) [case testIsinstanceInInferredLambda] -from typing import TypeVar, Callable +# flags: --new-type-inference +from typing import TypeVar, Callable, Optional T = TypeVar('T') S = TypeVar('S') class A: pass class B(A): pass class C(A): pass -def f(func: Callable[[T], S], *z: T, r: S = None) -> S: pass -f(lambda x: 0 if isinstance(x, B) else 1) # E: Cannot infer type argument 1 of "f" +def f(func: Callable[[T], S], *z: T, r: Optional[S] = None) -> S: pass +reveal_type(f(lambda x: 0 if isinstance(x, B) else 1)) # N: Revealed type is "builtins.int" f(lambda x: 0 if isinstance(x, B) else 1, A())() # E: "int" not callable f(lambda x: x if isinstance(x, B) else B(), A(), r=B())() # E: "B" not callable f( @@ -739,7 +746,9 @@ from typing import List class A: pass class B: pass class C(B): pass -a, b, c = None, None, None # type: (List[A], List[B], List[C]) +a: List[A] +b: List[B] +c: List[C] if int(): a = a or [] if int(): @@ -762,7 +771,7 @@ from typing import List, TypeVar t = TypeVar('t') s = TypeVar('s') # Some type variables can be inferred using context, but not all of them. -a = None # type: List[A] +a: List[A] def f(a: s, b: t) -> List[s]: pass class A: pass class B: pass @@ -780,7 +789,7 @@ def f(a: s, b: t) -> List[s]: pass class A: pass class B: pass # Like testSomeTypeVarsInferredFromContext, but tvars in different order. 
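Many of the hunks in this file only migrate fixtures from the old `# type:` comment syntax to PEP 526 variable annotations. The two spellings below are equivalent for the type checker, but the annotated form does not bind a value at runtime, which avoids assigning None to a non-optional variable; the names here are illustrative only.

from typing import List

# Old style: the comment carries the type, and None is actually assigned,
# which strict optional checking flags as an incompatible assignment.
aa = None  # type: List[int]

# New style (PEP 526): only the annotation is recorded; no value is bound
# until the first real assignment.
ab: List[int]
ab = [1, 2, 3]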
-a = None # type: List[A] +a: List[A] if int(): a = f(A(), B()) if int(): @@ -800,8 +809,8 @@ map( [case testChainedAssignmentInferenceContexts] from typing import List -i = None # type: List[int] -s = None # type: List[str] +i: List[int] +s: List[str] if int(): i = i = [] if int(): @@ -821,10 +830,10 @@ a.x = [''] # E: List item 0 has incompatible type "str"; expected "int" [case testListMultiplyInContext] from typing import List -a = None # type: List[int] +a: List[int] if int(): - a = [None] * 3 - a = [''] * 3 # E: List item 0 has incompatible type "str"; expected "int" + a = [None] * 3 # E: List item 0 has incompatible type "None"; expected "int" + a = [''] * 3 # E: List item 0 has incompatible type "str"; expected "int" [builtins fixtures/list.pyi] [case testUnionTypeContext] @@ -847,7 +856,7 @@ g(f(a)) [case testStar2Context] from typing import Any, Dict, Tuple, Iterable -def f1(iterable: Iterable[Tuple[str, Any]] = None) -> None: +def f1(iterable: Iterable[Tuple[str, Any]] = ()) -> None: f2(**dict(iterable)) def f2(iterable: Iterable[Tuple[str, Any]], **kw: Any) -> None: pass @@ -913,11 +922,10 @@ T = TypeVar('T') def f(x: Union[T, List[int]]) -> Union[T, List[int]]: pass reveal_type(f(1)) # N: Revealed type is "Union[builtins.int, builtins.list[builtins.int]]" reveal_type(f([])) # N: Revealed type is "builtins.list[builtins.int]" -reveal_type(f(None)) # N: Revealed type is "builtins.list[builtins.int]" +reveal_type(f(None)) # N: Revealed type is "Union[None, builtins.list[builtins.int]]" [builtins fixtures/list.pyi] [case testUnionWithGenericTypeItemContextAndStrictOptional] -# flags: --strict-optional from typing import TypeVar, Union, List T = TypeVar('T') @@ -941,11 +949,10 @@ c = C[List[int]]() reveal_type(c.f('')) # N: Revealed type is "Union[builtins.list[builtins.int], builtins.str]" reveal_type(c.f([1])) # N: Revealed type is "builtins.list[builtins.int]" reveal_type(c.f([])) # N: Revealed type is "builtins.list[builtins.int]" -reveal_type(c.f(None)) # N: Revealed type is "builtins.list[builtins.int]" +reveal_type(c.f(None)) # N: Revealed type is "Union[builtins.list[builtins.int], None]" [builtins fixtures/list.pyi] [case testGenericMethodCalledInGenericContext] -# flags: --strict-optional from typing import TypeVar, Generic _KT = TypeVar('_KT') @@ -1213,7 +1220,6 @@ x: Iterable[Union[A, B]] = f(B()) [builtins fixtures/list.pyi] [case testWideOuterContextOptional] -# flags: --strict-optional from typing import Optional, Type, TypeVar class Custom: @@ -1227,7 +1233,6 @@ def b(x: T) -> Optional[T]: return a(x) [case testWideOuterContextOptionalGenericReturn] -# flags: --strict-optional from typing import Optional, Type, TypeVar, Iterable class Custom: @@ -1241,7 +1246,6 @@ def b(x: T) -> Iterable[Optional[T]]: return a(x) [case testWideOuterContextOptionalMethod] -# flags: --strict-optional from typing import Optional, Type, TypeVar class A: pass @@ -1274,7 +1278,6 @@ def bar(xs: List[S]) -> S: [builtins fixtures/list.pyi] [case testWideOuterContextOptionalTypeVarReturn] -# flags: --strict-optional from typing import Callable, Iterable, List, Optional, TypeVar class C: @@ -1290,7 +1293,6 @@ def g(l: List[C], x: str) -> Optional[C]: [builtins fixtures/list.pyi] [case testWideOuterContextOptionalTypeVarReturnLambda] -# flags: --strict-optional from typing import Callable, Iterable, List, Optional, TypeVar class C: @@ -1327,7 +1329,6 @@ y: List[str] = f([]) \ [builtins fixtures/list.pyi] [case testWideOuterContextNoArgs] -# flags: --strict-optional from typing import TypeVar, 
Optional T = TypeVar('T', bound=int) @@ -1336,7 +1337,6 @@ def f(x: Optional[T] = None) -> T: ... y: str = f() [case testWideOuterContextNoArgsError] -# flags: --strict-optional from typing import TypeVar, Optional, List T = TypeVar('T', bound=int) @@ -1419,7 +1419,6 @@ bar({1: 2}) [builtins fixtures/dict.pyi] [case testOptionalTypeNarrowedByGenericCall] -# flags: --strict-optional from typing import Dict, Optional d: Dict[str, str] = {} @@ -1431,7 +1430,6 @@ def foo(arg: Optional[str] = None) -> None: [builtins fixtures/dict.pyi] [case testOptionalTypeNarrowedByGenericCall2] -# flags: --strict-optional from typing import Dict, Optional d: Dict[str, str] = {} @@ -1443,7 +1441,6 @@ if x: [builtins fixtures/dict.pyi] [case testOptionalTypeNarrowedByGenericCall3] -# flags: --strict-optional from typing import Generic, TypeVar, Union T = TypeVar("T") @@ -1456,7 +1453,6 @@ def foo(arg: Union[str, int]) -> None: [builtins fixtures/isinstance.pyi] [case testOptionalTypeNarrowedByGenericCall4] -# flags: --strict-optional from typing import Optional, List, Generic, TypeVar T = TypeVar("T", covariant=True) diff --git a/test-data/unit/check-inference.test b/test-data/unit/check-inference.test index 166e173e7301..56d3fe2b4ce7 100644 --- a/test-data/unit/check-inference.test +++ b/test-data/unit/check-inference.test @@ -54,7 +54,7 @@ class B: pass [case testInferringLvarTypeFromGvar] -g = None # type: B +g: B def f() -> None: a = g @@ -78,7 +78,7 @@ def g(): pass [case testInferringExplicitDynamicTypeForLvar] from typing import Any -g = None # type: Any +g: Any def f(a: Any) -> None: b = g @@ -95,8 +95,8 @@ def f(a: Any) -> None: def f() -> None: a = A(), B() - aa = None # type: A - bb = None # type: B + aa: A + bb: B if int(): bb = a[0] # E: Incompatible types in assignment (expression has type "A", variable has type "B") aa = a[1] # E: Incompatible types in assignment (expression has type "B", variable has type "A") @@ -122,8 +122,8 @@ class A: pass from typing import TypeVar, Generic T = TypeVar('T') class A(Generic[T]): pass -a_i = None # type: A[int] -a_s = None # type: A[str] +a_i: A[int] +a_s: A[str] def f() -> None: a_int = A() # type: A[int] @@ -182,7 +182,7 @@ class B: pass [case testInferringLvarTypesInTupleAssignment] from typing import Tuple def f() -> None: - t = None # type: Tuple[A, B] + t: Tuple[A, B] a, b = t if int(): a = b # E: Incompatible types in assignment (expression has type "B", variable has type "A") @@ -200,7 +200,7 @@ class B: pass [case testInferringLvarTypesInNestedTupleAssignment1] from typing import Tuple def f() -> None: - t = None # type: Tuple[A, B] + t: Tuple[A, B] a1, (a, b) = A(), t if int(): a = b # E: Incompatible types in assignment (expression has type "B", variable has type "A") @@ -386,7 +386,7 @@ reveal_type(C) # N: Revealed type is "__main__.Foo" [case testInferringLvarTypesInMultiDefWithInvalidTuple] from typing import Tuple -t = None # type: Tuple[object, object, object] +t: Tuple[object, object, object] def f() -> None: a, b = t # Fail @@ -543,9 +543,9 @@ if int(): [case testInferSimpleGenericFunction] from typing import Tuple, TypeVar T = TypeVar('T') -a = None # type: A -b = None # type: B -c = None # type: Tuple[A, object] +a: A +b: B +c: Tuple[A, object] def id(a: T) -> T: pass @@ -569,8 +569,8 @@ from typing import TypeVar T = TypeVar('T') def f() -> None: a = id - b = None # type: int - c = None # type: str + b: int + c: str if int(): b = a(c) # E: Incompatible types in assignment (expression has type "str", variable has type "int") b = a(b) @@ 
-584,7 +584,7 @@ def id(x: T) -> T: from typing import TypeVar T = TypeVar('T') class A: pass -a = None # type: A +a: A def ff() -> None: x = f() # E: Need type annotation for "x" @@ -605,8 +605,8 @@ class A: pass class B(A): pass T = TypeVar('T') -a = None # type: A -b = None # type: B +a: A +b: B def f(a: T, b: T) -> T: pass @@ -629,10 +629,11 @@ def f(a: T, b: S) -> Tuple[T, S]: pass class A: pass class B: pass -a, b = None, None # type: (A, B) -taa = None # type: Tuple[A, A] -tab = None # type: Tuple[A, B] -tba = None # type: Tuple[B, A] +a: A +b: B +taa: Tuple[A, A] +tab: Tuple[A, B] +tba: Tuple[B, A] if int(): taa = f(a, b) # E: Argument 2 to "f" has incompatible type "B"; expected "A" @@ -651,9 +652,9 @@ if int(): [case testConstraintSolvingWithSimpleGenerics] from typing import TypeVar, Generic T = TypeVar('T') -ao = None # type: A[object] -ab = None # type: A[B] -ac = None # type: A[C] +ao: A[object] +ab: A[B] +ac: A[C] def f(a: 'A[T]') -> 'A[T]': pass @@ -683,8 +684,8 @@ if int(): [case testConstraintSolvingFailureWithSimpleGenerics] from typing import TypeVar, Generic T = TypeVar('T') -ao = None # type: A[object] -ab = None # type: A[B] +ao: A[object] +ab: A[B] def f(a: 'A[T]', b: 'A[T]') -> None: pass @@ -696,7 +697,9 @@ f(ao, ab) # E: Cannot infer type argument 1 of "f" f(ab, ao) # E: Cannot infer type argument 1 of "f" f(ao, ao) f(ab, ab) + [case testTypeInferenceWithCalleeDefaultArgs] +# flags: --no-strict-optional from typing import TypeVar T = TypeVar('T') a = None # type: A @@ -809,7 +812,9 @@ class C(A, I, J): pass def f(a: T, b: T) -> T: pass T = TypeVar('T') -a, i, j = None, None, None # type: (A, I, J) +a: A +i: I +j: J a = f(B(), C()) @@ -846,7 +851,7 @@ from typing import Callable, Type, TypeVar T = TypeVar('T') def f(x: Callable[..., T]) -> T: return x() class A: pass -x = None # type: Type[A] +x: Type[A] y = f(x) reveal_type(y) # N: Revealed type is "__main__.A" @@ -908,7 +913,6 @@ def call(c: Callable[[int], Any], i: int) -> None: [out] [case testCallableMeetAndJoin] -# flags: --python-version 3.6 from typing import Callable, Any, TypeVar class A: ... 
@@ -1031,7 +1035,8 @@ class A: pass class B: pass def d_ab() -> Dict[A, B]: return {} def d_aa() -> Dict[A, A]: return {} -a, b = None, None # type: (A, B) +a: A +b: B d = {a:b} if int(): d = d_ab() @@ -1041,7 +1046,8 @@ if int(): [case testSetLiteral] from typing import Any, Set -a, x = None, None # type: (int, Any) +a: int +x: Any def s_i() -> Set[int]: return set() def s_s() -> Set[str]: return set() s = {a} @@ -1113,7 +1119,8 @@ list_2 = [f, h] [case testInferenceOfFor1] -a, b = None, None # type: (A, B) +a: A +b: B class A: pass class B: pass @@ -1132,7 +1139,9 @@ class A: pass class B: pass class C: pass -a, b, c = None, None, None # type: (A, B, C) +a: A +b: B +c: C for x, (y, z) in [(A(), (B(), C()))]: b = x # E: Incompatible types in assignment (expression has type "A", variable has type "B") c = y # E: Incompatible types in assignment (expression has type "B", variable has type "C") @@ -1152,7 +1161,8 @@ for xxx, yyy in [(None, None)]: class A: pass class B: pass -a, b = None, None # type: (A, B) +a: A +b: B for x, y in [[A()]]: b = x # E: Incompatible types in assignment (expression has type "A", variable has type "B") @@ -1234,7 +1244,7 @@ class B: pass [case testMultipleAssignmentWithPartialDefinition] -a = None # type: A +a: A if int(): x, a = a, a if int(): @@ -1246,7 +1256,7 @@ if int(): class A: pass [case testMultipleAssignmentWithPartialDefinition2] -a = None # type: A +a: A if int(): a, x = [a, a] if int(): @@ -1260,7 +1270,7 @@ class A: pass [case testMultipleAssignmentWithPartialDefinition3] from typing import Any, cast -a = None # type: A +a: A if int(): x, a = cast(Any, a) if int(): @@ -1275,7 +1285,7 @@ class A: pass class A: pass class B: pass -if A: +if int(): a = A() if int(): a = A() @@ -1365,21 +1375,22 @@ class B: pass [builtins fixtures/list.pyi] [case testUninferableLambda] +# flags: --new-type-inference from typing import TypeVar, Callable X = TypeVar('X') def f(x: Callable[[X], X]) -> X: pass -y = f(lambda x: x) # E: Cannot infer type argument 1 of "f" +y = f(lambda x: x) # E: Need type annotation for "y" [case testUninferableLambdaWithTypeError] +# flags: --new-type-inference from typing import TypeVar, Callable X = TypeVar('X') def f(x: Callable[[X], X], y: str) -> X: pass -y = f(lambda x: x, 1) # Fail -[out] -main:4: error: Cannot infer type argument 1 of "f" -main:4: error: Argument 2 to "f" has incompatible type "int"; expected "str" +y = f(lambda x: x, 1) # E: Need type annotation for "y" \ + # E: Argument 2 to "f" has incompatible type "int"; expected "str" [case testInferLambdaNone] +# flags: --no-strict-optional from typing import Callable def f(x: Callable[[], None]) -> None: pass def g(x: Callable[[], int]) -> None: pass @@ -1391,7 +1402,6 @@ f(b) g(b) [case testLambdaDefaultContext] -# flags: --strict-optional from typing import Callable def f(a: Callable[..., None] = lambda *a, **k: None): pass @@ -1442,8 +1452,8 @@ def g(cond: bool) -> Any: [case testOrOperationWithGenericOperands] from typing import List -a = None # type: List[A] -o = None # type: List[object] +a: List[A] +o: List[object] a2 = a or [] if int(): a = a2 @@ -1464,7 +1474,7 @@ x.y # E: "object" has no attribute "y" [case testAccessDataAttributeBeforeItsTypeIsAvailable] -a = None # type: A +a: A a.x.y # E: Cannot determine type of "x" class A: def __init__(self) -> None: @@ -1481,7 +1491,7 @@ from typing import List, _promote class A: pass @_promote(A) class B: pass -a = None # type: List[A] +a: List[A] x1 = [A(), B()] x2 = [B(), A()] x3 = [B(), B()] @@ -1504,7 +1514,7 @@ class 
A: pass class B: pass @_promote(B) class C: pass -a = None # type: List[A] +a: List[A] x1 = [A(), C()] x2 = [C(), A()] x3 = [B(), C()] @@ -1800,7 +1810,6 @@ reveal_type(C().a) # N: Revealed type is "builtins.dict[builtins.int, builtins. [builtins fixtures/dict.pyi] [case testInferAttributeInitializedToNoneAndAssigned] -# flags: --strict-optional class C: def __init__(self) -> None: self.a = None @@ -1847,7 +1856,6 @@ reveal_type(C().a) # N: Revealed type is "builtins.dict[Any, Any]" [builtins fixtures/dict.pyi] [case testInferAttributeInitializedToNoneAndAssignedOtherMethod] -# flags: --strict-optional class C: def __init__(self) -> None: self.a = None @@ -1880,7 +1888,6 @@ reveal_type(C().a) # N: Revealed type is "builtins.dict[Any, Any]" [builtins fixtures/dict.pyi] [case testInferAttributeInitializedToNoneAndAssignedClassBody] -# flags: --strict-optional class C: a = None def __init__(self) -> None: @@ -1915,6 +1922,7 @@ reveal_type(dd) # N: Revealed type is "builtins.dict[builtins.str, builtins.int [builtins fixtures/dict.pyi] [case testInferFromEmptyDictWhenUsingInSpecialCase] +# flags: --no-strict-optional d = None if 'x' in d: # E: "None" has no attribute "__iter__" (not iterable) pass @@ -1937,6 +1945,7 @@ reveal_type(a) # N: Revealed type is "builtins.list[builtins.int]" [builtins fixtures/list.pyi] [case testInferSetTypeFromInplaceOr] +# flags: --no-strict-optional a = set() a |= {'x'} reveal_type(a) # N: Revealed type is "builtins.set[builtins.str]" @@ -1953,7 +1962,8 @@ def f() -> None: x = None else: x = 1 - x() # E: "int" not callable + x() # E: "int" not callable \ + # E: "None" not callable [out] [case testLocalVariablePartiallyTwiceInitializedToNone] @@ -1964,7 +1974,8 @@ def f() -> None: x = None else: x = 1 - x() # E: "int" not callable + x() # E: "int" not callable \ + # E: "None" not callable [out] [case testLvarInitializedToNoneWithoutType] @@ -1978,7 +1989,8 @@ def f() -> None: x = None if object(): x = 1 -x() # E: "int" not callable +x() # E: "int" not callable \ + # E: "None" not callable [case testPartiallyInitializedToNoneAndThenToPartialList] x = None @@ -2339,8 +2351,8 @@ reveal_type(A.g) # N: Revealed type is "def (x: builtins.str)" [case testUnificationRedundantUnion] from typing import Union -a = None # type: Union[int, str] -b = None # type: Union[str, tuple] +a: Union[int, str] +b: Union[str, tuple] def f(): pass def g(x: Union[int, str]): pass c = a if f() else b @@ -2522,7 +2534,6 @@ if bool(): [out] [case testDontMarkUnreachableAfterInferenceUninhabited2] -# flags: --strict-optional from typing import TypeVar, Optional T = TypeVar('T') def f(x: Optional[T] = None) -> T: pass @@ -2593,7 +2604,7 @@ x = '' reveal_type(x) # N: Revealed type is "builtins.str" [case testLocalPartialTypesWithGlobalInitializedToNoneStrictOptional] -# flags: --local-partial-types --strict-optional +# flags: --local-partial-types x = None def f() -> None: @@ -2745,7 +2756,7 @@ class B(A): reveal_type(B.x) # N: Revealed type is "None" [case testLocalPartialTypesWithInheritance2] -# flags: --local-partial-types --strict-optional +# flags: --local-partial-types class A: x: str @@ -2753,7 +2764,7 @@ class B(A): x = None # E: Incompatible types in assignment (expression has type "None", base class "A" defined the type as "str") [case testLocalPartialTypesWithAnyBaseClass] -# flags: --local-partial-types --strict-optional +# flags: --local-partial-types from typing import Any A: Any @@ -2765,7 +2776,7 @@ class C(B): y = None [case testLocalPartialTypesInMultipleMroItems] -# flags: 
--local-partial-types --strict-optional +# flags: --local-partial-types from typing import Optional class A: @@ -3090,7 +3101,6 @@ class B(A): x = 2 # E: Incompatible types in assignment (expression has type "int", base class "A" defined the type as "str") [case testInheritedAttributeStrictOptional] -# flags: --strict-optional class A: x: str @@ -3193,7 +3203,6 @@ x: Inv[int] reveal_type(f(x)) # N: Revealed type is "builtins.int" [case testOptionalTypeVarAgainstOptional] -# flags: --strict-optional from typing import Optional, TypeVar, Iterable, Iterator, List _T = TypeVar('_T') @@ -3240,7 +3249,6 @@ reveal_type(b) # N: Revealed type is "collections.defaultdict[builtins.int, buil [builtins fixtures/dict.pyi] [case testPartialDefaultDictListValueStrictOptional] -# flags: --strict-optional from collections import defaultdict a = defaultdict(list) a['x'].append(1) @@ -3317,7 +3325,6 @@ def g() -> None: pass reveal_type(f(g)) # N: Revealed type is "None" [case testInferCallableReturningNone2] -# flags: --strict-optional from typing import Callable, TypeVar T = TypeVar("T") @@ -3388,7 +3395,6 @@ def collection_from_dict_value(model: Type[T2]) -> None: [builtins fixtures/isinstancelist.pyi] [case testRegression11705_Strict] -# flags: --strict-optional # See: https://github.com/python/mypy/issues/11705 from typing import Dict, Optional, NamedTuple class C(NamedTuple): @@ -3438,7 +3444,6 @@ foo(("a", {"a": "b"}, "b")) [builtins fixtures/dict.pyi] [case testUseSupertypeAsInferenceContext] -# flags: --strict-optional from typing import List, Optional class B: @@ -3548,3 +3553,136 @@ class E(D): ... reveal_type([E(), D()]) # N: Revealed type is "builtins.list[__main__.D]" reveal_type([D(), E()]) # N: Revealed type is "builtins.list[__main__.D]" + +[case testCallableInferenceAgainstCallablePosVsStar] +from typing import TypeVar, Callable, Tuple + +T = TypeVar('T') +S = TypeVar('S') + +def f(x: Callable[[T, S], None]) -> Tuple[T, S]: ... +def g(*x: int) -> None: ... +reveal_type(f(g)) # N: Revealed type is "Tuple[builtins.int, builtins.int]" +[builtins fixtures/list.pyi] + +[case testCallableInferenceAgainstCallableStarVsPos] +from typing import TypeVar, Callable, Tuple, Protocol + +T = TypeVar('T', contravariant=True) +S = TypeVar('S', contravariant=True) + +class Call(Protocol[T, S]): + def __call__(self, __x: T, *args: S) -> None: ... + +def f(x: Call[T, S]) -> Tuple[T, S]: ... +def g(*x: int) -> None: ... +reveal_type(f(g)) # N: Revealed type is "Tuple[builtins.int, builtins.int]" +[builtins fixtures/list.pyi] + +[case testCallableInferenceAgainstCallableNamedVsStar] +from typing import TypeVar, Callable, Tuple, Protocol + +T = TypeVar('T', contravariant=True) +S = TypeVar('S', contravariant=True) + +class Call(Protocol[T, S]): + def __call__(self, *, x: T, y: S) -> None: ... + +def f(x: Call[T, S]) -> Tuple[T, S]: ... +def g(**kwargs: int) -> None: ... +reveal_type(f(g)) # N: Revealed type is "Tuple[builtins.int, builtins.int]" +[builtins fixtures/list.pyi] + +[case testCallableInferenceAgainstCallableStarVsNamed] +from typing import TypeVar, Callable, Tuple, Protocol + +T = TypeVar('T', contravariant=True) +S = TypeVar('S', contravariant=True) + +class Call(Protocol[T, S]): + def __call__(self, *, x: T, **kwargs: S) -> None: ... + +def f(x: Call[T, S]) -> Tuple[T, S]: ... 
+def g(**kwargs: int) -> None: pass +reveal_type(f(g)) # N: Revealed type is "Tuple[builtins.int, builtins.int]" +[builtins fixtures/list.pyi] + +[case testCallableInferenceAgainstCallableNamedVsNamed] +from typing import TypeVar, Callable, Tuple, Protocol + +T = TypeVar('T', contravariant=True) +S = TypeVar('S', contravariant=True) + +class Call(Protocol[T, S]): + def __call__(self, *, x: T, y: S) -> None: ... + +def f(x: Call[T, S]) -> Tuple[T, S]: ... + +# Note: order of names is different w.r.t. protocol +def g(*, y: int, x: str) -> None: pass +reveal_type(f(g)) # N: Revealed type is "Tuple[builtins.str, builtins.int]" +[builtins fixtures/list.pyi] + +[case testCallableInferenceAgainstCallablePosOnlyVsNamed] +from typing import TypeVar, Callable, Tuple, Protocol + +T = TypeVar('T', contravariant=True) +S = TypeVar('S', contravariant=True) + +class Call(Protocol[T]): + def __call__(self, *, x: T) -> None: ... + +def f(x: Call[T]) -> Tuple[T, T]: ... + +def g(__x: str) -> None: pass +reveal_type(f(g)) # N: Revealed type is "Tuple[, ]" \ + # E: Argument 1 to "f" has incompatible type "Callable[[str], None]"; expected "Call[]" +[builtins fixtures/list.pyi] + +[case testCallableInferenceAgainstCallableNamedVsPosOnly] +from typing import TypeVar, Callable, Tuple, Protocol + +T = TypeVar('T', contravariant=True) +S = TypeVar('S', contravariant=True) + +class Call(Protocol[T]): + def __call__(self, __x: T) -> None: ... + +def f(x: Call[T]) -> Tuple[T, T]: ... + +def g(*, x: str) -> None: pass +reveal_type(f(g)) # N: Revealed type is "Tuple[, ]" \ + # E: Argument 1 to "f" has incompatible type "Callable[[NamedArg(str, 'x')], None]"; expected "Call[]" +[builtins fixtures/list.pyi] + +[case testCallableInferenceAgainstCallablePosOnlyVsKwargs] +from typing import TypeVar, Callable, Tuple, Protocol + +T = TypeVar('T', contravariant=True) +S = TypeVar('S', contravariant=True) + +class Call(Protocol[T]): + def __call__(self, __x: T) -> None: ... + +def f(x: Call[T]) -> Tuple[T, T]: ... + +def g(**x: str) -> None: pass +reveal_type(f(g)) # N: Revealed type is "Tuple[, ]" \ + # E: Argument 1 to "f" has incompatible type "Callable[[KwArg(str)], None]"; expected "Call[]" +[builtins fixtures/list.pyi] + +[case testCallableInferenceAgainstCallableNamedVsArgs] +from typing import TypeVar, Callable, Tuple, Protocol + +T = TypeVar('T', contravariant=True) +S = TypeVar('S', contravariant=True) + +class Call(Protocol[T]): + def __call__(self, *, x: T) -> None: ... + +def f(x: Call[T]) -> Tuple[T, T]: ... 
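The new testCallableInferenceAgainstCallable* cases check inference when the expected type is a callback protocol whose __call__ uses star, keyword-only, or positional-only parameters. A condensed stub-style sketch of that setup, with hypothetical names and not part of the patch, looks like this:

from typing import Protocol, Tuple, TypeVar

T = TypeVar("T", contravariant=True)
S = TypeVar("S", contravariant=True)

class KeywordCall(Protocol[T, S]):
    # The expected callback takes two keyword-only parameters.
    def __call__(self, *, x: T, y: S) -> None: ...

def infer(cb: KeywordCall[T, S]) -> Tuple[T, S]: ...

def takes_any_keywords(**kwargs: int) -> None:
    pass

# A **kwargs callable can satisfy keyword-only parameters, so both T and S
# should be inferred as int (compare the NamedVsStar case above).
result = infer(takes_any_keywords)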
+ +def g(*args: str) -> None: pass +reveal_type(f(g)) # N: Revealed type is "Tuple[, ]" \ + # E: Argument 1 to "f" has incompatible type "Callable[[VarArg(str)], None]"; expected "Call[]" +[builtins fixtures/list.pyi] diff --git a/test-data/unit/check-inline-config.test b/test-data/unit/check-inline-config.test index 0cc2bd71270a..bedba811d95b 100644 --- a/test-data/unit/check-inline-config.test +++ b/test-data/unit/check-inline-config.test @@ -4,8 +4,8 @@ # mypy: disallow-any-generics, no-warn-no-return -from typing import List -def foo() -> List: # E: Missing type parameters for generic type "List" +from typing import List, Optional +def foo() -> Optional[List]: # E: Missing type parameters for generic type "List" 20 [builtins fixtures/list.pyi] @@ -15,8 +15,8 @@ def foo() -> List: # E: Missing type parameters for generic type "List" # mypy: disallow-any-generics # mypy: no-warn-no-return -from typing import List -def foo() -> List: # E: Missing type parameters for generic type "List" +from typing import List, Optional +def foo() -> Optional[List]: # E: Missing type parameters for generic type "List" 20 [builtins fixtures/list.pyi] @@ -25,8 +25,8 @@ def foo() -> List: # E: Missing type parameters for generic type "List" # mypy: disallow-any-generics=true, warn-no-return=0 -from typing import List -def foo() -> List: # E: Missing type parameters for generic type "List" +from typing import List, Optional +def foo() -> Optional[List]: # E: Missing type parameters for generic type "List" 20 [builtins fixtures/list.pyi] @@ -36,8 +36,8 @@ def foo() -> List: # E: Missing type parameters for generic type "List" # mypy: disallow-any-generics = true, warn-no-return = 0 -from typing import List -def foo() -> List: # E: Missing type parameters for generic type "List" +from typing import List, Optional +def foo() -> Optional[List]: # E: Missing type parameters for generic type "List" 20 [builtins fixtures/list.pyi] @@ -84,20 +84,20 @@ import a [file a.py] # mypy: disallow-any-generics, no-warn-no-return -from typing import List -def foo() -> List: +from typing import List, Optional +def foo() -> Optional[List]: 20 [file a.py.2] # mypy: no-warn-no-return -from typing import List -def foo() -> List: +from typing import List, Optional +def foo() -> Optional[List]: 20 [file a.py.3] -from typing import List -def foo() -> List: +from typing import List, Optional +def foo() -> Optional[List]: 20 [out] tmp/a.py:4: error: Missing type parameters for generic type "List" @@ -131,8 +131,9 @@ tmp/a.py:4: error: Missing type parameters for generic type "List" import a, b [file a.py] # mypy: no-warn-no-return +from typing import Optional -def foo() -> int: +def foo() -> Optional[int]: 20 [file b.py] @@ -164,7 +165,6 @@ main:1: error: Unrecognized option: skip_file = True main:1: error: Setting "strict" not supported in inline configuration: specify it in a configuration file instead, or set individual inline flags (see "mypy -h" for the list of flags enabled in strict mode) [case testInlineErrorCodes] -# flags: --strict-optional # mypy: enable-error-code="ignore-without-code,truthy-bool" class Foo: pass @@ -174,7 +174,7 @@ if foo: ... 
# E: "__main__.foo" has type "Foo" which does not implement __bool_ 42 + "no" # type: ignore # E: "type: ignore" comment without error code (consider "type: ignore[operator]" instead) [case testInlineErrorCodesOverrideConfig] -# flags: --strict-optional --config-file tmp/mypy.ini +# flags: --config-file tmp/mypy.ini import foo import tests.bar import tests.baz @@ -242,7 +242,6 @@ class C: self.x = 1 [case testIgnoreErrorsWithUnsafeSuperCall_no_empty] -# flags: --strict-optional from m import C diff --git a/test-data/unit/check-isinstance.test b/test-data/unit/check-isinstance.test index 2d010b8ba38d..361d4db78752 100644 --- a/test-data/unit/check-isinstance.test +++ b/test-data/unit/check-isinstance.test @@ -10,7 +10,7 @@ y = x [case testJoinAny] from typing import List, Any -x = None # type: List[Any] +x: List[Any] def foo() -> List[int]: pass def bar() -> List[str]: pass @@ -51,9 +51,9 @@ def f(x: Union[int, str, List]) -> None: [case testClassAttributeInitialization] class A: - x = None # type: int + x: int def __init__(self) -> None: - self.y = None # type: int + self.y: int z = self.x w = self.y @@ -71,7 +71,7 @@ def foo(x: Union[str, int]): y + [1] # E: List item 0 has incompatible type "int"; expected "str" z = 1 # E: Incompatible types in assignment (expression has type "int", variable has type "str") -x = None # type: int +x: int y = [x] [builtins fixtures/isinstancelist.pyi] @@ -124,7 +124,7 @@ x = 'a' [case testUnionMultiAssignment] from typing import Union -x = None # type: Union[int, str] +x: Union[int, str] if int(): x = 1 x = 'a' @@ -488,7 +488,7 @@ x.y # OK: x is known to be a B [case testIsInstanceBasic] from typing import Union -x = None # type: Union[int, str] +x: Union[int, str] if isinstance(x, str): x = x + 1 # E: Unsupported operand types for + ("str" and "int") x = x + 'a' @@ -499,7 +499,7 @@ else: [case testIsInstanceIndexing] from typing import Union -x = None # type: Union[int, str] +x: Union[int, str] j = [x] if isinstance(j[0], str): j[0] = j[0] + 'a' @@ -671,7 +671,7 @@ foo() from typing import Union def foo() -> None: - x = None # type: Union[int, str] + x: Union[int, str] if isinstance(x, int): for z in [1,2]: break @@ -686,7 +686,7 @@ foo() [case testIsInstanceThreeUnion] from typing import Union, List -x = None # type: Union[int, str, List[int]] +x: Union[int, str, List[int]] while bool(): if isinstance(x, int): @@ -706,7 +706,7 @@ x + [1] # E: Unsupported operand types for + ("int" and "List[int] [case testIsInstanceThreeUnion2] from typing import Union, List -x = None # type: Union[int, str, List[int]] +x: Union[int, str, List[int]] while bool(): if isinstance(x, int): x + 1 @@ -725,7 +725,7 @@ x + [1] # E: Unsupported operand types for + ("int" and "List[int] from typing import Union, List while bool(): - x = None # type: Union[int, str, List[int]] + x: Union[int, str, List[int]] def f(): x # Prevent redefinition x = 1 if isinstance(x, int): @@ -1787,6 +1787,7 @@ issubclass(x, (int, Iterable[int])) # E: Parameterized generics cannot be used [typing fixtures/typing-full.pyi] [case testIssubclassWithMetaclasses] +# flags: --no-strict-optional class FooMetaclass(type): ... class Foo(metaclass=FooMetaclass): ... class Bar: ... @@ -1800,7 +1801,6 @@ if issubclass(fm, Bar): [builtins fixtures/isinstance.pyi] [case testIssubclassWithMetaclassesStrictOptional] -# flags: --strict-optional class FooMetaclass(type): ... class BarMetaclass(type): ... class Foo(metaclass=FooMetaclass): ... 
@@ -1905,7 +1905,6 @@ def narrow_any_to_str_then_reassign_to_int() -> None: [builtins fixtures/isinstance.pyi] [case testNarrowTypeAfterInList] -# flags: --strict-optional from typing import List, Optional x: List[int] @@ -1923,7 +1922,6 @@ else: [out] [case testNarrowTypeAfterInListOfOptional] -# flags: --strict-optional from typing import List, Optional x: List[Optional[int]] @@ -1937,7 +1935,6 @@ else: [out] [case testNarrowTypeAfterInListNonOverlapping] -# flags: --strict-optional from typing import List, Optional x: List[str] @@ -1951,7 +1948,6 @@ else: [out] [case testNarrowTypeAfterInListNested] -# flags: --strict-optional from typing import List, Optional, Any x: Optional[int] @@ -1966,7 +1962,6 @@ if x in nested_any: [out] [case testNarrowTypeAfterInTuple] -# flags: --strict-optional from typing import Optional class A: pass class B(A): pass @@ -1981,7 +1976,6 @@ else: [out] [case testNarrowTypeAfterInNamedTuple] -# flags: --strict-optional from typing import NamedTuple, Optional class NT(NamedTuple): x: int @@ -1997,7 +1991,6 @@ else: [out] [case testNarrowTypeAfterInDict] -# flags: --strict-optional from typing import Dict, Optional x: Dict[str, int] y: Optional[str] @@ -2014,7 +2007,6 @@ else: [out] [case testNarrowTypeAfterInNoAnyOrObject] -# flags: --strict-optional from typing import Any, List, Optional x: List[Any] z: List[object] @@ -2034,7 +2026,6 @@ else: [out] [case testNarrowTypeAfterInUserDefined] -# flags: --strict-optional from typing import Container, Optional class C(Container[int]): @@ -2056,7 +2047,6 @@ else: [out] [case testNarrowTypeAfterInSet] -# flags: --strict-optional from typing import Optional, Set s: Set[str] @@ -2073,7 +2063,6 @@ else: [out] [case testNarrowTypeAfterInTypedDict] -# flags: --strict-optional from typing import Optional from mypy_extensions import TypedDict class TD(TypedDict): @@ -2149,7 +2138,6 @@ else: [builtins fixtures/isinstance.pyi] [case testIsInstanceInitialNoneCheckSkipsImpossibleCasesNoStrictOptional] -# flags: --strict-optional from typing import Optional, Union class A: pass @@ -2196,7 +2184,6 @@ def foo2(x: Optional[str]) -> None: [builtins fixtures/isinstance.pyi] [case testNoneCheckDoesNotNarrowWhenUsingTypeVars] -# flags: --strict-optional # Note: this test (and the following one) are testing checker.conditional_type_map: # if you set the 'prohibit_none_typevar_overlap' keyword argument to False when calling @@ -2248,7 +2235,6 @@ def bar(x: Union[List[str], List[int], None]) -> None: [builtins fixtures/isinstancelist.pyi] [case testNoneAndGenericTypesOverlapStrictOptional] -# flags: --strict-optional from typing import Union, Optional, List # This test is the same as the one above, except for strict-optional. 
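A recurring change across these test files is dropping explicit `# flags: --strict-optional` lines and occasionally adding `--no-strict-optional`, consistent with strict optional checking being the default behavior. As a brief illustration (not part of the patch, names invented), strict optional checking requires narrowing before an Optional value is used:

from typing import Optional

def first_char(s: Optional[str]) -> str:
    # Using s directly would be an error because it may be None;
    # an explicit check narrows it to str.
    if s is None:
        return ""
    return s[0]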
diff --git a/test-data/unit/check-kwargs.test b/test-data/unit/check-kwargs.test index 81fdc444aced..4beac047e278 100644 --- a/test-data/unit/check-kwargs.test +++ b/test-data/unit/check-kwargs.test @@ -54,6 +54,7 @@ f(b=[], a=A()) [builtins fixtures/list.pyi] [case testGivingArgumentAsPositionalAndKeywordArg] +# flags: --no-strict-optional import typing class A: pass class B: pass @@ -61,11 +62,13 @@ def f(a: 'A', b: 'B' = None) -> None: pass f(A(), a=A()) # E: "f" gets multiple values for keyword argument "a" [case testGivingArgumentAsPositionalAndKeywordArg2] +# flags: --no-strict-optional import typing class A: pass class B: pass def f(a: 'A' = None, b: 'B' = None) -> None: pass f(A(), a=A()) # E: "f" gets multiple values for keyword argument "a" + [case testPositionalAndKeywordForSameArg] # This used to crash in check_argument_count(). See #1095. def f(a: int): pass @@ -134,7 +137,7 @@ f(otter=A()) # E: Missing positional argument "other" in call to "f" [case testKeywordArgumentsWithDynamicallyTypedCallable] from typing import Any -f = None # type: Any +f: Any f(x=f(), z=None()) # E: "None" not callable f(f, zz=None()) # E: "None" not callable f(x=None) @@ -143,10 +146,12 @@ f(x=None) from typing import Callable class A: pass class B: pass -f = None # type: Callable[[A, B], None] +f: Callable[[A, B], None] f(a=A(), b=B()) # E: Unexpected keyword argument "a" # E: Unexpected keyword argument "b" f(A(), b=B()) # E: Unexpected keyword argument "b" + [case testKeywordOnlyArguments] +# flags: --no-strict-optional import typing class A: pass class B: pass @@ -172,7 +177,9 @@ i(A(), b=B()) i(A(), aa=A()) # E: Missing named argument "b" for "i" i(A(), b=B(), aa=A()) i(A(), aa=A(), b=B()) + [case testKeywordOnlyArgumentsFastparse] +# flags: --no-strict-optional import typing class A: pass @@ -290,10 +297,10 @@ from typing import Dict class A: pass class B: pass def f( **kwargs: 'A') -> None: pass -d = None # type: Dict[str, A] +d: Dict[str, A] f(**d) f(x=A(), **d) -d2 = None # type: Dict[str, B] +d2: Dict[str, B] f(**d2) # E: Argument 1 to "f" has incompatible type "**Dict[str, B]"; expected "A" f(x=A(), **d2) # E: Argument 2 to "f" has incompatible type "**Dict[str, B]"; expected "A" f(**{'x': B()}) # E: Argument 1 to "f" has incompatible type "**Dict[str, B]"; expected "A" @@ -324,9 +331,9 @@ reveal_type(formatter.__call__) # N: Revealed type is "def (message: builtins.s [case testPassingMappingForKeywordVarArg] from typing import Mapping def f(**kwargs: 'A') -> None: pass -b = None # type: Mapping -d = None # type: Mapping[A, A] -m = None # type: Mapping[str, A] +b: Mapping +d: Mapping[A, A] +m: Mapping[str, A] f(**d) # E: Keywords must be strings f(**m) f(**b) @@ -337,13 +344,12 @@ class A: pass from typing import Mapping class MappingSubclass(Mapping[str, str]): pass def f(**kwargs: 'A') -> None: pass -d = None # type: MappingSubclass +d: MappingSubclass f(**d) class A: pass [builtins fixtures/dict.pyi] [case testInvalidTypeForKeywordVarArg] -# flags: --strict-optional from typing import Dict, Any, Optional class A: pass def f(**kwargs: 'A') -> None: pass @@ -357,9 +363,9 @@ f(**kwargs) # E: Argument after ** must be a mapping, not "Optional[Any]" [case testPassingKeywordVarArgsToNonVarArgsFunction] from typing import Any, Dict def f(a: 'A', b: 'B') -> None: pass -d = None # type: Dict[str, Any] +d: Dict[str, Any] f(**d) -d2 = None # type: Dict[str, A] +d2: Dict[str, A] f(**d2) # E: Argument 1 to "f" has incompatible type "**Dict[str, A]"; expected "B" class A: pass class B: pass @@ 
-368,8 +374,8 @@ class B: pass [case testBothKindsOfVarArgs] from typing import Any, List, Dict def f(a: 'A', b: 'A') -> None: pass -l = None # type: List[Any] -d = None # type: Dict[Any, Any] +l: List[Any] +d: Dict[Any, Any] f(*l, **d) class A: pass [builtins fixtures/dict.pyi] @@ -380,8 +386,8 @@ def f1(a: 'A', b: 'A') -> None: pass def f2(a: 'A') -> None: pass def f3(a: 'A', **kwargs: 'A') -> None: pass def f4(**kwargs: 'A') -> None: pass -d = None # type: Dict[Any, Any] -d2 = None # type: Dict[Any, Any] +d: Dict[Any, Any] +d2: Dict[Any, Any] f1(**d, **d2) f2(**d, **d2) f3(**d, **d2) @@ -392,7 +398,7 @@ class A: pass [case testPassingKeywordVarArgsToVarArgsOnlyFunction] from typing import Any, Dict def f(*args: 'A') -> None: pass -d = None # type: Dict[Any, Any] +d: Dict[Any, Any] f(**d) class A: pass [builtins fixtures/dict.pyi] diff --git a/test-data/unit/check-lists.test b/test-data/unit/check-lists.test index 899b7c5b209f..77acdafd3319 100644 --- a/test-data/unit/check-lists.test +++ b/test-data/unit/check-lists.test @@ -3,8 +3,12 @@ [case testNestedListAssignment] from typing import List -a1, b1, c1 = None, None, None # type: (A, B, C) -a2, b2, c2 = None, None, None # type: (A, B, C) +a1: A +a2: A +b1: B +b2: B +c1: C +c2: C if int(): a1, [b1, c1] = a2, [b2, c2] @@ -21,7 +25,9 @@ class C: pass [case testNestedListAssignmentToTuple] from typing import List -a, b, c = None, None, None # type: (A, B, C) +a: A +b: B +c: C a, b = [a, b] a, b = [a] # E: Need more than 1 value to unpack (2 expected) @@ -35,7 +41,9 @@ class C: pass [case testListAssignmentFromTuple] from typing import List -a, b, c = None, None, None # type: (A, B, C) +a: A +b: B +c: C t = a, b if int(): @@ -55,7 +63,9 @@ class C: pass [case testListAssignmentUnequalAmountToUnpack] from typing import List -a, b, c = None, None, None # type: (A, B, C) +a: A +b: B +c: C def f() -> None: # needed because test parser tries to parse [a, b] as section header [a, b] = [a, b] @@ -79,7 +89,6 @@ reveal_type(c) # N: Revealed type is "builtins.list[builtins.int]" [builtins fixtures/list.pyi] [case testComprehensionShadowBinder] -# flags: --strict-optional def foo(x: object) -> None: if isinstance(x, str): [reveal_type(x) for x in [1, 2, 3]] # N: Revealed type is "builtins.int" diff --git a/test-data/unit/check-literal.test b/test-data/unit/check-literal.test index 1a529051259f..ecd4fc0a1f00 100644 --- a/test-data/unit/check-literal.test +++ b/test-data/unit/check-literal.test @@ -278,7 +278,7 @@ reveal_type(c_bytes_wrapper_alias) # N: Revealed type is "__main__.Wrap[Liter [builtins fixtures/tuple.pyi] [out] -[case testLiteralUnicodeWeirdCharacters] +[case testLiteralUnicodeWeirdCharacters-skip_path_normalization] from typing import Any from typing_extensions import Literal @@ -334,7 +334,7 @@ a1 = b3 a1 = c3 # E: Incompatible types in assignment (expression has type "Literal['¬b ∧ λ(p)']", variable has type "Literal['\x00¬b ∧ λ(p)']") [builtins fixtures/tuple.pyi] -[out skip-path-normalization] +[out] [case testLiteralRenamingImportWorks] from typing_extensions import Literal as Foo @@ -478,7 +478,7 @@ reveal_type(f5) # N: Revealed type is "def (x: Literal['foo']) -> Literal['foo' [builtins fixtures/tuple.pyi] [out] -[case testLiteralBasicStrUsageSlashes] +[case testLiteralBasicStrUsageSlashes-skip_path_normalization] from typing_extensions import Literal a: Literal[r"foo\nbar"] @@ -487,7 +487,7 @@ b: Literal["foo\nbar"] reveal_type(a) reveal_type(b) [builtins fixtures/tuple.pyi] -[out skip-path-normalization] +[out] main:6: note: 
Revealed type is "Literal['foo\\nbar']" main:7: note: Revealed type is "Literal['foo\nbar']" @@ -611,8 +611,7 @@ from typing_extensions import Literal a: (1, 2, 3) # E: Syntax error in type annotation \ # N: Suggestion: Use Tuple[T1, ..., Tn] instead of (T1, ..., Tn) b: Literal[[1, 2, 3]] # E: Parameter 1 of Literal[...] is invalid -c: [1, 2, 3] # E: Bracketed expression "[...]" is not valid as a type \ - # N: Did you mean "List[...]"? +c: [1, 2, 3] # E: Bracketed expression "[...]" is not valid as a type [builtins fixtures/tuple.pyi] [out] @@ -659,7 +658,6 @@ def foo(b: Literal[T]) -> Tuple[T]: pass # E: Parameter 1 of Literal[...] is i -- [case testLiteralMultipleValues] -# flags: --strict-optional from typing_extensions import Literal a: Literal[1, 2, 3] b: Literal["a", "b", "c"] @@ -689,7 +687,6 @@ reveal_type(b) # N: Revealed type is "Union[Literal[1], Literal[2], Literal[3]] [out] [case testLiteralNestedUsage] -# flags: --strict-optional from typing_extensions import Literal a: Literal[Literal[3], 4, Literal["foo"]] @@ -753,16 +750,16 @@ Foo = Literal[5] [case testLiteralBiasTowardsAssumingForwardReferencesForTypeComments] from typing_extensions import Literal -a = None # type: Foo +a: Foo reveal_type(a) # N: Revealed type is "__main__.Foo" -b = None # type: "Foo" +b: "Foo" reveal_type(b) # N: Revealed type is "__main__.Foo" -c = None # type: Literal["Foo"] +c: Literal["Foo"] reveal_type(c) # N: Revealed type is "Literal['Foo']" -d = None # type: Literal[Foo] # E: Parameter 1 of Literal[...] is invalid +d: Literal[Foo] # E: Parameter 1 of Literal[...] is invalid class Foo: pass [builtins fixtures/tuple.pyi] @@ -818,7 +815,6 @@ foo(c) # E: Argument 1 to "foo" has incompatible type "Literal[4, 'foo']"; expe [out] [case testLiteralCheckSubtypingStrictOptional] -# flags: --strict-optional from typing import Any, NoReturn from typing_extensions import Literal @@ -1760,7 +1756,7 @@ reveal_type(func1(identity(b))) # N: Revealed type is "builtins.int" -- [case testLiteralMeets] -from typing import TypeVar, List, Callable, Union +from typing import TypeVar, List, Callable, Union, Optional from typing_extensions import Literal a: Callable[[Literal[1]], int] @@ -1794,17 +1790,19 @@ def f2(x: Literal[1], y: Literal[2]) -> None: pass def f3(x: Literal[1], y: int) -> None: pass def f4(x: Literal[1], y: object) -> None: pass def f5(x: Literal[1], y: Union[Literal[1], Literal[2]]) -> None: pass +def f6(x: Optional[Literal[1]], y: Optional[Literal[2]]) -> None: pass reveal_type(unify(f1)) # N: Revealed type is "Literal[1]" -reveal_type(unify(f2)) # N: Revealed type is "None" +if object(): + reveal_type(unify(f2)) # N: Revealed type is "" reveal_type(unify(f3)) # N: Revealed type is "Literal[1]" reveal_type(unify(f4)) # N: Revealed type is "Literal[1]" reveal_type(unify(f5)) # N: Revealed type is "Literal[1]" +reveal_type(unify(f6)) # N: Revealed type is "None" [builtins fixtures/list.pyi] [out] [case testLiteralMeetsWithStrictOptional] -# flags: --strict-optional from typing import TypeVar, Callable, Union from typing_extensions import Literal @@ -1831,7 +1829,6 @@ reveal_type(unify(func)) # N: Revealed type is "" -- [case testLiteralIntelligentIndexingTuples] -# flags: --strict-optional from typing import Tuple, NamedTuple, Optional, Final from typing_extensions import Literal @@ -2013,7 +2010,7 @@ optional_keys: Literal["d", "e"] bad_keys: Literal["a", "bad"] reveal_type(test[good_keys]) # N: Revealed type is "Union[__main__.A, __main__.B]" -reveal_type(test.get(good_keys)) # N: Revealed type is 
"Union[__main__.A, __main__.B]" +reveal_type(test.get(good_keys)) # N: Revealed type is "Union[__main__.A, __main__.B, None]" reveal_type(test.get(good_keys, 3)) # N: Revealed type is "Union[__main__.A, Literal[3]?, __main__.B]" reveal_type(test.pop(optional_keys)) # N: Revealed type is "Union[__main__.D, __main__.E]" reveal_type(test.pop(optional_keys, 3)) # N: Revealed type is "Union[__main__.D, __main__.E, Literal[3]?]" @@ -2066,7 +2063,7 @@ x[bad_keys] # E: TypedDict "D1" has no key "d" \ # E: TypedDict "D2" has no key "a" reveal_type(x[good_keys]) # N: Revealed type is "Union[__main__.B, __main__.C]" -reveal_type(x.get(good_keys)) # N: Revealed type is "Union[__main__.B, __main__.C]" +reveal_type(x.get(good_keys)) # N: Revealed type is "Union[__main__.B, __main__.C, None]" reveal_type(x.get(good_keys, 3)) # N: Revealed type is "Union[__main__.B, Literal[3]?, __main__.C]" reveal_type(x.get(bad_keys)) # N: Revealed type is "builtins.object" reveal_type(x.get(bad_keys, 3)) # N: Revealed type is "builtins.object" @@ -2244,7 +2241,6 @@ force4(reveal_type(f.instancevar4)) # N: Revealed type is "None" [out] [case testLiteralFinalErasureInMutableDatastructures1] -# flags: --strict-optional from typing_extensions import Final var1: Final = [0, None] diff --git a/test-data/unit/check-modules.test b/test-data/unit/check-modules.test index 8a30237843a5..3da5996ed274 100644 --- a/test-data/unit/check-modules.test +++ b/test-data/unit/check-modules.test @@ -180,7 +180,8 @@ x = object() [case testChainedAssignmentAndImports] import m -i, s = None, None # type: (int, str) +i: int +s: str if int(): i = m.x if int(): @@ -566,7 +567,6 @@ x = 1 x = 1 [case testAssignToFuncDefViaImport] -# flags: --strict-optional # Errors differ with the new analyzer. (Old analyzer gave error on the # input, which is maybe better, but no error about f, which seems @@ -585,6 +585,7 @@ x = 1+0 [case testConditionalImportAndAssign] +# flags: --no-strict-optional try: from m import x except: @@ -676,6 +677,7 @@ class B(A): ... 
[case testImportVariableAndAssignNone] +# flags: --no-strict-optional try: from m import x except: @@ -684,6 +686,7 @@ except: x = 1 [case testImportFunctionAndAssignNone] +# flags: --no-strict-optional try: from m import f except: @@ -709,6 +712,7 @@ except: def f(): pass [case testAssignToFuncDefViaGlobalDecl2] +# flags: --no-strict-optional import typing from m import f def g() -> None: @@ -721,6 +725,7 @@ def f(): pass [out] [case testAssignToFuncDefViaNestedModules] +# flags: --no-strict-optional import m.n m.n.f = None m.n.f = 1 # E: Incompatible types in assignment (expression has type "int", variable has type "Callable[[], Any]") @@ -730,6 +735,7 @@ def f(): pass [out] [case testAssignToFuncDefViaModule] +# flags: --no-strict-optional import m m.f = None m.f = 1 # E: Incompatible types in assignment (expression has type "int", variable has type "Callable[[], Any]") @@ -738,6 +744,7 @@ def f(): pass [out] [case testConditionalImportAndAssignNoneToModule] +# flags: --no-strict-optional if object(): import m else: @@ -758,6 +765,7 @@ else: [out] [case testImportAndAssignToModule] +# flags: --no-strict-optional import m m = None m.f(1) # E: Argument 1 to "f" has incompatible type "int"; expected "str" @@ -930,8 +938,8 @@ tmp/a/b/__init__.py:3: error: Name "a" is not defined [case testSubmoduleMixingLocalAndQualifiedNames] from a.b import MyClass -val1 = None # type: a.b.MyClass # E: Name "a" is not defined -val2 = None # type: MyClass +val1: a.b.MyClass # E: Name "a" is not defined +val2: MyClass [file a/__init__.py] [file a/b.py] @@ -1501,7 +1509,7 @@ import bar from foo import * def bar(y: AnyAlias) -> None: pass -l = None # type: ListAlias[int] +l: ListAlias[int] reveal_type(l) [file foo.py] @@ -1532,7 +1540,7 @@ Row = Dict[str, int] [case testImportStarAliasGeneric] from y import * -notes = None # type: G[X] +notes: G[X] another = G[X]() second = XT[str]() last = XT[G]() @@ -1561,7 +1569,7 @@ from typing import Any def bar(x: Any, y: AnyCallable) -> Any: return 'foo' -cb = None # type: AnyCallable +cb: AnyCallable reveal_type(cb) # N: Revealed type is "def (*Any, **Any) -> Any" [file foo.py] @@ -2058,16 +2066,6 @@ def __getattr__(name): ... [builtins fixtures/module.pyi] -[case testModuleLevelGetattrNotStub36] -# flags: --python-version 3.6 -import has_getattr -reveal_type(has_getattr.any_attribute) # E: Module has no attribute "any_attribute" \ - # N: Revealed type is "Any" -[file has_getattr.py] -def __getattr__(name) -> str: ... - -[builtins fixtures/module.pyi] - [case testModuleLevelGetattrNotStub37] # flags: --python-version 3.7 @@ -2102,17 +2100,6 @@ def __getattr__(name: str) -> int: ... [builtins fixtures/module.pyi] -[case testModuleLevelGetattrImportFromNotStub36] -# flags: --python-version 3.6 -from non_stub import name # E: Module "non_stub" has no attribute "name" -reveal_type(name) # N: Revealed type is "Any" - -[file non_stub.py] -from typing import Any -def __getattr__(name: str) -> Any: ... 
- -[builtins fixtures/module.pyi] - [case testModuleLevelGetattrImportFromNotStub37] # flags: --python-version 3.7 from non_stub import name @@ -3134,26 +3121,28 @@ import google.cloud from google.cloud import x [case testErrorFromGoogleCloud] -import google.cloud +import google.cloud # E: Cannot find implementation or library stub for module named "google.cloud" \ + # E: Cannot find implementation or library stub for module named "google" from google.cloud import x -import google.non_existent +import google.non_existent # E: Cannot find implementation or library stub for module named "google.non_existent" from google.non_existent import x -[out] -main:1: error: Library stubs not installed for "google.cloud" -main:1: note: Hint: "python3 -m pip install types-google-cloud-ndb" -main:1: note: (or run "mypy --install-types" to install all missing stub packages) -main:1: note: See https://mypy.readthedocs.io/en/stable/running_mypy.html#missing-imports -main:1: error: Cannot find implementation or library stub for module named "google" -main:3: error: Cannot find implementation or library stub for module named "google.non_existent" + +import google.cloud.ndb # E: Library stubs not installed for "google.cloud.ndb" \ + # N: Hint: "python3 -m pip install types-google-cloud-ndb" \ + # N: (or run "mypy --install-types" to install all missing stub packages) \ + # N: See https://mypy.readthedocs.io/en/stable/running_mypy.html#missing-imports +from google.cloud import ndb [case testMissingSubmoduleOfInstalledStubPackage] import bleach.xyz from bleach.abc import fgh [file bleach/__init__.pyi] [out] -main:1: error: Cannot find implementation or library stub for module named "bleach.xyz" +main:1: error: Library stubs not installed for "bleach.xyz" +main:1: note: Hint: "python3 -m pip install types-bleach" +main:1: note: (or run "mypy --install-types" to install all missing stub packages) main:1: note: See https://mypy.readthedocs.io/en/stable/running_mypy.html#missing-imports -main:2: error: Cannot find implementation or library stub for module named "bleach.abc" +main:2: error: Library stubs not installed for "bleach.abc" [case testMissingSubmoduleOfInstalledStubPackageIgnored] # flags: --ignore-missing-imports diff --git a/test-data/unit/check-namedtuple.test b/test-data/unit/check-namedtuple.test index 6b9f139f541c..6e3628060617 100644 --- a/test-data/unit/check-namedtuple.test +++ b/test-data/unit/check-namedtuple.test @@ -2,7 +2,7 @@ from collections import namedtuple X = namedtuple('X', 'x y') -x = None # type: X +x: X a, b = x b = x[0] a = x[1] @@ -14,7 +14,7 @@ x[2] # E: Tuple index out of range from collections import namedtuple X = namedtuple('X', ('x', 'y')) -x = None # type: X +x: X a, b = x b = x[0] a = x[1] @@ -32,24 +32,13 @@ X = namedtuple('X', 'x, _y, _z') # E: "namedtuple()" field names cannot start w from collections import namedtuple X = namedtuple('X', 'x y') -x = None # type: X +x: X x.x x.y x.z # E: "X" has no attribute "z" [builtins fixtures/tuple.pyi] -[case testNamedTupleClassPython35] -# flags: --python-version 3.5 -from typing import NamedTuple - -class A(NamedTuple): - x = 3 # type: int -[builtins fixtures/tuple.pyi] -[out] -main:4: error: NamedTuple class syntax is only supported in Python 3.6 - -[case testNamedTupleClassInStubPython35] -# flags: --python-version 3.5 +[case testNamedTupleClassInStub] import foo [file foo.pyi] @@ -63,13 +52,13 @@ class A(NamedTuple): from collections import namedtuple X = namedtuple('X', 'x y') -x = None # type: X +x: X x.x = 5 # E: Property 
"x" defined in "X" is read-only x.y = 5 # E: Property "y" defined in "X" is read-only x.z = 5 # E: "X" has no attribute "z" class A(X): pass -a = None # type: A +a: A a.x = 5 # E: Property "x" defined in "X" is read-only a.y = 5 # E: Property "y" defined in "X" is read-only -- a.z = 5 # not supported yet @@ -292,7 +281,7 @@ A = NamedTuple('A', [('a', int), ('b', str)]) class B(A): pass a = A(1, '') b = B(1, '') -t = None # type: Tuple[int, str] +t: Tuple[int, str] if int(): b = a # E: Incompatible types in assignment (expression has type "A", variable has type "B") if int(): @@ -357,7 +346,7 @@ C(2).b from collections import namedtuple X = namedtuple('X', ['x', 'y']) -x = None # type: X +x: X reveal_type(x._asdict()) # N: Revealed type is "builtins.dict[builtins.str, Any]" [builtins fixtures/dict.pyi] @@ -366,7 +355,7 @@ reveal_type(x._asdict()) # N: Revealed type is "builtins.dict[builtins.str, Any from collections import namedtuple X = namedtuple('X', ['x', 'y']) -x = None # type: X +x: X reveal_type(x._replace()) # N: Revealed type is "Tuple[Any, Any, fallback=__main__.X]" x._replace(y=5) x._replace(x=3) @@ -391,7 +380,7 @@ X._replace(x=1, y=2) # E: Missing positional argument "_self" in call to "_repl from typing import NamedTuple X = NamedTuple('X', [('x', int), ('y', str)]) -x = None # type: X +x: X reveal_type(x._replace()) # N: Revealed type is "Tuple[builtins.int, builtins.str, fallback=__main__.X]" x._replace(x=5) x._replace(y=5) # E: Argument "y" to "_replace" of "X" has incompatible type "int"; expected "str" @@ -405,7 +394,7 @@ reveal_type(X._make([5, 'a'])) # N: Revealed type is "Tuple[builtins.int, built X._make('a b') # E: Argument 1 to "_make" of "X" has incompatible type "str"; expected "Iterable[Any]" -- # FIX: not a proper class method --- x = None # type: X +-- x: X -- reveal_type(x._make([5, 'a'])) # N: Revealed type is "Tuple[builtins.int, builtins.str, fallback=__main__.X]" -- x._make('a b') # E: Argument 1 to "_make" of "X" has incompatible type "str"; expected Iterable[Any] @@ -423,7 +412,7 @@ from typing import NamedTuple X = NamedTuple('X', [('x', int), ('y', str)]) reveal_type(X._source) # N: Revealed type is "builtins.str" -x = None # type: X +x: X reveal_type(x._source) # N: Revealed type is "builtins.str" [builtins fixtures/tuple.pyi] @@ -459,7 +448,7 @@ from typing import NamedTuple X = NamedTuple('X', [('x', int), ('y', str)]) reveal_type(X._field_types) # N: Revealed type is "builtins.dict[builtins.str, Any]" -x = None # type: X +x: X reveal_type(x._field_types) # N: Revealed type is "builtins.dict[builtins.str, Any]" [builtins fixtures/dict.pyi] @@ -472,7 +461,7 @@ def f(x: A) -> None: pass class B(NamedTuple('B', []), A): pass f(B()) -x = None # type: A +x: A if int(): x = B() @@ -482,7 +471,7 @@ def g(x: C) -> None: pass class D(NamedTuple('D', []), A): pass g(D()) # E: Argument 1 to "g" has incompatible type "D"; expected "C" -y = None # type: C +y: C if int(): y = D() # E: Incompatible types in assignment (expression has type "D", variable has type "C") [builtins fixtures/tuple.pyi] @@ -499,9 +488,9 @@ class A(NamedTuple('A', [('x', str)])): class B(A): pass -a = None # type: A +a: A a = A('').member() -b = None # type: B +b: B b = B('').member() a = B('') a = B('').member() @@ -511,14 +500,14 @@ a = B('').member() from typing import NamedTuple, TypeVar A = NamedTuple('A', [('x', str)]) reveal_type(A('hello')._replace(x='')) # N: Revealed type is "Tuple[builtins.str, fallback=__main__.A]" -a = None # type: A +a: A a = A('hello')._replace(x='') class 
B(A): pass reveal_type(B('hello')._replace(x='')) # N: Revealed type is "Tuple[builtins.str, fallback=__main__.B]" -b = None # type: B +b: B b = B('hello')._replace(x='') [builtins fixtures/tuple.pyi] @@ -938,7 +927,18 @@ class A: def __init__(self) -> None: self.b = NamedTuple('x', [('s', str), ('n', int)]) # E: NamedTuple type as an attribute is not supported -reveal_type(A().b) # N: Revealed type is "Any" +reveal_type(A().b) # N: Revealed type is "typing.NamedTuple" +[builtins fixtures/tuple.pyi] +[typing fixtures/typing-namedtuple.pyi] + + +[case testEmptyNamedTupleTypeRepr] +from typing import NamedTuple + +N = NamedTuple('N', []) +n: N +reveal_type(N) # N: Revealed type is "def () -> Tuple[(), fallback=__main__.N]" +reveal_type(n) # N: Revealed type is "Tuple[(), fallback=__main__.N]" [builtins fixtures/tuple.pyi] [case testNamedTupleWrongfile] @@ -983,7 +983,7 @@ class Both2(Other, Bar): ... class Both3(Biz, Other): ... def print_namedtuple(obj: NamedTuple) -> None: - reveal_type(obj.name) # N: Revealed type is "builtins.str" + reveal_type(obj._fields) # N: Revealed type is "builtins.tuple[builtins.str, ...]" b1: Bar b2: Baz @@ -1046,7 +1046,7 @@ def good6() -> NamedTuple: def bad1() -> NamedTuple: return 1 # E: Incompatible return value type (got "int", expected "NamedTuple") def bad2() -> NamedTuple: - return () # E: Incompatible return value type (got "Tuple[]", expected "NamedTuple") + return () # E: Incompatible return value type (got "Tuple[()]", expected "NamedTuple") def bad3() -> NamedTuple: return (1, 2) # E: Incompatible return value type (got "Tuple[int, int]", expected "NamedTuple") @@ -1337,3 +1337,12 @@ class SNT(NT[int]): ... reveal_type(SNT("test", 42).meth()) # N: Revealed type is "Tuple[builtins.str, builtins.int, fallback=__main__.SNT]" [builtins fixtures/tuple.pyi] [typing fixtures/typing-namedtuple.pyi] + +[case testNoCrashUnsupportedNamedTuple] +from typing import NamedTuple +class Test: + def __init__(self, field) -> None: + self.Item = NamedTuple("x", [(field, str)]) # E: NamedTuple type as an attribute is not supported + self.item: self.Item # E: Name "self.Item" is not defined +[builtins fixtures/tuple.pyi] +[typing fixtures/typing-namedtuple.pyi] diff --git a/test-data/unit/check-narrowing.test b/test-data/unit/check-narrowing.test index c329ccf840a8..291f73a45230 100644 --- a/test-data/unit/check-narrowing.test +++ b/test-data/unit/check-narrowing.test @@ -747,7 +747,6 @@ def test3(switch: FlipFlopEnum) -> None: [builtins fixtures/primitives.pyi] [case testNarrowingEqualityRequiresExplicitStrLiteral] -# flags: --strict-optional from typing_extensions import Literal, Final A_final: Final = "A" @@ -794,7 +793,6 @@ reveal_type(x_union) # N: Revealed type is "Union[Literal['A'], Literal['B' [builtins fixtures/primitives.pyi] [case testNarrowingEqualityRequiresExplicitEnumLiteral] -# flags: --strict-optional from typing import Union from typing_extensions import Literal, Final from enum import Enum @@ -879,7 +877,7 @@ else: [builtins fixtures/primitives.pyi] [case testNarrowingEqualityDisabledForCustomEqualityChain] -# flags: --strict-optional --strict-equality --warn-unreachable +# flags: --strict-equality --warn-unreachable from typing import Union from typing_extensions import Literal @@ -916,7 +914,7 @@ else: [builtins fixtures/primitives.pyi] [case testNarrowingUnreachableCases] -# flags: --strict-optional --strict-equality --warn-unreachable +# flags: --strict-equality --warn-unreachable from typing import Union from typing_extensions import Literal @@ 
-964,7 +962,7 @@ else: [builtins fixtures/primitives.pyi] [case testNarrowingUnreachableCases2] -# flags: --strict-optional --strict-equality --warn-unreachable +# flags: --strict-equality --warn-unreachable from typing import Union from typing_extensions import Literal @@ -1064,7 +1062,6 @@ else: [builtins fixtures/primitives.pyi] [case testNarrowingBooleanIdentityCheck] -# flags: --strict-optional from typing import Optional from typing_extensions import Literal @@ -1087,7 +1084,6 @@ else: [builtins fixtures/primitives.pyi] [case testNarrowingBooleanTruthiness] -# flags: --strict-optional from typing import Optional from typing_extensions import Literal @@ -1109,7 +1105,6 @@ reveal_type(opt_bool_val) # N: Revealed type is "Union[builtins.bool, None]" [builtins fixtures/primitives.pyi] [case testNarrowingBooleanBoolOp] -# flags: --strict-optional from typing import Optional from typing_extensions import Literal @@ -1138,7 +1133,6 @@ reveal_type(x) # N: Revealed type is "builtins.bool" [builtins fixtures/primitives.pyi] [case testNarrowingTypedDictUsingEnumLiteral] -# flags: --python-version 3.6 from typing import Union from typing_extensions import TypedDict, Literal from enum import Enum @@ -1162,7 +1156,6 @@ def f(d: Union[Foo, Bar]) -> None: [builtins fixtures/dict.pyi] [case testNarrowingUsingMetaclass] -# flags: --strict-optional from typing import Type class M(type): @@ -1182,7 +1175,6 @@ def f(t: Type[C]) -> None: reveal_type(t) # N: Revealed type is "Type[__main__.C]" [case testNarrowingUsingTypeVar] -# flags: --strict-optional from typing import Type, TypeVar class A: pass @@ -1269,3 +1261,76 @@ def g() -> None: def foo(): ... foo() [builtins fixtures/dict.pyi] + + +[case testNarrowingOptionalEqualsNone] +from typing import Optional + +class A: ... + +val: Optional[A] + +if val == None: + reveal_type(val) # N: Revealed type is "Union[__main__.A, None]" +else: + reveal_type(val) # N: Revealed type is "Union[__main__.A, None]" +if val != None: + reveal_type(val) # N: Revealed type is "Union[__main__.A, None]" +else: + reveal_type(val) # N: Revealed type is "Union[__main__.A, None]" + +if val in (None,): + reveal_type(val) # N: Revealed type is "Union[__main__.A, None]" +else: + reveal_type(val) # N: Revealed type is "Union[__main__.A, None]" +if val not in (None,): + reveal_type(val) # N: Revealed type is "Union[__main__.A, None]" +else: + reveal_type(val) # N: Revealed type is "Union[__main__.A, None]" +[builtins fixtures/primitives.pyi] + +[case testNarrowingWithTupleOfTypes] +from typing import Tuple, Type + +class Base: ... + +class Impl1(Base): ... +class Impl2(Base): ... + +impls: Tuple[Type[Base], ...] = (Impl1, Impl2) +some: object + +if isinstance(some, impls): + reveal_type(some) # N: Revealed type is "__main__.Base" +else: + reveal_type(some) # N: Revealed type is "builtins.object" + +raw: Tuple[type, ...] +if isinstance(some, raw): + reveal_type(some) # N: Revealed type is "builtins.object" +else: + reveal_type(some) # N: Revealed type is "builtins.object" +[builtins fixtures/dict.pyi] + + +[case testNarrowingWithTupleOfTypesPy310Plus] +# flags: --python-version 3.10 +class Base: ... + +class Impl1(Base): ... +class Impl2(Base): ... + +some: int | Base + +impls: tuple[type[Base], ...] = (Impl1, Impl2) +if isinstance(some, impls): + reveal_type(some) # N: Revealed type is "__main__.Base" +else: + reveal_type(some) # N: Revealed type is "Union[builtins.int, __main__.Base]" + +raw: tuple[type, ...] 
+if isinstance(some, raw): + reveal_type(some) # N: Revealed type is "Union[builtins.int, __main__.Base]" +else: + reveal_type(some) # N: Revealed type is "Union[builtins.int, __main__.Base]" +[builtins fixtures/dict.pyi] diff --git a/test-data/unit/check-native-int.test b/test-data/unit/check-native-int.test index 1e945d0af27d..30314eebcb31 100644 --- a/test-data/unit/check-native-int.test +++ b/test-data/unit/check-native-int.test @@ -69,7 +69,6 @@ reveal_type(join(a, n64)) # N: Revealed type is "Any" [builtins fixtures/dict.pyi] [case testNativeIntMeets] -# flags: --strict-optional from typing import TypeVar, Callable, Any from mypy_extensions import i32, i64 @@ -87,8 +86,10 @@ reveal_type(meet(f32, f)) # N: Revealed type is "mypy_extensions.i32" reveal_type(meet(f, f32)) # N: Revealed type is "mypy_extensions.i32" reveal_type(meet(f64, f)) # N: Revealed type is "mypy_extensions.i64" reveal_type(meet(f, f64)) # N: Revealed type is "mypy_extensions.i64" -reveal_type(meet(f32, f64)) # N: Revealed type is "" -reveal_type(meet(f64, f32)) # N: Revealed type is "" +if object(): + reveal_type(meet(f32, f64)) # N: Revealed type is "" +if object(): + reveal_type(meet(f64, f32)) # N: Revealed type is "" reveal_type(meet(f, fa)) # N: Revealed type is "builtins.int" reveal_type(meet(f32, fa)) # N: Revealed type is "mypy_extensions.i32" @@ -128,7 +129,6 @@ reveal_type(y) # N: Revealed type is "builtins.int" [builtins fixtures/dict.pyi] [case testNativeIntFloatConversion] -# flags: --strict-optional from typing import TypeVar, Callable from mypy_extensions import i32 @@ -148,8 +148,10 @@ def meet(c1: Callable[[T], None], c2: Callable[[T], None]) -> T: def ff(x: float) -> None: pass def fi32(x: i32) -> None: pass -reveal_type(meet(ff, fi32)) # N: Revealed type is "" -reveal_type(meet(fi32, ff)) # N: Revealed type is "" +if object(): + reveal_type(meet(ff, fi32)) # N: Revealed type is "" +if object(): + reveal_type(meet(fi32, ff)) # N: Revealed type is "" [builtins fixtures/dict.pyi] [case testNativeIntForLoopRange] diff --git a/test-data/unit/check-newsemanal.test b/test-data/unit/check-newsemanal.test index 99f4141a4d64..ff8d346e74a1 100644 --- a/test-data/unit/check-newsemanal.test +++ b/test-data/unit/check-newsemanal.test @@ -992,7 +992,7 @@ class SubO(Out): pass o: SubO -reveal_type(SubO._make) # N: Revealed type is "def (iterable: typing.Iterable[Any], *, new: Any =, len: Any =) -> Tuple[Tuple[builtins.str, __main__.Other, fallback=__main__.In], __main__.Other, fallback=__main__.SubO]" +reveal_type(SubO._make) # N: Revealed type is "def (iterable: typing.Iterable[Any]) -> Tuple[Tuple[builtins.str, __main__.Other, fallback=__main__.In], __main__.Other, fallback=__main__.SubO]" reveal_type(o._replace(y=Other())) # N: Revealed type is "Tuple[Tuple[builtins.str, __main__.Other, fallback=__main__.In], __main__.Other, fallback=__main__.SubO]" [builtins fixtures/tuple.pyi] @@ -1009,7 +1009,7 @@ o: Out reveal_type(o) # N: Revealed type is "Tuple[Tuple[builtins.str, __main__.Other, fallback=__main__.In], __main__.Other, fallback=__main__.Out]" reveal_type(o.x) # N: Revealed type is "Tuple[builtins.str, __main__.Other, fallback=__main__.In]" reveal_type(o.x.t) # N: Revealed type is "__main__.Other" -reveal_type(Out._make) # N: Revealed type is "def (iterable: typing.Iterable[Any], *, new: Any =, len: Any =) -> Tuple[Tuple[builtins.str, __main__.Other, fallback=__main__.In], __main__.Other, fallback=__main__.Out]" +reveal_type(Out._make) # N: Revealed type is "def (iterable: typing.Iterable[Any]) -> 
Tuple[Tuple[builtins.str, __main__.Other, fallback=__main__.In], __main__.Other, fallback=__main__.Out]" [builtins fixtures/tuple.pyi] [case testNewAnalyzerIncompleteRefShadowsBuiltin1] @@ -2149,6 +2149,7 @@ x: C class C: def frob(self, foos: Dict[Any, Foos]) -> None: foo = foos.get(1) + assert foo dict(foo) [builtins fixtures/dict.pyi] @@ -2570,18 +2571,6 @@ import n [file n.pyi] class C: pass -[case testNewAnalyzerModuleGetAttrInPython36] -# flags: --python-version 3.6 -import m -import n - -x: m.n.C # E: Name "m.n.C" is not defined -y: n.D # E: Name "n.D" is not defined -[file m.py] -import n -[file n.py] -def __getattr__(x): pass - [case testNewAnalyzerModuleGetAttrInPython37] # flags: --python-version 3.7 import m @@ -3218,8 +3207,7 @@ class User: self.first_name = value def __init__(self, name: str) -> None: - self.name = name # E: Cannot assign to a method \ - # E: Incompatible types in assignment (expression has type "str", variable has type "Callable[..., Any]") + self.name = name # E: Cannot assign to a method [case testNewAnalyzerMemberNameMatchesTypedDict] from typing import Union, Any diff --git a/test-data/unit/check-newsyntax.test b/test-data/unit/check-newsyntax.test index 63284c34bd8b..3ed4c6d3d8e2 100644 --- a/test-data/unit/check-newsyntax.test +++ b/test-data/unit/check-newsyntax.test @@ -1,15 +1,8 @@ -[case testNewSyntaxRequire36] -# flags: --python-version 3.5 -x: int = 5 # E: Variable annotation syntax is only supported in Python 3.6 and greater -[out] - [case testNewSyntaxSyntaxError] -# flags: --python-version 3.6 x: int: int # E: invalid syntax [out] [case testNewSyntaxBasics] -# flags: --python-version 3.6 x: int x = 5 y: int = 5 @@ -19,11 +12,10 @@ a = 5 # E: Incompatible types in assignment (expression has type "int", variabl b: str = 5 # E: Incompatible types in assignment (expression has type "int", variable has type "str") zzz: int -zzz: str # E: Name "zzz" already defined on line 10 +zzz: str # E: Name "zzz" already defined on line 9 [out] [case testNewSyntaxWithDict] -# flags: --python-version 3.6 from typing import Dict, Any d: Dict[int, str] = {} @@ -34,7 +26,6 @@ d['ab'] = 'ab' # E: Invalid index type "str" for "Dict[int, str]"; expected typ [out] [case testNewSyntaxWithRevealType] -# flags: --python-version 3.6 from typing import Dict def tst_local(dct: Dict[int, T]) -> Dict[T, int]: @@ -46,7 +37,6 @@ reveal_type(tst_local({1: 'a'})) # N: Revealed type is "builtins.dict[builtins. 
[out] [case testNewSyntaxWithInstanceVars] -# flags: --python-version 3.6 class TstInstance: a: str def __init__(self) -> None: @@ -59,20 +49,17 @@ TstInstance().a = 'ab' [out] [case testNewSyntaxWithClassVars] -# flags: --strict-optional --python-version 3.6 class CCC: a: str = None # E: Incompatible types in assignment (expression has type "None", variable has type "str") [out] [case testNewSyntaxWithStrictOptional] -# flags: --strict-optional --python-version 3.6 strict: int strict = None # E: Incompatible types in assignment (expression has type "None", variable has type "int") strict2: int = None # E: Incompatible types in assignment (expression has type "None", variable has type "int") [out] [case testNewSyntaxWithStrictOptionalFunctions] -# flags: --strict-optional --python-version 3.6 def f() -> None: x: int if int(): @@ -80,7 +67,6 @@ def f() -> None: [out] [case testNewSyntaxWithStrictOptionalClasses] -# flags: --strict-optional --python-version 3.6 class C: def meth(self) -> None: x: int = None # E: Incompatible types in assignment (expression has type "None", variable has type "int") @@ -88,30 +74,18 @@ class C: [out] [case testNewSyntaxSpecialAssign] -# flags: --python-version 3.6 class X: x: str x[0]: int x.x: int [out] -main:4: error: Unexpected type declaration -main:4: error: Unsupported target for indexed assignment ("str") -main:5: error: Type cannot be declared in assignment to non-self attribute -main:5: error: "str" has no attribute "x" - -[case testNewSyntaxAsyncComprehensionError] -# flags: --python-version 3.5 -async def f(): - results = [i async for i in aiter() if i % 2] # E: Async comprehensions are only supported in Python 3.6 and greater - - -[case testNewSyntaxFstringError] -# flags: --python-version 3.5 -f'' # E: Format strings are only supported in Python 3.6 and greater +main:3: error: Unexpected type declaration +main:3: error: Unsupported target for indexed assignment ("str") +main:4: error: Type cannot be declared in assignment to non-self attribute +main:4: error: "str" has no attribute "x" [case testNewSyntaxFStringBasics] -# flags: --python-version 3.6 f'foobar' f'{"foobar"}' f'foo{"bar"}' @@ -123,22 +97,19 @@ a = f'{"foobar"}' [builtins fixtures/f_string.pyi] [case testNewSyntaxFStringExpressionsOk] -# flags: --python-version 3.6 f'.{1 + 1}.' 
f'.{1 + 1}.{"foo" + "bar"}' [builtins fixtures/f_string.pyi] [case testNewSyntaxFStringExpressionsErrors] -# flags: --python-version 3.6 f'{1 + ""}' f'.{1 + ""}' [builtins fixtures/f_string.pyi] [out] +main:1: error: Unsupported operand types for + ("int" and "str") main:2: error: Unsupported operand types for + ("int" and "str") -main:3: error: Unsupported operand types for + ("int" and "str") [case testNewSyntaxFStringParseFormatOptions] -# flags: --python-version 3.6 value = 10.5142 width = 10 precision = 4 @@ -146,7 +117,6 @@ f'result: {value:{width}.{precision}}' [builtins fixtures/f_string.pyi] [case testNewSyntaxFStringSingleField] -# flags: --python-version 3.6 v = 1 reveal_type(f'{v}') # N: Revealed type is "builtins.str" reveal_type(f'{1}') # N: Revealed type is "builtins.str" diff --git a/test-data/unit/check-overloading.test b/test-data/unit/check-overloading.test index 4209f4ec9164..ede4a2e4cf62 100644 --- a/test-data/unit/check-overloading.test +++ b/test-data/unit/check-overloading.test @@ -450,7 +450,8 @@ class C: pass from foo import * [file foo.pyi] from typing import overload -a, b = None, None # type: (A, B) +a: A +b: B if int(): b = f(a) # E: Incompatible types in assignment (expression has type "A", variable has type "B") if int(): @@ -492,7 +493,8 @@ class C: pass from foo import * [file foo.pyi] from typing import overload -a, b = None, None # type: (A, B) +a: A +b: B if int(): b = a.f(a) # E: Incompatible types in assignment (expression has type "A", variable has type "B") if int(): @@ -514,7 +516,8 @@ class B: pass from foo import * [file foo.pyi] from typing import overload -a, b = None, None # type: (A, B) +a: A +b: B if int(): a = f(a) if int(): @@ -549,7 +552,10 @@ from foo import * [file foo.pyi] from typing import overload, TypeVar, Generic t = TypeVar('t') -ab, ac, b, c = None, None, None, None # type: (A[B], A[C], B, C) +ab: A[B] +ac: A[C] +b: B +c: C if int(): b = f(ab) c = f(ac) @@ -569,7 +575,8 @@ class C: pass from foo import * [file foo.pyi] from typing import overload -a, b = None, None # type: (A, B) +a: A +b: B a = A(a) a = A(b) a = A(object()) # E: No overload variant of "A" matches argument type "object" \ @@ -589,8 +596,8 @@ class B: pass from foo import * [file foo.pyi] from typing import overload, Callable -o = None # type: object -a = None # type: A +o: object +a: A if int(): a = f # E: Incompatible types in assignment (expression has type overloaded function, variable has type "A") @@ -607,7 +614,8 @@ class A: pass from foo import * [file foo.pyi] from typing import overload -t, a = None, None # type: (type, A) +t: type +a: A if int(): a = A # E: Incompatible types in assignment (expression has type "Type[A]", variable has type "A") @@ -625,7 +633,8 @@ class B: pass from foo import * [file foo.pyi] from typing import overload -a, b = None, None # type: int, str +a: int +b: str if int(): a = A()[a] if int(): @@ -647,7 +656,9 @@ from foo import * [file foo.pyi] from typing import TypeVar, Generic, overload t = TypeVar('t') -a, b, c = None, None, None # type: (A, B, C[A]) +a: A +b: B +c: C[A] if int(): a = c[a] b = c[a] # E: Incompatible types in assignment (expression has type "A", variable has type "B") @@ -761,7 +772,8 @@ from typing import overload def f(t: type) -> 'A': pass @overload def f(t: 'A') -> 'B': pass -a, b = None, None # type: (A, B) +a: A +b: B if int(): a = f(A) if int(): @@ -1133,7 +1145,7 @@ def f(x: str) -> None: pass f(1.1) f('') f(1) -f(()) # E: No overload variant of "f" matches argument type "Tuple[]" \ +f(()) # E: No 
overload variant of "f" matches argument type "Tuple[()]" \ # N: Possible overload variants: \ # N: def f(x: float) -> None \ # N: def f(x: str) -> None @@ -1215,6 +1227,7 @@ f(*(1, '', 1))() # E: No overload variant of "f" matches argument type "Tuple[in [builtins fixtures/tuple.pyi] [case testPreferExactSignatureMatchInOverload] +# flags: --no-strict-optional from foo import * [file foo.pyi] from typing import overload, List @@ -2172,36 +2185,63 @@ def bar2(*x: int) -> int: ... [builtins fixtures/tuple.pyi] [case testOverloadDetectsPossibleMatchesWithGenerics] -from typing import overload, TypeVar, Generic +# flags: --strict-optional +from typing import overload, TypeVar, Generic, Optional, List T = TypeVar('T') +# The examples below are unsafe, but it is a quite common pattern +# so we ignore the possibility of type variables taking value `None` +# for the purpose of overload overlap checks. @overload -def foo(x: None, y: None) -> str: ... # E: Overloaded function signatures 1 and 2 overlap with incompatible return types +def foo(x: None, y: None) -> str: ... @overload def foo(x: T, y: T) -> int: ... def foo(x): ... +oi: Optional[int] +reveal_type(foo(None, None)) # N: Revealed type is "builtins.str" +reveal_type(foo(None, 42)) # N: Revealed type is "builtins.int" +reveal_type(foo(42, 42)) # N: Revealed type is "builtins.int" +reveal_type(foo(oi, None)) # N: Revealed type is "Union[builtins.int, builtins.str]" +reveal_type(foo(oi, 42)) # N: Revealed type is "builtins.int" +reveal_type(foo(oi, oi)) # N: Revealed type is "Union[builtins.int, builtins.str]" + +@overload +def foo_list(x: None) -> None: ... +@overload +def foo_list(x: T) -> List[T]: ... +def foo_list(x): ... + +reveal_type(foo_list(oi)) # N: Revealed type is "Union[builtins.list[builtins.int], None]" + # What if 'T' is 'object'? @overload -def bar(x: None, y: int) -> str: ... # E: Overloaded function signatures 1 and 2 overlap with incompatible return types +def bar(x: None, y: int) -> str: ... @overload def bar(x: T, y: T) -> int: ... def bar(x, y): ... class Wrapper(Generic[T]): @overload - def foo(self, x: None, y: None) -> str: ... # E: Overloaded function signatures 1 and 2 overlap with incompatible return types + def foo(self, x: None, y: None) -> str: ... @overload def foo(self, x: T, y: None) -> int: ... def foo(self, x): ... @overload - def bar(self, x: None, y: int) -> str: ... # E: Overloaded function signatures 1 and 2 overlap with incompatible return types + def bar(self, x: None, y: int) -> str: ... @overload def bar(self, x: T, y: T) -> int: ... def bar(self, x, y): ... +@overload +def baz(x: str, y: str) -> str: ... # E: Overloaded function signatures 1 and 2 overlap with incompatible return types +@overload +def baz(x: T, y: T) -> int: ... +def baz(x): ... +[builtins fixtures/tuple.pyi] + [case testOverloadFlagsPossibleMatches] from wrapper import * [file wrapper.pyi] @@ -3251,7 +3291,6 @@ f(x, B()) # E: Argument 1 to "f" has incompatible type "Union[A, B]"; expected [builtins fixtures/tuple.pyi] [case testOverloadInferUnionWithMixOfPositionalAndOptionalArgs] -# flags: --strict-optional from typing import overload, Union, Optional class A: ... 
@@ -3590,7 +3629,6 @@ reveal_type(g(b)) # N: Revealed type is "builtins.str" reveal_type(g(c)) # N: Revealed type is "builtins.str" [case testOverloadsAndNoneWithStrictOptional] -# flags: --strict-optional from typing import overload, Optional @overload @@ -3638,7 +3676,6 @@ reveal_type(mymap(f3, seq)) # N: Revealed type is "typing.Iterable[builtins.str [typing fixtures/typing-medium.pyi] [case testOverloadsNoneAndTypeVarsWithStrictOptional] -# flags: --strict-optional from typing import Callable, Iterable, TypeVar, overload, Optional T = TypeVar('T') @@ -3695,7 +3732,6 @@ def test_narrow_int() -> None: [typing fixtures/typing-medium.pyi] [case testOverloadsAndNoReturnNarrowTypeWithStrictOptional1] -# flags: --strict-optional from typing import overload, Union, NoReturn @overload @@ -3759,7 +3795,6 @@ def test_narrow_none() -> None: [typing fixtures/typing-medium.pyi] [case testOverloadsAndNoReturnNarrowTypeWithStrictOptional2] -# flags: --strict-optional from typing import overload, Union, TypeVar, NoReturn, Optional T = TypeVar('T') @@ -3823,7 +3858,6 @@ def test_narrow_none_v2() -> None: [typing fixtures/typing-medium.pyi] [case testOverloadsAndNoReturnNarrowTypeWithStrictOptional3] -# flags: --strict-optional from typing import overload, TypeVar, NoReturn, Optional @overload @@ -3989,7 +4023,7 @@ T = TypeVar('T') class FakeAttribute(Generic[T]): @overload - def dummy(self, instance: None, owner: Type[T]) -> 'FakeAttribute[T]': ... # E: Overloaded function signatures 1 and 2 overlap with incompatible return types + def dummy(self, instance: None, owner: Type[T]) -> 'FakeAttribute[T]': ... @overload def dummy(self, instance: T, owner: Type[T]) -> int: ... def dummy(self, instance: Optional[T], owner: Type[T]) -> Union['FakeAttribute[T]', int]: ... @@ -4635,7 +4669,6 @@ def none_second(x: int) -> int: return x [case testOverloadsWithNoneComingSecondIsOkInStrictOptional] -# flags: --strict-optional from typing import overload, Optional @overload @@ -4659,8 +4692,8 @@ def none_loose_impl(x: int) -> int: ... def none_loose_impl(x: int) -> int: return x [out] -main:22: error: Overloaded function implementation does not accept all possible arguments of signature 1 -main:22: error: Overloaded function implementation cannot produce return type of signature 1 +main:21: error: Overloaded function implementation does not accept all possible arguments of signature 1 +main:21: error: Overloaded function implementation cannot produce return type of signature 1 [case testTooManyUnionsException] from typing import overload, Union @@ -4745,7 +4778,7 @@ main:12: note: @overload main:12: note: def __add__(self, Other, /) -> B main:12: note: @overload main:12: note: def __add__(self, A, /) -> A -main:12: note: Overloaded operator methods cannot have wider argument types in overrides +main:12: note: Overloaded operator methods can't have wider argument types in overrides main:32: note: Revealed type is "__main__.Other" [case testOverloadErrorMessageManyMatches] @@ -5405,26 +5438,26 @@ if False: def f2(g): ... reveal_type(f2(A())) # N: Revealed type is "__main__.A" reveal_type(f2(C())) # E: No overload variant of "f2" matches argument type "C" \ - # N: Possible overload variants: \ - # N: def f2(g: A) -> A \ - # N: def f2(g: B) -> B \ - # N: Revealed type is "Any" + # N: Possible overload variants: \ + # N: def f2(g: A) -> A \ + # N: def f2(g: B) -> B \ + # N: Revealed type is "Any" @overload def f3(g: A) -> A: ... @overload def f3(g: B) -> B: ... 
-if maybe_true: # E: Condition cannot be inferred, unable to merge overloads \ +if maybe_true: # E: Condition can't be inferred, unable to merge overloads \ # E: Name "maybe_true" is not defined @overload def f3(g: C) -> C: ... def f3(g): ... reveal_type(f3(A())) # N: Revealed type is "__main__.A" reveal_type(f3(C())) # E: No overload variant of "f3" matches argument type "C" \ - # N: Possible overload variants: \ - # N: def f3(g: A) -> A \ - # N: def f3(g: B) -> B \ - # N: Revealed type is "Any" + # N: Possible overload variants: \ + # N: def f3(g: A) -> A \ + # N: def f3(g: B) -> B \ + # N: Revealed type is "Any" if True: @overload @@ -5731,10 +5764,10 @@ def f1(x): ... reveal_type(f1(A())) # N: Revealed type is "__main__.A" reveal_type(f1(B())) # N: Revealed type is "__main__.B" reveal_type(f1(D())) # E: No overload variant of "f1" matches argument type "D" \ - # N: Possible overload variants: \ - # N: def f1(x: A) -> A \ - # N: def f1(x: B) -> B \ - # N: Revealed type is "Any" + # N: Possible overload variants: \ + # N: def f1(x: A) -> A \ + # N: def f1(x: B) -> B \ + # N: Revealed type is "Any" @overload def f2(x: A) -> A: ... @@ -5751,14 +5784,14 @@ def f2(x): ... reveal_type(f2(A())) # N: Revealed type is "__main__.A" reveal_type(f2(B())) # N: Revealed type is "__main__.B" reveal_type(f2(C())) # E: No overload variant of "f2" matches argument type "C" \ - # N: Possible overload variants: \ - # N: def f2(x: A) -> A \ - # N: def f2(x: B) -> B \ - # N: Revealed type is "Any" + # N: Possible overload variants: \ + # N: def f2(x: A) -> A \ + # N: def f2(x: B) -> B \ + # N: Revealed type is "Any" @overload # E: Single overload definition, multiple required def f3(x: A) -> A: ... -if maybe_true: # E: Condition cannot be inferred, unable to merge overloads \ +if maybe_true: # E: Condition can't be inferred, unable to merge overloads \ # E: Name "maybe_true" is not defined @overload def f3(x: B) -> B: ... @@ -5771,13 +5804,13 @@ else: def f3(x): ... reveal_type(f3(A())) # N: Revealed type is "__main__.A" reveal_type(f3(B())) # E: No overload variant of "f3" matches argument type "B" \ - # N: Possible overload variant: \ - # N: def f3(x: A) -> A \ - # N: Revealed type is "Any" + # N: Possible overload variant: \ + # N: def f3(x: A) -> A \ + # N: Revealed type is "Any" @overload # E: Single overload definition, multiple required def f4(x: A) -> A: ... -if maybe_true: # E: Condition cannot be inferred, unable to merge overloads \ +if maybe_true: # E: Condition can't be inferred, unable to merge overloads \ # E: Name "maybe_true" is not defined @overload def f4(x: B) -> B: ... @@ -5787,9 +5820,10 @@ else: def f4(x): ... reveal_type(f4(A())) # N: Revealed type is "__main__.A" reveal_type(f4(B())) # E: No overload variant of "f4" matches argument type "B" \ - # N: Possible overload variant: \ - # N: def f4(x: A) -> A \ - # N: Revealed type is "Any" + # N: Possible overload variant: \ + # N: def f4(x: A) -> A \ + # N: Revealed type is "Any" + [case testOverloadIfElse3] # flags: --always-false False @@ -5817,10 +5851,10 @@ else: def f1(x): ... 
reveal_type(f1(A())) # N: Revealed type is "__main__.A" reveal_type(f1(B())) # E: No overload variant of "f1" matches argument type "B" \ - # N: Possible overload variants: \ - # N: def f1(x: A) -> A \ - # N: def f1(x: D) -> D \ - # N: Revealed type is "Any" + # N: Possible overload variants: \ + # N: def f1(x: A) -> A \ + # N: def f1(x: D) -> D \ + # N: Revealed type is "Any" reveal_type(f1(D())) # N: Revealed type is "__main__.D" @overload # E: Single overload definition, multiple required @@ -5828,7 +5862,7 @@ def f2(x: A) -> A: ... if False: @overload def f2(x: B) -> B: ... -elif maybe_true: # E: Condition cannot be inferred, unable to merge overloads \ +elif maybe_true: # E: Condition can't be inferred, unable to merge overloads \ # E: Name "maybe_true" is not defined @overload def f2(x: C) -> C: ... @@ -5838,13 +5872,13 @@ else: def f2(x): ... reveal_type(f2(A())) # N: Revealed type is "__main__.A" reveal_type(f2(C())) # E: No overload variant of "f2" matches argument type "C" \ - # N: Possible overload variant: \ - # N: def f2(x: A) -> A \ - # N: Revealed type is "Any" + # N: Possible overload variant: \ + # N: def f2(x: A) -> A \ + # N: Revealed type is "Any" @overload # E: Single overload definition, multiple required def f3(x: A) -> A: ... -if maybe_true: # E: Condition cannot be inferred, unable to merge overloads \ +if maybe_true: # E: Condition can't be inferred, unable to merge overloads \ # E: Name "maybe_true" is not defined @overload def f3(x: B) -> B: ... @@ -5857,14 +5891,14 @@ else: def f3(x): ... reveal_type(f3(A())) # N: Revealed type is "__main__.A" reveal_type(f3(B())) # E: No overload variant of "f3" matches argument type "B" \ - # N: Possible overload variant: \ - # N: def f3(x: A) -> A \ - # N: Revealed type is "Any" + # N: Possible overload variant: \ + # N: def f3(x: A) -> A \ + # N: Revealed type is "Any" def g(bool_var: bool) -> None: @overload def f4(x: A) -> A: ... - if bool_var: # E: Condition cannot be inferred, unable to merge overloads + if bool_var: # E: Condition can't be inferred, unable to merge overloads @overload def f4(x: B) -> B: ... elif maybe_true: # E: Name "maybe_true" is not defined @@ -5880,10 +5914,11 @@ def g(bool_var: bool) -> None: def f4(x): ... reveal_type(f4(E())) # N: Revealed type is "__main__.E" reveal_type(f4(B())) # E: No overload variant of "f4" matches argument type "B" \ - # N: Possible overload variants: \ - # N: def f4(x: A) -> A \ - # N: def f4(x: E) -> E \ - # N: Revealed type is "Any" + # N: Possible overload variants: \ + # N: def f4(x: A) -> A \ + # N: def f4(x: E) -> E \ + # N: Revealed type is "Any" + [case testOverloadIfSkipUnknownExecution] # flags: --always-true True @@ -5901,14 +5936,14 @@ class D: ... @overload # E: Single overload definition, multiple required def f1(x: A) -> A: ... -if maybe_true: # E: Condition cannot be inferred, unable to merge overloads \ +if maybe_true: # E: Condition can't be inferred, unable to merge overloads \ # E: Name "maybe_true" is not defined @overload def f1(x: B) -> B: ... def f1(x): ... reveal_type(f1(A())) # N: Revealed type is "__main__.A" -if maybe_true: # E: Condition cannot be inferred, unable to merge overloads \ +if maybe_true: # E: Condition can't be inferred, unable to merge overloads \ # E: Name "maybe_true" is not defined @overload def f2(x: A) -> A: ... @@ -5918,15 +5953,15 @@ def f2(x: B) -> B: ... def f2(x: C) -> C: ... def f2(x): ... 
reveal_type(f2(A())) # E: No overload variant of "f2" matches argument type "A" \ - # N: Possible overload variants: \ - # N: def f2(x: B) -> B \ - # N: def f2(x: C) -> C \ - # N: Revealed type is "Any" + # N: Possible overload variants: \ + # N: def f2(x: B) -> B \ + # N: def f2(x: C) -> C \ + # N: Revealed type is "Any" if True: @overload # E: Single overload definition, multiple required def f3(x: A) -> A: ... - if maybe_true: # E: Condition cannot be inferred, unable to merge overloads \ + if maybe_true: # E: Condition can't be inferred, unable to merge overloads \ # E: Name "maybe_true" is not defined @overload def f3(x: B) -> B: ... @@ -5934,7 +5969,7 @@ if True: reveal_type(f3(A())) # N: Revealed type is "__main__.A" if True: - if maybe_true: # E: Condition cannot be inferred, unable to merge overloads \ + if maybe_true: # E: Condition can't be inferred, unable to merge overloads \ # E: Name "maybe_true" is not defined @overload def f4(x: A) -> A: ... @@ -5944,10 +5979,10 @@ if True: def f4(x: C) -> C: ... def f4(x): ... reveal_type(f4(A())) # E: No overload variant of "f4" matches argument type "A" \ - # N: Possible overload variants: \ - # N: def f4(x: B) -> B \ - # N: def f4(x: C) -> C \ - # N: Revealed type is "Any" + # N: Possible overload variants: \ + # N: def f4(x: B) -> B \ + # N: def f4(x: C) -> C \ + # N: Revealed type is "Any" [case testOverloadIfDontSkipUnrelatedOverload] # flags: --always-true True @@ -6187,16 +6222,16 @@ if False: def f8(x): ... reveal_type(f8(A())) # N: Revealed type is "__main__.A" reveal_type(f8(C())) # E: No overload variant of "f8" matches argument type "C" \ - # N: Possible overload variants: \ - # N: def f8(x: A) -> A \ - # N: def f8(x: B) -> B \ - # N: Revealed type is "Any" + # N: Possible overload variants: \ + # N: def f8(x: A) -> A \ + # N: def f8(x: B) -> B \ + # N: Revealed type is "Any" -if maybe_true: # E: Condition cannot be inferred, unable to merge overloads \ +if maybe_true: # E: Condition can't be inferred, unable to merge overloads \ # E: Name "maybe_true" is not defined @overload def f9(x: A) -> A: ... -if another_maybe_true: # E: Condition cannot be inferred, unable to merge overloads \ +if another_maybe_true: # E: Condition can't be inferred, unable to merge overloads \ # E: Name "another_maybe_true" is not defined @overload def f9(x: B) -> B: ... @@ -6206,18 +6241,18 @@ def f9(x: C) -> C: ... def f9(x: D) -> D: ... def f9(x): ... reveal_type(f9(A())) # E: No overload variant of "f9" matches argument type "A" \ - # N: Possible overload variants: \ - # N: def f9(x: C) -> C \ - # N: def f9(x: D) -> D \ - # N: Revealed type is "Any" + # N: Possible overload variants: \ + # N: def f9(x: C) -> C \ + # N: def f9(x: D) -> D \ + # N: Revealed type is "Any" reveal_type(f9(C())) # N: Revealed type is "__main__.C" if True: - if maybe_true: # E: Condition cannot be inferred, unable to merge overloads \ + if maybe_true: # E: Condition can't be inferred, unable to merge overloads \ # E: Name "maybe_true" is not defined @overload def f10(x: A) -> A: ... - if another_maybe_true: # E: Condition cannot be inferred, unable to merge overloads \ + if another_maybe_true: # E: Condition can't be inferred, unable to merge overloads \ # E: Name "another_maybe_true" is not defined @overload def f10(x: B) -> B: ... @@ -6227,10 +6262,10 @@ if True: def f10(x: D) -> D: ... def f10(x): ... 
reveal_type(f10(A())) # E: No overload variant of "f10" matches argument type "A" \ - # N: Possible overload variants: \ - # N: def f10(x: C) -> C \ - # N: def f10(x: D) -> D \ - # N: Revealed type is "Any" + # N: Possible overload variants: \ + # N: def f10(x: C) -> C \ + # N: def f10(x: D) -> D \ + # N: Revealed type is "Any" reveal_type(f10(C())) # N: Revealed type is "__main__.C" if some_var: # E: Name "some_var" is not defined @@ -6251,6 +6286,7 @@ if True: def f12(x: B) -> B: ... def f12(x): ... reveal_type(f12(A())) # N: Revealed type is "__main__.A" + [typing fixtures/typing-medium.pyi] [case testOverloadIfUnconditionalFuncDef] @@ -6406,7 +6442,7 @@ def f1(g: A) -> A: ... if True: @overload # E: Single overload definition, multiple required def f1(g: B) -> B: ... - if maybe_true: # E: Condition cannot be inferred, unable to merge overloads \ + if maybe_true: # E: Condition can't be inferred, unable to merge overloads \ # E: Name "maybe_true" is not defined @overload def f1(g: C) -> C: ... @@ -6432,7 +6468,7 @@ if True: def f3(g: B) -> B: ... if True: pass # Some other node - @overload # E: Name "f3" already defined on line 32 \ + @overload # E: Name "f3" already defined on line 32 \ # E: An overloaded function outside a stub file must have an implementation def f3(g: C) -> C: ... @overload @@ -6465,7 +6501,7 @@ eggs = lambda: 'eggs' reveal_type(func(eggs)) # N: Revealed type is "def (builtins.str) -> builtins.str" spam: Callable[..., str] = lambda x, y: 'baz' -reveal_type(func(spam)) # N: Revealed type is "def (*Any, **Any) -> builtins.str" +reveal_type(func(spam)) # N: Revealed type is "def (*Any, **Any) -> Any" [builtins fixtures/paramspec.pyi] @@ -6545,3 +6581,72 @@ class Snafu(object): reveal_type(Snafu().snafu('123')) # N: Revealed type is "builtins.str" reveal_type(Snafu.snafu('123')) # N: Revealed type is "builtins.str" [builtins fixtures/staticmethod.pyi] + +[case testOverloadedWithInternalTypeVars] +# flags: --new-type-inference +import m + +[file m.pyi] +from typing import Callable, TypeVar, overload + +T = TypeVar("T") +S = TypeVar("S", bound=str) + +@overload +def foo(x: int = ...) -> Callable[[T], T]: ... +@overload +def foo(x: S = ...) -> Callable[[T], T]: ... + +[case testOverloadGenericStarArgOverlap] +from typing import Any, Callable, TypeVar, overload, Union, Tuple, List + +F = TypeVar("F", bound=Callable[..., Any]) +S = TypeVar("S", bound=int) + +def id(f: F) -> F: ... + +@overload +def struct(*cols: S) -> int: ... +@overload +def struct(__cols: Union[List[S], Tuple[S, ...]]) -> int: ... +@id +def struct(*cols: Union[S, Union[List[S], Tuple[S, ...]]]) -> int: + pass +[builtins fixtures/tuple.pyi] + +[case testRegularGenericDecoratorOverload] +from typing import Callable, overload, TypeVar, List + +S = TypeVar("S") +T = TypeVar("T") +def transform(func: Callable[[S], List[T]]) -> Callable[[S], T]: ... + +@overload +def foo(x: int) -> List[float]: ... +@overload +def foo(x: str) -> List[str]: ... +def foo(x): ... + +reveal_type(transform(foo)) # N: Revealed type is "Overload(def (builtins.int) -> builtins.float, def (builtins.str) -> builtins.str)" + +@transform +@overload +def bar(x: int) -> List[float]: ... +@transform +@overload +def bar(x: str) -> List[str]: ... +@transform +def bar(x): ... 
+ +reveal_type(bar) # N: Revealed type is "Overload(def (builtins.int) -> builtins.float, def (builtins.str) -> builtins.str)" +[builtins fixtures/paramspec.pyi] + +[case testOverloadOverlapWithNameOnlyArgs] +from typing import overload + +@overload +def d(x: int) -> int: ... +@overload +def d(f: int, *, x: int) -> str: ... +def d(*args, **kwargs): ... +[builtins fixtures/tuple.pyi] diff --git a/test-data/unit/check-parameter-specification.test b/test-data/unit/check-parameter-specification.test index 901e73008d56..a98c92ce14e7 100644 --- a/test-data/unit/check-parameter-specification.test +++ b/test-data/unit/check-parameter-specification.test @@ -38,6 +38,74 @@ def foo6(x: Callable[[P], int]) -> None: ... # E: Invalid location for ParamSpe # N: You can use ParamSpec as the first argument to Callable, e.g., 'Callable[P, int]' [builtins fixtures/paramspec.pyi] +[case testParamSpecImports] +import lib +from lib import Base + +class C(Base[[int]]): + def test(self, x: int): ... + +class D(lib.Base[[int]]): + def test(self, x: int): ... + +class E(lib.Base[...]): ... +reveal_type(E().test) # N: Revealed type is "def (*Any, **Any)" + +[file lib.py] +from typing import Generic +from typing_extensions import ParamSpec + +P = ParamSpec("P") +class Base(Generic[P]): + def test(self, *args: P.args, **kwargs: P.kwargs) -> None: + ... +[builtins fixtures/paramspec.pyi] + +[case testParamSpecEllipsisInAliases] +from typing import Any, Callable, Generic, TypeVar +from typing_extensions import ParamSpec + +P = ParamSpec('P') +R = TypeVar('R') +Alias = Callable[P, R] + +class B(Generic[P]): ... +Other = B[P] + +T = TypeVar('T', bound=Alias[..., Any]) +Alias[..., Any] # E: Type application is only supported for generic classes +B[...] +Other[...] +[builtins fixtures/paramspec.pyi] + +[case testParamSpecEllipsisInConcatenate] +from typing import Any, Callable, Generic, TypeVar +from typing_extensions import ParamSpec, Concatenate + +P = ParamSpec('P') +R = TypeVar('R') +Alias = Callable[P, R] + +IntFun = Callable[Concatenate[int, ...], None] +f: IntFun +reveal_type(f) # N: Revealed type is "def (builtins.int, *Any, **Any)" + +g: Callable[Concatenate[int, ...], None] +reveal_type(g) # N: Revealed type is "def (builtins.int, *Any, **Any)" + +class B(Generic[P]): + def test(self, *args: P.args, **kwargs: P.kwargs) -> None: + ... + +x: B[Concatenate[int, ...]] +reveal_type(x.test) # N: Revealed type is "def (builtins.int, *Any, **Any)" + +Bad = Callable[Concatenate[int, [int, str]], None] # E: The last parameter to Concatenate needs to be a ParamSpec \ + # E: Bracketed expression "[...]" is not valid as a type +def bad(fn: Callable[Concatenate[P, int], None]): # E: The last parameter to Concatenate needs to be a ParamSpec + ... +[builtins fixtures/paramspec.pyi] + [case testParamSpecContextManagerLike] from typing import Callable, List, Iterator, TypeVar from typing_extensions import ParamSpec @@ -171,7 +239,6 @@ reveal_type(f(g, 1, y='x')) # N: Revealed type is "None" f(g, 'x', y='x') # E: Argument 2 to "f" has incompatible type "str"; expected "int" f(g, 1, y=1) # E: Argument "y" to "f" has incompatible type "int"; expected "str" f(g) # E: Missing positional arguments "x", "y" in call to "f" - [builtins fixtures/dict.pyi] [case testParamSpecSpecialCase] @@ -347,14 +414,19 @@ P = ParamSpec('P') T = TypeVar('T') # Similar to atexit.register -def register(f: Callable[P, T], *args: P.args, **kwargs: P.kwargs) -> Callable[P, T]: ... 
# N: "register" defined here +def register(f: Callable[P, T], *args: P.args, **kwargs: P.kwargs) -> Callable[P, T]: ... def f(x: int) -> None: pass +def g(x: int, y: str) -> None: pass reveal_type(register(lambda: f(1))) # N: Revealed type is "def ()" -reveal_type(register(lambda x: f(x), x=1)) # N: Revealed type is "def (x: Any)" -register(lambda x: f(x)) # E: Missing positional argument "x" in call to "register" -register(lambda x: f(x), y=1) # E: Unexpected keyword argument "y" for "register" +reveal_type(register(lambda x: f(x), x=1)) # N: Revealed type is "def (x: Literal[1]?)" +register(lambda x: f(x)) # E: Cannot infer type of lambda \ + # E: Argument 1 to "register" has incompatible type "Callable[[Any], None]"; expected "Callable[[], None]" +register(lambda x: f(x), y=1) # E: Argument 1 to "register" has incompatible type "Callable[[Arg(int, 'x')], None]"; expected "Callable[[Arg(int, 'y')], None]" +reveal_type(register(lambda x: f(x), 1)) # N: Revealed type is "def (Literal[1]?)" +reveal_type(register(lambda x, y: g(x, y), 1, "a")) # N: Revealed type is "def (Literal[1]?, Literal['a']?)" +reveal_type(register(lambda x, y: g(x, y), 1, y="a")) # N: Revealed type is "def (Literal[1]?, y: Literal['a']?)" [builtins fixtures/dict.pyi] [case testParamSpecInvalidCalls] @@ -570,7 +642,7 @@ reveal_type(f(n)) # N: Revealed type is "def (builtins.int, builtins.bytes) -> [builtins fixtures/paramspec.pyi] [case testParamSpecConcatenateNamedArgs] -# flags: --python-version 3.8 --strict-concatenate +# flags: --python-version 3.8 --extra-checks # this is one noticeable deviation from PEP but I believe it is for the better from typing_extensions import ParamSpec, Concatenate from typing import Callable, TypeVar @@ -592,8 +664,6 @@ def f2(c: Callable[P, R]) -> Callable[Concatenate[int, P], R]: f2(lambda x: 42)(42, x=42) [builtins fixtures/paramspec.pyi] [out] -main:10: error: invalid syntax; you likely need to run mypy using Python 3.8 or newer -[out version>=3.8] main:17: error: Incompatible return value type (got "Callable[[Arg(int, 'x'), **P], R]", expected "Callable[[int, **P], R]") main:17: note: This is likely because "result" has named arguments: "x". Consider marking them positional-only @@ -620,9 +690,6 @@ def f2(c: Callable[P, R]) -> Callable[Concatenate[int, P], R]: # reason for rejection: f2(lambda x: 42)(42, x=42) [builtins fixtures/paramspec.pyi] -[out] -main:11: error: invalid syntax; you likely need to run mypy using Python 3.8 or newer -[out version>=3.8] [case testParamSpecConcatenateWithTypeVar] from typing_extensions import ParamSpec, Concatenate @@ -662,8 +729,6 @@ reveal_type(abc) bar(abc) [builtins fixtures/paramspec.pyi] [out] -main:13: error: invalid syntax; you likely need to run mypy using Python 3.8 or newer -[out version>=3.8] main:16: note: Revealed type is "__main__.Foo[[builtins.int, b: builtins.str]]" [case testSolveParamSpecWithSelfType] @@ -840,16 +905,15 @@ class A: def func(self, action: Callable[_P, _R], *args: _P.args, **kwargs: _P.kwargs) -> _R: ... 
-reveal_type(A.func) # N: Revealed type is "def [_P, _R] (self: __main__.A, action: def (*_P.args, **_P.kwargs) -> _R`-2, *_P.args, **_P.kwargs) -> _R`-2" -reveal_type(A().func) # N: Revealed type is "def [_P, _R] (action: def (*_P.args, **_P.kwargs) -> _R`5, *_P.args, **_P.kwargs) -> _R`5" +reveal_type(A.func) # N: Revealed type is "def [_P, _R] (self: __main__.A, action: def (*_P.args, **_P.kwargs) -> _R`3, *_P.args, **_P.kwargs) -> _R`3" +reveal_type(A().func) # N: Revealed type is "def [_P, _R] (action: def (*_P.args, **_P.kwargs) -> _R`7, *_P.args, **_P.kwargs) -> _R`7" def f(x: int) -> int: ... reveal_type(A().func(f, 42)) # N: Revealed type is "builtins.int" -# TODO: this should reveal `int` -reveal_type(A().func(lambda x: x + x, 42)) # N: Revealed type is "Any" +reveal_type(A().func(lambda x: x + x, 42)) # N: Revealed type is "builtins.int" [builtins fixtures/paramspec.pyi] [case testParamSpecConstraintOnOtherParamSpec] @@ -873,8 +937,8 @@ class A: def func(self, action: Job[_P, None]) -> Job[_P, None]: ... -reveal_type(A.func) # N: Revealed type is "def [_P] (self: __main__.A, action: __main__.Job[_P`-1, None]) -> __main__.Job[_P`-1, None]" -reveal_type(A().func) # N: Revealed type is "def [_P] (action: __main__.Job[_P`3, None]) -> __main__.Job[_P`3, None]" +reveal_type(A.func) # N: Revealed type is "def [_P] (self: __main__.A, action: __main__.Job[_P`2, None]) -> __main__.Job[_P`2, None]" +reveal_type(A().func) # N: Revealed type is "def [_P] (action: __main__.Job[_P`4, None]) -> __main__.Job[_P`4, None]" reveal_type(A().func(Job(lambda x: x))) # N: Revealed type is "__main__.Job[[x: Any], None]" def f(x: int, y: int) -> None: ... @@ -1036,10 +1100,32 @@ j = Job(generic_f) reveal_type(j) # N: Revealed type is "__main__.Job[[x: _T`-1]]" jf = j.into_callable() -reveal_type(jf) # N: Revealed type is "def [_T] (x: _T`-1)" +reveal_type(jf) # N: Revealed type is "def [_T] (x: _T`2)" reveal_type(jf(1)) # N: Revealed type is "None" [builtins fixtures/paramspec.pyi] +[case testGenericsInInferredParamspecReturn] +# flags: --new-type-inference +from typing import Callable, TypeVar, Generic +from typing_extensions import ParamSpec + +_P = ParamSpec("_P") +_T = TypeVar("_T") + +class Job(Generic[_P, _T]): + def __init__(self, target: Callable[_P, _T]) -> None: ... + def into_callable(self) -> Callable[_P, _T]: ... + +def generic_f(x: _T) -> _T: ... + +j = Job(generic_f) +reveal_type(j) # N: Revealed type is "__main__.Job[[x: _T`2], _T`2]" + +jf = j.into_callable() +reveal_type(jf) # N: Revealed type is "def [_T] (x: _T`3) -> _T`3" +reveal_type(jf(1)) # N: Revealed type is "builtins.int" +[builtins fixtures/paramspec.pyi] + [case testStackedConcatenateIsIllegal] from typing_extensions import Concatenate, ParamSpec from typing import Callable @@ -1272,7 +1358,6 @@ P = ParamSpec('P') class Some(Generic[P]): def call(self, *args: P.args, **kwargs: P.kwargs): ... -# TODO: this probably should be reported. def call(*args: P.args, **kwargs: P.kwargs): ... 
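Aside, not part of the patch: most of the ParamSpec hunks here revolve around one pattern, a class generic in a ParamSpec that stores a callable and hands its exact signature back out (the Job / into_callable cases above). A self-contained sketch of that pattern, assuming typing_extensions is available (or Python 3.10+ typing):

from typing import Callable, Generic, TypeVar
from typing_extensions import ParamSpec

P = ParamSpec("P")
R = TypeVar("R")

class Job(Generic[P, R]):
    def __init__(self, target: Callable[P, R]) -> None:
        self._target = target

    def into_callable(self) -> Callable[P, R]:
        return self._target          # the wrapped signature is preserved

def add(x: int, y: int) -> int:
    return x + y

fn = Job(add).into_callable()
print(fn(1, y=2))   # mypy sees fn as (x: int, y: int) -> int, so this checks
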
[builtins fixtures/paramspec.pyi] @@ -1292,8 +1377,7 @@ reveal_type(bar(C(fn=foo, x=1))) # N: Revealed type is "__main__.C[[x: builtins [builtins fixtures/paramspec.pyi] [case testParamSpecClassConstructor] -# flags: --strict-optional -from typing import ParamSpec, Callable +from typing import ParamSpec, Callable, TypeVar P = ParamSpec("P") @@ -1301,7 +1385,10 @@ class SomeClass: def __init__(self, a: str) -> None: pass -def func(t: Callable[P, SomeClass], val: Callable[P, SomeClass]) -> None: +def func(t: Callable[P, SomeClass], val: Callable[P, SomeClass]) -> Callable[P, SomeClass]: + pass + +def func_regular(t: Callable[[T], SomeClass], val: Callable[[T], SomeClass]) -> Callable[[T], SomeClass]: pass def constructor(a: str) -> SomeClass: @@ -1310,9 +1397,13 @@ def constructor(a: str) -> SomeClass: def wrong_constructor(a: bool) -> SomeClass: return SomeClass("a") +def wrong_name_constructor(b: bool) -> SomeClass: + return SomeClass("a") + func(SomeClass, constructor) -func(SomeClass, wrong_constructor) # E: Argument 1 to "func" has incompatible type "Type[SomeClass]"; expected "Callable[[VarArg(), KwArg()], SomeClass]" \ - # E: Argument 2 to "func" has incompatible type "Callable[[bool], SomeClass]"; expected "Callable[[VarArg(), KwArg()], SomeClass]" +reveal_type(func(SomeClass, wrong_constructor)) # N: Revealed type is "def (a: ) -> __main__.SomeClass" +reveal_type(func_regular(SomeClass, wrong_constructor)) # N: Revealed type is "def () -> __main__.SomeClass" +func(SomeClass, wrong_name_constructor) # E: Argument 1 to "func" has incompatible type "Type[SomeClass]"; expected "Callable[[], SomeClass]" [builtins fixtures/paramspec.pyi] [case testParamSpecInTypeAliasBasic] @@ -1410,8 +1501,7 @@ from typing import ParamSpec, Generic, List, TypeVar, Callable P = ParamSpec("P") T = TypeVar("T") A = List[T] -def f(x: A[[int, str]]) -> None: ... # E: Bracketed expression "[...]" is not valid as a type \ - # N: Did you mean "List[...]"? +def f(x: A[[int, str]]) -> None: ... # E: Bracketed expression "[...]" is not valid as a type def g(x: A[P]) -> None: ... # E: Invalid location for ParamSpec "P" \ # N: You can use ParamSpec as the first argument to Callable, e.g., 'Callable[P, int]' @@ -1452,8 +1542,7 @@ reveal_type(gs) # N: Revealed type is "builtins.list[def (builtins.int, builtin T = TypeVar("T") class C(Generic[T]): ... -C[Callable[P, int]]() # E: The first argument to Callable must be a list of types, parameter specification, or "..." \ - # N: See https://mypy.readthedocs.io/en/stable/kinds_of_types.html#callable-types-and-lambdas +C[Callable[P, int]]() [builtins fixtures/paramspec.pyi] [case testConcatDeferralNoCrash] @@ -1520,3 +1609,233 @@ def identity(func: Callable[P, None]) -> Callable[P, None]: ... @identity def f(f: Callable[P, None], *args: P.args, **kwargs: P.kwargs) -> None: ... [builtins fixtures/paramspec.pyi] + +[case testParamSpecDecoratorAppliedToGeneric] +# flags: --new-type-inference +from typing import Callable, List, TypeVar +from typing_extensions import ParamSpec + +P = ParamSpec("P") +T = TypeVar("T") +U = TypeVar("U") + +def dec(f: Callable[P, T]) -> Callable[P, List[T]]: ... +def test(x: U) -> U: ... +reveal_type(dec) # N: Revealed type is "def [P, T] (f: def (*P.args, **P.kwargs) -> T`-2) -> def (*P.args, **P.kwargs) -> builtins.list[T`-2]" +reveal_type(dec(test)) # N: Revealed type is "def [T] (x: T`2) -> builtins.list[T`2]" + +class A: ... +TA = TypeVar("TA", bound=A) + +def test_with_bound(x: TA) -> TA: ... 
+reveal_type(dec(test_with_bound)) # N: Revealed type is "def [T <: __main__.A] (x: T`4) -> builtins.list[T`4]" +dec(test_with_bound)(0) # E: Value of type variable "T" of function cannot be "int" +dec(test_with_bound)(A()) # OK +[builtins fixtures/paramspec.pyi] + +[case testParamSpecArgumentParamInferenceRegular] +from typing import TypeVar, Generic +from typing_extensions import ParamSpec + +P = ParamSpec("P") +class Foo(Generic[P]): + def call(self, *args: P.args, **kwargs: P.kwargs) -> None: ... +def test(*args: P.args, **kwargs: P.kwargs) -> Foo[P]: ... + +reveal_type(test(1, 2)) # N: Revealed type is "__main__.Foo[[Literal[1]?, Literal[2]?]]" +reveal_type(test(x=1, y=2)) # N: Revealed type is "__main__.Foo[[x: Literal[1]?, y: Literal[2]?]]" +ints = [1, 2, 3] +reveal_type(test(*ints)) # N: Revealed type is "__main__.Foo[[*builtins.int]]" +[builtins fixtures/paramspec.pyi] + +[case testParamSpecArgumentParamInferenceGeneric] +# flags: --new-type-inference +from typing import Callable, TypeVar +from typing_extensions import ParamSpec + +P = ParamSpec("P") +R = TypeVar("R") +def call(f: Callable[P, R], *args: P.args, **kwargs: P.kwargs) -> R: + return f(*args, **kwargs) + +T = TypeVar("T") +def identity(x: T) -> T: + return x + +reveal_type(call(identity, 2)) # N: Revealed type is "builtins.int" +y: int = call(identity, 2) +[builtins fixtures/paramspec.pyi] + +[case testParamSpecNestedApplyNoCrash] +# flags: --new-type-inference +from typing import Callable, TypeVar +from typing_extensions import ParamSpec + +P = ParamSpec("P") +T = TypeVar("T") + +def apply(fn: Callable[P, T], *args: P.args, **kwargs: P.kwargs) -> T: ... +def test() -> int: ... +reveal_type(apply(apply, test)) # N: Revealed type is "builtins.int" +[builtins fixtures/paramspec.pyi] + +[case testParamSpecNestedApplyPosVsNamed] +from typing import Callable, TypeVar +from typing_extensions import ParamSpec + +P = ParamSpec("P") +T = TypeVar("T") + +def apply(fn: Callable[P, T], *args: P.args, **kwargs: P.kwargs) -> None: ... +def test(x: int) -> int: ... +apply(apply, test, x=42) # OK +apply(apply, test, 42) # Also OK (but requires some special casing) +[builtins fixtures/paramspec.pyi] + +[case testParamSpecApplyPosVsNamedOptional] +from typing import Callable, TypeVar +from typing_extensions import ParamSpec + +P = ParamSpec("P") +T = TypeVar("T") + +def apply(fn: Callable[P, T], *args: P.args, **kwargs: P.kwargs) -> None: ... +def test(x: str = ..., y: int = ...) -> int: ... +apply(test, y=42) # OK +[builtins fixtures/paramspec.pyi] + +[case testParamSpecPrefixSubtypingGenericInvalid] +from typing import Generic +from typing_extensions import ParamSpec, Concatenate + +P = ParamSpec("P") + +class A(Generic[P]): + def foo(self, *args: P.args, **kwargs: P.kwargs): + ... + +def bar(b: A[P]) -> A[Concatenate[int, P]]: + return b # E: Incompatible return value type (got "A[P]", expected "A[[int, **P]]") +[builtins fixtures/paramspec.pyi] + +[case testParamSpecPrefixSubtypingProtocolInvalid] +from typing import Protocol +from typing_extensions import ParamSpec, Concatenate + +P = ParamSpec("P") + +class A(Protocol[P]): + def foo(self, *args: P.args, **kwargs: P.kwargs): + ... 
+ +def bar(b: A[P]) -> A[Concatenate[int, P]]: + return b # E: Incompatible return value type (got "A[P]", expected "A[[int, **P]]") +[builtins fixtures/paramspec.pyi] + +[case testParamSpecPrefixSubtypingValidNonStrict] +from typing import Protocol +from typing_extensions import ParamSpec, Concatenate + +P = ParamSpec("P") + +class A(Protocol[P]): + def foo(self, a: int, *args: P.args, **kwargs: P.kwargs): + ... + +class B(Protocol[P]): + def foo(self, a: int, b: int, *args: P.args, **kwargs: P.kwargs): + ... + +def bar(b: B[P]) -> A[Concatenate[int, P]]: + return b +[builtins fixtures/paramspec.pyi] + +[case testParamSpecPrefixSubtypingInvalidStrict] +# flags: --extra-checks +from typing import Protocol +from typing_extensions import ParamSpec, Concatenate + +P = ParamSpec("P") + +class A(Protocol[P]): + def foo(self, a: int, *args: P.args, **kwargs: P.kwargs): + ... + +class B(Protocol[P]): + def foo(self, a: int, b: int, *args: P.args, **kwargs: P.kwargs): + ... + +def bar(b: B[P]) -> A[Concatenate[int, P]]: + return b # E: Incompatible return value type (got "B[P]", expected "A[[int, **P]]") \ + # N: Following member(s) of "B[P]" have conflicts: \ + # N: Expected: \ + # N: def foo(self, a: int, int, /, *args: P.args, **kwargs: P.kwargs) -> Any \ + # N: Got: \ + # N: def foo(self, a: int, b: int, *args: P.args, **kwargs: P.kwargs) -> Any +[builtins fixtures/paramspec.pyi] + +[case testParamSpecDecoratorOverload] +from typing import Callable, overload, TypeVar, List +from typing_extensions import ParamSpec + +P = ParamSpec("P") +T = TypeVar("T") +def transform(func: Callable[P, List[T]]) -> Callable[P, T]: ... + +@overload +def foo(x: int) -> List[float]: ... +@overload +def foo(x: str) -> List[str]: ... +def foo(x): ... + +reveal_type(transform(foo)) # N: Revealed type is "Overload(def (x: builtins.int) -> builtins.float, def (x: builtins.str) -> builtins.str)" + +@transform +@overload +def bar(x: int) -> List[float]: ... +@transform +@overload +def bar(x: str) -> List[str]: ... +@transform +def bar(x): ... + +reveal_type(bar) # N: Revealed type is "Overload(def (x: builtins.int) -> builtins.float, def (x: builtins.str) -> builtins.str)" +[builtins fixtures/paramspec.pyi] + +[case testParamSpecDecoratorOverloadNoCrashOnInvalidTypeVar] +from typing import Any, Callable, List +from typing_extensions import ParamSpec + +P = ParamSpec("P") +T = 1 + +Alias = Callable[P, List[T]] # type: ignore +def dec(fn: Callable[P, T]) -> Alias[P, T]: ... # type: ignore +f: Any +dec(f) # No crash +[builtins fixtures/paramspec.pyi] + +[case testParamSpecErrorNestedParams] +from typing import Generic +from typing_extensions import ParamSpec + +P = ParamSpec("P") +class C(Generic[P]): ... +c: C[int, [int, str], str] # E: Nested parameter specifications are not allowed +reveal_type(c) # N: Revealed type is "__main__.C[Any]" +[builtins fixtures/paramspec.pyi] + +[case testParamSpecInferenceWithCallbackProtocol] +from typing import Protocol, Callable, ParamSpec + +class CB(Protocol): + def __call__(self, x: str, y: int) -> None: ... + +P = ParamSpec('P') +def g(fn: Callable[P, None], *args: P.args, **kwargs: P.kwargs) -> None: ... 
+ +cb: CB +g(cb, y=0, x='a') # OK +g(cb, y='a', x=0) # E: Argument "y" to "g" has incompatible type "str"; expected "int" \ + # E: Argument "x" to "g" has incompatible type "int"; expected "str" +[builtins fixtures/paramspec.pyi] diff --git a/test-data/unit/check-plugin-attrs.test b/test-data/unit/check-plugin-attrs.test index 9aa31c1ed10b..7580531bebc9 100644 --- a/test-data/unit/check-plugin-attrs.test +++ b/test-data/unit/check-plugin-attrs.test @@ -185,10 +185,10 @@ from attr import attrib, attrs class A: a: int reveal_type(A) # N: Revealed type is "def (a: builtins.int) -> __main__.A" -reveal_type(A.__lt__) # N: Revealed type is "def [_AT] (self: _AT`-1, other: _AT`-1) -> builtins.bool" -reveal_type(A.__le__) # N: Revealed type is "def [_AT] (self: _AT`-1, other: _AT`-1) -> builtins.bool" -reveal_type(A.__gt__) # N: Revealed type is "def [_AT] (self: _AT`-1, other: _AT`-1) -> builtins.bool" -reveal_type(A.__ge__) # N: Revealed type is "def [_AT] (self: _AT`-1, other: _AT`-1) -> builtins.bool" +reveal_type(A.__lt__) # N: Revealed type is "def [_AT] (self: _AT`3, other: _AT`3) -> builtins.bool" +reveal_type(A.__le__) # N: Revealed type is "def [_AT] (self: _AT`4, other: _AT`4) -> builtins.bool" +reveal_type(A.__gt__) # N: Revealed type is "def [_AT] (self: _AT`5, other: _AT`5) -> builtins.bool" +reveal_type(A.__ge__) # N: Revealed type is "def [_AT] (self: _AT`6, other: _AT`6) -> builtins.bool" A(1) < A(2) A(1) <= A(2) @@ -672,6 +672,7 @@ class A(Generic[T]): [builtins fixtures/classmethod.pyi] [case testAttrsForwardReference] +# flags: --no-strict-optional import attr @attr.s(auto_attribs=True) class A: @@ -687,6 +688,7 @@ A(B(None)) [builtins fixtures/list.pyi] [case testAttrsForwardReferenceInClass] +# flags: --no-strict-optional import attr @attr.s(auto_attribs=True) class A: @@ -987,10 +989,10 @@ class C(A, B): pass @attr.s class D(A): pass -reveal_type(A.__lt__) # N: Revealed type is "def [_AT] (self: _AT`-1, other: _AT`-1) -> builtins.bool" -reveal_type(B.__lt__) # N: Revealed type is "def [_AT] (self: _AT`-1, other: _AT`-1) -> builtins.bool" -reveal_type(C.__lt__) # N: Revealed type is "def [_AT] (self: _AT`-1, other: _AT`-1) -> builtins.bool" -reveal_type(D.__lt__) # N: Revealed type is "def [_AT] (self: _AT`-1, other: _AT`-1) -> builtins.bool" +reveal_type(A.__lt__) # N: Revealed type is "def [_AT] (self: _AT`5, other: _AT`5) -> builtins.bool" +reveal_type(B.__lt__) # N: Revealed type is "def [_AT] (self: _AT`6, other: _AT`6) -> builtins.bool" +reveal_type(C.__lt__) # N: Revealed type is "def [_AT] (self: _AT`7, other: _AT`7) -> builtins.bool" +reveal_type(D.__lt__) # N: Revealed type is "def [_AT] (self: _AT`8, other: _AT`8) -> builtins.bool" A() < A() B() < B() @@ -1173,12 +1175,13 @@ class A: [builtins fixtures/bool.pyi] [case testAttrsFactoryBadReturn] +# flags: --new-type-inference import attr def my_factory() -> int: return 7 @attr.s class A: - x: int = attr.ib(factory=list) # E: Incompatible types in assignment (expression has type "List[T]", variable has type "int") + x: int = attr.ib(factory=list) # E: Incompatible types in assignment (expression has type "List[]", variable has type "int") y: str = attr.ib(factory=my_factory) # E: Incompatible types in assignment (expression has type "int", variable has type "str") [builtins fixtures/list.pyi] @@ -1196,7 +1199,6 @@ class C: [builtins fixtures/bool.pyi] [case testAttrsOptionalConverter] -# flags: --strict-optional import attr from attr.converters import optional from typing import Optional @@ -1216,7 +1218,6 @@ A(None, 
None) [builtins fixtures/plugin_attrs.pyi] [case testAttrsOptionalConverterNewPackage] -# flags: --strict-optional import attrs from attrs.converters import optional from typing import Optional @@ -1567,6 +1568,9 @@ reveal_type(f(A)[0]) # N: Revealed type is "attr.Attribute[builtins.int]" reveal_type(f(A).b) # N: Revealed type is "attr.Attribute[builtins.int]" f(A).x # E: "____main___A_AttrsAttributes__" has no attribute "x" +for ff in f(A): + reveal_type(ff) # N: Revealed type is "attr.Attribute[Any]" + [builtins fixtures/plugin_attrs.pyi] [case testAttrsGenericFields] @@ -1592,6 +1596,7 @@ def f(t: TA) -> None: [builtins fixtures/plugin_attrs.pyi] [case testNonattrsFields] +# flags: --no-strict-optional from typing import Any, cast, Type from attrs import fields @@ -1627,6 +1632,24 @@ reveal_type(A.__attrs_init__) # N: Revealed type is "def (self: __main__.A, b: [case testAttrsClassWithSlots] import attr +@attr.define +class Define: + b: int = attr.ib() + + def __attrs_post_init__(self) -> None: + self.b = 1 + self.c = 2 # E: Trying to assign name "c" that is not in "__slots__" of type "__main__.Define" + + +@attr.define(slots=False) +class DefineSlotsFalse: + b: int = attr.ib() + + def __attrs_post_init__(self) -> None: + self.b = 1 + self.c = 2 + + @attr.s(slots=True) class A: b: int = attr.ib() @@ -1654,6 +1677,33 @@ class C: self.c = 2 # E: Trying to assign name "c" that is not in "__slots__" of type "__main__.C" [builtins fixtures/plugin_attrs.pyi] +[case testRuntimeSlotsAttr] +from attr import dataclass + +@dataclass(slots=True) +class Some: + x: int + y: str + z: bool + +reveal_type(Some.__slots__) # N: Revealed type is "Tuple[builtins.str, builtins.str, builtins.str]" + +@dataclass(slots=True) +class Other: + x: int + y: str + +reveal_type(Other.__slots__) # N: Revealed type is "Tuple[builtins.str, builtins.str]" + + +@dataclass +class NoSlots: + x: int + y: str + +NoSlots.__slots__ # E: "Type[NoSlots]" has no attribute "__slots__" +[builtins fixtures/plugin_attrs.pyi] + [case testAttrsWithMatchArgs] # flags: --python-version 3.10 import attr @@ -2203,3 +2253,27 @@ c = attrs.assoc(c, name=42) # E: Argument "name" to "assoc" of "C" has incompat [builtins fixtures/plugin_attrs.pyi] [typing fixtures/typing-medium.pyi] + +[case testFrozenInheritFromGeneric] +from typing import Generic, TypeVar +from attrs import field, frozen + +T = TypeVar('T') + +def f(s: str) -> int: + ... 
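Aside, not part of the patch: the slots-related cases above check that attrs-generated __slots__ are enforced both by mypy's plugin and at runtime. A small runnable sketch of that behaviour, assuming the attrs package is installed (Point and its fields are made-up names):

import attr

@attr.define                      # slots=True is the default for attr.define
class Point:
    x: int
    y: int

    def __attrs_post_init__(self) -> None:
        self.x = 0                # fine: "x" is a declared attribute
        # self.label = "origin"   # AttributeError at runtime; mypy's attrs
        #                         # plugin reports it as not in __slots__

p = Point(1, 2)
print(p, attr.fields(Point))
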
+ +@frozen +class A(Generic[T]): + x: T + y: int = field(converter=f) + +@frozen +class B(A[int]): + pass + +b = B(42, 'spam') +reveal_type(b.x) # N: Revealed type is "builtins.int" +reveal_type(b.y) # N: Revealed type is "builtins.int" + +[builtins fixtures/plugin_attrs.pyi] diff --git a/test-data/unit/check-protocols.test b/test-data/unit/check-protocols.test index 6976b8ee0a39..dba01be50fee 100644 --- a/test-data/unit/check-protocols.test +++ b/test-data/unit/check-protocols.test @@ -319,10 +319,11 @@ class MyHashable(Protocol): class C: __my_hash__ = None -var: MyHashable = C() # E: Incompatible types in assignment (expression has type "C", variable has type "MyHashable") +var: MyHashable = C() # E: Incompatible types in assignment (expression has type "C", variable has type "MyHashable") \ + # N: Following member(s) of "C" have conflicts: \ + # N: __my_hash__: expected "Callable[[], int]", got "None" [case testNoneDisablesProtocolSubclassingWithStrictOptional] -# flags: --strict-optional from typing import Protocol class MyHashable(Protocol): @@ -334,7 +335,6 @@ class C(MyHashable): (expression has type "None", base class "MyHashable" defined the type as "Callable[[MyHashable], int]") [case testProtocolsWithNoneAndStrictOptional] -# flags: --strict-optional from typing import Protocol class P(Protocol): x = 0 # type: int @@ -346,12 +346,12 @@ x: P = C() # Error! def f(x: P) -> None: pass f(C()) # Error! [out] -main:9: error: Incompatible types in assignment (expression has type "C", variable has type "P") -main:9: note: Following member(s) of "C" have conflicts: -main:9: note: x: expected "int", got "None" -main:11: error: Argument 1 to "f" has incompatible type "C"; expected "P" -main:11: note: Following member(s) of "C" have conflicts: -main:11: note: x: expected "int", got "None" +main:8: error: Incompatible types in assignment (expression has type "C", variable has type "P") +main:8: note: Following member(s) of "C" have conflicts: +main:8: note: x: expected "int", got "None" +main:10: error: Argument 1 to "f" has incompatible type "C"; expected "P" +main:10: note: Following member(s) of "C" have conflicts: +main:10: note: x: expected "int", got "None" -- Semanal errors in protocol types -- -------------------------------- @@ -1263,13 +1263,13 @@ if int(): [builtins fixtures/classmethod.pyi] [case testOverloadedMethodsInProtocols] -from typing import overload, Protocol, Union +from typing import overload, Protocol, Union, Optional class P(Protocol): @overload - def f(self, x: int) -> int: pass + def f(self, x: int) -> Optional[int]: pass @overload - def f(self, x: str) -> str: pass + def f(self, x: str) -> Optional[str]: pass class C: def f(self, x: Union[int, str]) -> None: @@ -1286,9 +1286,9 @@ main:18: error: Incompatible types in assignment (expression has type "D", varia main:18: note: Following member(s) of "D" have conflicts: main:18: note: Expected: main:18: note: @overload -main:18: note: def f(self, x: int) -> int +main:18: note: def f(self, x: int) -> Optional[int] main:18: note: @overload -main:18: note: def f(self, x: str) -> str +main:18: note: def f(self, x: str) -> Optional[str] main:18: note: Got: main:18: note: def f(self, x: int) -> None @@ -1441,6 +1441,7 @@ def g(x: P, y: P2) -> None: pass reveal_type(f(g)) # N: Revealed type is "__main__.P2" [case testMeetOfIncompatibleProtocols] +# flags: --no-strict-optional from typing import Protocol, Callable, TypeVar class P(Protocol): @@ -1634,6 +1635,7 @@ f(Alias) # E: Only concrete class can be given where "Type[P]" 
is expected f(GoodAlias) [case testInstantiationProtocolInTypeForVariables] +# flags: --no-strict-optional from typing import Type, Protocol class P(Protocol): @@ -2397,6 +2399,7 @@ x: P = None [out] [case testNoneSubtypeOfAllProtocolsWithoutStrictOptional] +# flags: --no-strict-optional from typing import Protocol class P(Protocol): attr: int @@ -2407,7 +2410,6 @@ x: P = None [out] [case testNoneSubtypeOfEmptyProtocolStrict] -# flags: --strict-optional from typing import Protocol class P(Protocol): pass @@ -2789,6 +2791,70 @@ class A(Protocol): [builtins fixtures/tuple.pyi] +[case testProtocolSlotsIsNotProtocolMember] +# https://github.com/python/mypy/issues/11884 +from typing import Protocol + +class Foo(Protocol): + __slots__ = () +class NoSlots: + pass +class EmptySlots: + __slots__ = () +class TupleSlots: + __slots__ = ('x', 'y') +class StringSlots: + __slots__ = 'x y' +class InitSlots: + __slots__ = ('x',) + def __init__(self) -> None: + self.x = None +def foo(f: Foo): + pass + +# All should pass: +foo(NoSlots()) +foo(EmptySlots()) +foo(TupleSlots()) +foo(StringSlots()) +foo(InitSlots()) +[builtins fixtures/tuple.pyi] + +[case testProtocolSlotsAndRuntimeCheckable] +from typing import Protocol, runtime_checkable + +@runtime_checkable +class Foo(Protocol): + __slots__ = () +class Bar: + pass +issubclass(Bar, Foo) # Used to be an error, when `__slots__` counted as a protocol member +[builtins fixtures/isinstance.pyi] +[typing fixtures/typing-full.pyi] + + +[case testProtocolWithClassGetItem] +# https://github.com/python/mypy/issues/11886 +from typing import Any, Iterable, Protocol, Union + +class B: + ... + +class C: + def __class_getitem__(cls, __item: Any) -> Any: + ... + +class SupportsClassGetItem(Protocol): + __slots__: Union[str, Iterable[str]] = () + def __class_getitem__(cls, __item: Any) -> Any: + ... + +b1: SupportsClassGetItem = B() +c1: SupportsClassGetItem = C() +[builtins fixtures/tuple.pyi] +[typing fixtures/typing-full.pyi] + + [case testNoneVsProtocol] # mypy: strict-optional from typing_extensions import Protocol @@ -2890,7 +2956,6 @@ class MyClass: [case testPartialAttributeNoneTypeStrictOptional] -# flags: --strict-optional from typing import Optional, Protocol, runtime_checkable @runtime_checkable @@ -3011,7 +3076,6 @@ def round(number: SupportsRound[_T], ndigits: int) -> _T: ... 
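Aside, not part of the patch: the new protocol cases above pin down that an empty __slots__ declared on a Protocol is not treated as a required member. A short sketch of why that matters in practice (SupportsClose and File are illustrative names):

from typing import Protocol

class SupportsClose(Protocol):
    __slots__ = ()                 # keeps the protocol class itself slotted...
    def close(self) -> None: ...

class File:                        # ...but implementers need no __slots__
    def close(self) -> None:
        print("closed")

def shutdown(resource: SupportsClose) -> None:
    resource.close()

shutdown(File())                   # accepted: only close() counts as a member
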
round(C(), 1) [case testEmptyBodyImplicitlyAbstractProtocol] -# flags: --strict-optional from typing import Protocol, overload, Union class P1(Protocol): @@ -3058,7 +3122,6 @@ C3() [builtins fixtures/classmethod.pyi] [case testEmptyBodyImplicitlyAbstractProtocolProperty] -# flags: --strict-optional from typing import Protocol class P1(Protocol): @@ -3153,7 +3216,6 @@ D() # E: Cannot instantiate abstract class "D" with abstract attribute "meth" [builtins fixtures/exception.pyi] [case testEmptyBodyNoneCompatibleProtocol] -# flags: --strict-optional from abc import abstractmethod from typing import Any, Optional, Protocol, Union, overload from typing_extensions import TypeAlias diff --git a/test-data/unit/check-python310.test b/test-data/unit/check-python310.test index feedd02d82eb..0fe6a3d5a5cc 100644 --- a/test-data/unit/check-python310.test +++ b/test-data/unit/check-python310.test @@ -1140,7 +1140,6 @@ match m: reveal_type(a) [case testMatchRedefiningPatternGuard] -# flags: --strict-optional m: str match m: @@ -1373,7 +1372,7 @@ match m: reveal_type(m) # N: Revealed type is "__main__.Medal" [case testMatchNarrowUsingPatternGuardSpecialCase] -def f(x: int | str) -> int: # E: Missing return statement +def f(x: int | str) -> int: match x: case x if isinstance(x, str): return 0 @@ -1382,7 +1381,6 @@ def f(x: int | str) -> int: # E: Missing return statement [builtins fixtures/isinstance.pyi] [case testMatchNarrowDownUnionPartially] -# flags: --strict-optional def f(x: int | str) -> None: match x: @@ -1493,7 +1491,6 @@ def f(x: A) -> None: reveal_type(y) # N: Revealed type is "Union[__main__., __main__.]" [case testMatchWithBreakAndContinue] -# flags: --strict-optional def f(x: int | str | None) -> None: i = int() while i: @@ -1571,8 +1568,8 @@ class AnnAssign(stmt): value: str simple: int -reveal_type(AST.__match_args__) # N: Revealed type is "Tuple[]" -reveal_type(stmt.__match_args__) # N: Revealed type is "Tuple[]" +reveal_type(AST.__match_args__) # N: Revealed type is "Tuple[()]" +reveal_type(stmt.__match_args__) # N: Revealed type is "Tuple[()]" reveal_type(AnnAssign.__match_args__) # N: Revealed type is "Tuple[Literal['target']?, Literal['annotation']?, Literal['value']?, Literal['simple']?]" AnnAssign.__match_args__ = ('a', 'b', 'c', 'd') # E: Cannot assign to "__match_args__" @@ -1626,7 +1623,6 @@ def func(e: Union[str, tuple[str]]) -> None: [builtins fixtures/tuple.pyi] [case testMatchTupleOptionalNoCrash] -# flags: --strict-optional foo: tuple[int] | None match foo: case x,: @@ -1865,7 +1861,6 @@ def f() -> None: reveal_type(y.a) # N: Revealed type is "builtins.int" [case testNarrowedVariableInNestedModifiedInMatch] -# flags: --strict-optional from typing import Optional def match_stmt_error1(x: Optional[str]) -> None: @@ -1958,3 +1953,66 @@ def redefinition_bad(a: int): ... 
[builtins fixtures/primitives.pyi] + +[case testPatternMatchingClassPatternLocation] +# See https://github.com/python/mypy/issues/15496 +from some_missing_lib import DataFrame, Series # type: ignore[import] +from typing import TypeVar + +T = TypeVar("T", Series, DataFrame) + +def f(x: T) -> None: + match x: + case Series() | DataFrame(): # type: ignore[misc] + pass + +def f2(x: T) -> None: + match x: + case Series(): # type: ignore[misc] + pass + case DataFrame(): # type: ignore[misc] + pass +[builtins fixtures/primitives.pyi] + +[case testMatchGuardReachability] +# flags: --warn-unreachable +def f1(e: int) -> int: + match e: + case x if True: + return x + case _: + return 0 # E: Statement is unreachable + e = 0 # E: Statement is unreachable + + +def f2(e: int) -> int: + match e: + case x if bool(): + return x + case _: + return 0 + e = 0 # E: Statement is unreachable + +def f3(e: int | str | bytes) -> int: + match e: + case x if isinstance(x, int): + return x + case [x]: + return 0 # E: Statement is unreachable + case str(x): + return 0 + reveal_type(e) # N: Revealed type is "builtins.bytes" + return 0 + +def f4(e: int | str | bytes) -> int: + match e: + case int(x): + pass + case [x]: + return 0 # E: Statement is unreachable + case x if isinstance(x, str): + return 0 + reveal_type(e) # N: Revealed type is "Union[builtins.int, builtins.bytes]" + return 0 + +[builtins fixtures/primitives.pyi] diff --git a/test-data/unit/check-python312.test b/test-data/unit/check-python312.test new file mode 100644 index 000000000000..91aca7794071 --- /dev/null +++ b/test-data/unit/check-python312.test @@ -0,0 +1,59 @@ +[case test695TypeAlias] +type MyInt = int # E: PEP 695 type aliases are not yet supported + +def f(x: MyInt) -> MyInt: + return reveal_type(x) # N: Revealed type is "builtins.int" + +type MyList[T] = list[T] # E: PEP 695 type aliases are not yet supported \ + # E: Name "T" is not defined + +def g(x: MyList[int]) -> MyList[int]: # E: Variable "__main__.MyList" is not valid as a type \ + # N: See https://mypy.readthedocs.io/en/stable/common_issues.html#variables-vs-type-aliases + return reveal_type(x) # N: Revealed type is "MyList?[builtins.int]" + +[case test695Class] +class MyGen[T]: # E: PEP 695 generics are not yet supported + def __init__(self, x: T) -> None: # E: Name "T" is not defined + self.x = x + +def f(x: MyGen[int]): # E: "MyGen" expects no type arguments, but 1 given + reveal_type(x.x) # N: Revealed type is "Any" + +[case test695Function] +def f[T](x: T) -> T: # E: PEP 695 generics are not yet supported \ + # E: Name "T" is not defined + return reveal_type(x) # N: Revealed type is "Any" + +reveal_type(f(1)) # N: Revealed type is "Any" + +async def g[T](x: T) -> T: # E: PEP 695 generics are not yet supported \ + # E: Name "T" is not defined + return reveal_type(x) # N: Revealed type is "Any" + +reveal_type(g(1)) # E: Value of type "Coroutine[Any, Any, Any]" must be used \ + # N: Are you missing an await? \ + # N: Revealed type is "typing.Coroutine[Any, Any, Any]" + +[case test695TypeVar] +from typing import Callable +type Alias1[T: int] = list[T] # E: PEP 695 type aliases are not yet supported +type Alias2[**P] = Callable[P, int] # E: PEP 695 type aliases are not yet supported \ + # E: Value of type "int" is not indexable \ + # E: Name "P" is not defined +type Alias3[*Ts] = tuple[*Ts] # E: PEP 695 type aliases are not yet supported \ + # E: Type expected within [...] 
\ + # E: The type "Type[Tuple[Any, ...]]" is not generic and not indexable \ + # E: Name "Ts" is not defined + +class Cls1[T: int]: ... # E: PEP 695 generics are not yet supported +class Cls2[**P]: ... # E: PEP 695 generics are not yet supported +class Cls3[*Ts]: ... # E: PEP 695 generics are not yet supported + +def func1[T: int](x: T) -> T: ... # E: PEP 695 generics are not yet supported +def func2[**P](x: Callable[P, int]) -> Callable[P, str]: ... # E: PEP 695 generics are not yet supported \ + # E: The first argument to Callable must be a list of types, parameter specification, or "..." \ + # N: See https://mypy.readthedocs.io/en/stable/kinds_of_types.html#callable-types-and-lambdas \ + # E: Name "P" is not defined +def func3[*Ts](x: tuple[*Ts]) -> tuple[int, *Ts]: ... # E: PEP 695 generics are not yet supported \ + # E: Name "Ts" is not defined +[builtins fixtures/tuple.pyi] diff --git a/test-data/unit/check-python38.test b/test-data/unit/check-python38.test index 423daaf5ae8f..d83f29f2186a 100644 --- a/test-data/unit/check-python38.test +++ b/test-data/unit/check-python38.test @@ -223,7 +223,7 @@ h(arg=0) # E: Unexpected keyword argument "arg" for "h" i(arg=0) # E: Unexpected keyword argument "arg" [case testWalrus] -# flags: --strict-optional --python-version 3.8 +# flags: --python-version 3.8 from typing import NamedTuple, Optional, List from typing_extensions import Final @@ -427,7 +427,7 @@ else: [builtins fixtures/list.pyi] [case testWalrusConditionalTypeCheck] -# flags: --strict-optional --python-version 3.8 +# flags: --python-version 3.8 from typing import Optional maybe_str: Optional[str] @@ -729,7 +729,6 @@ def f1() -> None: [builtins fixtures/dict.pyi] [case testNarrowOnSelfInGeneric] -# flags: --strict-optional from typing import Generic, TypeVar, Optional T = TypeVar("T", int, str) @@ -741,8 +740,8 @@ class C(Generic[T]): reveal_type(y) return None [out] -main:10: note: Revealed type is "builtins.int" -main:10: note: Revealed type is "builtins.str" +main:9: note: Revealed type is "builtins.int" +main:9: note: Revealed type is "builtins.str" [case testTypeGuardWithPositionalOnlyArg] # flags: --python-version 3.8 @@ -778,7 +777,6 @@ class C: [builtins fixtures/list.pyi] [case testNarrowedVariableInNestedModifiedInWalrus] -# flags: --strict-optional from typing import Optional def walrus_with_nested_error(x: Optional[str]) -> None: diff --git a/test-data/unit/check-recursive-types.test b/test-data/unit/check-recursive-types.test index 75f2433c6d8c..84593933a2de 100644 --- a/test-data/unit/check-recursive-types.test +++ b/test-data/unit/check-recursive-types.test @@ -409,8 +409,8 @@ def local() -> None: x: L reveal_type(x) # N: Revealed type is "builtins.list[Union[builtins.int, Any]]" -S = Type[S] # E: Type[...] cannot contain another Type[...] -U = Type[Union[int, U]] # E: Type[...] cannot contain another Type[...] +S = Type[S] # E: Type[...] can't contain another Type[...] +U = Type[Union[int, U]] # E: Type[...] can't contain another Type[...] 
x: U reveal_type(x) # N: Revealed type is "Type[Any]" @@ -422,7 +422,6 @@ reveal_type(d) # N: Revealed type is "Any" [builtins fixtures/isinstancelist.pyi] [case testBasicRecursiveNamedTuple] -# flags: --strict-optional from typing import NamedTuple, Optional NT = NamedTuple("NT", [("x", Optional[NT]), ("y", int)]) @@ -457,7 +456,6 @@ reveal_type(f(tnt, nt)) # N: Revealed type is "builtins.tuple[Any, ...]" [builtins fixtures/tuple.pyi] [case testBasicRecursiveNamedTupleClass] -# flags: --strict-optional from typing import NamedTuple, Optional class NT(NamedTuple): @@ -684,7 +682,6 @@ itd2 = TD(x=0, y=TD(x=0, y=TD(x=0, y=None))) [typing fixtures/typing-typeddict.pyi] [case testRecursiveTypedDictMethods] -# flags: --strict-optional from typing import TypedDict class TD(TypedDict, total=False): @@ -787,7 +784,6 @@ reveal_type(std) # N: Revealed type is "TypedDict('__main__.STD', {'val': built [typing fixtures/typing-typeddict.pyi] [case testRecursiveClassLevelAlias] -# flags: --strict-optional from typing import Union, Sequence class A: @@ -937,3 +933,12 @@ x: A[int, str] if last is not None: reveal_type(last) # N: Revealed type is "Tuple[builtins.int, builtins.str, Union[Tuple[builtins.int, builtins.str, Union[..., None]], None]]" [builtins fixtures/tuple.pyi] + +[case testRecursiveAliasLiteral] +from typing import Tuple +from typing_extensions import Literal + +NotFilter = Tuple[Literal["not"], "NotFilter"] +n: NotFilter +reveal_type(n[1][1][0]) # N: Revealed type is "Literal['not']" +[builtins fixtures/tuple.pyi] diff --git a/test-data/unit/check-selftype.test b/test-data/unit/check-selftype.test index 53c24584cb73..d5024412ca97 100644 --- a/test-data/unit/check-selftype.test +++ b/test-data/unit/check-selftype.test @@ -509,6 +509,58 @@ class E: def __init_subclass__(cls) -> None: reveal_type(cls) # N: Revealed type is "Type[__main__.E]" +[case testSelfTypeNew_explicit] +from typing import TypeVar, Type + +T = TypeVar('T', bound='A') +class A: + @staticmethod + def __new__(cls: Type[T]) -> T: + return cls() + + @classmethod + def __init_subclass__(cls: Type[T]) -> None: + pass + +class B: + @staticmethod + def __new__(cls: Type[T]) -> T: # E: The erased type of self "Type[__main__.A]" is not a supertype of its class "Type[__main__.B]" + return cls() + + @classmethod + def __init_subclass__(cls: Type[T]) -> None: # E: The erased type of self "Type[__main__.A]" is not a supertype of its class "Type[__main__.B]" + pass + +class C: + @staticmethod + def __new__(cls: Type[C]) -> C: + return cls() + + @classmethod + def __init_subclass__(cls: Type[C]) -> None: + pass + +class D: + @staticmethod + def __new__(cls: D) -> D: # E: The erased type of self "__main__.D" is not a supertype of its class "Type[__main__.D]" + return cls + + @classmethod + def __init_subclass__(cls: D) -> None: # E: The erased type of self "__main__.D" is not a supertype of its class "Type[__main__.D]" + pass + +class E: + @staticmethod + def __new__(cls) -> E: + reveal_type(cls) # N: Revealed type is "Type[__main__.E]" + return cls() + + @classmethod + def __init_subclass__(cls) -> None: + reveal_type(cls) # N: Revealed type is "Type[__main__.E]" + +[builtins fixtures/classmethod.pyi] + [case testSelfTypePropertyUnion] from typing import Union class A: @@ -845,7 +897,8 @@ class Base(Protocol): class TweakFunc: def func(self: Base) -> int: - return reveal_type(super().func()) # N: Revealed type is "builtins.int" + return reveal_type(super().func()) # E: Call to abstract method "func" of "Base" with trivial body via super() 
is unsafe \ + # N: Revealed type is "builtins.int" class Good: def func(self) -> int: ... @@ -1217,14 +1270,14 @@ class AClass: ... def foo(x: Type[AClass]) -> None: - reveal_type(x.delete) # N: Revealed type is "Overload(def (id: builtins.int, id2: builtins.int) -> builtins.int, def (id: __main__.AClass, id2: None =) -> builtins.int)" + reveal_type(x.delete) # N: Revealed type is "Overload(def (id: builtins.int, id2: builtins.int) -> Union[builtins.int, None], def (id: __main__.AClass, id2: None =) -> Union[builtins.int, None])" y = x() - reveal_type(y.delete) # N: Revealed type is "Overload(def (id: builtins.int, id2: builtins.int) -> builtins.int, def (id: __main__.AClass, id2: None =) -> builtins.int)" + reveal_type(y.delete) # N: Revealed type is "Overload(def (id: builtins.int, id2: builtins.int) -> Union[builtins.int, None], def (id: __main__.AClass, id2: None =) -> Union[builtins.int, None])" y.delete(10, 20) y.delete(y) def bar(x: AClass) -> None: - reveal_type(x.delete) # N: Revealed type is "Overload(def (id: builtins.int, id2: builtins.int) -> builtins.int, def (id: __main__.AClass, id2: None =) -> builtins.int)" + reveal_type(x.delete) # N: Revealed type is "Overload(def (id: builtins.int, id2: builtins.int) -> Union[builtins.int, None], def (id: __main__.AClass, id2: None =) -> Union[builtins.int, None])" x.delete(10, 20) [builtins fixtures/classmethod.pyi] @@ -1431,7 +1484,7 @@ class C: return self class D(C): ... -reveal_type(C.meth) # N: Revealed type is "def [Self <: __main__.C] (self: Self`0) -> builtins.list[Self`0]" +reveal_type(C.meth) # N: Revealed type is "def [Self <: __main__.C] (self: Self`1) -> builtins.list[Self`1]" C.attr # E: Access to generic instance variables via class is ambiguous reveal_type(D().meth()) # N: Revealed type is "builtins.list[__main__.D]" reveal_type(D().attr) # N: Revealed type is "builtins.list[__main__.D]" @@ -1665,6 +1718,23 @@ class C: return cls() [builtins fixtures/classmethod.pyi] +[case testTypingSelfRedundantAllowed_pep585] +# flags: --python-version 3.9 +from typing import Self + +class C: + def f(self: Self) -> Self: + d: Defer + class Defer: ... + return self + + @classmethod + def g(cls: type[Self]) -> Self: + d: DeferAgain + class DeferAgain: ... + return cls() +[builtins fixtures/classmethod.pyi] + [case testTypingSelfRedundantWarning] # mypy: enable-error-code="redundant-self" @@ -1683,6 +1753,25 @@ class C: return cls() [builtins fixtures/classmethod.pyi] +[case testTypingSelfRedundantWarning_pep585] +# flags: --python-version 3.9 +# mypy: enable-error-code="redundant-self" + +from typing import Self + +class C: + def copy(self: Self) -> Self: # E: Redundant "Self" annotation for the first method argument + d: Defer + class Defer: ... + return self + + @classmethod + def g(cls: type[Self]) -> Self: # E: Redundant "Self" annotation for the first method argument + d: DeferAgain + class DeferAgain: ... + return cls() +[builtins fixtures/classmethod.pyi] + [case testTypingSelfAssertType] from typing import Self, assert_type @@ -1704,7 +1793,7 @@ class C: def bar(self) -> Self: ... def foo(self, x: S) -> Tuple[Self, S]: ... -reveal_type(C.foo) # N: Revealed type is "def [Self <: __main__.C, S] (self: Self`0, x: S`-1) -> Tuple[Self`0, S`-1]" +reveal_type(C.foo) # N: Revealed type is "def [Self <: __main__.C, S] (self: Self`1, x: S`2) -> Tuple[Self`1, S`2]" reveal_type(C().foo(42)) # N: Revealed type is "Tuple[__main__.C, builtins.int]" [builtins fixtures/tuple.pyi] @@ -1814,7 +1903,7 @@ class C: class D(C): ... 
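Aside, not part of the patch: several of the selftype hunks rely on the same property of typing.Self, namely that a method annotated as returning Self keeps returning the subclass type. A compact sketch with invented Builder names, assuming Python 3.11+ (or typing_extensions.Self on older versions):

from typing import Self

class Builder:
    def __init__(self) -> None:
        self.parts: list[str] = []

    def add(self, part: str) -> Self:   # Self, not "Builder"
        self.parts.append(part)
        return self

class HtmlBuilder(Builder):
    def render(self) -> str:
        return "<p>" + " ".join(self.parts) + "</p>"

# add() on an HtmlBuilder is typed as returning HtmlBuilder, so render()
# is still available after chaining:
print(HtmlBuilder().add("hello").add("world").render())
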
-reveal_type(D.f) # N: Revealed type is "def [T] (T`-1) -> T`-1" +reveal_type(D.f) # N: Revealed type is "def [T] (T`1) -> T`1" reveal_type(D().f) # N: Revealed type is "def () -> __main__.D" [case testTypingSelfOnSuperTypeVarValues] @@ -1867,3 +1956,83 @@ class B: return B() # E: Incompatible return value type (got "B", expected "A") [builtins fixtures/isinstancelist.pyi] + +[case testAttributeOnSelfAttributeInSubclass] +from typing import List, Self + +class A: + x: Self + xs: List[Self] + +class B(A): + extra: int + + def meth(self) -> None: + reveal_type(self.x) # N: Revealed type is "Self`0" + reveal_type(self.xs[0]) # N: Revealed type is "Self`0" + reveal_type(self.x.extra) # N: Revealed type is "builtins.int" + reveal_type(self.xs[0].extra) # N: Revealed type is "builtins.int" +[builtins fixtures/list.pyi] + +[case testSelfTypesWithParamSpecExtract] +from typing import Any, Callable, Generic, TypeVar +from typing_extensions import ParamSpec + +P = ParamSpec("P") +F = TypeVar("F", bound=Callable[..., Any]) +class Example(Generic[F]): + def __init__(self, fn: F) -> None: + ... + def __call__(self: Example[Callable[P, Any]], *args: P.args, **kwargs: P.kwargs) -> None: + ... + +def test_fn(a: int, b: str) -> None: + ... + +example = Example(test_fn) +example() # E: Missing positional arguments "a", "b" in call to "__call__" of "Example" +example(1, "b") # OK +[builtins fixtures/list.pyi] + +[case testSelfTypesWithParamSpecInfer] +from typing import TypeVar, Protocol, Type, Callable +from typing_extensions import ParamSpec + +R = TypeVar("R", covariant=True) +P = ParamSpec("P") +class AsyncP(Protocol[P]): + def meth(self, *args: P.args, **kwargs: P.kwargs) -> None: + ... + +class Async: + @classmethod + def async_func(cls: Type[AsyncP[P]]) -> Callable[P, int]: + ... + +class Add(Async): + def meth(self, x: int, y: int) -> None: ... 
+ +reveal_type(Add.async_func()) # N: Revealed type is "def (x: builtins.int, y: builtins.int) -> builtins.int" +reveal_type(Add().async_func()) # N: Revealed type is "def (x: builtins.int, y: builtins.int) -> builtins.int" +[builtins fixtures/classmethod.pyi] + +[case testSelfTypeMethodOnClassObject] +from typing import Self + +class Object: # Needed to mimic object in typeshed + ref: Self + +class Foo: + def foo(self) -> Self: + return self + +class Ben(Object): + MY_MAP = { + "foo": Foo.foo, + } + @classmethod + def doit(cls) -> Foo: + reveal_type(cls.MY_MAP) # N: Revealed type is "builtins.dict[builtins.str, def [Self <: __main__.Foo] (self: Self`4) -> Self`4]" + foo_method = cls.MY_MAP["foo"] + return foo_method(Foo()) +[builtins fixtures/isinstancelist.pyi] diff --git a/test-data/unit/check-serialize.test b/test-data/unit/check-serialize.test index 66d5d879ae68..81da94c0591c 100644 --- a/test-data/unit/check-serialize.test +++ b/test-data/unit/check-serialize.test @@ -740,7 +740,6 @@ main:4: note: Revealed type is "def (x: builtins.int) -> Tuple[builtins.int, fal -- [case testSerializeOptionalType] -# flags: --strict-optional import a [file a.py] import b @@ -1275,6 +1274,7 @@ main:2: error: Too many arguments for "f" main:2: error: Too many arguments for "f" [case testSerializeDummyType] +# flags: --no-strict-optional import a [file a.py] import b diff --git a/test-data/unit/check-singledispatch.test b/test-data/unit/check-singledispatch.test index 1bc34c6fdaab..1adec1575b7e 100644 --- a/test-data/unit/check-singledispatch.test +++ b/test-data/unit/check-singledispatch.test @@ -80,20 +80,6 @@ def g(arg: int) -> None: # E: Argument to register "str" is incompatible with ty [builtins fixtures/args.pyi] -[case testDispatchBasedOnTypeAnnotationsRequires37-xfail] -# flags: --python-version 3.6 -# the docs for singledispatch say that register didn't accept type annotations until python 3.7 -from functools import singledispatch - -@singledispatch -def f(arg) -> None: - pass -@f.register -def g(arg: int) -> None: # E: Singledispatch based on type annotations is only supported in Python 3.7 and greater - pass - -[builtins fixtures/args.pyi] - [case testTypePassedAsArgumentToRegister] from functools import singledispatch diff --git a/test-data/unit/check-statements.test b/test-data/unit/check-statements.test index 3cb8864f9207..023e2935a158 100644 --- a/test-data/unit/check-statements.test +++ b/test-data/unit/check-statements.test @@ -95,10 +95,10 @@ def f() -> Iterator[int]: [case testIfStatement] -a = None # type: A -a2 = None # type: A -a3 = None # type: A -b = None # type: bool +a: A +a2: A +a3: A +b: bool if a: a = b # E: Incompatible types in assignment (expression has type "bool", variable has type "A") elif a2: @@ -124,8 +124,8 @@ class A: pass [case testWhileStatement] -a = None # type: A -b = None # type: bool +a: A +b: bool while a: a = b # Fail else: @@ -142,13 +142,14 @@ main:7: error: Incompatible types in assignment (expression has type "bool", var [case testForStatement] class A: pass -a = None # type: A -b = None # type: object +a: A +b: object for a in [A()]: a = b # E: Incompatible types in assignment (expression has type "object", variable has type "A") else: a = b # E: Incompatible types in assignment (expression has type "object", variable has type "A") [builtins fixtures/list.pyi] + [case testBreakStatement] import typing while None: @@ -205,8 +206,9 @@ for a, b in x: # type: int, int, int # E: Incompatible number of tuple items [case testPlusAssign] - -a, b, c = None, 
None, None # type: (A, B, C) +a: A +b: B +c: C a += b # Fail b += a # Fail c += a # Fail @@ -221,13 +223,14 @@ class B: class C: pass [builtins fixtures/tuple.pyi] [out] -main:3: error: Unsupported operand types for + ("A" and "B") -main:4: error: Incompatible types in assignment (expression has type "C", variable has type "B") -main:5: error: Unsupported left operand type for + ("C") +main:4: error: Unsupported operand types for + ("A" and "B") +main:5: error: Incompatible types in assignment (expression has type "C", variable has type "B") +main:6: error: Unsupported left operand type for + ("C") [case testMinusAssign] - -a, b, c = None, None, None # type: (A, B, C) +a: A +b: B +c: C a -= b # Fail b -= a # Fail c -= a # Fail @@ -242,13 +245,13 @@ class B: class C: pass [builtins fixtures/tuple.pyi] [out] -main:3: error: Unsupported operand types for - ("A" and "B") -main:4: error: Incompatible types in assignment (expression has type "C", variable has type "B") -main:5: error: Unsupported left operand type for - ("C") +main:4: error: Unsupported operand types for - ("A" and "B") +main:5: error: Incompatible types in assignment (expression has type "C", variable has type "B") +main:6: error: Unsupported left operand type for - ("C") [case testMulAssign] - -a, c = None, None # type: (A, C) +a: A +c: C a *= a # Fail c *= a # Fail a *= c @@ -263,7 +266,8 @@ main:3: error: Unsupported operand types for * ("A" and "A") main:4: error: Unsupported left operand type for * ("C") [case testMatMulAssign] -a, c = None, None # type: (A, C) +a: A +c: C a @= a # E: Unsupported operand types for @ ("A" and "A") c @= a # E: Unsupported left operand type for @ ("C") a @= c @@ -275,8 +279,8 @@ class C: pass [builtins fixtures/tuple.pyi] [case testDivAssign] - -a, c = None, None # type: (A, C) +a: A +c: C a /= a # Fail c /= a # Fail a /= c @@ -291,8 +295,8 @@ main:3: error: Unsupported operand types for / ("A" and "A") main:4: error: Unsupported left operand type for / ("C") [case testPowAssign] - -a, c = None, None # type: (A, C) +a: A +c: C a **= a # Fail c **= a # Fail a **= c @@ -307,8 +311,8 @@ main:3: error: Unsupported operand types for ** ("A" and "A") main:4: error: Unsupported left operand type for ** ("C") [case testSubtypesInOperatorAssignment] - -a, b = None, None # type: (A, B) +a: A +b: B b += b b += a a += b @@ -321,8 +325,8 @@ class B(A): pass [out] [case testAdditionalOperatorsInOpAssign] - -a, c = None, None # type: (A, C) +a: A +c: C a &= a # Fail a >>= a # Fail a //= a # Fail @@ -389,9 +393,9 @@ main:2: error: Unsupported left operand type for + ("None") [case testRaiseStatement] -e = None # type: BaseException -f = None # type: MyError -a = None # type: A +e: BaseException +f: MyError +a: A raise a # Fail raise e raise f @@ -405,11 +409,16 @@ main:5: error: Exception must be derived from BaseException class A: pass class MyError(BaseException): pass def f(): pass -raise BaseException -raise MyError -raise A # E: Exception must be derived from BaseException -raise object # E: Exception must be derived from BaseException -raise f # E: Exception must be derived from BaseException +if object(): + raise BaseException +if object(): + raise MyError +if object(): + raise A # E: Exception must be derived from BaseException +if object(): + raise object # E: Exception must be derived from BaseException +if object(): + raise f # E: Exception must be derived from BaseException [builtins fixtures/exception.pyi] [case testRaiseClassObjectCustomInit] @@ -425,23 +434,35 @@ class MyKwError(Exception): 
class MyErrorWithDefault(Exception): def __init__(self, optional=1) -> None: ... -raise BaseException -raise Exception -raise BaseException(1) -raise Exception(2) -raise MyBaseError(4) -raise MyError(5, 6) -raise MyKwError(kwonly=7) -raise MyErrorWithDefault(8) -raise MyErrorWithDefault -raise MyBaseError # E: Too few arguments for "MyBaseError" -raise MyError # E: Too few arguments for "MyError" -raise MyKwError # E: Missing named argument "kwonly" for "MyKwError" +if object(): + raise BaseException +if object(): + raise Exception +if object(): + raise BaseException(1) +if object(): + raise Exception(2) +if object(): + raise MyBaseError(4) +if object(): + raise MyError(5, 6) +if object(): + raise MyKwError(kwonly=7) +if object(): + raise MyErrorWithDefault(8) +if object(): + raise MyErrorWithDefault +if object(): + raise MyBaseError # E: Too few arguments for "MyBaseError" +if object(): + raise MyError # E: Too few arguments for "MyError" +if object(): + raise MyKwError # E: Missing named argument "kwonly" for "MyKwError" [builtins fixtures/exception.pyi] [case testRaiseExceptionType] import typing -x = None # type: typing.Type[BaseException] +x: typing.Type[BaseException] raise x [builtins fixtures/exception.pyi] @@ -453,26 +474,30 @@ raise x # E: Exception must be derived from BaseException [case testRaiseUnion] import typing -x = None # type: typing.Union[BaseException, typing.Type[BaseException]] +x: typing.Union[BaseException, typing.Type[BaseException]] raise x [builtins fixtures/exception.pyi] [case testRaiseNonExceptionUnionFails] import typing -x = None # type: typing.Union[BaseException, int] +x: typing.Union[BaseException, int] raise x # E: Exception must be derived from BaseException [builtins fixtures/exception.pyi] [case testRaiseFromStatement] -e = None # type: BaseException -f = None # type: MyError -a = None # type: A -x = None # type: BaseException +e: BaseException +f: MyError +a: A +x: BaseException del x -raise e from a # E: Exception must be derived from BaseException -raise e from e -raise e from f -raise e from x # E: Trying to read deleted variable "x" +if object(): + raise e from a # E: Exception must be derived from BaseException +if object(): + raise e from e +if object(): + raise e from f +if object(): + raise e from x # E: Trying to read deleted variable "x" class A: pass class MyError(BaseException): pass [builtins fixtures/exception.pyi] @@ -482,11 +507,16 @@ import typing class A: pass class MyError(BaseException): pass def f(): pass -raise BaseException from BaseException -raise BaseException from MyError -raise BaseException from A # E: Exception must be derived from BaseException -raise BaseException from object # E: Exception must be derived from BaseException -raise BaseException from f # E: Exception must be derived from BaseException +if object(): + raise BaseException from BaseException +if object(): + raise BaseException from MyError +if object(): + raise BaseException from A # E: Exception must be derived from BaseException +if object(): + raise BaseException from object # E: Exception must be derived from BaseException +if object(): + raise BaseException from f # E: Exception must be derived from BaseException [builtins fixtures/exception.pyi] [case testTryFinallyStatement] @@ -505,27 +535,30 @@ main:5: error: Incompatible types in assignment (expression has type "object", v try: pass except BaseException as e: - a, o = None, None # type: (BaseException, object) + a: BaseException + o: object e = a e = o # Fail class A: pass class B: pass 
[builtins fixtures/exception.pyi] [out] -main:7: error: Incompatible types in assignment (expression has type "object", variable has type "BaseException") +main:8: error: Incompatible types in assignment (expression has type "object", variable has type "BaseException") [case testTypeErrorInBlock] class A: pass class B: pass -while object: - x = None # type: A +while int(): + x: A if int(): x = object() # E: Incompatible types in assignment (expression has type "object", variable has type "A") x = B() # E: Incompatible types in assignment (expression has type "B", variable has type "A") + [case testTypeErrorInvolvingBaseException] class A: pass -x, a = None, None # type: (BaseException, A) +x: BaseException +a: A if int(): a = BaseException() # E: Incompatible types in assignment (expression has type "BaseException", variable has type "A") if int(): @@ -1042,7 +1075,8 @@ main:12: error: Exception type must be derived from BaseException (or be a tuple [case testDelStmtWithIndex] -a, b = None, None # type: (A, B) +a: A +b: B del b[a] del b[b] # E: Argument 1 to "__delitem__" of "B" has incompatible type "B"; expected "A" del a[a] # E: "A" has no attribute "__delitem__" @@ -1965,6 +1999,7 @@ def f() -> None: [out] [case testChainedAssignmentWithType] +# flags: --no-strict-optional x = y = None # type: int if int(): x = '' # E: Incompatible types in assignment (expression has type "str", variable has type "int") @@ -1982,7 +2017,8 @@ if int(): [case testAssignListToStarExpr] from typing import List -bs, cs = None, None # type: List[A], List[B] +bs: List[A] +cs: List[B] if int(): *bs, b = bs if int(): @@ -2212,3 +2248,17 @@ main:1: error: Module "typing" has no attribute "_FutureFeatureFixture" main:1: note: Use `from typing_extensions import _FutureFeatureFixture` instead main:1: note: See https://mypy.readthedocs.io/en/stable/runtime_troubles.html#using-new-additions-to-the-typing-module [builtins fixtures/tuple.pyi] + +[case testNoCrashOnBreakOutsideLoopFunction] +def foo(): + for x in [1, 2]: + def inner(): + break # E: "break" outside loop +[builtins fixtures/list.pyi] + +[case testNoCrashOnBreakOutsideLoopClass] +class Outer: + for x in [1, 2]: + class Inner: + break # E: "break" outside loop +[builtins fixtures/list.pyi] diff --git a/test-data/unit/check-super.test b/test-data/unit/check-super.test index b3379e505be7..48a0a0250ecf 100644 --- a/test-data/unit/check-super.test +++ b/test-data/unit/check-super.test @@ -11,7 +11,8 @@ class B: def f(self) -> 'B': pass class A(B): def f(self) -> 'A': - a, b = None, None # type: (A, B) + a: A + b: B if int(): a = super().f() # E: Incompatible types in assignment (expression has type "B", variable has type "A") a = super().g() # E: "g" undefined in superclass @@ -26,7 +27,8 @@ class B: def f(self, y: 'A') -> None: pass class A(B): def f(self, y: Any) -> None: - a, b = None, None # type: (A, B) + a: A + b: B super().f(b) # E: Argument 1 to "f" of "B" has incompatible type "B"; expected "A" super().f(a) self.f(b) @@ -35,6 +37,7 @@ class A(B): [out] [case testAccessingSuperInit] +# flags: --no-strict-optional import typing class B: def __init__(self, x: A) -> None: pass @@ -90,7 +93,7 @@ class B(A): def __new__(cls, x: int, y: str = '') -> 'B': super().__new__(cls, 1) super().__new__(cls, 1, '') # E: Too many arguments for "__new__" of "A" - return None + return cls(1) B('') # E: Argument 1 to "B" has incompatible type "str"; expected "int" B(1) B(1, 'x') diff --git a/test-data/unit/check-tuples.test b/test-data/unit/check-tuples.test index 
e843532a2560..cff261774663 100644 --- a/test-data/unit/check-tuples.test +++ b/test-data/unit/check-tuples.test @@ -4,11 +4,11 @@ [case testTupleAssignmentWithTupleTypes] from typing import Tuple -t1 = None # type: Tuple[A] -t2 = None # type: Tuple[B] -t3 = None # type: Tuple[A, A] -t4 = None # type: Tuple[A, B] -t5 = None # type: Tuple[B, A] +t1: Tuple[A] +t2: Tuple[B] +t3: Tuple[A, A] +t4: Tuple[A, B] +t5: Tuple[B, A] if int(): t1 = t2 # E: Incompatible types in assignment (expression has type "Tuple[B]", variable has type "Tuple[A]") @@ -39,9 +39,9 @@ class B: pass [case testTupleSubtyping] from typing import Tuple -t1 = None # type: Tuple[A, A] -t2 = None # type: Tuple[A, B] -t3 = None # type: Tuple[B, A] +t1: Tuple[A, A] +t2: Tuple[A, B] +t3: Tuple[B, A] if int(): t2 = t1 # E: Incompatible types in assignment (expression has type "Tuple[A, A]", variable has type "Tuple[A, B]") @@ -57,6 +57,7 @@ class B(A): pass [builtins fixtures/tuple.pyi] [case testTupleCompatibilityWithOtherTypes] +# flags: --no-strict-optional from typing import Tuple a, o = None, None # type: (A, object) t = None # type: Tuple[A, A] @@ -80,8 +81,8 @@ class A: pass [case testNestedTupleTypes] from typing import Tuple -t1 = None # type: Tuple[A, Tuple[A, A]] -t2 = None # type: Tuple[B, Tuple[B, B]] +t1: Tuple[A, Tuple[A, A]] +t2: Tuple[B, Tuple[B, B]] if int(): t2 = t1 # E: Incompatible types in assignment (expression has type "Tuple[A, Tuple[A, A]]", variable has type "Tuple[B, Tuple[B, B]]") @@ -94,8 +95,8 @@ class B(A): pass [case testNestedTupleTypes2] from typing import Tuple -t1 = None # type: Tuple[A, Tuple[A, A]] -t2 = None # type: Tuple[B, Tuple[B, B]] +t1: Tuple[A, Tuple[A, A]] +t2: Tuple[B, Tuple[B, B]] if int(): t2 = t1 # E: Incompatible types in assignment (expression has type "Tuple[A, Tuple[A, A]]", variable has type "Tuple[B, Tuple[B, B]]") @@ -108,8 +109,8 @@ class B(A): pass [case testSubtypingWithNamedTupleType] from typing import Tuple -t1 = None # type: Tuple[A, A] -t2 = None # type: tuple +t1: Tuple[A, A] +t2: tuple if int(): t1 = t2 # E: Incompatible types in assignment (expression has type "Tuple[Any, ...]", variable has type "Tuple[A, A]") @@ -120,6 +121,7 @@ class A: pass [builtins fixtures/tuple.pyi] [case testTupleInitializationWithNone] +# flags: --no-strict-optional from typing import Tuple t = None # type: Tuple[A, A] t = None @@ -132,6 +134,7 @@ class A: pass [case testTupleExpressions] +# flags: --no-strict-optional from typing import Tuple t1 = None # type: tuple t2 = None # type: Tuple[A] @@ -140,7 +143,7 @@ t3 = None # type: Tuple[A, B] a, b, c = None, None, None # type: (A, B, C) if int(): - t2 = () # E: Incompatible types in assignment (expression has type "Tuple[]", variable has type "Tuple[A]") + t2 = () # E: Incompatible types in assignment (expression has type "Tuple[()]", variable has type "Tuple[A]") if int(): t2 = (a, a) # E: Incompatible types in assignment (expression has type "Tuple[A, A]", variable has type "Tuple[A]") if int(): @@ -177,12 +180,13 @@ def f() -> None: pass [case testIndexingTuples] from typing import Tuple -t1 = None # type: Tuple[A, B] -t2 = None # type: Tuple[A] -t3 = None # type: Tuple[A, B, C, D, E] -a, b = None, None # type: (A, B) -x = None # type: Tuple[A, B, C] -y = None # type: Tuple[A, C, E] +t1: Tuple[A, B] +t2: Tuple[A] +t3: Tuple[A, B, C, D, E] +a: A +b: B +x: Tuple[A, B, C] +y: Tuple[A, C, E] n = 0 if int(): @@ -221,9 +225,10 @@ class E: pass [case testIndexingTuplesWithNegativeIntegers] from typing import Tuple -t1 = None # type: 
Tuple[A, B] -t2 = None # type: Tuple[A] -a, b = None, None # type: A, B +t1: Tuple[A, B] +t2: Tuple[A] +a: A +b: B if int(): a = t1[-1] # E: Incompatible types in assignment (expression has type "B", variable has type "A") @@ -251,7 +256,7 @@ from typing import Tuple class A: pass class B: pass -t = None # type: Tuple[A, B] +t: Tuple[A, B] n = 0 t[0] = A() # E: Unsupported target for indexed assignment ("Tuple[A, B]") @@ -265,6 +270,7 @@ t[n] = A() # E: Unsupported target for indexed assignment ("Tuple[A, B]") [case testMultipleAssignmentWithTuples] +# flags: --no-strict-optional from typing import Tuple t1 = None # type: Tuple[A, B] t2 = None # type: Tuple[A, B, A] @@ -291,6 +297,7 @@ class B: pass [builtins fixtures/tuple.pyi] [case testMultipleAssignmentWithSquareBracketTuples] +# flags: --no-strict-optional from typing import Tuple def avoid_confusing_test_parser() -> None: @@ -322,8 +329,8 @@ class B: pass [case testMultipleAssignmentWithInvalidNumberOfValues] from typing import Tuple -t1 = None # type: Tuple[A, A, A] -a = None # type: A +t1: Tuple[A, A, A] +a: A a, a = t1 # E: Too many values to unpack (2 expected, 3 provided) a, a, a, a = t1 # E: Need more than 3 values to unpack (4 expected) @@ -334,8 +341,8 @@ class A: pass [builtins fixtures/tuple.pyi] [case testMultipleAssignmentWithTupleExpressionRvalue] - -a, b = None, None # type: (A, B) +a: A +b: B if int(): a, b = a, a # E: Incompatible types in assignment (expression has type "A", variable has type "B") @@ -354,7 +361,8 @@ class B: pass [builtins fixtures/tuple.pyi] [case testSubtypingInMultipleAssignment] -a, b = None, None # type: (A, B) +a: A +b: B if int(): b, b = a, b # E: Incompatible types in assignment (expression has type "A", variable has type "B") @@ -371,7 +379,7 @@ class B(A): pass [builtins fixtures/tuple.pyi] [case testInitializationWithMultipleValues] - +# flags: --no-strict-optional a, b = None, None # type: (A, B) a1, b1 = a, a # type: (A, B) # E: Incompatible types in assignment (expression has type "A", variable has type "B") @@ -387,8 +395,8 @@ class B: pass [builtins fixtures/tuple.pyi] [case testMultipleAssignmentWithNonTupleRvalue] - -a, b = None, None # type: (A, B) +a: A +b: B def f(): pass a, b = None # E: "None" object is not iterable @@ -400,9 +408,10 @@ class B: pass [builtins fixtures/tuple.pyi] [case testMultipleAssignmentWithIndexedLvalues] - -a, b = None, None # type: (A, B) -aa, bb = None, None # type: (AA, BB) +a: A +b: B +aa: AA +bb: BB a[a], b[b] = a, bb # E: Incompatible types in assignment (expression has type "A", target has type "AA") a[a], b[b] = aa, b # E: Incompatible types in assignment (expression has type "B", target has type "BB") @@ -420,6 +429,7 @@ class BB: pass [builtins fixtures/tuple.pyi] [case testMultipleDeclarationWithParentheses] +# flags: --no-strict-optional (a, b) = (None, None) # type: int, str if int(): a = '' # E: Incompatible types in assignment (expression has type "str", variable has type "int") @@ -430,8 +440,8 @@ if int(): [builtins fixtures/tuple.pyi] [case testMultipleAssignmentWithExtraParentheses] - -a, b = None, None # type: (A, B) +a: A +b: B if int(): (a, b) = (a, a) # E: Incompatible types in assignment (expression has type "A", variable has type "B") @@ -458,6 +468,7 @@ class B: pass [builtins fixtures/tuple.pyi] [case testMultipleAssignmentUsingSingleTupleType] +# flags: --no-strict-optional from typing import Tuple a, b = None, None # type: Tuple[int, str] if int(): @@ -490,6 +501,7 @@ aa, bb, *cc = t # E: Need type annotation for "cc" (hint: 
"cc: List[] = . [builtins fixtures/list.pyi] [case testAssignmentToStarAnnotation] +# flags: --no-strict-optional from typing import List li, lo = None, None # type: List[int], List[object] a, b, *c = 1, 2 # type: int, int, List[int] @@ -501,7 +513,7 @@ if int(): [case testAssignmentToStarCount1] from typing import List -ca = None # type: List[int] +ca: List[int] c = [1] if int(): a, b, *c = 1, # E: Need more than 1 value to unpack (2 expected) @@ -515,7 +527,7 @@ if int(): [case testAssignmentToStarCount2] from typing import List -ca = None # type: List[int] +ca: List[int] t1 = 1, t2 = 1, 2 t3 = 1, 2, 3 @@ -541,7 +553,7 @@ c = a c = q [case testAssignmentToComplexStar] from typing import List -li = None # type: List[int] +li: List[int] if int(): a, *(li) = 1, a, *(b, c) = 1, 2 # E: Need more than 1 value to unpack (2 expected) @@ -553,9 +565,9 @@ if int(): [case testAssignmentToStarFromTupleType] from typing import List, Tuple -li = None # type: List[int] -la = None # type: List[A] -ta = None # type: Tuple[A, A, A] +li: List[int] +la: List[A] +ta: Tuple[A, A, A] if int(): a, *la = ta if int(): @@ -573,8 +585,8 @@ class A: pass [case testAssignmentToStarFromTupleInference] from typing import List class A: pass -li = None # type: List[int] -la = None # type: List[A] +li: List[int] +la: List[A] a, *l = A(), A() if int(): l = li # E: Incompatible types in assignment (expression has type "List[int]", variable has type "List[A]") @@ -588,8 +600,8 @@ from typing import List class A: pass -li = None # type: List[int] -la = None # type: List[A] +li: List[int] +la: List[A] a, *l = [A(), A()] if int(): l = li # E: Incompatible types in assignment (expression has type "List[int]", variable has type "List[A]") @@ -600,9 +612,9 @@ if int(): [case testAssignmentToStarFromTupleTypeInference] from typing import List, Tuple -li = None # type: List[int] -la = None # type: List[A] -ta = None # type: Tuple[A, A, A] +li: List[int] +la: List[A] +ta: Tuple[A, A, A] a, *l = ta if int(): l = li # E: Incompatible types in assignment (expression has type "List[int]", variable has type "List[A]") @@ -615,8 +627,8 @@ class A: pass [case testAssignmentToStarFromListTypeInference] from typing import List -li = None # type: List[int] -la = None # type: List[A] +li: List[int] +la: List[A] a, *l = la if int(): l = li # E: Incompatible types in assignment (expression has type "List[int]", variable has type "List[A]") @@ -656,9 +668,12 @@ reveal_type(e2) # N: Revealed type is "builtins.list[builtins.int]" [case testNestedTupleAssignment1] - -a1, b1, c1 = None, None, None # type: (A, B, C) -a2, b2, c2 = None, None, None # type: (A, B, C) +a1: A +a2: A +b1: B +b2: B +c1: C +c2: C if int(): a1, (b1, c1) = a2, (b2, c2) @@ -673,9 +688,12 @@ class C: pass [builtins fixtures/tuple.pyi] [case testNestedTupleAssignment2] - -a1, b1, c1 = None, None, None # type: (A, B, C) -a2, b2, c2 = None, None, None # type: (A, B, C) +a1: A +a2: A +b1: B +b2: B +c1: C +c2: C t = a1, b1 if int(): @@ -714,7 +732,7 @@ class A: def __add__(self, x: 'A') -> 'A': pass def f(x: 'A') -> None: pass -a = None # type: A +a: A (a, a) + a # E: Unsupported operand types for + ("Tuple[A, A]" and "A") a + (a, a) # E: Unsupported operand types for + ("A" and "Tuple[A, A]") @@ -724,7 +742,7 @@ f((a, a)) # E: Argument 1 to "f" has incompatible type "Tuple[A, A]"; expected [case testLargeTuplesInErrorMessages] -a = None # type: LongTypeName +a: LongTypeName a + (a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, 
a, a, a, a, a, a, a, a, a, a, a, a, a, a) # Fail class LongTypeName: @@ -740,7 +758,7 @@ main:3: error: Unsupported operand types for + ("LongTypeName" and "Tuple[LongTy [case testTupleMethods] from typing import Tuple -t = None # type: Tuple[int, str] +t: Tuple[int, str] i = 0 s = '' b = bool() @@ -869,7 +887,7 @@ class A(tuple): pass import m [file m.pyi] from typing import Tuple -a = None # type: A +a: A class A(Tuple[int, str]): pass x, y = a x() # E: "int" not callable @@ -932,14 +950,14 @@ fb(aa) # E: Argument 1 to "fb" has incompatible type "Tuple[A, A]"; expected "Tu [case testSubtypingTupleIsContainer] from typing import Container -a = None # type: Container[str] +a: Container[str] a = () [typing fixtures/typing-full.pyi] [builtins fixtures/tuple.pyi] [case testSubtypingTupleIsSized] from typing import Sized -a = None # type: Sized +a: Sized a = () [typing fixtures/typing-medium.pyi] [builtins fixtures/tuple.pyi] @@ -1046,8 +1064,8 @@ class B1(A): pass class B2(A): pass class C: pass -x = None # type: Tuple[A, ...] -y = None # type: Tuple[Union[B1, C], Union[B2, C]] +x: Tuple[A, ...] +y: Tuple[Union[B1, C], Union[B2, C]] def g(x: T) -> Tuple[T, T]: return (x, x) @@ -1063,13 +1081,13 @@ from typing import Tuple class A: pass class B(A): pass -fixtup = None # type: Tuple[B, B] +fixtup: Tuple[B, B] -vartup_b = None # type: Tuple[B, ...] +vartup_b: Tuple[B, ...] reveal_type(fixtup if int() else vartup_b) # N: Revealed type is "builtins.tuple[__main__.B, ...]" reveal_type(vartup_b if int() else fixtup) # N: Revealed type is "builtins.tuple[__main__.B, ...]" -vartup_a = None # type: Tuple[A, ...] +vartup_a: Tuple[A, ...] reveal_type(fixtup if int() else vartup_a) # N: Revealed type is "builtins.tuple[__main__.A, ...]" reveal_type(vartup_a if int() else fixtup) # N: Revealed type is "builtins.tuple[__main__.A, ...]" @@ -1083,13 +1101,13 @@ from typing import Tuple, List class A: pass class B(A): pass -fixtup = None # type: Tuple[B, B] +fixtup: Tuple[B, B] -lst_b = None # type: List[B] +lst_b: List[B] reveal_type(fixtup if int() else lst_b) # N: Revealed type is "typing.Sequence[__main__.B]" reveal_type(lst_b if int() else fixtup) # N: Revealed type is "typing.Sequence[__main__.B]" -lst_a = None # type: List[A] +lst_a: List[A] reveal_type(fixtup if int() else lst_a) # N: Revealed type is "typing.Sequence[__main__.A]" reveal_type(lst_a if int() else fixtup) # N: Revealed type is "typing.Sequence[__main__.A]" @@ -1103,15 +1121,15 @@ class A: pass empty = () -fixtup = None # type: Tuple[A] +fixtup: Tuple[A] reveal_type(fixtup if int() else empty) # N: Revealed type is "builtins.tuple[__main__.A, ...]" reveal_type(empty if int() else fixtup) # N: Revealed type is "builtins.tuple[__main__.A, ...]" -vartup = None # type: Tuple[A, ...] +vartup: Tuple[A, ...] reveal_type(empty if int() else vartup) # N: Revealed type is "builtins.tuple[__main__.A, ...]" reveal_type(vartup if int() else empty) # N: Revealed type is "builtins.tuple[__main__.A, ...]" -lst = None # type: List[A] +lst: List[A] reveal_type(empty if int() else lst) # N: Revealed type is "typing.Sequence[__main__.A]" reveal_type(lst if int() else empty) # N: Revealed type is "typing.Sequence[__main__.A]" @@ -1128,9 +1146,9 @@ class NTup(NamedTuple): class SubTuple(Tuple[bool]): ... class SubVarTuple(Tuple[int, ...]): ... 
-ntup = None # type: NTup -subtup = None # type: SubTuple -vartup = None # type: SubVarTuple +ntup: NTup +subtup: SubTuple +vartup: SubVarTuple reveal_type(ntup if int() else vartup) # N: Revealed type is "builtins.tuple[builtins.int, ...]" reveal_type(subtup if int() else vartup) # N: Revealed type is "builtins.tuple[builtins.int, ...]" @@ -1141,8 +1159,8 @@ reveal_type(subtup if int() else vartup) # N: Revealed type is "builtins.tuple[ [case testTupleJoinIrregular] from typing import Tuple -tup1 = None # type: Tuple[bool, int] -tup2 = None # type: Tuple[bool] +tup1: Tuple[bool, int] +tup2: Tuple[bool] reveal_type(tup1 if int() else tup2) # N: Revealed type is "builtins.tuple[builtins.int, ...]" reveal_type(tup2 if int() else tup1) # N: Revealed type is "builtins.tuple[builtins.int, ...]" @@ -1168,9 +1186,9 @@ class NTup2(NamedTuple): class SubTuple(Tuple[bool, int, int]): ... -tup1 = None # type: NTup1 -tup2 = None # type: NTup2 -subtup = None # type: SubTuple +tup1: NTup1 +tup2: NTup2 +subtup: SubTuple reveal_type(tup1 if int() else tup2) # N: Revealed type is "builtins.tuple[builtins.bool, ...]" reveal_type(tup2 if int() else tup1) # N: Revealed type is "builtins.tuple[builtins.bool, ...]" @@ -1226,9 +1244,9 @@ f(0) # E: Argument 1 to "f" has incompatible type "int"; expected "Tuple[Any, . from typing import Tuple def f(a: Tuple[()]) -> None: pass f(()) -f((1,)) # E: Argument 1 to "f" has incompatible type "Tuple[int]"; expected "Tuple[]" -f(('', '')) # E: Argument 1 to "f" has incompatible type "Tuple[str, str]"; expected "Tuple[]" -f(0) # E: Argument 1 to "f" has incompatible type "int"; expected "Tuple[]" +f((1,)) # E: Argument 1 to "f" has incompatible type "Tuple[int]"; expected "Tuple[()]" +f(('', '')) # E: Argument 1 to "f" has incompatible type "Tuple[str, str]"; expected "Tuple[()]" +f(0) # E: Argument 1 to "f" has incompatible type "int"; expected "Tuple[()]" [builtins fixtures/tuple.pyi] [case testNonliteralTupleIndex] @@ -1367,42 +1385,36 @@ reveal_type(a + b) # N: Revealed type is "Tuple[builtins.int, builtins.str, bui from typing import Tuple # long initializer assignment with few mismatches -t: Tuple[int, ...] = (1, 2, 3, 4, 5, 6, 7, 8, "str", "str", "str", 11) \ - # E: Incompatible types in assignment (3 tuple items are incompatible) \ - # N: Expression tuple item 8 has type "str"; "int" expected; \ - # N: Expression tuple item 9 has type "str"; "int" expected; \ - # N: Expression tuple item 10 has type "str"; "int" expected; +t: Tuple[int, ...] = (1, 2, 3, 4, 5, 6, 7, 8, "str", "str", "str", 11) # E: Incompatible types in assignment (3 tuple items are incompatible) \ + # N: Expression tuple item 8 has type "str"; "int" expected; \ + # N: Expression tuple item 9 has type "str"; "int" expected; \ + # N: Expression tuple item 10 has type "str"; "int" expected; # long initializer assignment with more mismatches -t1: Tuple[int, ...] = (1, 2, 3, 4, 5, 6, 7, 8, "str", "str", "str", "str") \ - # E: Incompatible types in assignment (4 tuple items are incompatible; 1 items are omitted) \ - # N: Expression tuple item 8 has type "str"; "int" expected; \ - # N: Expression tuple item 9 has type "str"; "int" expected; \ - # N: Expression tuple item 10 has type "str"; "int" expected; +t1: Tuple[int, ...] 
= (1, 2, 3, 4, 5, 6, 7, 8, "str", "str", "str", "str") # E: Incompatible types in assignment (4 tuple items are incompatible; 1 items are omitted) \ + # N: Expression tuple item 8 has type "str"; "int" expected; \ + # N: Expression tuple item 9 has type "str"; "int" expected; \ + # N: Expression tuple item 10 has type "str"; "int" expected; # short tuple initializer assignment -t2: Tuple[int, ...] = (1, 2, "s", 4) \ - # E: Incompatible types in assignment (expression has type "Tuple[int, int, str, int]", variable has type "Tuple[int, ...]") +t2: Tuple[int, ...] = (1, 2, "s", 4) # E: Incompatible types in assignment (expression has type "Tuple[int, int, str, int]", variable has type "Tuple[int, ...]") # long initializer assignment with few mismatches, no ellipsis -t3: Tuple[int, int, int, int, int, int, int, int, int, int, int, int] = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, "str", "str") \ - # E: Incompatible types in assignment (2 tuple items are incompatible) \ - # N: Expression tuple item 10 has type "str"; "int" expected; \ - # N: Expression tuple item 11 has type "str"; "int" expected; +t3: Tuple[int, int, int, int, int, int, int, int, int, int, int, int] = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, "str", "str") # E: Incompatible types in assignment (2 tuple items are incompatible) \ + # N: Expression tuple item 10 has type "str"; "int" expected; \ + # N: Expression tuple item 11 has type "str"; "int" expected; # long initializer assignment with more mismatches, no ellipsis -t4: Tuple[int, int, int, int, int, int, int, int, int, int, int, int] = (1, 2, 3, 4, 5, 6, 7, 8, "str", "str", "str", "str") \ - # E: Incompatible types in assignment (4 tuple items are incompatible; 1 items are omitted) \ - # N: Expression tuple item 8 has type "str"; "int" expected; \ - # N: Expression tuple item 9 has type "str"; "int" expected; \ - # N: Expression tuple item 10 has type "str"; "int" expected; +t4: Tuple[int, int, int, int, int, int, int, int, int, int, int, int] = (1, 2, 3, 4, 5, 6, 7, 8, "str", "str", "str", "str") # E: Incompatible types in assignment (4 tuple items are incompatible; 1 items are omitted) \ + # N: Expression tuple item 8 has type "str"; "int" expected; \ + # N: Expression tuple item 9 has type "str"; "int" expected; \ + # N: Expression tuple item 10 has type "str"; "int" expected; # short tuple initializer assignment, no ellipsis t5: Tuple[int, int] = (1, 2, "s", 4) # E: Incompatible types in assignment (expression has type "Tuple[int, int, str, int]", variable has type "Tuple[int, int]") # long initializer assignment with mismatched pairs -t6: Tuple[int, int, int, int, int, int, int, int, int, int, int, int] = (1, 2, 3, 4, 5, 6, 7, 8, "str", "str", "str", "str", 1, 1, 1, 1, 1) \ - # E: Incompatible types in assignment (expression has type Tuple[int, int, ... <15 more items>], variable has type Tuple[int, int, ... <10 more items>]) +t6: Tuple[int, int, int, int, int, int, int, int, int, int, int, int] = (1, 2, 3, 4, 5, 6, 7, 8, "str", "str", "str", "str", 1, 1, 1, 1, 1) # E: Incompatible types in assignment (expression has type Tuple[int, int, ... <15 more items>], variable has type Tuple[int, int, ... 
<10 more items>]) [builtins fixtures/tuple.pyi] @@ -1443,8 +1455,7 @@ x7, x8, y7, y8 = *points2, *points3 # E: Contiguous iterable with same type expe x9, y9, x10, y10, z5 = *points2, 1, *points2 # E: Contiguous iterable with same type expected [builtins fixtures/tuple.pyi] -[case testAssignEmptyPy36] -# flags: --python-version 3.6 +[case testAssignEmpty] () = [] [case testAssignEmptyBogus] @@ -1456,7 +1467,7 @@ from typing import Tuple t = ('',) * 2 reveal_type(t) # N: Revealed type is "Tuple[builtins.str, builtins.str]" t2 = ('',) * -1 -reveal_type(t2) # N: Revealed type is "Tuple[]" +reveal_type(t2) # N: Revealed type is "Tuple[()]" t3 = ('', 1) * 2 reveal_type(t3) # N: Revealed type is "Tuple[builtins.str, builtins.int, builtins.str, builtins.int]" def f() -> Tuple[str, ...]: @@ -1464,12 +1475,21 @@ def f() -> Tuple[str, ...]: reveal_type(f() * 2) # N: Revealed type is "builtins.tuple[builtins.str, ...]" [builtins fixtures/tuple.pyi] +[case testEmptyTupleTypeRepr] +from typing import Tuple + +def f() -> Tuple[()]: ... + +reveal_type(f) # N: Revealed type is "def () -> Tuple[()]" +reveal_type(f()) # N: Revealed type is "Tuple[()]" +[builtins fixtures/tuple.pyi] + [case testMultiplyTupleByIntegerLiteralReverse] from typing import Tuple t = 2 * ('',) reveal_type(t) # N: Revealed type is "Tuple[builtins.str, builtins.str]" t2 = -1 * ('',) -reveal_type(t2) # N: Revealed type is "Tuple[]" +reveal_type(t2) # N: Revealed type is "Tuple[()]" t3 = 2 * ('', 1) reveal_type(t3) # N: Revealed type is "Tuple[builtins.str, builtins.int, builtins.str, builtins.int]" def f() -> Tuple[str, ...]: diff --git a/test-data/unit/check-type-aliases.test b/test-data/unit/check-type-aliases.test index 05a03ecaf7b0..3ca0c5ef0a4b 100644 --- a/test-data/unit/check-type-aliases.test +++ b/test-data/unit/check-type-aliases.test @@ -12,7 +12,7 @@ U = Union[int, str] def f(x: U) -> None: pass f(1) f('') -f(()) # E: Argument 1 to "f" has incompatible type "Tuple[]"; expected "Union[int, str]" +f(()) # E: Argument 1 to "f" has incompatible type "Tuple[()]"; expected "Union[int, str]" [targets __main__, __main__.f] [builtins fixtures/tuple.pyi] @@ -28,7 +28,7 @@ f(1) # E: Argument 1 to "f" has incompatible type "int"; expected "Tuple[int, st [case testCallableTypeAlias] from typing import Callable A = Callable[[int], None] -f = None # type: A +f: A f(1) f('') # E: Argument 1 has incompatible type "str"; expected "int" [targets __main__] @@ -64,7 +64,7 @@ from _m import U def f(x: U) -> None: pass f(1) f('x') -f(()) # E: Argument 1 to "f" has incompatible type "Tuple[]"; expected "Union[int, str]" +f(()) # E: Argument 1 to "f" has incompatible type "Tuple[()]"; expected "Union[int, str]" [file _m.py] from typing import Union U = Union[int, str] @@ -169,12 +169,12 @@ f(1) # E: Argument 1 to "f" has incompatible type "int"; expected "str" [case testEmptyTupleTypeAlias] from typing import Tuple, Callable EmptyTuple = Tuple[()] -x = None # type: EmptyTuple -reveal_type(x) # N: Revealed type is "Tuple[]" +x: EmptyTuple +reveal_type(x) # N: Revealed type is "Tuple[()]" EmptyTupleCallable = Callable[[Tuple[()]], None] -f = None # type: EmptyTupleCallable -reveal_type(f) # N: Revealed type is "def (Tuple[])" +f: EmptyTupleCallable +reveal_type(f) # N: Revealed type is "def (Tuple[()])" [builtins fixtures/list.pyi] [case testForwardTypeAlias] @@ -305,7 +305,6 @@ reveal_type(y) # N: Revealed type is "Union[builtins.int, None]" [builtins fixtures/bool.pyi] [case testNoneAliasStrict] -# flags: --strict-optional from typing import 
Optional, Union void = type(None) x: int diff --git a/test-data/unit/check-type-checks.test b/test-data/unit/check-type-checks.test index 106f2d680ba4..03c8de4177f3 100644 --- a/test-data/unit/check-type-checks.test +++ b/test-data/unit/check-type-checks.test @@ -2,9 +2,9 @@ [case testSimpleIsinstance] -x = None # type: object -n = None # type: int -s = None # type: str +x: object +n: int +s: str if int(): n = x # E: Incompatible types in assignment (expression has type "object", variable has type "int") if isinstance(x, int): diff --git a/test-data/unit/check-typeddict.test b/test-data/unit/check-typeddict.test index 970dc05b488d..7de8e6416f35 100644 --- a/test-data/unit/check-typeddict.test +++ b/test-data/unit/check-typeddict.test @@ -87,7 +87,6 @@ D = TypedDict('D', { -- Define TypedDict (Class syntax) [case testCanCreateTypedDictWithClass] -# flags: --python-version 3.6 from mypy_extensions import TypedDict class Point(TypedDict): @@ -99,7 +98,6 @@ reveal_type(p) # N: Revealed type is "TypedDict('__main__.Point', {'x': builtin [builtins fixtures/dict.pyi] [case testCanCreateTypedDictWithSubclass] -# flags: --python-version 3.6 from mypy_extensions import TypedDict class Point1D(TypedDict): @@ -113,7 +111,6 @@ reveal_type(p) # N: Revealed type is "TypedDict('__main__.Point2D', {'x': built [builtins fixtures/dict.pyi] [case testCanCreateTypedDictWithSubclass2] -# flags: --python-version 3.6 from mypy_extensions import TypedDict class Point1D(TypedDict): @@ -126,7 +123,6 @@ reveal_type(p) # N: Revealed type is "TypedDict('__main__.Point2D', {'x': built [builtins fixtures/dict.pyi] [case testCanCreateTypedDictClassEmpty] -# flags: --python-version 3.6 from mypy_extensions import TypedDict class EmptyDict(TypedDict): @@ -138,10 +134,7 @@ reveal_type(p) # N: Revealed type is "TypedDict('__main__.EmptyDict', {})" [case testCanCreateTypedDictWithClassOldVersion] -# flags: --python-version 3.5 - -# Test that we can use class-syntax to merge TypedDicts even in -# versions without type annotations +# Test that we can use class-syntax to merge function-based TypedDicts from mypy_extensions import TypedDict @@ -165,7 +158,6 @@ foo({'name': 'lol', 'year': 2009, 'based_on': 0}) # E: Incompatible types (expr -- Define TypedDict (Class syntax errors) [case testCannotCreateTypedDictWithClassOtherBases] -# flags: --python-version 3.6 from mypy_extensions import TypedDict class A: pass @@ -195,7 +187,6 @@ class C(TypedDict, TypedDict): # E: Duplicate base class "TypedDict" [typing fixtures/typing-typeddict.pyi] [case testCannotCreateTypedDictWithClassWithOtherStuff] -# flags: --python-version 3.6 from mypy_extensions import TypedDict class Point(TypedDict): @@ -251,7 +242,6 @@ Point = TypedDict('Point', {'x': int, 'y': int, '_fallback': object}) [builtins fixtures/dict.pyi] [case testCanCreateTypedDictWithClassUnderscores] -# flags: --python-version 3.6 from mypy_extensions import TypedDict class Point(TypedDict): @@ -263,7 +253,6 @@ reveal_type(p) # N: Revealed type is "TypedDict('__main__.Point', {'x': builtins [builtins fixtures/dict.pyi] [case testCannotCreateTypedDictWithDuplicateKey1] -# flags: --python-version 3.6 from mypy_extensions import TypedDict class Bad(TypedDict): @@ -291,7 +280,6 @@ reveal_type(d2) # N: Revealed type is "TypedDict('__main__.D2', {'x': builtins.s [typing fixtures/typing-typeddict.pyi] [case testCanCreateTypedDictWithClassOverwriting] -# flags: --python-version 3.6 from mypy_extensions import TypedDict class Point1(TypedDict): @@ -306,7 +294,6 @@ reveal_type(b) # N: 
Revealed type is "TypedDict('__main__.Bad', {'x': builtins.i [builtins fixtures/dict.pyi] [case testCanCreateTypedDictWithClassOverwriting2] -# flags: --python-version 3.6 from mypy_extensions import TypedDict class Point1(TypedDict): @@ -608,7 +595,6 @@ reveal_type(f(g)) # N: Revealed type is "TypedDict({'x': builtins.int, 'y': bui [builtins fixtures/dict.pyi] [case testMeetOfTypedDictsWithIncompatibleCommonKeysIsUninhabited] -# flags: --strict-optional from mypy_extensions import TypedDict from typing import TypeVar, Callable XYa = TypedDict('XYa', {'x': int, 'y': int}) @@ -632,7 +618,6 @@ reveal_type(f(g)) # N: Revealed type is "TypedDict({'x': builtins.int, 'z': bui # TODO: It would be more accurate for the meet to be TypedDict instead. [case testMeetOfTypedDictWithCompatibleMappingIsUninhabitedForNow] -# flags: --strict-optional from mypy_extensions import TypedDict from typing import TypeVar, Callable, Mapping X = TypedDict('X', {'x': int}) @@ -644,7 +629,6 @@ reveal_type(f(g)) # N: Revealed type is "" [builtins fixtures/dict.pyi] [case testMeetOfTypedDictWithIncompatibleMappingIsUninhabited] -# flags: --strict-optional from mypy_extensions import TypedDict from typing import TypeVar, Callable, Mapping X = TypedDict('X', {'x': int}) @@ -656,7 +640,6 @@ reveal_type(f(g)) # N: Revealed type is "" [builtins fixtures/dict.pyi] [case testMeetOfTypedDictWithCompatibleMappingSuperclassIsUninhabitedForNow] -# flags: --strict-optional from mypy_extensions import TypedDict from typing import TypeVar, Callable, Iterable X = TypedDict('X', {'x': int}) @@ -690,7 +673,6 @@ reveal_type(f(g)) # N: Revealed type is "TypedDict({'x'?: builtins.int, 'y': bu [builtins fixtures/dict.pyi] [case testMeetOfTypedDictsWithIncompatibleNonTotalAndTotal] -# flags: --strict-optional from mypy_extensions import TypedDict from typing import TypeVar, Callable XY = TypedDict('XY', {'x': int, 'y': int}, total=False) @@ -985,7 +967,6 @@ if int(): -- Other TypedDict methods [case testTypedDictGetMethod] -# flags: --strict-optional from mypy_extensions import TypedDict class A: pass D = TypedDict('D', {'x': int, 'y': str}) @@ -999,7 +980,6 @@ reveal_type(d.get('y', None)) # N: Revealed type is "Union[builtins.str, None]" [typing fixtures/typing-typeddict.pyi] [case testTypedDictGetMethodTypeContext] -# flags: --strict-optional from typing import List from mypy_extensions import TypedDict class A: pass @@ -1057,7 +1037,6 @@ p.get('x', 1 + 'y') # E: Unsupported operand types for + ("int" and "str") [typing fixtures/typing-typeddict.pyi] [case testTypedDictChainedGetWithEmptyDictDefault] -# flags: --strict-optional from mypy_extensions import TypedDict C = TypedDict('C', {'a': int}) D = TypedDict('D', {'x': C, 'y': str}) @@ -1121,8 +1100,8 @@ D = TypedDict('D', {'x': int, 'y': str}, total=False) d: D reveal_type(d['x']) # N: Revealed type is "builtins.int" reveal_type(d['y']) # N: Revealed type is "builtins.str" -reveal_type(d.get('x')) # N: Revealed type is "builtins.int" -reveal_type(d.get('y')) # N: Revealed type is "builtins.str" +reveal_type(d.get('x')) # N: Revealed type is "Union[builtins.int, None]" +reveal_type(d.get('y')) # N: Revealed type is "Union[builtins.str, None]" [builtins fixtures/dict.pyi] [typing fixtures/typing-typeddict.pyi] @@ -1642,7 +1621,7 @@ def f1(x: int, y: str, z: bytes) -> None: ... def f2(x: int, y: str) -> None: ... 
td: TD -d = None # type: Dict[Any, Any] +d: Dict[Any, Any] f1(**td, **d) f1(**d, **td) @@ -1740,8 +1719,8 @@ class TDB(TypedDict): td: Union[TDA, TDB] -reveal_type(td.get('a')) # N: Revealed type is "builtins.int" -reveal_type(td.get('b')) # N: Revealed type is "Union[builtins.str, builtins.int]" +reveal_type(td.get('a')) # N: Revealed type is "Union[builtins.int, None]" +reveal_type(td.get('b')) # N: Revealed type is "Union[builtins.str, None, builtins.int]" reveal_type(td.get('c')) # N: Revealed type is "builtins.object" reveal_type(td['a']) # N: Revealed type is "builtins.int" @@ -1774,7 +1753,6 @@ reveal_type(td.pop('c')) # E: TypedDict "TDA" has no key "c" \ [typing fixtures/typing-typeddict.pyi] [case testCanCreateTypedDictWithTypingExtensions] -# flags: --python-version 3.6 from typing_extensions import TypedDict class Point(TypedDict): @@ -1805,7 +1783,7 @@ from mypy_extensions import TypedDict class A(TypedDict): x: int -d: Union[A, None] +d: A d.update({'x': 1}) [builtins fixtures/dict.pyi] @@ -2873,3 +2851,376 @@ foo({"foo": {"e": "foo"}}) # E: Type of TypedDict is ambiguous, none of ("A", " # E: Argument 1 to "foo" has incompatible type "Dict[str, Dict[str, str]]"; expected "Union[A, B]" [builtins fixtures/dict.pyi] [typing fixtures/typing-typeddict.pyi] + +[case testTypedDictMissingEmptyKey] +from typing_extensions import TypedDict + +class A(TypedDict): + my_attr_1: str + my_attr_2: int + +d: A +d[''] # E: TypedDict "A" has no key "" +[builtins fixtures/dict.pyi] +[typing fixtures/typing-typeddict.pyi] + +[case testTypedDictFlexibleUpdate] +from mypy_extensions import TypedDict + +A = TypedDict("A", {"foo": int, "bar": int}) +B = TypedDict("B", {"foo": int}) + +a = A({"foo": 1, "bar": 2}) +b = B({"foo": 2}) +a.update({"foo": 2}) +a.update(b) +a.update(a) +[builtins fixtures/dict.pyi] +[typing fixtures/typing-typeddict.pyi] + +[case testTypedDictStrictUpdate] +# flags: --extra-checks +from mypy_extensions import TypedDict + +A = TypedDict("A", {"foo": int, "bar": int}) +B = TypedDict("B", {"foo": int}) + +a = A({"foo": 1, "bar": 2}) +b = B({"foo": 2}) +a.update({"foo": 2}) # OK +a.update(b) # E: Argument 1 to "update" of "TypedDict" has incompatible type "B"; expected "TypedDict({'foo': int, 'bar'?: int})" +a.update(a) # OK +[builtins fixtures/dict.pyi] +[typing fixtures/typing-typeddict.pyi] + +[case testTypedDictFlexibleUpdateUnion] +from typing import Union +from mypy_extensions import TypedDict + +A = TypedDict("A", {"foo": int, "bar": int}) +B = TypedDict("B", {"foo": int}) +C = TypedDict("C", {"bar": int}) + +a = A({"foo": 1, "bar": 2}) +u: Union[B, C] +a.update(u) +[builtins fixtures/dict.pyi] +[typing fixtures/typing-typeddict.pyi] + +[case testTypedDictFlexibleUpdateUnionExtra] +from typing import Union +from mypy_extensions import TypedDict + +A = TypedDict("A", {"foo": int, "bar": int}) +B = TypedDict("B", {"foo": int, "extra": int}) +C = TypedDict("C", {"bar": int, "extra": int}) + +a = A({"foo": 1, "bar": 2}) +u: Union[B, C] +a.update(u) +[builtins fixtures/dict.pyi] +[typing fixtures/typing-typeddict.pyi] + +[case testTypedDictFlexibleUpdateUnionStrict] +# flags: --extra-checks +from typing import Union, NotRequired +from mypy_extensions import TypedDict + +A = TypedDict("A", {"foo": int, "bar": int}) +A1 = TypedDict("A1", {"foo": int, "bar": NotRequired[int]}) +A2 = TypedDict("A2", {"foo": NotRequired[int], "bar": int}) +B = TypedDict("B", {"foo": int}) +C = TypedDict("C", {"bar": int}) + +a = A({"foo": 1, "bar": 2}) +u: Union[B, C] +a.update(u) # E: Argument 1 
to "update" of "TypedDict" has incompatible type "Union[B, C]"; expected "Union[TypedDict({'foo': int, 'bar'?: int}), TypedDict({'foo'?: int, 'bar': int})]" +u2: Union[A1, A2] +a.update(u2) # OK +[builtins fixtures/dict.pyi] +[typing fixtures/typing-typeddict.pyi] + +[case testTypedDictUnpackSame] +# flags: --extra-checks +from typing import TypedDict + +class Foo(TypedDict): + a: int + b: int + +foo1: Foo = {"a": 1, "b": 1} +foo2: Foo = {**foo1, "b": 2} +foo3 = Foo(**foo1, b=2) +foo4 = Foo({**foo1, "b": 2}) +foo5 = Foo(dict(**foo1, b=2)) +[builtins fixtures/dict.pyi] +[typing fixtures/typing-typeddict.pyi] + +[case testTypedDictUnpackCompatible] +# flags: --extra-checks +from typing import TypedDict + +class Foo(TypedDict): + a: int + +class Bar(TypedDict): + a: int + b: int + +foo: Foo = {"a": 1} +bar: Bar = {**foo, "b": 2} +[builtins fixtures/dict.pyi] +[typing fixtures/typing-typeddict.pyi] + +[case testTypedDictUnpackIncompatible] +from typing import TypedDict + +class Foo(TypedDict): + a: int + b: str + +class Bar(TypedDict): + a: int + b: int + +foo: Foo = {"a": 1, "b": "a"} +bar1: Bar = {**foo, "b": 2} # Incompatible item is overriden +bar2: Bar = {**foo, "a": 2} # E: Incompatible types (expression has type "str", TypedDict item "b" has type "int") +[builtins fixtures/dict.pyi] +[typing fixtures/typing-typeddict.pyi] + +[case testTypedDictUnpackNotRequiredKeyIncompatible] +from typing import TypedDict, NotRequired + +class Foo(TypedDict): + a: NotRequired[str] + +class Bar(TypedDict): + a: NotRequired[int] + +foo: Foo = {} +bar: Bar = {**foo} # E: Incompatible types (expression has type "str", TypedDict item "a" has type "int") +[builtins fixtures/dict.pyi] +[typing fixtures/typing-typeddict.pyi] + + +[case testTypedDictUnpackMissingOrExtraKey] +from typing import TypedDict + +class Foo(TypedDict): + a: int + +class Bar(TypedDict): + a: int + b: int + +foo1: Foo = {"a": 1} +bar1: Bar = {"a": 1, "b": 1} +foo2: Foo = {**bar1} # E: Extra key "b" for TypedDict "Foo" +bar2: Bar = {**foo1} # E: Missing key "b" for TypedDict "Bar" +[builtins fixtures/dict.pyi] +[typing fixtures/typing-typeddict.pyi] + +[case testTypedDictUnpackNotRequiredKeyExtra] +from typing import TypedDict, NotRequired + +class Foo(TypedDict): + a: int + +class Bar(TypedDict): + a: int + b: NotRequired[int] + +foo1: Foo = {"a": 1} +bar1: Bar = {"a": 1} +foo2: Foo = {**bar1} # E: Extra key "b" for TypedDict "Foo" +bar2: Bar = {**foo1} +[builtins fixtures/dict.pyi] +[typing fixtures/typing-typeddict.pyi] + +[case testTypedDictUnpackRequiredKeyMissing] +from typing import TypedDict, NotRequired + +class Foo(TypedDict): + a: NotRequired[int] + +class Bar(TypedDict): + a: int + +foo: Foo = {"a": 1} +bar: Bar = {**foo} # E: Missing key "a" for TypedDict "Bar" +[builtins fixtures/dict.pyi] +[typing fixtures/typing-typeddict.pyi] + +[case testTypedDictUnpackMultiple] +# flags: --extra-checks +from typing import TypedDict + +class Foo(TypedDict): + a: int + +class Bar(TypedDict): + b: int + +class Baz(TypedDict): + a: int + b: int + c: int + +foo: Foo = {"a": 1} +bar: Bar = {"b": 1} +baz: Baz = {**foo, **bar, "c": 1} +[builtins fixtures/dict.pyi] +[typing fixtures/typing-typeddict.pyi] + +[case testTypedDictUnpackNested] +from typing import TypedDict + +class Foo(TypedDict): + a: int + b: int + +class Bar(TypedDict): + c: Foo + d: int + +foo: Foo = {"a": 1, "b": 1} +bar: Bar = {"c": foo, "d": 1} +bar2: Bar = {**bar, "c": {**bar["c"], "b": 2}, "d": 2} +[builtins fixtures/dict.pyi] +[typing fixtures/typing-typeddict.pyi] + 
+[case testTypedDictUnpackNestedError] +from typing import TypedDict + +class Foo(TypedDict): + a: int + b: int + +class Bar(TypedDict): + c: Foo + d: int + +foo: Foo = {"a": 1, "b": 1} +bar: Bar = {"c": foo, "d": 1} +bar2: Bar = {**bar, "c": {**bar["c"], "b": "wrong"}, "d": 2} # E: Incompatible types (expression has type "str", TypedDict item "b" has type "int") +[builtins fixtures/dict.pyi] +[typing fixtures/typing-typeddict.pyi] + +[case testTypedDictUnpackOverrideRequired] +from mypy_extensions import TypedDict + +Details = TypedDict('Details', {'first_name': str, 'last_name': str}) +DetailsSubset = TypedDict('DetailsSubset', {'first_name': str, 'last_name': str}, total=False) +defaults: Details = {'first_name': 'John', 'last_name': 'Luther'} + +def generate(data: DetailsSubset) -> Details: + return {**defaults, **data} # OK +[builtins fixtures/dict.pyi] +[typing fixtures/typing-typeddict.pyi] + +[case testTypedDictUnpackUntypedDict] +from typing import Any, Dict, TypedDict + +class Bar(TypedDict): + pass + +foo: Dict[str, Any] = {} +bar: Bar = {**foo} # E: Unsupported type "Dict[str, Any]" for ** expansion in TypedDict +[builtins fixtures/dict.pyi] +[typing fixtures/typing-typeddict.pyi] + +[case testTypedDictUnpackIntoUnion] +from typing import TypedDict, Union + +class Foo(TypedDict): + a: int + +class Bar(TypedDict): + b: int + +foo: Foo = {'a': 1} +foo_or_bar: Union[Foo, Bar] = {**foo} +[builtins fixtures/dict.pyi] +[typing fixtures/typing-typeddict.pyi] + +[case testTypedDictUnpackFromUnion] +from typing import TypedDict, Union + +class Foo(TypedDict): + a: int + b: int + +class Bar(TypedDict): + b: int + +foo_or_bar: Union[Foo, Bar] = {'b': 1} +foo: Bar = {**foo_or_bar} # E: Extra key "a" for TypedDict "Bar" +[builtins fixtures/dict.pyi] +[typing fixtures/typing-typeddict.pyi] + +[case testTypedDictUnpackUnionRequiredMissing] +from typing import TypedDict, NotRequired, Union + +class Foo(TypedDict): + a: int + b: int + +class Bar(TypedDict): + a: int + b: NotRequired[int] + +foo_or_bar: Union[Foo, Bar] = {"a": 1} +foo: Foo = {**foo_or_bar} # E: Missing key "b" for TypedDict "Foo" +[builtins fixtures/dict.pyi] +[typing fixtures/typing-typeddict.pyi] + +[case testTypedDictUnpackInference] +from typing import TypedDict, Generic, TypeVar + +class Foo(TypedDict): + a: int + b: str + +T = TypeVar("T") +class TD(TypedDict, Generic[T]): + a: T + b: str + +foo: Foo +bar = TD(**foo) +reveal_type(bar) # N: Revealed type is "TypedDict('__main__.TD', {'a': builtins.int, 'b': builtins.str})" +[builtins fixtures/dict.pyi] +[typing fixtures/typing-typeddict.pyi] + +[case testTypedDictUnpackStrictMode] +# flags: --extra-checks +from typing import TypedDict, NotRequired + +class Foo(TypedDict): + a: int + +class Bar(TypedDict): + a: int + b: NotRequired[int] + +foo: Foo +bar: Bar = {**foo} # E: Non-required key "b" not explicitly found in any ** item +[builtins fixtures/dict.pyi] +[typing fixtures/typing-typeddict.pyi] + +[case testTypedDictUnpackAny] +from typing import Any, TypedDict, NotRequired, Dict, Union + +class Foo(TypedDict): + a: int + b: NotRequired[int] + +x: Any +y: Dict[Any, Any] +z: Union[Any, Dict[Any, Any]] +t1: Foo = {**x} # E: Missing key "a" for TypedDict "Foo" +t2: Foo = {**y} # E: Missing key "a" for TypedDict "Foo" +t3: Foo = {**z} # E: Missing key "a" for TypedDict "Foo" +[builtins fixtures/dict.pyi] +[typing fixtures/typing-typeddict.pyi] diff --git a/test-data/unit/check-typeguard.test b/test-data/unit/check-typeguard.test index b4c007148903..b3b168e5c7c6 100644 --- 
a/test-data/unit/check-typeguard.test +++ b/test-data/unit/check-typeguard.test @@ -248,7 +248,6 @@ def main1(a: object) -> None: [builtins fixtures/tuple.pyi] [case testTypeGuardOverload] -# flags: --strict-optional from typing import overload, Any, Callable, Iterable, Iterator, List, Optional, TypeVar from typing_extensions import TypeGuard @@ -620,10 +619,6 @@ def bad_typeguard(*, x: object) -> TypeGuard[int]: # line 15 [builtins fixtures/classmethod.pyi] [out] main:4: error: TypeGuard functions must have a positional argument -main:11: error: TypeGuard functions must have a positional argument -main:15: error: TypeGuard functions must have a positional argument -[out version>=3.8] -main:4: error: TypeGuard functions must have a positional argument main:12: error: TypeGuard functions must have a positional argument main:15: error: TypeGuard functions must have a positional argument diff --git a/test-data/unit/check-typevar-defaults.test b/test-data/unit/check-typevar-defaults.test index 7bc2d4089ecd..9015d353fa08 100644 --- a/test-data/unit/check-typevar-defaults.test +++ b/test-data/unit/check-typevar-defaults.test @@ -59,9 +59,9 @@ from typing import TypeVar, ParamSpec, Tuple from typing_extensions import TypeVarTuple, Unpack T1 = TypeVar("T1", default=2) # E: TypeVar "default" must be a type -T2 = TypeVar("T2", default=[int, str]) # E: Bracketed expression "[...]" is not valid as a type \ - # N: Did you mean "List[...]"? \ - # E: TypeVar "default" must be a type +T2 = TypeVar("T2", default=[int]) # E: Bracketed expression "[...]" is not valid as a type \ + # N: Did you mean "List[...]"? \ + # E: TypeVar "default" must be a type P1 = ParamSpec("P1", default=int) # E: The default argument to ParamSpec must be a list expression, ellipsis, or a ParamSpec P2 = ParamSpec("P2", default=2) # E: The default argument to ParamSpec must be a list expression, ellipsis, or a ParamSpec @@ -72,3 +72,47 @@ Ts1 = TypeVarTuple("Ts1", default=2) # E: The default argument to TypeVarTuple m Ts2 = TypeVarTuple("Ts2", default=int) # E: The default argument to TypeVarTuple must be an Unpacked tuple Ts3 = TypeVarTuple("Ts3", default=Tuple[int]) # E: The default argument to TypeVarTuple must be an Unpacked tuple [builtins fixtures/tuple.pyi] + +[case testTypeVarDefaultsInvalid2] +from typing import TypeVar, List, Union + +T1 = TypeVar("T1", bound=str, default=int) # E: TypeVar default must be a subtype of the bound type +T2 = TypeVar("T2", bound=List[str], default=List[int]) # E: TypeVar default must be a subtype of the bound type +T3 = TypeVar("T3", int, str, default=bytes) # E: TypeVar default must be one of the constraint types +T4 = TypeVar("T4", int, str, default=Union[int, str]) # E: TypeVar default must be one of the constraint types +T5 = TypeVar("T5", float, str, default=int) # E: TypeVar default must be one of the constraint types + +[case testTypeVarDefaultsFunctions] +from typing import TypeVar, ParamSpec, List, Union, Callable, Tuple +from typing_extensions import TypeVarTuple, Unpack + +T1 = TypeVar("T1", default=str) +T2 = TypeVar("T2", bound=str, default=str) +T3 = TypeVar("T3", bytes, str, default=str) +P1 = ParamSpec("P1", default=[int, str]) +Ts1 = TypeVarTuple("Ts1", default=Unpack[Tuple[int, str]]) + +def callback1(x: str) -> None: ... + +def func_a1(x: Union[int, T1]) -> T1: ... +reveal_type(func_a1(2)) # N: Revealed type is "builtins.str" +reveal_type(func_a1(2.1)) # N: Revealed type is "builtins.float" + +def func_a2(x: Union[int, T1]) -> List[T1]: ... 
+reveal_type(func_a2(2)) # N: Revealed type is "builtins.list[builtins.str]" +reveal_type(func_a2(2.1)) # N: Revealed type is "builtins.list[builtins.float]" + +def func_a3(x: Union[int, T2]) -> T2: ... +reveal_type(func_a3(2)) # N: Revealed type is "builtins.str" + +def func_a4(x: Union[int, T3]) -> T3: ... +reveal_type(func_a4(2)) # N: Revealed type is "builtins.str" + +def func_b1(x: Union[int, Callable[P1, None]]) -> Callable[P1, None]: ... +reveal_type(func_b1(callback1)) # N: Revealed type is "def (x: builtins.str)" +reveal_type(func_b1(2)) # N: Revealed type is "def (builtins.int, builtins.str)" + +def func_c1(x: Union[int, Callable[[Unpack[Ts1]], None]]) -> Tuple[Unpack[Ts1]]: ... +# reveal_type(func_c1(callback1)) # Revealed type is "builtins.tuple[str]" # TODO +# reveal_type(func_c1(2)) # Revealed type is "builtins.tuple[builtins.int, builtins.str]" # TODO +[builtins fixtures/tuple.pyi] diff --git a/test-data/unit/check-typevar-tuple.test b/test-data/unit/check-typevar-tuple.test index e1fae05eac63..c8b33ec96b06 100644 --- a/test-data/unit/check-typevar-tuple.test +++ b/test-data/unit/check-typevar-tuple.test @@ -17,7 +17,7 @@ reveal_type(f(args)) # N: Revealed type is "Tuple[builtins.int, builtins.str]" reveal_type(f(varargs)) # N: Revealed type is "builtins.tuple[builtins.int, ...]" -f(0) # E: Argument 1 to "f" has incompatible type "int"; expected +f(0) # E: Argument 1 to "f" has incompatible type "int"; expected "Tuple[, ...]" def g(a: Tuple[Unpack[Ts]], b: Tuple[Unpack[Ts]]) -> Tuple[Unpack[Ts]]: return a @@ -25,7 +25,7 @@ def g(a: Tuple[Unpack[Ts]], b: Tuple[Unpack[Ts]]) -> Tuple[Unpack[Ts]]: reveal_type(g(args, args)) # N: Revealed type is "Tuple[builtins.int, builtins.str]" reveal_type(g(args, args2)) # N: Revealed type is "Tuple[builtins.int, builtins.str]" reveal_type(g(args, args3)) # N: Revealed type is "builtins.tuple[builtins.object, ...]" -reveal_type(g(any, any)) # N: Revealed type is "Any" +reveal_type(g(any, any)) # N: Revealed type is "builtins.tuple[Any, ...]" [builtins fixtures/tuple.pyi] [case testTypeVarTupleMixed] @@ -57,11 +57,12 @@ f_args3: Tuple[int, str, bool] reveal_type(f(f_args)) # N: Revealed type is "Tuple[builtins.str, builtins.str]" reveal_type(f(f_args2)) # N: Revealed type is "Tuple[builtins.str]" reveal_type(f(f_args3)) # N: Revealed type is "Tuple[builtins.str, builtins.str, builtins.bool]" -f(empty) # E: Argument 1 to "f" has incompatible type "Tuple[]"; expected "Tuple[int]" +f(empty) # E: Argument 1 to "f" has incompatible type "Tuple[()]"; expected "Tuple[int]" f(bad_args) # E: Argument 1 to "f" has incompatible type "Tuple[str, str]"; expected "Tuple[int, str]" -# TODO: This hits a crash where we assert len(templates.items) == 1. See visit_tuple_type -# in mypy/constraints.py. -#f(var_len_tuple) + +# The reason for error in subtle: actual can be empty, formal cannot. 
+reveal_type(f(var_len_tuple)) # N: Revealed type is "Tuple[builtins.str, Unpack[builtins.tuple[builtins.int, ...]]]" \ + # E: Argument 1 to "f" has incompatible type "Tuple[int, ...]"; expected "Tuple[int, Unpack[Tuple[int, ...]]]" g_args: Tuple[str, int] reveal_type(g(g_args)) # N: Revealed type is "Tuple[builtins.str, builtins.str]" @@ -122,13 +123,10 @@ reveal_type(empty) # N: Revealed type is "__main__.Variadic[Unpack[builtins.tup bad: Variadic[Unpack[Tuple[int, ...]], str, Unpack[Tuple[bool, ...]]] # E: More than one Unpack in a type is not allowed reveal_type(bad) # N: Revealed type is "__main__.Variadic[Unpack[builtins.tuple[builtins.int, ...]], builtins.str]" -# TODO: This is tricky to fix because we need typeanal to know whether the current -# location is valid for an Unpack or not. -# bad2: Unpack[Tuple[int, ...]] +bad2: Unpack[Tuple[int, ...]] # E: Unpack is only valid in a variadic position m1: Mixed1[int, str, bool] reveal_type(m1) # N: Revealed type is "__main__.Mixed1[builtins.int, builtins.str, builtins.bool]" - [builtins fixtures/tuple.pyi] [case testTypeVarTupleGenericClassWithFunctions] @@ -147,7 +145,6 @@ def foo(t: Variadic[int, Unpack[Ts], object]) -> Tuple[int, Unpack[Ts]]: v: Variadic[int, str, bool, object] reveal_type(foo(v)) # N: Revealed type is "Tuple[builtins.int, builtins.str, builtins.bool]" - [builtins fixtures/tuple.pyi] [case testTypeVarTupleGenericClassWithMethods] @@ -167,7 +164,6 @@ class Variadic(Generic[T, Unpack[Ts], S]): v: Variadic[float, str, bool, object] reveal_type(v.foo(0)) # N: Revealed type is "Tuple[builtins.int, builtins.str, builtins.bool]" - [builtins fixtures/tuple.pyi] [case testTypeVarTupleIsNotValidAliasTarget] @@ -210,8 +206,8 @@ shape = (Height(480), Width(640)) x: Array[Height, Width] = Array(shape) reveal_type(abs(x)) # N: Revealed type is "__main__.Array[__main__.Height, __main__.Width]" reveal_type(x + x) # N: Revealed type is "__main__.Array[__main__.Height, __main__.Width]" - [builtins fixtures/tuple.pyi] + [case testTypeVarTuplePep646ArrayExampleWithDType] from typing import Generic, Tuple, TypeVar, Protocol, NewType from typing_extensions import TypeVarTuple, Unpack @@ -246,7 +242,6 @@ shape = (Height(480), Width(640)) x: Array[float, Height, Width] = Array(shape) reveal_type(abs(x)) # N: Revealed type is "__main__.Array[builtins.float, __main__.Height, __main__.Width]" reveal_type(x + x) # N: Revealed type is "__main__.Array[builtins.float, __main__.Height, __main__.Width]" - [builtins fixtures/tuple.pyi] [case testTypeVarTuplePep646ArrayExampleInfer] @@ -292,8 +287,8 @@ c = del_batch_axis(b) reveal_type(c) # N: Revealed type is "__main__.Array[__main__.Height, __main__.Width]" d = add_batch_channels(a) reveal_type(d) # N: Revealed type is "__main__.Array[__main__.Batch, __main__.Height, __main__.Width, __main__.Channels]" - [builtins fixtures/tuple.pyi] + [case testTypeVarTuplePep646TypeVarConcatenation] from typing import Generic, TypeVar, NewType, Tuple from typing_extensions import TypeVarTuple, Unpack @@ -310,6 +305,7 @@ def prefix_tuple( z = prefix_tuple(x=0, y=(True, 'a')) reveal_type(z) # N: Revealed type is "Tuple[builtins.int, builtins.bool, builtins.str]" [builtins fixtures/tuple.pyi] + [case testTypeVarTuplePep646TypeVarTupleUnpacking] from typing import Generic, TypeVar, NewType, Any, Tuple from typing_extensions import TypeVarTuple, Unpack @@ -362,8 +358,6 @@ reveal_type(bad) # N: Revealed type is "def [Ts, Ts2] (x: Tuple[builtins.int, U def bad2(x: Tuple[int, Unpack[Tuple[int, ...]], str, Unpack[Tuple[str, 
...]]]) -> None: # E: More than one Unpack in a type is not allowed ... reveal_type(bad2) # N: Revealed type is "def (x: Tuple[builtins.int, Unpack[builtins.tuple[builtins.int, ...]], builtins.str])" - - [builtins fixtures/tuple.pyi] [case testTypeVarTuplePep646TypeVarStarArgsBasic] @@ -379,8 +373,8 @@ def args_to_tuple(*args: Unpack[Ts]) -> Tuple[Unpack[Ts]]: return args reveal_type(args_to_tuple(1, 'a')) # N: Revealed type is "Tuple[Literal[1]?, Literal['a']?]" - [builtins fixtures/tuple.pyi] + [case testTypeVarTuplePep646TypeVarStarArgs] from typing import Tuple from typing_extensions import TypeVarTuple, Unpack @@ -409,8 +403,6 @@ with_prefix_suffix(*bad_t) # E: Too few arguments for "with_prefix_suffix" def foo(*args: Unpack[Ts]) -> None: reveal_type(with_prefix_suffix(True, "bar", *args, 5)) # N: Revealed type is "Tuple[builtins.bool, builtins.str, Unpack[Ts`-1], builtins.int]" - - [builtins fixtures/tuple.pyi] [case testTypeVarTuplePep646TypeVarStarArgsFixedLengthTuple] @@ -421,17 +413,23 @@ def foo(*args: Unpack[Tuple[int, str]]) -> None: reveal_type(args) # N: Revealed type is "Tuple[builtins.int, builtins.str]" foo(0, "foo") -foo(0, 1) # E: Argument 2 to "foo" has incompatible type "int"; expected "Unpack[Tuple[int, str]]" -foo("foo", "bar") # E: Argument 1 to "foo" has incompatible type "str"; expected "Unpack[Tuple[int, str]]" -foo(0, "foo", 1) # E: Invalid number of arguments -foo(0) # E: Invalid number of arguments -foo() # E: Invalid number of arguments +foo(0, 1) # E: Argument 2 to "foo" has incompatible type "int"; expected "str" +foo("foo", "bar") # E: Argument 1 to "foo" has incompatible type "str"; expected "int" +foo(0, "foo", 1) # E: Too many arguments for "foo" +foo(0) # E: Too few arguments for "foo" +foo() # E: Too few arguments for "foo" foo(*(0, "foo")) -# TODO: fix this case to do something sensible. -#def foo2(*args: Unpack[Tuple[bool, Unpack[Tuple[int, str]], bool]]) -> None: -# reveal_type(args) +def foo2(*args: Unpack[Tuple[bool, Unpack[Tuple[int, str]], bool]]) -> None: + reveal_type(args) # N: Revealed type is "Tuple[builtins.bool, builtins.int, builtins.str, builtins.bool]" +# It is hard to normalize callable types in a definition, because there is a deep relation between `FuncDef.type` +# and `FuncDef.arguments`, so various typeops need to be sure to normalize Callable types before using them. +reveal_type(foo2) # N: Revealed type is "def (*args: Unpack[Tuple[builtins.bool, builtins.int, builtins.str, builtins.bool]])" + +class C: + def foo2(self, *args: Unpack[Tuple[bool, Unpack[Tuple[int, str]], bool]]) -> None: ...
+reveal_type(C().foo2) # N: Revealed type is "def (*args: Unpack[Tuple[builtins.bool, builtins.int, builtins.str, builtins.bool]])" [builtins fixtures/tuple.pyi] [case testTypeVarTuplePep646TypeVarStarArgsVariableLengthTuple] @@ -442,8 +440,7 @@ def foo(*args: Unpack[Tuple[int, ...]]) -> None: reveal_type(args) # N: Revealed type is "builtins.tuple[builtins.int, ...]" foo(0, 1, 2) -# TODO: this should say 'expected "int"' rather than the unpack -foo(0, 1, "bar") # E: Argument 3 to "foo" has incompatible type "str"; expected "Unpack[Tuple[int, ...]]" +foo(0, 1, "bar") # E: Argument 3 to "foo" has incompatible type "str"; expected "int" def foo2(*args: Unpack[Tuple[str, Unpack[Tuple[int, ...]], bool, bool]]) -> None: @@ -452,9 +449,9 @@ def foo2(*args: Unpack[Tuple[str, Unpack[Tuple[int, ...]], bool, bool]]) -> None # reveal_type(args[1]) foo2("bar", 1, 2, 3, False, True) -foo2(0, 1, 2, 3, False, True) # E: Argument 1 to "foo2" has incompatible type "int"; expected "Unpack[Tuple[str, Unpack[Tuple[int, ...]], bool, bool]]" -foo2("bar", "bar", 2, 3, False, True) # E: Argument 2 to "foo2" has incompatible type "str"; expected "Unpack[Tuple[str, Unpack[Tuple[int, ...]], bool, bool]]" -foo2("bar", 1, 2, 3, 4, True) # E: Argument 5 to "foo2" has incompatible type "int"; expected "Unpack[Tuple[str, Unpack[Tuple[int, ...]], bool, bool]]" +foo2(0, 1, 2, 3, False, True) # E: Argument 1 to "foo2" has incompatible type "int"; expected "str" +foo2("bar", "bar", 2, 3, False, True) # E: Argument 2 to "foo2" has incompatible type "str"; expected "Unpack[Tuple[Unpack[Tuple[int, ...]], bool, bool]]" +foo2("bar", 1, 2, 3, 4, True) # E: Argument 5 to "foo2" has incompatible type "int"; expected "Unpack[Tuple[Unpack[Tuple[int, ...]], bool, bool]]" foo2(*("bar", 1, 2, 3, False, True)) [builtins fixtures/tuple.pyi] @@ -478,18 +475,18 @@ vargs: Tuple[int, ...] vargs_str: Tuple[str, ...] call(target=func, args=(0, 'foo')) -call(target=func, args=('bar', 'foo')) # E: Argument "target" to "call" has incompatible type "Callable[[int, str], None]"; expected "Callable[[object, str], None]" -call(target=func, args=(True, 'foo', 0)) # E: Argument "target" to "call" has incompatible type "Callable[[int, str], None]"; expected "Callable[[VarArg(object)], None]" -call(target=func, args=(0, 0, 'foo')) # E: Argument "target" to "call" has incompatible type "Callable[[int, str], None]"; expected "Callable[[VarArg(object)], None]" -call(target=func, args=vargs) # E: Argument "target" to "call" has incompatible type "Callable[[int, str], None]"; expected "Callable[[VarArg(object)], None]" +call(target=func, args=('bar', 'foo')) # E: Argument "target" to "call" has incompatible type "Callable[[int, str], None]"; expected "Callable[[str, str], None]" +call(target=func, args=(True, 'foo', 0)) # E: Argument "target" to "call" has incompatible type "Callable[[int, str], None]"; expected "Callable[[bool, str, int], None]" +call(target=func, args=(0, 0, 'foo')) # E: Argument "target" to "call" has incompatible type "Callable[[int, str], None]"; expected "Callable[[int, int, str], None]" +call(target=func, args=vargs) # E: Argument "target" to "call" has incompatible type "Callable[[int, str], None]"; expected "Callable[[VarArg(int)], None]" # NOTE: This behavior may be a bit contentious, it is maybe inconsistent with our handling of # PEP646 but consistent with our handling of callable constraints. 
call(target=func2, args=vargs) # E: Argument "target" to "call" has incompatible type "Callable[[int, int], None]"; expected "Callable[[VarArg(int)], None]" call(target=func3, args=vargs) call(target=func3, args=(0,1)) -call(target=func3, args=(0,'foo')) # E: Argument "target" to "call" has incompatible type "Callable[[VarArg(int)], None]"; expected "Callable[[VarArg(object)], None]" -call(target=func3, args=vargs_str) # E: Argument "target" to "call" has incompatible type "Callable[[VarArg(int)], None]"; expected "Callable[[VarArg(object)], None]" +call(target=func3, args=(0,'foo')) # E: Argument "target" to "call" has incompatible type "Callable[[VarArg(int)], None]"; expected "Callable[[int, str], None]" +call(target=func3, args=vargs_str) # E: Argument "target" to "call" has incompatible type "Callable[[VarArg(int)], None]"; expected "Callable[[VarArg(str)], None]" [builtins fixtures/tuple.pyi] [case testTypeVarTuplePep646CallableWithPrefixSuffix] @@ -511,6 +508,51 @@ call_prefix(target=func_prefix, args=(0, 'foo')) call_prefix(target=func2_prefix, args=(0, 'foo')) # E: Argument "target" to "call_prefix" has incompatible type "Callable[[str, int, str], None]"; expected "Callable[[bytes, int, str], None]" [builtins fixtures/tuple.pyi] +[case testTypeVarTuplePep646CallableSuffixSyntax] +from typing import Callable, Tuple, TypeVar +from typing_extensions import Unpack, TypeVarTuple + +x: Callable[[str, Unpack[Tuple[int, ...]], bool], None] +reveal_type(x) # N: Revealed type is "def (builtins.str, *Unpack[Tuple[Unpack[builtins.tuple[builtins.int, ...]], builtins.bool]])" + +T = TypeVar("T") +S = TypeVar("S") +Ts = TypeVarTuple("Ts") +A = Callable[[T, Unpack[Ts], S], int] +y: A[int, str, bool] +reveal_type(y) # N: Revealed type is "def (builtins.int, builtins.str, builtins.bool) -> builtins.int" +z: A[Unpack[Tuple[int, ...]]] +reveal_type(z) # N: Revealed type is "def (builtins.int, *Unpack[Tuple[Unpack[builtins.tuple[builtins.int, ...]], builtins.int]]) -> builtins.int" +[builtins fixtures/tuple.pyi] + +[case testTypeVarTuplePep646CallableInvalidSyntax] +from typing import Callable, Tuple, TypeVar +from typing_extensions import Unpack, TypeVarTuple + +Ts = TypeVarTuple("Ts") +Us = TypeVarTuple("Us") +a: Callable[[Unpack[Ts], Unpack[Us]], int] # E: Var args may not appear after named or var args \ + # E: More than one Unpack in a type is not allowed +reveal_type(a) # N: Revealed type is "def [Ts, Us] (*Unpack[Ts`-1]) -> builtins.int" +b: Callable[[Unpack], int] # E: Unpack[...] requires exactly one type argument +reveal_type(b) # N: Revealed type is "def (*Any) -> builtins.int" +[builtins fixtures/tuple.pyi] + +[case testTypeVarTuplePep646CallableNewSyntax] +from typing import Callable, Generic, Tuple +from typing_extensions import ParamSpec + +x: Callable[[str, *Tuple[int, ...]], None] +reveal_type(x) # N: Revealed type is "def (builtins.str, *builtins.int)" +y: Callable[[str, *Tuple[int, ...], bool], None] +reveal_type(y) # N: Revealed type is "def (builtins.str, *Unpack[Tuple[Unpack[builtins.tuple[builtins.int, ...]], builtins.bool]])" + +P = ParamSpec("P") +class C(Generic[P]): ... +bad: C[[int, *Tuple[int, ...], int]] # E: Unpack is only valid in a variadic position +reveal_type(bad) # N: Revealed type is "__main__.C[[builtins.int, *Any]]" +[builtins fixtures/tuple.pyi] + [case testTypeVarTuplePep646UnspecifiedParameters] from typing import Tuple, Generic, TypeVar from typing_extensions import Unpack, TypeVarTuple @@ -549,8 +591,7 @@ def call( *args: Unpack[Ts], ) -> None: ... 
- # TODO: exposes unhandled case in checkexpr - # target(*args) + target(*args) class A: def func(self, arg1: int, arg2: str) -> None: ... @@ -560,15 +601,14 @@ class A: vargs: Tuple[int, ...] vargs_str: Tuple[str, ...] -call(A().func) # E: Argument 1 to "call" has incompatible type "Callable[[int, str], None]"; expected "Callable[[VarArg(object)], None]" +call(A().func) # E: Argument 1 to "call" has incompatible type "Callable[[int, str], None]"; expected "Callable[[], None]" call(A().func, 0, 'foo') -call(A().func, 0, 'foo', 0) # E: Argument 1 to "call" has incompatible type "Callable[[int, str], None]"; expected "Callable[[VarArg(object)], None]" -call(A().func, 0) # E: Argument 1 to "call" has incompatible type "Callable[[int, str], None]"; expected "Callable[[VarArg(object)], None]" -call(A().func, 0, 1) # E: Argument 1 to "call" has incompatible type "Callable[[int, str], None]"; expected "Callable[[int, object], None]" +call(A().func, 0, 'foo', 0) # E: Argument 1 to "call" has incompatible type "Callable[[int, str], None]"; expected "Callable[[int, str, int], None]" +call(A().func, 0) # E: Argument 1 to "call" has incompatible type "Callable[[int, str], None]"; expected "Callable[[int], None]" +call(A().func, 0, 1) # E: Argument 1 to "call" has incompatible type "Callable[[int, str], None]"; expected "Callable[[int, int], None]" call(A().func2, 0, 0) call(A().func3, 0, 1, 2) call(A().func3) - [builtins fixtures/tuple.pyi] [case testVariadicAliasBasicTuple] @@ -639,19 +679,6 @@ x: A[str, str] reveal_type(x) # N: Revealed type is "Tuple[builtins.int, builtins.int, builtins.str, builtins.str]" [builtins fixtures/tuple.pyi] -[case testVariadicAliasWrongCallable] -from typing import TypeVar, Callable -from typing_extensions import Unpack, TypeVarTuple - -T = TypeVar("T") -S = TypeVar("S") -Ts = TypeVarTuple("Ts") - -A = Callable[[T, Unpack[Ts], S], int] # E: Required positional args may not appear after default, named or var args -x: A[int, str, int, str] -reveal_type(x) # N: Revealed type is "def (builtins.int, builtins.str, builtins.int, builtins.str) -> builtins.int" -[builtins fixtures/tuple.pyi] - [case testVariadicAliasMultipleUnpacks] from typing import Tuple, Generic, Callable from typing_extensions import Unpack, TypeVarTuple @@ -688,7 +715,7 @@ reveal_type(x) # N: Revealed type is "builtins.list[Tuple[Any, Unpack[builtins. B = Callable[[T, Unpack[Ts]], int] y: B -reveal_type(y) # N: Revealed type is "def (Any, *Unpack[builtins.tuple[Any, ...]]) -> builtins.int" +reveal_type(y) # N: Revealed type is "def (Any, *Any) -> builtins.int" C = G[T, Unpack[Ts], T] z: C @@ -710,7 +737,7 @@ reveal_type(x) # N: Revealed type is "builtins.list[Tuple[Any, Unpack[builtins. 
B = Callable[[T, S, Unpack[Ts]], int] y: B[int] # E: Bad number of arguments for type alias, expected: at least 2, given: 1 -reveal_type(y) # N: Revealed type is "def (Any, Any, *Unpack[builtins.tuple[Any, ...]]) -> builtins.int" +reveal_type(y) # N: Revealed type is "def (Any, Any, *Any) -> builtins.int" C = G[T, Unpack[Ts], S] z: C[int] # E: Bad number of arguments for type alias, expected: at least 2, given: 1 @@ -804,3 +831,290 @@ reveal_type(x) # N: Revealed type is "Tuple[builtins.int, Unpack[builtins.tuple y: A[Unpack[Tuple[bool, ...]]] reveal_type(y) # N: Revealed type is "Tuple[builtins.bool, Unpack[builtins.tuple[builtins.bool, ...]], builtins.bool, builtins.bool]" [builtins fixtures/tuple.pyi] + +[case testBanPathologicalRecursiveTuples] +from typing import Tuple +from typing_extensions import Unpack +A = Tuple[int, Unpack[A]] # E: Invalid recursive alias: a tuple item of itself +B = Tuple[int, Unpack[C]] # E: Invalid recursive alias: a tuple item of itself \ + # E: Name "C" is used before definition +C = Tuple[int, Unpack[B]] +x: A +y: B +z: C +reveal_type(x) # N: Revealed type is "Any" +reveal_type(y) # N: Revealed type is "Any" +reveal_type(z) # N: Revealed type is "Tuple[builtins.int, Unpack[Any]]" +[builtins fixtures/tuple.pyi] + +[case testInferenceAgainstGenericVariadicWithBadType] +# flags: --new-type-inference +from typing import TypeVar, Callable, Generic +from typing_extensions import Unpack, TypeVarTuple + +T = TypeVar("T") +Ts = TypeVarTuple("Ts") +Us = TypeVarTuple("Us") + +class Foo(Generic[Unpack[Ts]]): ... + +def dec(f: Callable[[Unpack[Ts]], T]) -> Callable[[Unpack[Ts]], T]: ... +def f(*args: Unpack[Us]) -> Foo[Us]: ... # E: TypeVarTuple "Us" is only valid with an unpack +dec(f) # No crash +[builtins fixtures/tuple.pyi] + +[case testHomogeneousGenericTupleUnpackInferenceNoCrash1] +from typing import Any, TypeVar, Tuple, Type, Optional +from typing_extensions import Unpack + +T = TypeVar("T") +def convert(obj: Any, *to_classes: Unpack[Tuple[Type[T], ...]]) -> Optional[T]: + ... + +x = convert(1, int, float) +reveal_type(x) # N: Revealed type is "Union[builtins.float, None]" +[builtins fixtures/tuple.pyi] + +[case testHomogeneousGenericTupleUnpackInferenceNoCrash2] +from typing import TypeVar, Tuple, Callable, Iterable +from typing_extensions import Unpack + +T = TypeVar("T") +def combine(x: T, y: T) -> T: ... +def reduce(fn: Callable[[T, T], T], xs: Iterable[T]) -> T: ... 
+ +def pipeline(*xs: Unpack[Tuple[int, Unpack[Tuple[str, ...]], bool]]) -> None: + reduce(combine, xs) +[builtins fixtures/tuple.pyi] + +[case testVariadicStarArgsCallNoCrash] +from typing import TypeVar, Callable, Tuple +from typing_extensions import TypeVarTuple, Unpack + +X = TypeVar("X") +Y = TypeVar("Y") +Xs = TypeVarTuple("Xs") +Ys = TypeVarTuple("Ys") + +def nil() -> Tuple[()]: + return () + +def cons( + f: Callable[[X], Y], + g: Callable[[Unpack[Xs]], Tuple[Unpack[Ys]]], +) -> Callable[[X, Unpack[Xs]], Tuple[Y, Unpack[Ys]]]: + def wrapped(x: X, *xs: Unpack[Xs]) -> Tuple[Y, Unpack[Ys]]: + y, ys = f(x), g(*xs) + return y, *ys + return wrapped + +def star(f: Callable[[X], Y]) -> Callable[[Unpack[Tuple[X, ...]]], Tuple[Y, ...]]: + def wrapped(*xs: X): + if not xs: + return nil() + return cons(f, star(f))(*xs) + return wrapped +[builtins fixtures/tuple.pyi] + +[case testInvalidTypeVarTupleUseNoCrash] +from typing_extensions import TypeVarTuple + +Ts = TypeVarTuple("Ts") + +def f(x: Ts) -> Ts: # E: TypeVarTuple "Ts" is only valid with an unpack + return x + +v = f(1, 2, "A") # E: Too many arguments for "f" +reveal_type(v) # N: Revealed type is "Any" +[builtins fixtures/tuple.pyi] + +[case testTypeVarTupleSimpleDecoratorWorks] +from typing import TypeVar, Callable +from typing_extensions import TypeVarTuple, Unpack + +Ts = TypeVarTuple("Ts") +T = TypeVar("T") + +def decorator(f: Callable[[Unpack[Ts]], T]) -> Callable[[Unpack[Ts]], T]: + def wrapper(*args: Unpack[Ts]) -> T: + return f(*args) + return wrapper + +@decorator +def f(a: int, b: int) -> int: ... +reveal_type(f) # N: Revealed type is "def (builtins.int, builtins.int) -> builtins.int" +[builtins fixtures/tuple.pyi] + +[case testTupleWithUnpackIterator] +from typing import Tuple +from typing_extensions import Unpack + +def pipeline(*xs: Unpack[Tuple[int, Unpack[Tuple[float, ...]], bool]]) -> None: + for x in xs: + reveal_type(x) # N: Revealed type is "builtins.float" +[builtins fixtures/tuple.pyi] + +[case testFixedUnpackItemInInstanceArguments] +from typing import TypeVar, Callable, Tuple, Generic +from typing_extensions import TypeVarTuple, Unpack + +T = TypeVar("T") +S = TypeVar("S") +Ts = TypeVarTuple("Ts") + +class C(Generic[T, Unpack[Ts], S]): + prefix: T + suffix: S + middle: Tuple[Unpack[Ts]] + +Ints = Tuple[int, int] +c: C[Unpack[Ints]] +reveal_type(c.prefix) # N: Revealed type is "builtins.int" +reveal_type(c.suffix) # N: Revealed type is "builtins.int" +reveal_type(c.middle) # N: Revealed type is "Tuple[()]" +[builtins fixtures/tuple.pyi] + +[case testVariadicUnpackItemInInstanceArguments] +from typing import TypeVar, Callable, Tuple, Generic +from typing_extensions import TypeVarTuple, Unpack + +T = TypeVar("T") +S = TypeVar("S") +Ts = TypeVarTuple("Ts") + +class Other(Generic[Unpack[Ts]]): ... +class C(Generic[T, Unpack[Ts], S]): + prefix: T + suffix: S + x: Tuple[Unpack[Ts]] + y: Callable[[Unpack[Ts]], None] + z: Other[Unpack[Ts]] + +Ints = Tuple[int, ...] 
+c: C[Unpack[Ints]] +reveal_type(c.prefix) # N: Revealed type is "builtins.int" +reveal_type(c.suffix) # N: Revealed type is "builtins.int" +reveal_type(c.x) # N: Revealed type is "builtins.tuple[builtins.int, ...]" +reveal_type(c.y) # N: Revealed type is "def (*builtins.int)" +reveal_type(c.z) # N: Revealed type is "__main__.Other[Unpack[builtins.tuple[builtins.int, ...]]]" +[builtins fixtures/tuple.pyi] + +[case testTooFewItemsInInstanceArguments] +from typing import Generic, TypeVar +from typing_extensions import TypeVarTuple, Unpack + +T = TypeVar("T") +S = TypeVar("S") +Ts = TypeVarTuple("Ts") +class C(Generic[T, Unpack[Ts], S]): ... + +c: C[int] # E: Bad number of arguments, expected: at least 2, given: 1 +reveal_type(c) # N: Revealed type is "__main__.C[Any, Unpack[builtins.tuple[Any, ...]], Any]" +[builtins fixtures/tuple.pyi] + +[case testVariadicClassUpperBoundCheck] +from typing import Tuple, TypeVar, Generic +from typing_extensions import Unpack, TypeVarTuple + +class A: ... +class B: ... +class C: ... +class D: ... + +T = TypeVar("T", bound=int) +S = TypeVar("S", bound=str) +Ts = TypeVarTuple("Ts") + +class G(Generic[T, Unpack[Ts], S]): ... +First = Tuple[A, B] +Second = Tuple[C, D] +x: G[Unpack[First], Unpack[Second]] # E: Type argument "A" of "G" must be a subtype of "int" \ + # E: Type argument "D" of "G" must be a subtype of "str" +[builtins fixtures/tuple.pyi] + +[case testVariadicTupleType] +from typing import Tuple, Callable +from typing_extensions import TypeVarTuple, Unpack + +Ts = TypeVarTuple("Ts") +class A(Tuple[Unpack[Ts]]): + fn: Callable[[Unpack[Ts]], None] + +x: A[int] +reveal_type(x) # N: Revealed type is "Tuple[builtins.int, fallback=__main__.A[builtins.int]]" +reveal_type(x[0]) # N: Revealed type is "builtins.int" +reveal_type(x.fn) # N: Revealed type is "def (builtins.int)" + +y: A[int, str] +reveal_type(y) # N: Revealed type is "Tuple[builtins.int, builtins.str, fallback=__main__.A[builtins.int, builtins.str]]" +reveal_type(y[0]) # N: Revealed type is "builtins.int" +reveal_type(y.fn) # N: Revealed type is "def (builtins.int, builtins.str)" + +z: A[Unpack[Tuple[int, ...]]] +reveal_type(z) # N: Revealed type is "__main__.A[Unpack[builtins.tuple[builtins.int, ...]]]" +# TODO: this requires fixing map_instance_to_supertype(). 
+# reveal_type(z[0]) +reveal_type(z.fn) # N: Revealed type is "def (*builtins.int)" + +t: A[int, Unpack[Tuple[int, str]], str] +reveal_type(t) # N: Revealed type is "Tuple[builtins.int, builtins.int, builtins.str, builtins.str, fallback=__main__.A[builtins.int, builtins.int, builtins.str, builtins.str]]" +reveal_type(t[0]) # N: Revealed type is "builtins.int" +reveal_type(t.fn) # N: Revealed type is "def (builtins.int, builtins.int, builtins.str, builtins.str)" +[builtins fixtures/tuple.pyi] + +[case testVariadicNamedTuple] +from typing import Tuple, Callable, NamedTuple, Generic, TypeVar +from typing_extensions import TypeVarTuple, Unpack + +T = TypeVar("T") +Ts = TypeVarTuple("Ts") +class A(NamedTuple, Generic[Unpack[Ts], T]): + fn: Callable[[Unpack[Ts]], None] + val: T + +y: A[int, str] +reveal_type(y) # N: Revealed type is "Tuple[def (builtins.int), builtins.str, fallback=__main__.A[builtins.int, builtins.str]]" +reveal_type(y[0]) # N: Revealed type is "def (builtins.int)" +reveal_type(y.fn) # N: Revealed type is "def (builtins.int)" + +z: A[Unpack[Tuple[int, ...]]] +reveal_type(z) # N: Revealed type is "Tuple[def (*builtins.int), builtins.int, fallback=__main__.A[Unpack[builtins.tuple[builtins.int, ...]], builtins.int]]" +reveal_type(z.fn) # N: Revealed type is "def (*builtins.int)" + +t: A[int, Unpack[Tuple[int, str]], str] +reveal_type(t) # N: Revealed type is "Tuple[def (builtins.int, builtins.int, builtins.str), builtins.str, fallback=__main__.A[builtins.int, builtins.int, builtins.str, builtins.str]]" + +def test(x: int, y: str) -> None: ... +nt = A(fn=test, val=42) +reveal_type(nt) # N: Revealed type is "Tuple[def (builtins.int, builtins.str), builtins.int, fallback=__main__.A[builtins.int, builtins.str, builtins.int]]" + +def bad() -> int: ... +nt2 = A(fn=bad, val=42) # E: Argument "fn" to "A" has incompatible type "Callable[[], int]"; expected "Callable[[], None]" +[builtins fixtures/tuple.pyi] + +[case testVariadicTypedDict] +from typing import Tuple, Callable, Generic, TypeVar +from typing_extensions import TypeVarTuple, Unpack, TypedDict + +T = TypeVar("T") +Ts = TypeVarTuple("Ts") +class A(TypedDict, Generic[Unpack[Ts], T]): + fn: Callable[[Unpack[Ts]], None] + val: T + +y: A[int, str] +reveal_type(y) # N: Revealed type is "TypedDict('__main__.A', {'fn': def (builtins.int), 'val': builtins.str})" +reveal_type(y["fn"]) # N: Revealed type is "def (builtins.int)" + +z: A[Unpack[Tuple[int, ...]]] +reveal_type(z) # N: Revealed type is "TypedDict('__main__.A', {'fn': def (*builtins.int), 'val': builtins.int})" +reveal_type(z["fn"]) # N: Revealed type is "def (*builtins.int)" + +t: A[int, Unpack[Tuple[int, str]], str] +reveal_type(t) # N: Revealed type is "TypedDict('__main__.A', {'fn': def (builtins.int, builtins.int, builtins.str), 'val': builtins.str})" + +def test(x: int, y: str) -> None: ... +td = A({"fn": test, "val": 42}) +reveal_type(td) # N: Revealed type is "TypedDict('__main__.A', {'fn': def (builtins.int, builtins.str), 'val': builtins.int})" + +def bad() -> int: ...
+td2 = A({"fn": bad, "val": 42}) # E: Incompatible types (expression has type "Callable[[], int]", TypedDict item "fn" has type "Callable[[], None]") +[builtins fixtures/tuple.pyi] diff --git a/test-data/unit/check-typevar-unbound.test b/test-data/unit/check-typevar-unbound.test index d3e54c75e373..ed6beaa100db 100644 --- a/test-data/unit/check-typevar-unbound.test +++ b/test-data/unit/check-typevar-unbound.test @@ -15,8 +15,7 @@ def g() -> U: # E: A function returning TypeVar should receive at least one argu V = TypeVar('V', int, str) -# TODO: this should also give an error -def h() -> V: +def h() -> V: # E: A function returning TypeVar should receive at least one argument containing the same TypeVar ... [case testInnerFunctionTypeVar] diff --git a/test-data/unit/check-typevar-values.test b/test-data/unit/check-typevar-values.test index 83340c52b63b..effaf620f1f0 100644 --- a/test-data/unit/check-typevar-values.test +++ b/test-data/unit/check-typevar-values.test @@ -97,8 +97,8 @@ def f(x: AB) -> AB: from typing import TypeVar T = TypeVar('T', int, str) def f(x: T) -> T: - a = None # type: T - b = None # type: T + a: T + b: T if 1: a = x b = x @@ -248,10 +248,10 @@ def g(a: T) -> None: from typing import TypeVar, Generic, Any X = TypeVar('X', int, str) class A(Generic[X]): pass -a = None # type: A[int] -b = None # type: A[str] -d = None # type: A[object] # E: Value of type variable "X" of "A" cannot be "object" -c = None # type: A[Any] +a: A[int] +b: A[str] +d: A[object] # E: Value of type variable "X" of "A" cannot be "object" +c: A[Any] [case testConstructGenericTypeWithTypevarValuesAndTypeInference] from typing import TypeVar, Generic, Any, cast @@ -272,11 +272,11 @@ Z = TypeVar('Z') class D(Generic[X]): def __init__(self, x: X) -> None: pass def f(x: X) -> None: - a = None # type: D[X] + a: D[X] def g(x: Y) -> None: - a = None # type: D[Y] + a: D[Y] def h(x: Z) -> None: - a = None # type: D[Z] + a: D[Z] [out] main:11: error: Invalid type argument value for "D" main:13: error: Type variable "Z" not valid as type argument value for "D" @@ -287,7 +287,7 @@ X = TypeVar('X', int, str) class S(str): pass class C(Generic[X]): def __init__(self, x: X) -> None: pass -x = None # type: C[str] +x: C[str] y = C(S()) if int(): x = y @@ -412,10 +412,10 @@ class B: pass X = TypeVar('X', A, B) Y = TypeVar('Y', int, str) class C(Generic[X, Y]): pass -a = None # type: C[A, int] -b = None # type: C[B, str] -c = None # type: C[int, int] # E: Value of type variable "X" of "C" cannot be "int" -d = None # type: C[A, A] # E: Value of type variable "Y" of "C" cannot be "A" +a: C[A, int] +b: C[B, str] +c: C[int, int] # E: Value of type variable "X" of "C" cannot be "int" +d: C[A, A] # E: Value of type variable "Y" of "C" cannot be "A" [case testCallGenericFunctionUsingMultipleTypevarsWithValues] from typing import TypeVar @@ -512,7 +512,7 @@ class C(A[str]): from typing import TypeVar, Generic T = TypeVar('T', int, str) class C(Generic[T]): - def f(self, x: int = None) -> None: pass + def f(self, x: int = 2) -> None: pass [case testTypevarValuesWithOverloadedFunctionSpecialCase] from foo import * diff --git a/test-data/unit/check-underscores.test b/test-data/unit/check-underscores.test index ac9fad2ca792..2a789b3314f3 100644 --- a/test-data/unit/check-underscores.test +++ b/test-data/unit/check-underscores.test @@ -1,10 +1,4 @@ -[case testUnderscoresRequire36] -# flags: --python-version 3.5 -x = 1000_000 # E: Underscores in numeric literals are only supported in Python 3.6 and greater -[out] - [case 
testUnderscoresBasics] -# flags: --python-version 3.6 x: int x = 1000_000 x = 0x_FF_FF_FF_FF diff --git a/test-data/unit/check-union-or-syntax.test b/test-data/unit/check-union-or-syntax.test index 58526cfd0623..f342d0ca34a5 100644 --- a/test-data/unit/check-union-or-syntax.test +++ b/test-data/unit/check-union-or-syntax.test @@ -66,8 +66,8 @@ x: List[int | str] reveal_type(x) # N: Revealed type is "builtins.list[Union[builtins.int, builtins.str]]" [builtins fixtures/list.pyi] -[case testUnionOrSyntaxWithQuotedFunctionTypes] -# flags: --python-version 3.4 +[case testUnionOrSyntaxWithQuotedFunctionTypesPre310] +# flags: --python-version 3.9 from typing import Union def f(x: 'Union[int, str, None]') -> 'Union[int, None]': reveal_type(x) # N: Revealed type is "Union[builtins.int, builtins.str, None]" @@ -79,8 +79,8 @@ def g(x: "int | str | None") -> "int | None": return 42 reveal_type(g) # N: Revealed type is "def (x: Union[builtins.int, builtins.str, None]) -> Union[builtins.int, None]" -[case testUnionOrSyntaxWithQuotedVariableTypes] -# flags: --python-version 3.6 +[case testUnionOrSyntaxWithQuotedVariableTypesPre310] +# flags: --python-version 3.9 y: "int | str" = 42 reveal_type(y) # N: Revealed type is "Union[builtins.int, builtins.str]" @@ -124,7 +124,6 @@ cast(str | int, 'x') # E: Cast target is not a type [typing fixtures/typing-full.pyi] [case testUnionOrSyntaxInComment] -# flags: --python-version 3.6 x = 1 # type: int | str [case testUnionOrSyntaxFutureImport] @@ -138,7 +137,7 @@ x: int | None x: int | None # E: X | Y syntax for unions requires Python 3.10 [case testUnionOrSyntaxInStubFile] -# flags: --python-version 3.6 +# flags: --python-version 3.9 from lib import x [file lib.pyi] x: int | None diff --git a/test-data/unit/check-unions.test b/test-data/unit/check-unions.test index 65d5c1abc7e8..f6fd27e59e4d 100644 --- a/test-data/unit/check-unions.test +++ b/test-data/unit/check-unions.test @@ -55,12 +55,12 @@ class B: y = 2 class C: pass class D: pass -u = None # type: Union[A, C, D] -v = None # type: Union[C, D] -w = None # type: Union[A, B] -x = None # type: Union[A, C] -y = None # type: int -z = None # type: str +u: Union[A, C, D] +v: Union[C, D] +w: Union[A, B] +x: Union[A, C] +y: int +z: str if int(): y = w.y @@ -89,9 +89,9 @@ class B: class C: def foo(self) -> str: pass -x = None # type: Union[A, B] -y = None # type: Union[A, C] -i = None # type: int +x: Union[A, B] +y: Union[A, C] +i: int x.foo() y.foo() @@ -103,7 +103,7 @@ if int(): [case testUnionIndexing] from typing import Union, List -x = None # type: Union[List[int], str] +x: Union[List[int], str] x[2] x[2] + 1 # E: Unsupported operand types for + ("str" and "int") \ # N: Left operand is of type "Union[int, str]" @@ -132,6 +132,7 @@ def f(x: Union[int, str]) -> int: pass def f(x: type) -> str: pass [case testUnionWithNoneItem] +# flags: --no-strict-optional from typing import Union def f() -> Union[int, None]: pass x = 1 @@ -221,6 +222,7 @@ else: # N: def g(x: Union[int, str]) -> None [case testUnionSimplificationSpecialCases] +# flags: --no-strict-optional from typing import Any, TypeVar, Union class C(Any): pass @@ -266,9 +268,10 @@ class M(Generic[V]): def f(x: M[C]) -> None: y = x.get(None) - reveal_type(y) # N: Revealed type is "__main__.C" + reveal_type(y) # N: Revealed type is "Union[__main__.C, None]" [case testUnionSimplificationSpecialCases2] +# flags: --no-strict-optional from typing import Any, TypeVar, Union class C(Any): pass @@ -317,10 +320,10 @@ S = TypeVar('S') R = TypeVar('R') def u(x: T, y: S, z: R) 
-> Union[R, S, T]: pass -a = None # type: Any +a: Any reveal_type(u(1, 1, 1)) # N: Revealed type is "builtins.int" -reveal_type(u(C(), C(), None)) # N: Revealed type is "__main__.C" +reveal_type(u(C(), C(), None)) # N: Revealed type is "Union[None, __main__.C]" reveal_type(u(a, a, 1)) # N: Revealed type is "Union[builtins.int, Any]" reveal_type(u(a, C(), a)) # N: Revealed type is "Union[Any, __main__.C]" reveal_type(u('', 1, 1)) # N: Revealed type is "Union[builtins.int, builtins.str]" @@ -370,9 +373,9 @@ T = TypeVar('T') S = TypeVar('S') def u(x: T, y: S) -> Union[S, T]: pass -t_o = None # type: Type[object] -t_s = None # type: Type[str] -t_a = None # type: Type[Any] +t_o: Type[object] +t_s: Type[str] +t_a: Type[Any] # Two identical items reveal_type(u(t_o, t_o)) # N: Revealed type is "Type[builtins.object]" @@ -397,10 +400,10 @@ T = TypeVar('T') S = TypeVar('S') def u(x: T, y: S) -> Union[S, T]: pass -t_o = None # type: Type[object] -t_s = None # type: Type[str] -t_a = None # type: Type[Any] -t = None # type: type +t_o: Type[object] +t_s: Type[str] +t_a: Type[Any] +t: type # Union with object reveal_type(u(t_o, object())) # N: Revealed type is "builtins.object" @@ -926,7 +929,6 @@ reveal_type(z) # N: Revealed type is "Union[builtins.int, __main__.A, builtins.s [out] [case testUnpackUnionNoCrashOnPartialNone] -# flags: --strict-optional from typing import Dict, Tuple, List, Any a: Any @@ -941,7 +943,6 @@ if x: [out] [case testUnpackUnionNoCrashOnPartialNone2] -# flags: --strict-optional from typing import Dict, Tuple, List, Any a: Any @@ -957,7 +958,6 @@ if x: [out] [case testUnpackUnionNoCrashOnPartialNoneBinder] -# flags: --strict-optional from typing import Dict, Tuple, List, Any x: object @@ -972,7 +972,6 @@ if x: [out] [case testUnpackUnionNoCrashOnPartialList] -# flags: --strict-optional from typing import Dict, Tuple, List, Any a: Any @@ -1010,6 +1009,7 @@ MYTYPE = List[Union[str, "MYTYPE"]] # E: Cannot resolve name "MYTYPE" (possible [builtins fixtures/list.pyi] [case testNonStrictOptional] +# flags: --no-strict-optional from typing import Optional, List def union_test1(x): @@ -1077,7 +1077,6 @@ def bar(a: T4, b: T4) -> T4: # test multi-level alias [builtins fixtures/ops.pyi] [case testJoinUnionWithUnionAndAny] -# flags: --strict-optional from typing import TypeVar, Union, Any T = TypeVar("T") def f(x: T, y: T) -> T: diff --git a/test-data/unit/check-unreachable-code.test b/test-data/unit/check-unreachable-code.test index b2fd44043435..20b5dea9fc87 100644 --- a/test-data/unit/check-unreachable-code.test +++ b/test-data/unit/check-unreachable-code.test @@ -422,9 +422,9 @@ x = 1 [out] [case testCustomSysVersionInfo] -# flags: --python-version 3.5 +# flags: --python-version 3.11 import sys -if sys.version_info == (3, 5): +if sys.version_info == (3, 11): x = "foo" else: x = 3 @@ -433,7 +433,7 @@ reveal_type(x) # N: Revealed type is "builtins.str" [out] [case testCustomSysVersionInfo2] -# flags: --python-version 3.5 +# flags: --python-version 3.11 import sys if sys.version_info == (3, 6): x = "foo" @@ -615,7 +615,6 @@ reveal_type(x) # N: Revealed type is "__main__.B" [typing fixtures/typing-medium.pyi] [case testUnreachableWhenSuperclassIsAny] -# flags: --strict-optional from typing import Any # This can happen if we're importing a class from a missing module @@ -873,15 +872,15 @@ def expect_str(x: str) -> str: pass x: int if False: assert False - reveal_type(x) + reveal_type(x) # E: Statement is unreachable if False: raise Exception() - reveal_type(x) + reveal_type(x) # E: Statement 
is unreachable if False: assert_never(x) - reveal_type(x) + reveal_type(x) # E: Statement is unreachable if False: nonthrowing_assert_never(x) # E: Statement is unreachable @@ -890,7 +889,7 @@ if False: if False: # Ignore obvious type errors assert_never(expect_str(x)) - reveal_type(x) + reveal_type(x) # E: Statement is unreachable [builtins fixtures/exception.pyi] [case testNeverVariants] @@ -1380,6 +1379,38 @@ def f() -> None: x = 1 # E: Statement is unreachable [builtins fixtures/dict.pyi] +[case testUnreachableLiteralFrom__bool__] +# flags: --warn-unreachable +from typing_extensions import Literal + +class Truth: + def __bool__(self) -> Literal[True]: ... + +class Lie: + def __bool__(self) -> Literal[False]: ... + +class Maybe: + def __bool__(self) -> Literal[True | False]: ... + +t = Truth() +if t: + x = 1 +else: + x = 2 # E: Statement is unreachable + +if Lie(): + x = 3 # E: Statement is unreachable + +if Maybe(): + x = 4 + + +def foo() -> bool: ... + +y = Truth() or foo() # E: Right operand of "or" is never evaluated +z = Lie() and foo() # E: Right operand of "and" is never evaluated +[builtins fixtures/dict.pyi] + [case testUnreachableModuleBody1] # flags: --warn-unreachable from typing import NoReturn @@ -1447,3 +1478,19 @@ def f() -> None: Foo()['a'] = 'a' x = 0 # This should not be reported as unreachable [builtins fixtures/exception.pyi] + +[case testIntentionallyEmptyGeneratorFunction] +# flags: --warn-unreachable +from typing import Generator + +def f() -> Generator[None, None, None]: + return + yield + +[case testIntentionallyEmptyGeneratorFunction_None] +# flags: --warn-unreachable +from typing import Generator + +def f() -> Generator[None, None, None]: + return None + yield None diff --git a/test-data/unit/check-varargs.test b/test-data/unit/check-varargs.test index 92b9f7f04f26..fe09fb43c97c 100644 --- a/test-data/unit/check-varargs.test +++ b/test-data/unit/check-varargs.test @@ -8,8 +8,8 @@ [case testVarArgsWithinFunction] from typing import Tuple def f( *b: 'B') -> None: - ab = None # type: Tuple[B, ...] - ac = None # type: Tuple[C, ...] + ab: Tuple[B, ...] + ac: Tuple[C, ...] 
if int(): b = ac # E: Incompatible types in assignment (expression has type "Tuple[C, ...]", variable has type "Tuple[B, ...]") ac = b # E: Incompatible types in assignment (expression has type "Tuple[B, ...]", variable has type "Tuple[C, ...]") @@ -46,9 +46,9 @@ class A: pass class B(A): pass class C: pass -a = None # type: A -b = None # type: B -c = None # type: C +a: A +b: B +c: C f(c) # E: Argument 1 to "f" has incompatible type "C"; expected "A" f(a, b, c) # E: Argument 3 to "f" has incompatible type "C"; expected "A" @@ -67,9 +67,9 @@ class A: pass class B(A): pass class C: pass -a = None # type: A -b = None # type: B -c = None # type: C +a: A +b: B +c: C f(a) # E: Argument 1 to "f" has incompatible type "A"; expected "C" f(c, c) # E: Argument 2 to "f" has incompatible type "C"; expected "A" @@ -88,9 +88,9 @@ class A: pass class B(A): pass class C: pass -a = None # type: A -b = None # type: B -c = None # type: C +a: A +b: B +c: C f(a) # E: Argument 1 to "f" has incompatible type "A"; expected "Optional[C]" f(c, c) # E: Argument 2 to "f" has incompatible type "C"; expected "A" @@ -103,8 +103,8 @@ f(c, b, b, a, b) [case testCallVarargsFunctionWithIterable] from typing import Iterable -it1 = None # type: Iterable[int] -it2 = None # type: Iterable[str] +it1: Iterable[int] +it2: Iterable[str] def f(*x: int) -> None: pass f(*it1) f(*it2) # E: Argument 1 to "f" has incompatible type "*Iterable[str]"; expected "int" @@ -127,7 +127,7 @@ reveal_type(f(*x, *y)) # N: Revealed type is "Tuple[builtins.int, builtins.str, [case testCallVarargsFunctionWithIterableAndPositional] from typing import Iterable -it1 = None # type: Iterable[int] +it1: Iterable[int] def f(*x: int) -> None: pass f(*it1, 1, 2) f(*it1, 1, *it1, 2) @@ -161,10 +161,10 @@ class A: pass class B(A): pass class C: pass -a = None # type: A -b = None # type: B -c = None # type: C -o = None # type: object +a: A +b: B +c: C +o: object if int(): a = f(o) # E: Incompatible types in assignment (expression has type "object", variable has type "A") @@ -188,6 +188,7 @@ if int(): [builtins fixtures/list.pyi] [case testTypeInferenceWithCalleeVarArgsAndDefaultArgs] +# flags: --no-strict-optional from typing import TypeVar T = TypeVar('T') a = None # type: A @@ -229,10 +230,10 @@ def f(a: 'A', b: 'B') -> None: class A: pass class B: pass -aa = None # type: List[A] -ab = None # type: List[B] -a = None # type: A -b = None # type: B +aa: List[A] +ab: List[B] +a: A +b: B f(*aa) # E: Argument 1 to "f" has incompatible type "*List[A]"; expected "B" f(a, *ab) # Ok @@ -248,10 +249,10 @@ class B: pass class C: pass class CC(C): pass -a = None # type: A -b = None # type: B -c = None # type: C -cc = None # type: CC +a: A +b: B +c: C +cc: CC f(*(a, b, b)) # E: Argument 1 to "f" has incompatible type "*Tuple[A, B, B]"; expected "C" f(*(b, b, c)) # E: Argument 1 to "f" has incompatible type "*Tuple[B, B, C]"; expected "A" @@ -267,7 +268,7 @@ f(a, *(b, cc)) [builtins fixtures/tuple.pyi] [case testInvalidVarArg] - +# flags: --no-strict-optional def f(a: 'A') -> None: pass @@ -293,7 +294,10 @@ def g(a: 'A', *b: 'A') -> None: pass class A: pass class B: pass -aa, ab, a, b = None, None, None, None # type: (List[A], List[B], A, B) +aa: List[A] +ab: List[B] +a: A +b: B f(*aa) # E: Argument 1 to "f" has incompatible type "*List[A]"; expected "B" f(a, *aa) # E: Argument 2 to "f" has incompatible type "*List[A]"; expected "B" f(b, *ab) # E: Argument 1 to "f" has incompatible type "B"; expected "A" @@ -315,7 +319,10 @@ class B: pass class C: pass class CC(C): pass -a, b, 
c, cc = None, None, None, None # type: (A, B, C, CC) +a: A +b: B +c: C +cc: CC f(*(b, b, b)) # E: Argument 1 to "f" has incompatible type "*Tuple[B, B, B]"; expected "A" f(*(a, a, b)) # E: Argument 1 to "f" has incompatible type "*Tuple[A, A, B]"; expected "B" @@ -342,7 +349,8 @@ def f(a: 'A') -> None: pass def g(a: 'A', *b: 'A') -> None: pass class A: pass -d, a = None, None # type: (Any, A) +d: Any +a: A f(a, a, *d) # E: Too many arguments for "f" f(a, *d) # Ok f(*d) # Ok @@ -351,6 +359,7 @@ g(*d) g(a, *d) g(a, a, *d) [builtins fixtures/list.pyi] + [case testListVarArgsAndSubtyping] from typing import List def f( *a: 'A') -> None: @@ -362,8 +371,8 @@ def g( *a: 'B') -> None: class A: pass class B(A): pass -aa = None # type: List[A] -ab = None # type: List[B] +aa: List[A] +ab: List[B] g(*aa) # E: Argument 1 to "g" has incompatible type "*List[A]"; expected "B" f(*aa) @@ -449,6 +458,7 @@ foo(*()) [case testIntersectionTypesAndVarArgs] +# flags: --no-strict-optional from foo import * [file foo.pyi] from typing import overload @@ -518,7 +528,9 @@ def f(a: S, *b: T) -> Tuple[S, T]: class A: pass class B: pass -a, b, aa = None, None, None # type: (A, B, List[A]) +a: A +b: B +aa: List[A] if int(): a, b = f(*aa) # E: Argument 1 to "f" has incompatible type "*List[A]"; expected "B" @@ -553,7 +565,8 @@ def f(a: S, b: T) -> Tuple[S, T]: pass class A: pass class B: pass -a, b = None, None # type: (A, B) +a: A +b: B if int(): a, a = f(*(a, b)) # E: Argument 1 to "f" has incompatible type "*Tuple[A, B]"; expected "A" @@ -575,10 +588,11 @@ if int(): from typing import List, TypeVar, Generic, Tuple T = TypeVar('T') S = TypeVar('S') -a, b = None, None # type: (A, B) -ao = None # type: List[object] -aa = None # type: List[A] -ab = None # type: List[B] +a: A +b: B +ao: List[object] +aa: List[A] +ab: List[B] class G(Generic[T]): def f(self, *a: S) -> Tuple[List[S], List[T]]: @@ -616,7 +630,6 @@ if int(): [builtins fixtures/list.pyi] [case testCallerTupleVarArgsAndGenericCalleeVarArg] -# flags: --strict-optional from typing import TypeVar T = TypeVar('T') @@ -649,7 +662,7 @@ f(1, '') # E: Argument 2 to "f" has incompatible type "str"; expected "int" [case testVarArgsFunctionSubtyping] from typing import Callable -x = None # type: Callable[[int], None] +x: Callable[[int], None] def f(*x: int) -> None: pass def g(*x: str) -> None: pass x = f @@ -762,7 +775,7 @@ class Person(TypedDict): name: str age: int -def foo(x: Unpack[Person]) -> None: # E: "Person" cannot be unpacked (must be tuple or TypeVarTuple) +def foo(x: Unpack[Person]) -> None: # E: Unpack is only valid in a variadic position ... def bar(x: int, *args: Unpack[Person]) -> None: # E: "Person" cannot be unpacked (must be tuple or TypeVarTuple) ... 
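The check-varargs changes above rely on PEP 646's rule that Unpack may only annotate a variadic position such as *args. A minimal standalone sketch of that rule, illustrative only and not part of the test data, assuming typing_extensions is installed and using made-up function names:

# Unpack is accepted on *args (a variadic position) but not on a single parameter.
from typing import Tuple
from typing_extensions import TypeVarTuple, Unpack

Ts = TypeVarTuple("Ts")

def total(*args: Unpack[Tuple[int, ...]]) -> int:
    # args is a variable-length tuple of ints here.
    return sum(args)

def as_tuple(*args: Unpack[Ts]) -> Tuple[Unpack[Ts]]:
    # The TypeVarTuple preserves each argument's precise type.
    return args

print(total(1, 2, 3))          # 6
print(as_tuple(1, "a", True))  # (1, 'a', True)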
diff --git a/test-data/unit/check-warnings.test b/test-data/unit/check-warnings.test index 10c7968be475..90f40777d6b7 100644 --- a/test-data/unit/check-warnings.test +++ b/test-data/unit/check-warnings.test @@ -207,7 +207,7 @@ def f() -> Any: return g() [out] [case testOKReturnAnyIfProperSubtype] -# flags: --warn-return-any --strict-optional +# flags: --warn-return-any from typing import Any, Optional class Test(object): diff --git a/test-data/unit/cmdline.test b/test-data/unit/cmdline.test index c2e98cdb74f9..42f0ee8a9ec6 100644 --- a/test-data/unit/cmdline.test +++ b/test-data/unit/cmdline.test @@ -296,7 +296,7 @@ mypy.ini: [mypy]: ignore_missing_imports: Not a boolean: nah [file mypy.ini] \[mypy] \[mypy-*] -python_version = 3.4 +python_version = 3.11 [out] mypy.ini: [mypy-*]: Per-module sections should only specify per-module flags (python_version) == Return code: 0 @@ -592,7 +592,7 @@ main.py:1: error: Cannot find implementation or library stub for module named "a \[tool.mypy] python_version = 3.10 [out] -pyproject.toml: [mypy]: python_version: Python 3.1 is not supported (must be 3.4 or higher). You may need to put quotes around your Python version +pyproject.toml: [mypy]: python_version: Python 3.1 is not supported (must be 3.7 or higher). You may need to put quotes around your Python version == Return code: 0 [case testPythonVersionTooOld10] @@ -604,13 +604,13 @@ python_version = 1.0 mypy.ini: [mypy]: python_version: Python major version '1' out of range (must be 3) == Return code: 0 -[case testPythonVersionTooOld33] +[case testPythonVersionTooOld36] # cmd: mypy -c pass [file mypy.ini] \[mypy] -python_version = 3.3 +python_version = 3.6 [out] -mypy.ini: [mypy]: python_version: Python 3.3 is not supported (must be 3.4 or higher) +mypy.ini: [mypy]: python_version: Python 3.6 is not supported (must be 3.7 or higher) == Return code: 0 [case testPythonVersionTooNew40] @@ -633,18 +633,18 @@ usage: mypy [-h] [-v] [-V] [more options; see below] mypy: error: Mypy no longer supports checking Python 2 code. Consider pinning to mypy<0.980 if you need to check Python 2 code. == Return code: 2 -[case testPythonVersionAccepted34] +[case testPythonVersionAccepted37] # cmd: mypy -c pass [file mypy.ini] \[mypy] -python_version = 3.4 +python_version = 3.7 [out] -[case testPythonVersionAccepted36] +[case testPythonVersionAccepted311] # cmd: mypy -c pass [file mypy.ini] \[mypy] -python_version = 3.6 +python_version = 3.11 [out] -- This should be a dumping ground for tests of plugins that are sensitive to @@ -676,11 +676,11 @@ int_pow.py:10: note: Revealed type is "builtins.int" int_pow.py:11: note: Revealed type is "Any" == Return code: 0 -[case testDisallowAnyGenericsBuiltinCollections] +[case testDisallowAnyGenericsBuiltinCollectionsPre39] # cmd: mypy m.py [file mypy.ini] \[mypy] -python_version=3.6 +python_version = 3.8 \[mypy-m] disallow_any_generics = True @@ -1122,19 +1122,6 @@ class AnotherCustomClassDefinedBelow: def another_even_more_interesting_method(self, arg: Union[int, str, float]) -> None: self.very_important_attribute_with_long_name: OneCustomClassName = OneCustomClassName().some_interesting_method(arg) [out] -some_file.py:3: error: Unsupported operand types for + ("int" and "str") - 42 + 'no way' - ^ -some_file.py:11: error: Incompatible types in assignment (expression has type -"AnotherCustomClassDefinedBelow", variable has type "OneCustomClassName") - ...t_attribute_with_long_name: OneCustomClassName = OneCustomClassName().... 
- ^ -some_file.py:11: error: Argument 1 to "some_interesting_method" of -"OneCustomClassName" has incompatible type "Union[int, str, float]"; expected -"AnotherCustomClassDefinedBelow" - ...OneCustomClassName = OneCustomClassName().some_interesting_method(arg) - ^ -[out version>=3.8] some_file.py:3: error: Unsupported operand types for + ("int" and "str") 42 + 'no way' ^~~~~~~~ @@ -1165,14 +1152,6 @@ def test_tabs() -> str: def test_between(x: str) -> None: ... test_between(1 + 1) [out] -tabs.py:2: error: Incompatible return value type (got "None", expected "str") - return None - ^ -tabs.py:4: error: Argument 1 to "test_between" has incompatible type "int"; -expected "str" - test_between(1 + 1) - ^ -[out version>=3.8] tabs.py:2: error: Incompatible return value type (got "None", expected "str") return None ^~~~ @@ -1484,6 +1463,10 @@ note: A user-defined top-level module with name "typing" is not supported [file dir/stdlib/types.pyi] [file dir/stdlib/typing.pyi] [file dir/stdlib/typing_extensions.pyi] +[file dir/stdlib/_typeshed.pyi] +[file dir/stdlib/_collections_abc.pyi] +[file dir/stdlib/collections/abc.pyi] +[file dir/stdlib/collections/__init__.pyi] [file dir/stdlib/VERSIONS] [out] Failed to find builtin module mypy_extensions, perhaps typeshed is broken? @@ -1523,6 +1506,10 @@ class dict: pass [file dir/stdlib/typing.pyi] [file dir/stdlib/mypy_extensions.pyi] [file dir/stdlib/typing_extensions.pyi] +[file dir/stdlib/_typeshed.pyi] +[file dir/stdlib/_collections_abc.pyi] +[file dir/stdlib/collections/abc.pyi] +[file dir/stdlib/collections/__init__.pyi] [file dir/stdlib/foo.pyi] 1() # Errors are reported if the file was explicitly passed on the command line [file dir/stdlib/VERSIONS] diff --git a/test-data/unit/daemon.test b/test-data/unit/daemon.test index c60068a44bec..18a03a92207d 100644 --- a/test-data/unit/daemon.test +++ b/test-data/unit/daemon.test @@ -28,6 +28,33 @@ Daemon stopped [file foo.py] def f(): pass +[case testDaemonRunIgnoreMissingImports] +$ dmypy run -- foo.py --follow-imports=error --ignore-missing-imports +Daemon started +Success: no issues found in 1 source file +$ dmypy stop +Daemon stopped +[file foo.py] +def f(): pass + +[case testDaemonRunErrorCodes] +$ dmypy run -- foo.py --follow-imports=error --disable-error-code=type-abstract +Daemon started +Success: no issues found in 1 source file +$ dmypy stop +Daemon stopped +[file foo.py] +def f(): pass + +[case testDaemonRunCombinedOptions] +$ dmypy run -- foo.py --follow-imports=error --ignore-missing-imports --disable-error-code=type-abstract +Daemon started +Success: no issues found in 1 source file +$ dmypy stop +Daemon stopped +[file foo.py] +def f(): pass + [case testDaemonIgnoreConfigFiles] $ dmypy start -- --follow-imports=error Daemon started @@ -35,6 +62,28 @@ Daemon started \[mypy] files = ./foo.py +[case testDaemonRunMultipleStrict] +$ dmypy run -- foo.py --strict --follow-imports=error +Daemon started +foo.py:1: error: Function is missing a return type annotation +foo.py:1: note: Use "-> None" if function does not return a value +Found 1 error in 1 file (checked 1 source file) +== Return code: 1 +$ dmypy run -- bar.py --strict --follow-imports=error +bar.py:1: error: Function is missing a return type annotation +bar.py:1: note: Use "-> None" if function does not return a value +Found 1 error in 1 file (checked 1 source file) +== Return code: 1 +$ dmypy run -- foo.py --strict --follow-imports=error +foo.py:1: error: Function is missing a return type annotation +foo.py:1: note: Use "-> None" if function 
does not return a value +Found 1 error in 1 file (checked 1 source file) +== Return code: 1 +[file foo.py] +def f(): pass +[file bar.py] +def f(): pass + [case testDaemonRunRestart] $ dmypy run -- foo.py --follow-imports=error Daemon started @@ -110,18 +159,18 @@ def plugin(version): return Dummy [case testDaemonRunRestartGlobs] -- Ensure dmypy is not restarted if the configuration doesn't change and it contains globs -- Note: Backslash path separator in output is replaced with forward slash so the same test succeeds on Windows as well -$ dmypy run -- foo --follow-imports=error --python-version=3.6 +$ dmypy run -- foo --follow-imports=error Daemon started foo/lol.py:1: error: Name "fail" is not defined Found 1 error in 1 file (checked 3 source files) == Return code: 1 -$ dmypy run -- foo --follow-imports=error --python-version=3.6 +$ dmypy run -- foo --follow-imports=error foo/lol.py:1: error: Name "fail" is not defined Found 1 error in 1 file (checked 3 source files) == Return code: 1 $ {python} -c "print('[mypy]')" >mypy.ini $ {python} -c "print('ignore_errors=True')" >>mypy.ini -$ dmypy run -- foo --follow-imports=error --python-version=3.6 +$ dmypy run -- foo --follow-imports=error Restarting: configuration changed Daemon stopped Daemon started @@ -215,7 +264,7 @@ $ dmypy stop Daemon stopped [case testDaemonWarningSuccessExitCode-posix] -$ dmypy run -- foo.py --follow-imports=error +$ dmypy run -- foo.py --follow-imports=error --python-version=3.11 Daemon started foo.py:2: note: By default the bodies of untyped functions are not checked, consider using --check-untyped-defs Success: no issues found in 1 source file @@ -233,13 +282,13 @@ def foo(): [case testDaemonQuickstart] $ {python} -c "print('x=1')" >foo.py $ {python} -c "print('x=1')" >bar.py -$ mypy --local-partial-types --cache-fine-grained --follow-imports=error --no-sqlite-cache --python-version=3.6 -- foo.py bar.py +$ mypy --local-partial-types --cache-fine-grained --follow-imports=error --no-sqlite-cache --python-version=3.11 -- foo.py bar.py Success: no issues found in 2 source files -$ {python} -c "import shutil; shutil.copy('.mypy_cache/3.6/bar.meta.json', 'asdf.json')" +$ {python} -c "import shutil; shutil.copy('.mypy_cache/3.11/bar.meta.json', 'asdf.json')" -- update bar's timestamp but don't change the file $ {python} -c "import time;time.sleep(1)" $ {python} -c "print('x=1')" >bar.py -$ dmypy run -- foo.py bar.py --follow-imports=error --use-fine-grained-cache --no-sqlite-cache --python-version=3.6 +$ dmypy run -- foo.py bar.py --follow-imports=error --use-fine-grained-cache --no-sqlite-cache --python-version=3.11 Daemon started Success: no issues found in 2 source files $ dmypy status --fswatcher-dump-file test.json @@ -247,11 +296,11 @@ Daemon is up and running $ dmypy stop Daemon stopped -- copy the original bar cache file back so that the mtime mismatches -$ {python} -c "import shutil; shutil.copy('asdf.json', '.mypy_cache/3.6/bar.meta.json')" +$ {python} -c "import shutil; shutil.copy('asdf.json', '.mypy_cache/3.11/bar.meta.json')" -- sleep guarantees timestamp changes $ {python} -c "import time;time.sleep(1)" $ {python} -c "print('lol')" >foo.py -$ dmypy run --log-file=log -- foo.py bar.py --follow-imports=error --use-fine-grained-cache --no-sqlite-cache --python-version=3.6 --quickstart-file test.json +$ dmypy run --log-file=log -- foo.py bar.py --follow-imports=error --use-fine-grained-cache --no-sqlite-cache --python-version=3.11 --quickstart-file test.json Daemon started foo.py:1: error: Name "lol" is not 
defined Found 1 error in 1 file (checked 2 source files) @@ -260,7 +309,7 @@ Found 1 error in 1 file (checked 2 source files) $ {python} -c "import sys; sys.stdout.write(open('log').read())" -- make sure the meta file didn't get updated. we use this as an imperfect proxy for -- whether the source file got rehashed, which we don't want it to have been. -$ {python} -c "x = open('.mypy_cache/3.6/bar.meta.json').read(); y = open('asdf.json').read(); assert x == y" +$ {python} -c "x = open('.mypy_cache/3.11/bar.meta.json').read(); y = open('asdf.json').read(); assert x == y" [case testDaemonSuggest] $ dmypy start --log-file log.txt -- --follow-imports=error --no-error-summary @@ -311,7 +360,7 @@ def bar() -> None: x = foo('abc') # type: str foo(arg='xyz') -[case testDaemonGetType_python38] +[case testDaemonGetType] $ dmypy start --log-file log.txt -- --follow-imports=error --no-error-summary --python-version 3.8 Daemon started $ dmypy inspect foo:1:2:3:4 @@ -374,7 +423,7 @@ def unreachable(x: int) -> None: return x # line 17 -[case testDaemonGetTypeInexact_python38] +[case testDaemonGetTypeInexact] $ dmypy start --log-file log.txt -- --follow-imports=error --no-error-summary Daemon started $ dmypy check foo.py --export-types @@ -429,7 +478,7 @@ def unreachable(x: int, y: int) -> None: return x and y # line 11 -[case testDaemonGetAttrs_python38] +[case testDaemonGetAttrs] $ dmypy start --log-file log.txt -- --follow-imports=error --no-error-summary Daemon started $ dmypy check foo.py bar.py --export-types @@ -471,7 +520,7 @@ class B: var: Union[A, B] var # line 10 -[case testDaemonGetDefinition_python38] +[case testDaemonGetDefinition] $ dmypy start --log-file log.txt -- --follow-imports=error --no-error-summary Daemon started $ dmypy check foo.py bar/baz.py bar/__init__.py --export-types diff --git a/test-data/unit/deps-generics.test b/test-data/unit/deps-generics.test index c78f3fad90c0..6baa57266d2f 100644 --- a/test-data/unit/deps-generics.test +++ b/test-data/unit/deps-generics.test @@ -159,7 +159,7 @@ class D: pass T = TypeVar('T', A, B) S = TypeVar('S', C, D) -def f(x: T) -> S: +def f(x: T, y: S) -> S: pass [out] -> , , m, m.A, m.f diff --git a/test-data/unit/deps.test b/test-data/unit/deps.test index 28d51f1a4c30..c3295b79e4ed 100644 --- a/test-data/unit/deps.test +++ b/test-data/unit/deps.test @@ -612,7 +612,6 @@ class A: -> , m.A.f, m.C [case testPartialNoneTypeAttributeCrash2] -# flags: --strict-optional class C: pass class A: @@ -1388,6 +1387,7 @@ class B(A): -> , m -> -> , m.B.__init__ + -> , m.B.__mypy-replace -> -> -> @@ -1419,6 +1419,7 @@ class B(A): -> -> , m.B.__init__ -> + -> , m.B.__mypy-replace -> -> -> diff --git a/test-data/unit/fine-grained-blockers.test b/test-data/unit/fine-grained-blockers.test index a134fb1d4301..33dedd887114 100644 --- a/test-data/unit/fine-grained-blockers.test +++ b/test-data/unit/fine-grained-blockers.test @@ -48,16 +48,6 @@ a.py:1: error: invalid syntax [syntax] def f(x: int) -> ^ == -main:3: error: Missing positional argument "x" in call to "f" [call-arg] - a.f() - ^ -== -[out version>=3.8] -== -a.py:1: error: invalid syntax [syntax] - def f(x: int) -> - ^ -== main:3: error: Missing positional argument "x" in call to "f" [call-arg] a.f() ^~~~~ diff --git a/test-data/unit/fine-grained-cache-incremental.test b/test-data/unit/fine-grained-cache-incremental.test index 50f93dd35af3..00157333efd7 100644 --- a/test-data/unit/fine-grained-cache-incremental.test +++ b/test-data/unit/fine-grained-cache-incremental.test @@ -202,7 +202,7 @@ a.py:8: note: x: 
expected "int", got "str" [file b.py] -- This is a heinous hack, but we simulate having a invalid cache by clobbering -- the proto deps file with something with mtime mismatches. -[file ../.mypy_cache/3.7/@deps.meta.json.2] +[file ../.mypy_cache/3.8/@deps.meta.json.2] {"snapshot": {"__main__": "a7c958b001a45bd6a2a320f4e53c4c16", "a": "d41d8cd98f00b204e9800998ecf8427e", "b": "d41d8cd98f00b204e9800998ecf8427e", "builtins": "c532c89da517a4b779bcf7a964478d67"}, "deps_meta": {"@root": {"path": "@root.deps.json", "mtime": 0}, "__main__": {"path": "__main__.deps.json", "mtime": 0}, "a": {"path": "a.deps.json", "mtime": 0}, "b": {"path": "b.deps.json", "mtime": 0}, "builtins": {"path": "builtins.deps.json", "mtime": 0}}} [file b.py.2] @@ -234,8 +234,8 @@ x = 10 [file p/c.py] class C: pass -[delete ../.mypy_cache/3.7/b.meta.json.2] -[delete ../.mypy_cache/3.7/p/c.meta.json.2] +[delete ../.mypy_cache/3.8/b.meta.json.2] +[delete ../.mypy_cache/3.8/p/c.meta.json.2] [out] == diff --git a/test-data/unit/fine-grained-dataclass.test b/test-data/unit/fine-grained-dataclass.test new file mode 100644 index 000000000000..036d858ddf69 --- /dev/null +++ b/test-data/unit/fine-grained-dataclass.test @@ -0,0 +1,25 @@ +[case testReplace] +[file model.py] +from dataclasses import dataclass + +@dataclass +class Model: + x: int = 0 +[file replace.py] +from dataclasses import replace +from model import Model + +m = Model() +replace(m, x=42) + +[file model.py.2] +from dataclasses import dataclass + +@dataclass +class Model: + x: str = 'hello' + +[builtins fixtures/dataclasses.pyi] +[out] +== +replace.py:5: error: Argument "x" to "replace" of "Model" has incompatible type "int"; expected "str" diff --git a/test-data/unit/fine-grained-suggest.test b/test-data/unit/fine-grained-suggest.test index a28e3204b93f..02373091ad54 100644 --- a/test-data/unit/fine-grained-suggest.test +++ b/test-data/unit/fine-grained-suggest.test @@ -62,7 +62,6 @@ foo('3', '4') == [case testSuggestInferFunc1] -# flags: --strict-optional # suggest: foo.foo [file foo.py] def foo(arg, lol=None): @@ -85,7 +84,6 @@ def untyped(x) -> None: == [case testSuggestInferFunc2] -# flags: --strict-optional # suggest: foo.foo [file foo.py] def foo(arg): @@ -222,7 +220,6 @@ Foo('lol') == [case testSuggestInferMethod1] -# flags: --strict-optional # suggest: --no-any foo.Foo.foo [file foo.py] class Foo: @@ -248,7 +245,6 @@ def bar() -> None: == [case testSuggestInferMethod2] -# flags: --strict-optional # suggest: foo.Foo.foo [file foo.py] class Foo: @@ -275,7 +271,6 @@ def bar() -> None: == [case testSuggestInferMethod3] -# flags: --strict-optional # suggest2: foo.Foo.foo [file foo.py] class Foo: @@ -372,7 +367,6 @@ def has_nested(x): == [case testSuggestInferFunctionUnreachable] -# flags: --strict-optional # suggest: foo.foo [file foo.py] import sys @@ -390,7 +384,6 @@ foo('test') == [case testSuggestInferMethodStep2] -# flags: --strict-optional # suggest2: foo.Foo.foo [file foo.py] class Foo: @@ -417,7 +410,6 @@ def bar() -> None: (Union[str, int, None], Optional[int]) -> Union[int, str] [case testSuggestInferNestedMethod] -# flags: --strict-optional # suggest: foo.Foo.Bar.baz [file foo.py] class Foo: @@ -435,7 +427,6 @@ def bar() -> None: == [case testSuggestCallable] -# flags: --strict-optional # suggest: foo.foo # suggest: foo.bar # suggest: --flex-any=0.9 foo.bar @@ -483,7 +474,6 @@ No guesses that match criteria! 
== [case testSuggestNewSemanal] -# flags: --strict-optional # suggest: foo.Foo.foo # suggest: foo.foo [file foo.py] @@ -521,7 +511,6 @@ def baz() -> None: == [case testSuggestInferFuncDecorator1] -# flags: --strict-optional # suggest: foo.foo [file foo.py] from typing import TypeVar @@ -543,7 +532,6 @@ def bar() -> None: == [case testSuggestInferFuncDecorator2] -# flags: --strict-optional # suggest: foo.foo [file foo.py] from typing import TypeVar, Callable, Any @@ -565,7 +553,6 @@ def bar() -> None: == [case testSuggestInferFuncDecorator3] -# flags: --strict-optional # suggest: foo.foo [file foo.py] from typing import TypeVar, Callable, Any @@ -589,7 +576,6 @@ def bar() -> None: == [case testSuggestInferFuncDecorator4] -# flags: --strict-optional # suggest: foo.foo [file dec.py] from typing import TypeVar, Callable, Any @@ -616,7 +602,6 @@ def bar() -> None: == [case testSuggestFlexAny1] -# flags: --strict-optional # suggest: --flex-any=0.4 m.foo # suggest: --flex-any=0.7 m.foo # suggest: --flex-any=0.4 m.bar @@ -661,7 +646,6 @@ No guesses that match criteria! [case testSuggestFlexAny2] -# flags: --strict-optional # suggest: --flex-any=0.5 m.baz # suggest: --flex-any=0.0 m.baz # suggest: --flex-any=0.5 m.F.foo @@ -693,7 +677,6 @@ No guesses that match criteria! == [case testSuggestClassMethod] -# flags: --strict-optional # suggest: foo.F.bar # suggest: foo.F.baz # suggest: foo.F.eggs @@ -1090,7 +1073,7 @@ optional2(10) optional2('test') def optional3(x: Optional[List[Any]]): - assert not x + assert x return x[0] optional3(test) diff --git a/test-data/unit/fine-grained.test b/test-data/unit/fine-grained.test index 88a11be31f34..68f72a2aa992 100644 --- a/test-data/unit/fine-grained.test +++ b/test-data/unit/fine-grained.test @@ -95,11 +95,6 @@ class A: def g(self, a: A) -> None: pass [out] == -main:5: error: Missing positional argument "a" in call to "g" of "A" [call-arg] - a.g() # E - ^ -[out version>=3.8] -== main:5: error: Missing positional argument "a" in call to "g" of "A" [call-arg] a.g() # E ^~~~~ @@ -196,7 +191,7 @@ main:3: error: "A" has no attribute "x" [case testVariableTypeBecomesInvalid] import m def f() -> None: - a = None # type: m.A + a: m.A [file m.py] class A: pass [file m.py.2] @@ -1729,15 +1724,15 @@ f = 1 main:1: error: Module "a" has no attribute "f" [case testDecoratedMethodRefresh] -from typing import Iterator, Callable, List +from typing import Iterator, Callable, List, Optional from a import f import a -def dec(f: Callable[['A'], Iterator[int]]) -> Callable[[int], int]: pass +def dec(f: Callable[['A'], Optional[Iterator[int]]]) -> Callable[[int], int]: pass class A: @dec - def f(self) -> Iterator[int]: + def f(self) -> Optional[Iterator[int]]: self.x = a.g() # type: int return None [builtins fixtures/list.pyi] @@ -2088,7 +2083,6 @@ a.py:5: error: "list" expects 1 type argument, but 2 given == [case testPreviousErrorInOverloadedFunction] -# flags: --strict-optional import a [file a.py] from typing import overload @@ -3499,7 +3493,6 @@ def foo() -> None: b.py:4: error: Incompatible types in assignment (expression has type "str", variable has type "int") [case testNamedTupleUpdateNonRecursiveToRecursiveFine] -# flags: --strict-optional import c [file a.py] from b import M @@ -3542,7 +3535,6 @@ c.py:5: error: Incompatible types in assignment (expression has type "Optional[N c.py:7: note: Revealed type is "Tuple[Union[Tuple[Union[..., None], builtins.int, fallback=b.M], None], builtins.int, fallback=a.N]" [case testTupleTypeUpdateNonRecursiveToRecursiveFine] -# flags: 
--strict-optional import c [file a.py] from b import M @@ -3575,7 +3567,6 @@ c.py:4: note: Revealed type is "Tuple[Union[Tuple[Union[..., None], builtins.int c.py:5: error: Incompatible types in assignment (expression has type "Optional[N]", variable has type "int") [case testTypeAliasUpdateNonRecursiveToRecursiveFine] -# flags: --strict-optional import c [file a.py] from b import M @@ -4631,6 +4622,7 @@ class User: == [case testNoStrictOptionalModule] +# flags: --no-strict-optional import a a.y = a.x [file a.py] @@ -4648,9 +4640,10 @@ y: int [out] == == -main:2: error: Incompatible types in assignment (expression has type "Optional[str]", variable has type "int") +main:3: error: Incompatible types in assignment (expression has type "Optional[str]", variable has type "int") [case testNoStrictOptionalFunction] +# flags: --no-strict-optional import a from typing import Optional def f() -> None: @@ -4671,9 +4664,10 @@ def g(x: str) -> None: [out] == == -main:5: error: Argument 1 to "g" has incompatible type "Optional[int]"; expected "str" +main:6: error: Argument 1 to "g" has incompatible type "Optional[int]"; expected "str" [case testNoStrictOptionalMethod] +# flags: --no-strict-optional import a from typing import Optional class C: @@ -4698,10 +4692,9 @@ class B: [out] == == -main:6: error: Argument 1 to "g" of "B" has incompatible type "Optional[int]"; expected "str" +main:7: error: Argument 1 to "g" of "B" has incompatible type "Optional[int]"; expected "str" [case testStrictOptionalModule] -# flags: --strict-optional import a a.y = a.x [file a.py] @@ -4714,10 +4707,9 @@ x: Optional[int] y: int [out] == -main:3: error: Incompatible types in assignment (expression has type "Optional[int]", variable has type "int") +main:2: error: Incompatible types in assignment (expression has type "Optional[int]", variable has type "int") [case testStrictOptionalFunction] -# flags: --strict-optional import a from typing import Optional def f() -> None: @@ -4733,10 +4725,9 @@ def g(x: int) -> None: pass [out] == -main:6: error: Argument 1 to "g" has incompatible type "Optional[int]"; expected "int" +main:5: error: Argument 1 to "g" has incompatible type "Optional[int]"; expected "int" [case testStrictOptionalMethod] -# flags: --strict-optional import a from typing import Optional class C: @@ -4755,7 +4746,7 @@ class B: pass [out] == -main:7: error: Argument 1 to "g" of "B" has incompatible type "Optional[int]"; expected "int" +main:6: error: Argument 1 to "g" of "B" has incompatible type "Optional[int]"; expected "int" [case testPerFileStrictOptionalModule] import a @@ -5281,10 +5272,11 @@ class I(metaclass=ABCMeta): @abstractmethod def f(self) -> None: pass [file b.py] +from typing import Optional from z import I class Foo(I): pass -def x() -> Foo: return None +def x() -> Optional[Foo]: return None [file z.py.2] from abc import abstractmethod, ABCMeta class I(metaclass=ABCMeta): @@ -5306,10 +5298,11 @@ class I(metaclass=ABCMeta): @abstractmethod def f(self) -> None: pass [file b.py] +from typing import Optional from a import I class Foo(I): pass -def x() -> Foo: return None +def x() -> Optional[Foo]: return None [file a.py.2] from abc import abstractmethod, ABCMeta class I(metaclass=ABCMeta): @@ -7774,7 +7767,8 @@ from typing import List import b class A(b.B): def meth(self) -> None: - self.x, *self.y = None, None # type: str, List[str] + self.x: str + self.y: List[str] [file b.py] from typing import List class B: @@ -7786,7 +7780,7 @@ class B: [builtins fixtures/list.pyi] [out] == -main:5: error: 
Incompatible types in assignment (expression has type "List[str]", base class "B" defined the type as "List[int]") +main:6: error: Incompatible types in assignment (expression has type "List[str]", base class "B" defined the type as "List[int]") [case testLiskovFineVariableCleanDefInMethodNested-only_when_nocache] from b import B @@ -7952,7 +7946,7 @@ class Foo(a.I): == [case testImplicitOptionalRefresh1] -# flags: --strict-optional --implicit-optional +# flags: --implicit-optional from x import f def foo(x: int = None) -> None: f() @@ -8032,7 +8026,7 @@ A = NamedTuple('A', F) # type: ignore [builtins fixtures/list.pyi] [out] == -b.py:3: note: Revealed type is "Tuple[, fallback=a.A]" +b.py:3: note: Revealed type is "Tuple[(), fallback=a.A]" [case testImportOnTopOfAlias1] from a import A @@ -9114,27 +9108,27 @@ import a [file a.py] # mypy: no-warn-no-return -from typing import List -def foo() -> List: +from typing import List, Optional +def foo() -> Optional[List]: 20 [file a.py.2] # mypy: disallow-any-generics, no-warn-no-return -from typing import List -def foo() -> List: +from typing import List, Optional +def foo() -> Optional[List]: 20 [file a.py.3] # mypy: no-warn-no-return -from typing import List -def foo() -> List: +from typing import List, Optional +def foo() -> Optional[List]: 20 [file a.py.4] -from typing import List -def foo() -> List: +from typing import List, Optional +def foo() -> Optional[List]: 20 [out] == @@ -9666,7 +9660,8 @@ reveal_type(z) [out] c.py:2: note: Revealed type is "a." == -c.py:2: note: Revealed type is "a.A" +c.py:2: note: Revealed type is "Any" +b.py:2: error: Cannot determine type of "y" [case testIsInstanceAdHocIntersectionFineGrainedIncrementalUnreachaableToIntersection] import c @@ -9697,7 +9692,8 @@ from b import z reveal_type(z) [builtins fixtures/isinstance.pyi] [out] -c.py:2: note: Revealed type is "a.A" +b.py:2: error: Cannot determine type of "y" +c.py:2: note: Revealed type is "Any" == c.py:2: note: Revealed type is "a." @@ -9790,7 +9786,6 @@ class ExampleClass(Generic[T]): [out] == [case testStrictNoneAttribute] -# flags: --strict-optional from typing import Generic, TypeVar T = TypeVar('T', int, str) @@ -9872,7 +9867,7 @@ x = 0 # Arbitrary change to trigger reprocessing [builtins fixtures/dict.pyi] [out] == -a.py:5: note: Revealed type is "def (x: builtins.int) -> builtins.str" +a.py:5: note: Revealed type is "def (x: builtins.int) -> Union[builtins.str, None]" [case testTypeVarTupleCached] import a @@ -10043,7 +10038,6 @@ class C(B): ... main.py:4: note: Revealed type is "def () -> builtins.str" [case testAbstractBodyTurnsEmpty] -# flags: --strict-optional from b import Base class Sub(Base): @@ -10063,10 +10057,9 @@ class Base: def meth(self) -> int: ... [out] == -main:6: error: Call to abstract method "meth" of "Base" with trivial body via super() is unsafe +main:5: error: Call to abstract method "meth" of "Base" with trivial body via super() is unsafe [case testAbstractBodyTurnsEmptyProtocol] -# flags: --strict-optional from b import Base class Sub(Base): @@ -10083,7 +10076,7 @@ class Base(Protocol): def meth(self) -> int: ... 
[out] == -main:6: error: Call to abstract method "meth" of "Base" with trivial body via super() is unsafe +main:5: error: Call to abstract method "meth" of "Base" with trivial body via super() is unsafe [case testPrettyMessageSorting] # flags: --pretty @@ -10101,23 +10094,6 @@ object + 1 1() [out] -b.py:1: error: Unsupported left operand type for + ("Type[object]") - object + 1 - ^ -a.py:1: error: Unsupported operand types for + ("int" and "str") - 1 + '' - ^ -== -b.py:1: error: Unsupported left operand type for + ("Type[object]") - object + 1 - ^ -b.py:2: error: "int" not callable - 1() - ^ -a.py:1: error: Unsupported operand types for + ("int" and "str") - 1 + '' - ^ -[out version>=3.8] b.py:1: error: Unsupported left operand type for + ("Type[object]") object + 1 ^~~~~~~~~~ @@ -10340,3 +10316,24 @@ reveal_type(x) [out] == a.py:3: note: Revealed type is "Union[def (x: builtins.int) -> builtins.int, def (*x: builtins.int) -> builtins.int]" + +[case testErrorInReAddedModule] +# flags: --disallow-untyped-defs --follow-imports=error +# cmd: mypy a.py +# cmd2: mypy b.py +# cmd3: mypy a.py + +[file a.py] +def f(): pass +[file b.py] +def f(): pass +[file unrelated.txt.3] +[out] +a.py:1: error: Function is missing a return type annotation +a.py:1: note: Use "-> None" if function does not return a value +== +b.py:1: error: Function is missing a return type annotation +b.py:1: note: Use "-> None" if function does not return a value +== +a.py:1: error: Function is missing a return type annotation +a.py:1: note: Use "-> None" if function does not return a value diff --git a/test-data/unit/fixtures/dataclasses.pyi b/test-data/unit/fixtures/dataclasses.pyi index 710b8659d265..059c853a621f 100644 --- a/test-data/unit/fixtures/dataclasses.pyi +++ b/test-data/unit/fixtures/dataclasses.pyi @@ -47,4 +47,5 @@ class list(Generic[_T], Sequence[_T]): class function: pass class classmethod: pass +class staticmethod: pass property = object() diff --git a/test-data/unit/fixtures/paramspec.pyi b/test-data/unit/fixtures/paramspec.pyi index 5e4b8564e238..9b0089f6a7e9 100644 --- a/test-data/unit/fixtures/paramspec.pyi +++ b/test-data/unit/fixtures/paramspec.pyi @@ -30,7 +30,8 @@ class list(Sequence[T], Generic[T]): def __iter__(self) -> Iterator[T]: ... class int: - def __neg__(self) -> 'int': ... + def __neg__(self) -> int: ... + def __add__(self, other: int) -> int: ... class bool(int): ... class float: ... diff --git a/test-data/unit/fixtures/plugin_attrs.pyi b/test-data/unit/fixtures/plugin_attrs.pyi index f62104809e74..57e5ecd1b2bc 100644 --- a/test-data/unit/fixtures/plugin_attrs.pyi +++ b/test-data/unit/fixtures/plugin_attrs.pyi @@ -1,5 +1,5 @@ # Builtins stub used to support attrs plugin tests. -from typing import Union, overload +from typing import Union, overload, Generic, Sequence, TypeVar, Type, Iterable, Iterator class object: def __init__(self) -> None: pass @@ -24,6 +24,13 @@ class complex: class str: pass class ellipsis: pass -class tuple: pass class list: pass class dict: pass + +T = TypeVar("T") +Tco = TypeVar('Tco', covariant=True) +class tuple(Sequence[Tco], Generic[Tco]): + def __new__(cls: Type[T], iterable: Iterable[Tco] = ...) -> T: ... 
+ def __iter__(self) -> Iterator[Tco]: pass + def __contains__(self, item: object) -> bool: pass + def __getitem__(self, x: int) -> Tco: pass diff --git a/test-data/unit/fixtures/primitives.pyi b/test-data/unit/fixtures/primitives.pyi index b74252857d6f..63128a8ae03d 100644 --- a/test-data/unit/fixtures/primitives.pyi +++ b/test-data/unit/fixtures/primitives.pyi @@ -12,7 +12,7 @@ class object: def __ne__(self, other: object) -> bool: pass class type: - def __init__(self, x) -> None: pass + def __init__(self, x: object) -> None: pass class int: # Note: this is a simplification of the actual signature @@ -30,7 +30,7 @@ class str(Sequence[str]): def __iter__(self) -> Iterator[str]: pass def __contains__(self, other: object) -> bool: pass def __getitem__(self, item: int) -> str: pass - def format(self, *args, **kwargs) -> str: pass + def format(self, *args: object, **kwargs: object) -> str: pass class bytes(Sequence[int]): def __iter__(self) -> Iterator[int]: pass def __contains__(self, other: object) -> bool: pass @@ -45,7 +45,8 @@ class memoryview(Sequence[int]): def __iter__(self) -> Iterator[int]: pass def __contains__(self, other: object) -> bool: pass def __getitem__(self, item: int) -> int: pass -class tuple(Generic[T]): pass +class tuple(Generic[T]): + def __contains__(self, other: object) -> bool: pass class list(Sequence[T]): def __iter__(self) -> Iterator[T]: pass def __contains__(self, other: object) -> bool: pass diff --git a/test-data/unit/fixtures/typing-namedtuple.pyi b/test-data/unit/fixtures/typing-namedtuple.pyi index c8658a815a13..f4744575fc09 100644 --- a/test-data/unit/fixtures/typing-namedtuple.pyi +++ b/test-data/unit/fixtures/typing-namedtuple.pyi @@ -6,6 +6,8 @@ Type = 0 Literal = 0 Optional = 0 Self = 0 +Tuple = 0 +ClassVar = 0 T = TypeVar('T') T_co = TypeVar('T_co', covariant=True) @@ -18,6 +20,9 @@ class Mapping(Iterable[KT], Generic[KT, T_co]): def keys(self) -> Iterable[T]: pass # Approximate return type def __getitem__(self, key: T) -> T_co: pass -class Tuple(Sequence): pass -class NamedTuple(Tuple): - name: str +class NamedTuple(tuple[Any, ...]): + _fields: ClassVar[tuple[str, ...]] + @overload + def __init__(self, typename: str, fields: Iterable[tuple[str, Any]] = ...) -> None: ... + @overload + def __init__(self, typename: str, fields: None = None, **kwargs: Any) -> None: ... diff --git a/test-data/unit/fixtures/typing-override.pyi b/test-data/unit/fixtures/typing-override.pyi new file mode 100644 index 000000000000..606ca63d4f0d --- /dev/null +++ b/test-data/unit/fixtures/typing-override.pyi @@ -0,0 +1,25 @@ +TypeVar = 0 +Generic = 0 +Any = 0 +overload = 0 +Type = 0 +Literal = 0 +Optional = 0 +Self = 0 +Tuple = 0 +ClassVar = 0 +Callable = 0 + +T = TypeVar('T') +T_co = TypeVar('T_co', covariant=True) +KT = TypeVar('KT') + +class Iterable(Generic[T_co]): pass +class Iterator(Iterable[T_co]): pass +class Sequence(Iterable[T_co]): pass +class Mapping(Iterable[KT], Generic[KT, T_co]): + def keys(self) -> Iterable[T]: pass # Approximate return type + def __getitem__(self, key: T) -> T_co: pass + + +def override(__arg: T) -> T: ... diff --git a/test-data/unit/hacks.txt b/test-data/unit/hacks.txt index 43114a8af004..15b1065cb7a9 100644 --- a/test-data/unit/hacks.txt +++ b/test-data/unit/hacks.txt @@ -5,17 +5,6 @@ Due to historical reasons, test cases contain things that may appear baffling without extra context. This file attempts to describe most of them. 
-Strict optional is disabled be default --------------------------------------- - -Strict optional checking is enabled in mypy by default, but test cases -must enable it explicitly, either through `# flags: --strict-optional` -or by including `optional` as a substring in your test file name. - -The reason for this is that many test cases written before strict -optional was implemented use the idiom `x = None # type: t`, and -updating all of these test cases would take a lot of work. - Dummy if statements to prevent redefinition ------------------------------------------- diff --git a/test-data/unit/lib-stub/dataclasses.pyi b/test-data/unit/lib-stub/dataclasses.pyi index bd33b459266c..b2b48c2ae486 100644 --- a/test-data/unit/lib-stub/dataclasses.pyi +++ b/test-data/unit/lib-stub/dataclasses.pyi @@ -32,3 +32,5 @@ def field(*, class Field(Generic[_T]): pass + +def replace(__obj: _T, **changes: Any) -> _T: ... diff --git a/test-data/unit/lib-stub/functools.pyi b/test-data/unit/lib-stub/functools.pyi index 9e62a14c2f34..e665b2bad0c2 100644 --- a/test-data/unit/lib-stub/functools.pyi +++ b/test-data/unit/lib-stub/functools.pyi @@ -1,4 +1,4 @@ -from typing import Generic, TypeVar, Callable, Any, Mapping +from typing import Generic, TypeVar, Callable, Any, Mapping, overload _T = TypeVar("_T") diff --git a/test-data/unit/lib-stub/mypy_extensions.pyi b/test-data/unit/lib-stub/mypy_extensions.pyi index 56fac31e7219..4295c33f81ad 100644 --- a/test-data/unit/lib-stub/mypy_extensions.pyi +++ b/test-data/unit/lib-stub/mypy_extensions.pyi @@ -50,7 +50,67 @@ class FlexibleAlias(Generic[_T, _U]): ... class __SupportsInt(Protocol[T_co]): def __int__(self) -> int: pass -_Int = Union[int, i32, i64] +_Int = Union[int, u8, i16, i32, i64] + +class u8: + def __init__(self, x: Union[_Int, str, bytes, SupportsInt], base: int = 10) -> None: ... + def __add__(self, x: u8) -> u8: ... + def __radd__(self, x: u8) -> u8: ... + def __sub__(self, x: u8) -> u8: ... + def __rsub__(self, x: u8) -> u8: ... + def __mul__(self, x: u8) -> u8: ... + def __rmul__(self, x: u8) -> u8: ... + def __floordiv__(self, x: u8) -> u8: ... + def __rfloordiv__(self, x: u8) -> u8: ... + def __mod__(self, x: u8) -> u8: ... + def __rmod__(self, x: u8) -> u8: ... + def __and__(self, x: u8) -> u8: ... + def __rand__(self, x: u8) -> u8: ... + def __or__(self, x: u8) -> u8: ... + def __ror__(self, x: u8) -> u8: ... + def __xor__(self, x: u8) -> u8: ... + def __rxor__(self, x: u8) -> u8: ... + def __lshift__(self, x: u8) -> u8: ... + def __rlshift__(self, x: u8) -> u8: ... + def __rshift__(self, x: u8) -> u8: ... + def __rrshift__(self, x: u8) -> u8: ... + def __neg__(self) -> u8: ... + def __invert__(self) -> u8: ... + def __pos__(self) -> u8: ... + def __lt__(self, x: u8) -> bool: ... + def __le__(self, x: u8) -> bool: ... + def __ge__(self, x: u8) -> bool: ... + def __gt__(self, x: u8) -> bool: ... + +class i16: + def __init__(self, x: Union[_Int, str, bytes, SupportsInt], base: int = 10) -> None: ... + def __add__(self, x: i16) -> i16: ... + def __radd__(self, x: i16) -> i16: ... + def __sub__(self, x: i16) -> i16: ... + def __rsub__(self, x: i16) -> i16: ... + def __mul__(self, x: i16) -> i16: ... + def __rmul__(self, x: i16) -> i16: ... + def __floordiv__(self, x: i16) -> i16: ... + def __rfloordiv__(self, x: i16) -> i16: ... + def __mod__(self, x: i16) -> i16: ... + def __rmod__(self, x: i16) -> i16: ... + def __and__(self, x: i16) -> i16: ... + def __rand__(self, x: i16) -> i16: ... + def __or__(self, x: i16) -> i16: ... 
+ def __ror__(self, x: i16) -> i16: ... + def __xor__(self, x: i16) -> i16: ... + def __rxor__(self, x: i16) -> i16: ... + def __lshift__(self, x: i16) -> i16: ... + def __rlshift__(self, x: i16) -> i16: ... + def __rshift__(self, x: i16) -> i16: ... + def __rrshift__(self, x: i16) -> i16: ... + def __neg__(self) -> i16: ... + def __invert__(self) -> i16: ... + def __pos__(self) -> i16: ... + def __lt__(self, x: i16) -> bool: ... + def __le__(self, x: i16) -> bool: ... + def __ge__(self, x: i16) -> bool: ... + def __gt__(self, x: i16) -> bool: ... class i32: def __init__(self, x: Union[_Int, str, bytes, SupportsInt], base: int = 10) -> None: ... diff --git a/test-data/unit/parse-errors.test b/test-data/unit/parse-errors.test index 33cf9b4f91b4..c6b1c00a6169 100644 --- a/test-data/unit/parse-errors.test +++ b/test-data/unit/parse-errors.test @@ -273,17 +273,10 @@ file:3: error: Syntax error in type comment file:3: error: Inconsistent use of "*" in function signature file:3: error: Inconsistent use of "**" in function signature -[case testPrintStatementInPython35] -# flags: --python-version 3.5 +[case testPrintStatementInPython3] print 1 [out] -file:2: error: Missing parentheses in call to 'print' - -[case testPrintStatementInPython37] -# flags: --python-version 3.7 -print 1 -[out] -file:2: error: Missing parentheses in call to 'print'. Did you mean print(1)? +file:1: error: Missing parentheses in call to 'print'. Did you mean print(1)? [case testInvalidConditionInConditionalExpression] 1 if 2, 3 else 4 diff --git a/test-data/unit/pep561.test b/test-data/unit/pep561.test index 8c401cfc3c51..9969c2894c36 100644 --- a/test-data/unit/pep561.test +++ b/test-data/unit/pep561.test @@ -72,15 +72,6 @@ reveal_type(a) [out] testStubPrecedence.py:5: note: Revealed type is "builtins.list[builtins.str]" -[case testTypedPkgSimpleEgg] -# pkgs: typedpkg; no-pip -from typedpkg.sample import ex -from typedpkg import dne -a = ex(['']) -reveal_type(a) -[out] -testTypedPkgSimpleEgg.py:5: note: Revealed type is "builtins.tuple[builtins.str, ...]" - [case testTypedPkgSimpleEditable] # pkgs: typedpkg; editable from typedpkg.sample import ex @@ -90,15 +81,6 @@ reveal_type(a) [out] testTypedPkgSimpleEditable.py:5: note: Revealed type is "builtins.tuple[builtins.str, ...]" -[case testTypedPkgSimpleEditableEgg] -# pkgs: typedpkg; editable; no-pip -from typedpkg.sample import ex -from typedpkg import dne -a = ex(['']) -reveal_type(a) -[out] -testTypedPkgSimpleEditableEgg.py:5: note: Revealed type is "builtins.tuple[builtins.str, ...]" - [case testTypedPkgNamespaceImportFrom] # pkgs: typedpkg, typedpkg_ns_a from typedpkg.pkg.aaa import af @@ -185,6 +167,7 @@ a.bf(False) b.bf(False) a.bf(1) b.bf(1) +import typedpkg_ns.whatever as c # type: ignore[import-untyped] [out] testNamespacePkgWStubs.py:4: error: Skipping analyzing "typedpkg_ns.b.bbb": module is installed, but missing library stubs or py.typed marker testNamespacePkgWStubs.py:4: note: See https://mypy.readthedocs.io/en/stable/running_mypy.html#missing-imports diff --git a/test-data/unit/plugins/function_sig_hook.py b/test-data/unit/plugins/function_sig_hook.py index d83c7df26209..4d901b96716e 100644 --- a/test-data/unit/plugins/function_sig_hook.py +++ b/test-data/unit/plugins/function_sig_hook.py @@ -1,5 +1,5 @@ -from mypy.plugin import CallableType, CheckerPluginInterface, FunctionSigContext, Plugin -from mypy.types import Instance, Type +from mypy.plugin import CallableType, FunctionSigContext, Plugin + class FunctionSigPlugin(Plugin): def 
get_function_signature_hook(self, fullname): @@ -7,20 +7,17 @@ def get_function_signature_hook(self, fullname): return my_hook return None -def _str_to_int(api: CheckerPluginInterface, typ: Type) -> Type: - if isinstance(typ, Instance): - if typ.type.fullname == 'builtins.str': - return api.named_generic_type('builtins.int', []) - elif typ.args: - return typ.copy_modified(args=[_str_to_int(api, t) for t in typ.args]) - - return typ def my_hook(ctx: FunctionSigContext) -> CallableType: + arg1_args = ctx.args[0] + if len(arg1_args) != 1: + return ctx.default_signature + arg1_type = ctx.api.get_expression_type(arg1_args[0]) return ctx.default_signature.copy_modified( - arg_types=[_str_to_int(ctx.api, t) for t in ctx.default_signature.arg_types], - ret_type=_str_to_int(ctx.api, ctx.default_signature.ret_type), + arg_types=[arg1_type], + ret_type=arg1_type, ) + def plugin(version): return FunctionSigPlugin diff --git a/test-data/unit/pythoneval.test b/test-data/unit/pythoneval.test index 7108734102d1..58dfb172cf76 100644 --- a/test-data/unit/pythoneval.test +++ b/test-data/unit/pythoneval.test @@ -63,8 +63,8 @@ print(abs(A())) 5.5 [case testAbs2] -n = None # type: int -f = None # type: float +n: int +f: float n = abs(1) abs(1) + 'x' # Error f = abs(1.1) @@ -95,7 +95,7 @@ print(list.__add__([1, 2], [3, 4])) import typing class A: x = 1 - def f(self) -> None: print('f') + def f(self: typing.Optional["A"]) -> None: print('f') class B(A): pass B.f(None) @@ -130,7 +130,7 @@ A docstring! [case testFunctionAttributes] import typing ord.__class__ -print(type(ord.__doc__ + '')) +print(type(ord.__doc__ or '' + '')) print(ord.__name__) print(ord.__module__) [out] @@ -333,25 +333,28 @@ _program.py:6: note: Revealed type is "typing.IO[Any]" [case testGenericPatterns] from typing import Pattern import re -p = None # type: Pattern[str] +p: Pattern[str] p = re.compile('foo*') -b = None # type: Pattern[bytes] +b: Pattern[bytes] b = re.compile(b'foo*') -print(p.match('fooo').group(0)) +m = p.match('fooo') +assert m +print(m.group(0)) [out] fooo [case testGenericMatch] -from typing import Match +from typing import Match, Optional import re -def f(m: Match[bytes]) -> None: +def f(m: Optional[Match[bytes]]) -> None: + assert m print(m.group(0)) f(re.match(b'x*', b'xxy')) [out] b'xx' [case testIntFloatDucktyping] -x = None # type: float +x: float x = 2.2 x = 2 def f(x: float) -> None: pass @@ -374,18 +377,17 @@ math.sin(2) math.sin(2.2) [case testAbsReturnType] - -f = None # type: float -n = None # type: int +f: float +n: int n = abs(2) f = abs(2.2) abs(2.2) + 'x' [out] -_program.py:6: error: Unsupported operand types for + ("float" and "str") +_program.py:5: error: Unsupported operand types for + ("float" and "str") [case testROperatorMethods] -b = None # type: bytes -s = None # type: str +b: bytes +s: str if int(): s = b'foo' * 5 # Error if int(): @@ -434,7 +436,6 @@ True False [case testOverlappingOperatorMethods] - class X: pass class A: def __add__(self, x) -> int: @@ -444,11 +445,11 @@ class A: class B: def __radd__(self, x: A) -> str: return 'x' class C(X, B): pass -b = None # type: B +b: B b = C() print(A() + b) [out] -_program.py:9: error: Signatures of "__radd__" of "B" and "__add__" of "A" are unsafely overlapping +_program.py:8: error: Signatures of "__radd__" of "B" and "__add__" of "A" are unsafely overlapping [case testBytesAndBytearrayComparisons] import typing @@ -833,6 +834,7 @@ _program.py:3: error: Dict entry 1 has incompatible type "str": "str"; expected _program.py:5: error: "Dict[str, int]" 
has no attribute "xyz" [case testDefaultDict] +# flags: --new-type-inference import typing as t from collections import defaultdict @@ -858,14 +860,14 @@ class MyDDict(t.DefaultDict[int,T], t.Generic[T]): MyDDict(dict)['0'] MyDDict(dict)[0] [out] -_program.py:6: error: Argument 1 to "defaultdict" has incompatible type "Type[List[Any]]"; expected "Callable[[], str]" -_program.py:9: error: Invalid index type "str" for "defaultdict[int, str]"; expected type "int" -_program.py:9: error: Incompatible types in assignment (expression has type "int", target has type "str") -_program.py:19: error: Argument 1 to "tst" has incompatible type "defaultdict[str, List[]]"; expected "defaultdict[int, List[]]" -_program.py:23: error: Invalid index type "str" for "MyDDict[Dict[_KT, _VT]]"; expected type "int" +_program.py:7: error: Argument 1 to "defaultdict" has incompatible type "Type[List[Any]]"; expected "Optional[Callable[[], str]]" +_program.py:10: error: Invalid index type "str" for "defaultdict[int, str]"; expected type "int" +_program.py:10: error: Incompatible types in assignment (expression has type "int", target has type "str") +_program.py:20: error: Argument 1 to "tst" has incompatible type "defaultdict[str, List[]]"; expected "defaultdict[int, List[]]" +_program.py:24: error: Invalid index type "str" for "MyDDict[Dict[, ]]"; expected type "int" [case testNoSubcriptionOfStdlibCollections] -# flags: --python-version 3.6 +# flags: --python-version 3.7 import collections from collections import Counter from typing import TypeVar @@ -965,10 +967,10 @@ print(getattr(B(), 'x')) 7 [case testSortedNoError] -from typing import Iterable, Callable, TypeVar, List, Dict +from typing import Iterable, Callable, TypeVar, List, Dict, Optional T = TypeVar('T') -def sorted(x: Iterable[T], *, key: Callable[[T], object] = None) -> None: ... -a = None # type: List[Dict[str, str]] +def sorted(x: Iterable[T], *, key: Optional[Callable[[T], object]] = None) -> None: ... +a = [] # type: List[Dict[str, str]] sorted(a, key=lambda y: y['']) [case testAbstractProperty] @@ -1001,9 +1003,13 @@ import re bre = b'a+' bpat = re.compile(bre) bpat = re.compile(bpat) -re.search(bre, b'').groups() +s1 = re.search(bre, b'') +assert s1 +s1.groups() re.search(bre, u'') # Error -re.search(bpat, b'').groups() +s2 = re.search(bpat, b'') +assert s2 +s2.groups() re.search(bpat, u'') # Error # match(), split(), findall(), finditer() are much the same, so skip those. # sub(), subn() have more overloads and we are checking these: @@ -1016,11 +1022,11 @@ re.subn(bpat, b'', b'')[0] + b'' re.subn(bre, lambda m: b'', b'')[0] + b'' re.subn(bpat, lambda m: b'', b'')[0] + b'' [out] -_testReModuleBytes.py:7: error: No overload variant of "search" matches argument types "bytes", "str" -_testReModuleBytes.py:7: note: Possible overload variants: -_testReModuleBytes.py:7: note: def search(pattern: Union[str, Pattern[str]], string: str, flags: Union[int, RegexFlag] = ...) -> Optional[Match[str]] -_testReModuleBytes.py:7: note: def search(pattern: Union[bytes, Pattern[bytes]], string: Buffer, flags: Union[int, RegexFlag] = ...) 
-> Optional[Match[bytes]] -_testReModuleBytes.py:9: error: Argument 1 to "search" has incompatible type "Pattern[bytes]"; expected "Union[str, Pattern[str]]" +_testReModuleBytes.py:9: error: No overload variant of "search" matches argument types "bytes", "str" +_testReModuleBytes.py:9: note: Possible overload variants: +_testReModuleBytes.py:9: note: def search(pattern: Union[str, Pattern[str]], string: str, flags: Union[int, RegexFlag] = ...) -> Optional[Match[str]] +_testReModuleBytes.py:9: note: def search(pattern: Union[bytes, Pattern[bytes]], string: Buffer, flags: Union[int, RegexFlag] = ...) -> Optional[Match[bytes]] +_testReModuleBytes.py:13: error: Argument 1 to "search" has incompatible type "Pattern[bytes]"; expected "Union[str, Pattern[str]]" [case testReModuleString] # Regression tests for various overloads in the re module -- string version @@ -1028,9 +1034,13 @@ import re sre = 'a+' spat = re.compile(sre) spat = re.compile(spat) -re.search(sre, '').groups() +s1 = re.search(sre, '') +assert s1 +s1.groups() re.search(sre, b'') # Error -re.search(spat, '').groups() +s2 = re.search(spat, '') +assert s2 +s2.groups() re.search(spat, b'') # Error # match(), split(), findall(), finditer() are much the same, so skip those. # sus(), susn() have more overloads and we are checking these: @@ -1043,11 +1053,11 @@ re.subn(spat, '', '')[0] + '' re.subn(sre, lambda m: '', '')[0] + '' re.subn(spat, lambda m: '', '')[0] + '' [out] -_testReModuleString.py:7: error: No overload variant of "search" matches argument types "str", "bytes" -_testReModuleString.py:7: note: Possible overload variants: -_testReModuleString.py:7: note: def search(pattern: Union[str, Pattern[str]], string: str, flags: Union[int, RegexFlag] = ...) -> Optional[Match[str]] -_testReModuleString.py:7: note: def search(pattern: Union[bytes, Pattern[bytes]], string: Buffer, flags: Union[int, RegexFlag] = ...) -> Optional[Match[bytes]] -_testReModuleString.py:9: error: Argument 1 to "search" has incompatible type "Pattern[str]"; expected "Union[bytes, Pattern[bytes]]" +_testReModuleString.py:9: error: No overload variant of "search" matches argument types "str", "bytes" +_testReModuleString.py:9: note: Possible overload variants: +_testReModuleString.py:9: note: def search(pattern: Union[str, Pattern[str]], string: str, flags: Union[int, RegexFlag] = ...) -> Optional[Match[str]] +_testReModuleString.py:9: note: def search(pattern: Union[bytes, Pattern[bytes]], string: Buffer, flags: Union[int, RegexFlag] = ...) 
-> Optional[Match[bytes]] +_testReModuleString.py:13: error: Argument 1 to "search" has incompatible type "Pattern[str]"; expected "Union[bytes, Pattern[bytes]]" [case testListSetitemTuple] from typing import List, Tuple @@ -1084,7 +1094,6 @@ _program.py:17: note: Revealed type is "builtins.str" [case testTypedDictGet] # Test that TypedDict get plugin works with typeshed stubs -# TODO: Make it possible to use strict optional here from mypy_extensions import TypedDict class A: pass D = TypedDict('D', {'x': int, 'y': str}) @@ -1096,14 +1105,14 @@ d.get() s = '' reveal_type(d.get(s)) [out] -_testTypedDictGet.py:7: note: Revealed type is "builtins.int" -_testTypedDictGet.py:8: note: Revealed type is "builtins.str" -_testTypedDictGet.py:9: note: Revealed type is "builtins.object" -_testTypedDictGet.py:10: error: All overload variants of "get" of "Mapping" require at least one argument -_testTypedDictGet.py:10: note: Possible overload variants: -_testTypedDictGet.py:10: note: def get(self, str, /) -> object -_testTypedDictGet.py:10: note: def [_T] get(self, str, /, default: object) -> object -_testTypedDictGet.py:12: note: Revealed type is "builtins.object" +_testTypedDictGet.py:6: note: Revealed type is "Union[builtins.int, None]" +_testTypedDictGet.py:7: note: Revealed type is "Union[builtins.str, None]" +_testTypedDictGet.py:8: note: Revealed type is "builtins.object" +_testTypedDictGet.py:9: error: All overload variants of "get" of "Mapping" require at least one argument +_testTypedDictGet.py:9: note: Possible overload variants: +_testTypedDictGet.py:9: note: def get(self, str, /) -> object +_testTypedDictGet.py:9: note: def [_T] get(self, str, /, default: object) -> object +_testTypedDictGet.py:11: note: Revealed type is "builtins.object" [case testTypedDictMappingMethods] from mypy_extensions import TypedDict @@ -1142,10 +1151,10 @@ _testTypedDictMappingMethods.py:16: error: Key "value" of TypedDict "Cell" canno _testTypedDictMappingMethods.py:21: note: Revealed type is "builtins.int" [case testCrashOnComplexCheckWithNamedTupleNext] -from typing import NamedTuple +from typing import NamedTuple, Optional MyNamedTuple = NamedTuple('MyNamedTuple', [('parent', 'MyNamedTuple')]) # type: ignore -def foo(mymap) -> MyNamedTuple: +def foo(mymap) -> Optional[MyNamedTuple]: return next((mymap[key] for key in mymap), None) [out] @@ -1373,7 +1382,7 @@ JsonBlob = Dict[str, Any] Column = Union[List[str], List[int], List[bool], List[float], List[DateTime], List[JsonBlob]] def print_custom_table() -> None: - a = None # type: Column + a: Column for row in simple_map(format_row, a, a, a, a, a, a, a, a): # 8 columns reveal_type(row) @@ -1432,7 +1441,7 @@ accepts_named_tuple(b) accepts_named_tuple(1) accepts_named_tuple((1, 2)) [out] -_testNamedTupleTypeInheritanceSpecialCase.py:8: note: Revealed type is "collections.OrderedDict[builtins.str, Any]" +_testNamedTupleTypeInheritanceSpecialCase.py:8: note: Revealed type is "builtins.dict[builtins.str, Any]" _testNamedTupleTypeInheritanceSpecialCase.py:9: note: Revealed type is "builtins.tuple[builtins.str, ...]" _testNamedTupleTypeInheritanceSpecialCase.py:10: note: Revealed type is "builtins.dict[builtins.str, Any]" _testNamedTupleTypeInheritanceSpecialCase.py:17: error: Argument 1 to "accepts_named_tuple" has incompatible type "int"; expected "NamedTuple" @@ -1473,14 +1482,12 @@ frozenset({1}) == [1] # Error {1: 2}.keys() == frozenset({1}) {1: 2}.items() == {(1, 2)} -{1: 2}.keys() == {'no'} # Error +{1: 2}.keys() == {'no'} # OK {1: 2}.values() == {2} # Error 
-{1: 2}.keys() == [1] # Error +{1: 2}.keys() == [1] # OK [out] _testStrictEqualityAllowlist.py:5: error: Non-overlapping equality check (left operand type: "FrozenSet[int]", right operand type: "List[int]") -_testStrictEqualityAllowlist.py:11: error: Non-overlapping equality check (left operand type: "dict_keys[int, int]", right operand type: "Set[str]") _testStrictEqualityAllowlist.py:12: error: Non-overlapping equality check (left operand type: "dict_values[int, int]", right operand type: "Set[int]") -_testStrictEqualityAllowlist.py:13: error: Non-overlapping equality check (left operand type: "dict_keys[int, int]", right operand type: "List[int]") [case testUnreachableWithStdlibContextManagers] # mypy: warn-unreachable, strict-optional @@ -1640,7 +1647,6 @@ foo(list((list(""), ""))) [out] [case testNarrowTypeForDictKeys] -# flags: --strict-optional from typing import Dict, KeysView, Optional d: Dict[str, int] @@ -1658,10 +1664,10 @@ else: reveal_type(k) [out] -_testNarrowTypeForDictKeys.py:7: note: Revealed type is "builtins.str" -_testNarrowTypeForDictKeys.py:9: note: Revealed type is "Union[builtins.str, None]" -_testNarrowTypeForDictKeys.py:14: note: Revealed type is "builtins.str" -_testNarrowTypeForDictKeys.py:16: note: Revealed type is "Union[builtins.str, None]" +_testNarrowTypeForDictKeys.py:6: note: Revealed type is "builtins.str" +_testNarrowTypeForDictKeys.py:8: note: Revealed type is "Union[builtins.str, None]" +_testNarrowTypeForDictKeys.py:13: note: Revealed type is "builtins.str" +_testNarrowTypeForDictKeys.py:15: note: Revealed type is "Union[builtins.str, None]" [case testTypeAliasWithNewStyleUnion] # flags: --python-version 3.10 @@ -1987,6 +1993,19 @@ def good9(foo1: Foo[Concatenate[int, P]], foo2: Foo[[int, str, bytes]], *args: P [out] _testStrictEqualitywithParamSpec.py:11: error: Non-overlapping equality check (left operand type: "Foo[[int]]", right operand type: "Bar[[int]]") +[case testInferenceOfDunderDictOnClassObjects] +class Foo: ... +reveal_type(Foo.__dict__) +reveal_type(Foo().__dict__) +Foo.__dict__ = {} +Foo().__dict__ = {} + +[out] +_testInferenceOfDunderDictOnClassObjects.py:2: note: Revealed type is "types.MappingProxyType[builtins.str, Any]" +_testInferenceOfDunderDictOnClassObjects.py:3: note: Revealed type is "builtins.dict[builtins.str, Any]" +_testInferenceOfDunderDictOnClassObjects.py:4: error: Property "__dict__" defined in "type" is read-only +_testInferenceOfDunderDictOnClassObjects.py:4: error: Incompatible types in assignment (expression has type "Dict[, ]", variable has type "MappingProxyType[str, Any]") + [case testTypeVarTuple] # flags: --enable-incomplete-feature=TypeVarTuple --enable-incomplete-feature=Unpack --python-version=3.11 from typing import Any, Callable, Unpack, TypeVarTuple @@ -2011,3 +2030,91 @@ def foo(callback: Callable[[], Any]) -> None: def call(callback: Callable[[Unpack[Ts]], Any], *args: Unpack[Ts]) -> Any: ... 
+ +[case testDataclassReplace] +from dataclasses import dataclass, replace + +@dataclass +class A: + x: int + +a = A(x=42) +a2 = replace(a, x=42) +reveal_type(a2) +a2 = replace() +a2 = replace(a, x='spam') +a2 = replace(a, x=42, q=42) +[out] +_testDataclassReplace.py:9: note: Revealed type is "_testDataclassReplace.A" +_testDataclassReplace.py:10: error: Too few arguments for "replace" +_testDataclassReplace.py:11: error: Argument "x" to "replace" of "A" has incompatible type "str"; expected "int" +_testDataclassReplace.py:12: error: Unexpected keyword argument "q" for "replace" of "A" + +[case testGenericInferenceWithTuple] +# flags: --new-type-inference +from typing import TypeVar, Callable, Tuple + +T = TypeVar("T") + +def f(x: Callable[..., T]) -> T: + return x() + +x: Tuple[str, ...] = f(tuple) +[out] + +[case testGenericInferenceWithDataclass] +# flags: --new-type-inference +from typing import Any, Collection, List +from dataclasses import dataclass, field + +class Foo: + pass + +@dataclass +class A: + items: Collection[Foo] = field(default_factory=list) +[out] + +[case testGenericInferenceWithItertools] +# flags: --new-type-inference +from typing import TypeVar, Tuple +from itertools import groupby +K = TypeVar("K") +V = TypeVar("V") + +def fst(kv: Tuple[K, V]) -> K: + k, v = kv + return k + +pairs = [(len(s), s) for s in ["one", "two", "three"]] +grouped = groupby(pairs, key=fst) +[out] + +[case testDataclassReplaceOptional] +from dataclasses import dataclass, replace +from typing import Optional + +@dataclass +class A: + x: Optional[int] + +a = A(x=42) +reveal_type(a) +a2 = replace(a, x=None) # OK +reveal_type(a2) +[out] +_testDataclassReplaceOptional.py:9: note: Revealed type is "_testDataclassReplaceOptional.A" +_testDataclassReplaceOptional.py:11: note: Revealed type is "_testDataclassReplaceOptional.A" + +[case testDataclassStrictOptionalAlwaysSet] +from dataclasses import dataclass +from typing import Callable, Optional + +@dataclass +class Description: + name_fn: Callable[[Optional[int]], Optional[str]] + +def f(d: Description) -> None: + reveal_type(d.name_fn) +[out] +_testDataclassStrictOptionalAlwaysSet.py:9: note: Revealed type is "def (Union[builtins.int, None]) -> Union[builtins.str, None]" diff --git a/test-data/unit/reports.test b/test-data/unit/reports.test index 50dabb1fdea9..a6cde503ca09 100644 --- a/test-data/unit/reports.test +++ b/test-data/unit/reports.test @@ -311,7 +311,7 @@ Total 0 14 100.00% [case testAnyExpressionsReportTypesOfAny] -# cmd: mypy --python-version=3.6 --any-exprs-report report n.py +# cmd: mypy --any-exprs-report report n.py [file n.py] from typing import Any, List diff --git a/test-data/unit/semanal-errors.test b/test-data/unit/semanal-errors.test index 0c3de312cdfa..f21ba5253437 100644 --- a/test-data/unit/semanal-errors.test +++ b/test-data/unit/semanal-errors.test @@ -361,84 +361,84 @@ main:2: error: "yield" outside function [case testInvalidLvalues1] 1 = 1 [out] -main:1: error: can't assign to literal +main:1: error: cannot assign to literal [out version>=3.10] -main:1: error: can't assign to literal here. Maybe you meant '==' instead of '='? +main:1: error: cannot assign to literal here. Maybe you meant '==' instead of '='? [case testInvalidLvalues2] (1) = 1 [out] -main:1: error: can't assign to literal +main:1: error: cannot assign to literal [out version>=3.10] -main:1: error: can't assign to literal here. Maybe you meant '==' instead of '='? +main:1: error: cannot assign to literal here. Maybe you meant '==' instead of '='? 
[case testInvalidLvalues3] (1, 1) = 1 [out] -main:1: error: can't assign to literal +main:1: error: cannot assign to literal [case testInvalidLvalues4] [1, 1] = 1 [out] -main:1: error: can't assign to literal +main:1: error: cannot assign to literal [case testInvalidLvalues6] x = y = z = 1 # ok x, (y, 1) = 1 [out] -main:2: error: can't assign to literal +main:2: error: cannot assign to literal [case testInvalidLvalues7] x, [y, 1] = 1 [out] -main:1: error: can't assign to literal +main:1: error: cannot assign to literal [case testInvalidLvalues8] x, [y, [z, 1]] = 1 [out] -main:1: error: can't assign to literal +main:1: error: cannot assign to literal [case testInvalidLvalues9] x, (y) = 1 # ok x, (y, (z, z)) = 1 # ok x, (y, (z, 1)) = 1 [out] -main:3: error: can't assign to literal +main:3: error: cannot assign to literal [case testInvalidLvalues10] x + x = 1 [out] -main:1: error: can't assign to operator +main:1: error: cannot assign to operator [out version>=3.10] -main:1: error: can't assign to expression here. Maybe you meant '==' instead of '='? +main:1: error: cannot assign to expression here. Maybe you meant '==' instead of '='? [case testInvalidLvalues11] -x = 1 [out] -main:1: error: can't assign to operator +main:1: error: cannot assign to operator [out version>=3.10] -main:1: error: can't assign to expression here. Maybe you meant '==' instead of '='? +main:1: error: cannot assign to expression here. Maybe you meant '==' instead of '='? [case testInvalidLvalues12] 1.1 = 1 [out] -main:1: error: can't assign to literal +main:1: error: cannot assign to literal [out version>=3.10] -main:1: error: can't assign to literal here. Maybe you meant '==' instead of '='? +main:1: error: cannot assign to literal here. Maybe you meant '==' instead of '='? [case testInvalidLvalues13] 'x' = 1 [out] -main:1: error: can't assign to literal +main:1: error: cannot assign to literal [out version>=3.10] -main:1: error: can't assign to literal here. Maybe you meant '==' instead of '='? +main:1: error: cannot assign to literal here. Maybe you meant '==' instead of '='? [case testInvalidLvalues14] x() = 1 [out] -main:1: error: can't assign to function call +main:1: error: cannot assign to function call [out version>=3.10] -main:1: error: can't assign to function call here. Maybe you meant '==' instead of '='? +main:1: error: cannot assign to function call here. Maybe you meant '==' instead of '='? [case testTwoStarExpressions] a, *b, *c = 1 @@ -490,8 +490,9 @@ main:2: error: Can use starred expression only as assignment target [case testInvalidDel1] x = 1 -del x(1) # E: can't delete function call +del x(1) [out] +main:2: error: cannot delete function call [case testInvalidDel2] x = 1 @@ -503,7 +504,6 @@ main:2: error: cannot delete expression [case testInvalidDel3] del z # E: Name "z" is not defined -[out] [case testFunctionTvarScope] @@ -810,8 +810,8 @@ class C(Generic[t]): pass cast(str + str, None) # E: Cast target is not a type cast(C[str][str], None) # E: Cast target is not a type cast(C[str + str], None) # E: Cast target is not a type -cast([int, str], None) # E: Bracketed expression "[...]" is not valid as a type \ - # N: Did you mean "List[...]"? +cast([int], None) # E: Bracketed expression "[...]" is not valid as a type \ + # N: Did you mean "List[...]"? [out] [case testInvalidCastTargetType] @@ -859,8 +859,8 @@ Any(arg=str) # E: Any(...) is no longer supported. Use cast(Any, ...) 
instead [case testTypeListAsType] -def f(x:[int, str]) -> None: # E: Bracketed expression "[...]" is not valid as a type \ - # N: Did you mean "List[...]"? +def f(x: [int]) -> None: # E: Bracketed expression "[...]" is not valid as a type \ + # N: Did you mean "List[...]"? pass [out] @@ -897,9 +897,9 @@ import typing def f(): pass f() = 1 # type: int [out] -main:3: error: can't assign to function call +main:3: error: cannot assign to function call [out version>=3.10] -main:3: error: can't assign to function call here. Maybe you meant '==' instead of '='? +main:3: error: cannot assign to function call here. Maybe you meant '==' instead of '='? [case testIndexedAssignmentWithTypeDeclaration] import typing @@ -1275,8 +1275,9 @@ main:2: note: Did you forget to import it from "typing"? (Suggestion: "from typi [case testInvalidWithTarget] def f(): pass -with f() as 1: pass # E: can't assign to literal +with f() as 1: pass [out] +main:2: error: cannot assign to literal [case testInvalidTypeAnnotation] import typing @@ -1290,9 +1291,9 @@ import typing def f() -> None: f() = 1 # type: int [out] -main:3: error: can't assign to function call +main:3: error: cannot assign to function call [out version>=3.10] -main:3: error: can't assign to function call here. Maybe you meant '==' instead of '='? +main:3: error: cannot assign to function call here. Maybe you meant '==' instead of '='? [case testInvalidReferenceToAttributeOfOuterClass] class A: @@ -1456,7 +1457,7 @@ homogenous_tuple: Tuple[Unpack[Tuple[int, ...]]] bad: Tuple[Unpack[int]] # E: "int" cannot be unpacked (must be tuple or TypeVarTuple) [builtins fixtures/tuple.pyi] -[case testTypeVarTuple] +[case testTypeVarTupleErrors] from typing import Generic from typing_extensions import TypeVarTuple, Unpack @@ -1470,15 +1471,14 @@ TP5 = TypeVarTuple(t='TP5') # E: TypeVarTuple() expects a string literal as fir TP6 = TypeVarTuple('TP6', bound=int) # E: Unexpected keyword argument "bound" for "TypeVarTuple" x: TVariadic # E: TypeVarTuple "TVariadic" is unbound -y: Unpack[TVariadic] # E: TypeVarTuple "TVariadic" is unbound +y: Unpack[TVariadic] # E: Unpack is only valid in a variadic position class Variadic(Generic[Unpack[TVariadic], Unpack[TVariadic2]]): # E: Can only use one type var tuple in a class def pass -# TODO: this should generate an error -#def bad_args(*args: TVariadic): -# pass +def bad_args(*args: TVariadic): # E: TypeVarTuple "TVariadic" is only valid with an unpack + pass def bad_kwargs(**kwargs: Unpack[TVariadic]): # E: Unpack item in ** argument must be a TypedDict pass diff --git a/test-data/unit/semanal-namedtuple.test b/test-data/unit/semanal-namedtuple.test index df1d5679c892..f396f799028f 100644 --- a/test-data/unit/semanal-namedtuple.test +++ b/test-data/unit/semanal-namedtuple.test @@ -225,3 +225,23 @@ class B(A): pass [out] main:2: error: Unsupported dynamic base class "NamedTuple" main:2: error: Name "NamedTuple" is not defined + +[case testNamedTupleWithDecorator] +from typing import final, NamedTuple + +@final +class A(NamedTuple("N", [("x", int)])): + pass +[builtins fixtures/tuple.pyi] +[out] +MypyFile:1( + ImportFrom:1(typing, [final, NamedTuple]) + ClassDef:4( + A + TupleType( + Tuple[builtins.int, fallback=__main__.N@4]) + Decorators( + NameExpr(final [typing.final])) + BaseType( + __main__.N@4) + PassStmt:5())) diff --git a/test-data/unit/semanal-statements.test b/test-data/unit/semanal-statements.test index f602c236c949..c143805f4564 100644 --- a/test-data/unit/semanal-statements.test +++ 
b/test-data/unit/semanal-statements.test @@ -557,9 +557,9 @@ MypyFile:1( def f(x, y) -> None: del x, y + 1 [out] -main:2: error: can't delete operator +main:2: error: cannot delete operator [out version>=3.10] -main:2: error: can't delete expression +main:2: error: cannot delete expression [case testTry] class c: pass diff --git a/test-data/unit/stubgen.test b/test-data/unit/stubgen.test index e1818dc4c4bc..774a17b76161 100644 --- a/test-data/unit/stubgen.test +++ b/test-data/unit/stubgen.test @@ -127,10 +127,52 @@ class A: def g() -> None: ... -[case testVariable] -x = 1 +[case testVariables] +i = 1 +s = 'a' +f = 1.5 +c1 = 1j +c2 = 0j + 1 +bl1 = True +bl2 = False +bts = b'' +[out] +i: int +s: str +f: float +c1: complex +c2: complex +bl1: bool +bl2: bool +bts: bytes + +[case testVariablesWithUnary] +i = +-1 +f = -1.5 +c1 = -1j +c2 = -1j + 1 +bl1 = not True +bl2 = not not False +[out] +i: int +f: float +c1: complex +c2: complex +bl1: bool +bl2: bool + +[case testVariablesWithUnaryWrong] +i = not +1 +bl1 = -True +bl2 = not -False +bl3 = -(not False) [out] -x: int +from _typeshed import Incomplete + +i: Incomplete +bl1: Incomplete +bl2: Incomplete +bl3: Incomplete [case testAnnotatedVariable] x: int = 1 @@ -656,6 +698,62 @@ class Y(NamedTuple): a: int b: str +[case testNamedTupleClassSyntax_semanal] +from typing import NamedTuple + +class A(NamedTuple): + x: int + y: str = 'a' + +class B(A): + z1: str + z2 = 1 + z3: str = 'b' + +class RegularClass: + x: int + y: str = 'a' + class NestedNamedTuple(NamedTuple): + x: int + y: str = 'a' + z: str = 'b' +[out] +from typing import NamedTuple + +class A(NamedTuple): + x: int + y: str = ... + +class B(A): + z1: str + z2: int + z3: str + +class RegularClass: + x: int + y: str + class NestedNamedTuple(NamedTuple): + x: int + y: str = ... 
+ z: str + + +[case testNestedClassInNamedTuple_semanal-xfail] +from typing import NamedTuple + +# TODO: make sure that nested classes in `NamedTuple` are supported: +class NamedTupleWithNestedClass(NamedTuple): + class Nested: + x: int + y: str = 'a' +[out] +from typing import NamedTuple + +class NamedTupleWithNestedClass(NamedTuple): + class Nested: + x: int + y: str + [case testEmptyNamedtuple] import collections, typing X = collections.namedtuple('X', []) @@ -1036,11 +1134,16 @@ y: C [case testTypeVarPreserved] tv = TypeVar('tv') +ps = ParamSpec('ps') +tvt = TypeVarTuple('tvt') [out] from typing import TypeVar +from typing_extensions import ParamSpec, TypeVarTuple tv = TypeVar('tv') +ps = ParamSpec('ps') +tvt = TypeVarTuple('tvt') [case testTypeVarArgsPreserved] tv = TypeVar('tv', int, str) @@ -1052,29 +1155,37 @@ tv = TypeVar('tv', int, str) [case testTypeVarNamedArgsPreserved] tv = TypeVar('tv', bound=bool, covariant=True) +ps = ParamSpec('ps', bound=bool, covariant=True) [out] from typing import TypeVar +from typing_extensions import ParamSpec tv = TypeVar('tv', bound=bool, covariant=True) +ps = ParamSpec('ps', bound=bool, covariant=True) [case TypeVarImportAlias] -from typing import TypeVar as t_TV -from typing_extensions import TypeVar as te_TV +from typing import TypeVar as t_TV, ParamSpec as t_PS +from typing_extensions import TypeVar as te_TV, TypeVarTuple as te_TVT from x import TypeVar as x_TV T = t_TV('T') U = te_TV('U') V = x_TV('V') +PS = t_PS('PS') +TVT = te_TVT('TVT') + [out] from _typeshed import Incomplete -from typing import TypeVar as t_TV -from typing_extensions import TypeVar as te_TV +from typing import ParamSpec as t_PS, TypeVar as t_TV +from typing_extensions import TypeVar as te_TV, TypeVarTuple as te_TVT T = t_TV('T') U = te_TV('U') V: Incomplete +PS = t_PS('PS') +TVT = te_TVT('TVT') [case testTypeVarFromImportAlias] import typing as t @@ -1085,6 +1196,9 @@ T = t.TypeVar('T') U = te.TypeVar('U') V = x.TypeVar('V') +PS = t.ParamSpec('PS') +TVT = te.TypeVarTuple('TVT') + [out] import typing as t import typing_extensions as te @@ -1093,6 +1207,8 @@ from _typeshed import Incomplete T = t.TypeVar('T') U = te.TypeVar('U') V: Incomplete +PS = t.ParamSpec('PS') +TVT = te.TypeVarTuple('TVT') [case testTypeAliasPreserved] alias = str @@ -3067,6 +3183,85 @@ def f2(): def f1(): ... def f2(): ... +[case testIncludeDocstrings] +# flags: --include-docstrings +class A: + """class docstring + + a multiline docstring""" + def func(): + """func docstring + don't forget to indent""" + ... + def nodoc(): + ... +class B: + def quoteA(): + '''func docstring with quotes"""\\n + and an end quote\'''' + ... + def quoteB(): + '''func docstring with quotes""" + \'\'\' + and an end quote\\"''' + ... + def quoteC(): + """func docstring with end quote\\\"""" + ... + def quoteD(): + r'''raw with quotes\"''' + ... +[out] +class A: + """class docstring + + a multiline docstring""" + def func() -> None: + """func docstring + don't forget to indent""" + def nodoc() -> None: ... + +class B: + def quoteA() -> None: + '''func docstring with quotes"""\\n + and an end quote\'''' + def quoteB() -> None: + '''func docstring with quotes""" + \'\'\' + and an end quote\\"''' + def quoteC() -> None: + '''func docstring with end quote\\"''' + def quoteD() -> None: + '''raw with quotes\\"''' + +[case testIgnoreDocstrings] +class A: + """class docstring + + a multiline docstring""" + def func(): + """func docstring + + don't forget to indent""" + def nodoc(): + ... 
+ +class B: + def func(): + """func docstring""" + ... + def nodoc(): + ... + +[out] +class A: + def func() -> None: ... + def nodoc() -> None: ... + +class B: + def func() -> None: ... + def nodoc() -> None: ... + [case testKnownMagicMethodsReturnTypes] class Some: def __len__(self): ... diff --git a/test-data/unit/typexport-basic.test b/test-data/unit/typexport-basic.test index 0dcd0098f177..c4c3a1d36f83 100644 --- a/test-data/unit/typexport-basic.test +++ b/test-data/unit/typexport-basic.test @@ -294,8 +294,8 @@ import typing x = () [builtins fixtures/primitives.pyi] [out] -NameExpr(2) : Tuple[] -TupleExpr(2) : Tuple[] +NameExpr(2) : Tuple[()] +TupleExpr(2) : Tuple[()] [case testInferTwoTypes] ## NameExpr @@ -313,8 +313,8 @@ def f() -> None: x = () [builtins fixtures/primitives.pyi] [out] -NameExpr(3) : Tuple[] -TupleExpr(3) : Tuple[] +NameExpr(3) : Tuple[()] +TupleExpr(3) : Tuple[()] -- Basic generics @@ -727,7 +727,7 @@ class A: pass class B: a = None # type: A [out] -LambdaExpr(2) : def (B) -> A +LambdaExpr(2) : def (x: B) -> A MemberExpr(2) : A NameExpr(2) : B @@ -756,7 +756,7 @@ class B: a = None # type: A [builtins fixtures/list.pyi] [out] -LambdaExpr(2) : def (B) -> builtins.list[A] +LambdaExpr(2) : def (x: B) -> builtins.list[A] ListExpr(2) : builtins.list[A] [case testLambdaAndHigherOrderFunction] @@ -775,7 +775,7 @@ map( CallExpr(9) : builtins.list[B] NameExpr(9) : def (f: def (A) -> B, a: builtins.list[A]) -> builtins.list[B] CallExpr(10) : B -LambdaExpr(10) : def (A) -> B +LambdaExpr(10) : def (x: A) -> B NameExpr(10) : def (a: A) -> B NameExpr(10) : builtins.list[A] NameExpr(10) : A @@ -795,7 +795,7 @@ map( [builtins fixtures/list.pyi] [out] NameExpr(10) : def (f: def (A) -> builtins.list[B], a: builtins.list[A]) -> builtins.list[B] -LambdaExpr(11) : def (A) -> builtins.list[B] +LambdaExpr(11) : def (x: A) -> builtins.list[B] ListExpr(11) : builtins.list[B] NameExpr(11) : def (a: A) -> B NameExpr(11) : builtins.list[A] @@ -817,7 +817,7 @@ map( -- context. Perhaps just fail instead? 
CallExpr(7) : builtins.list[Any] NameExpr(7) : def (f: builtins.list[def (A) -> Any], a: builtins.list[A]) -> builtins.list[Any] -LambdaExpr(8) : def (A) -> A +LambdaExpr(8) : def (x: A) -> A ListExpr(8) : builtins.list[def (A) -> Any] NameExpr(8) : A NameExpr(9) : builtins.list[A] @@ -838,7 +838,7 @@ map( [out] CallExpr(9) : builtins.list[B] NameExpr(9) : def (f: def (A) -> B, a: builtins.list[A]) -> builtins.list[B] -LambdaExpr(10) : def (A) -> B +LambdaExpr(10) : def (x: A) -> B MemberExpr(10) : B NameExpr(10) : A NameExpr(11) : builtins.list[A] @@ -860,7 +860,7 @@ map( CallExpr(9) : builtins.list[B] NameExpr(9) : def (f: def (A) -> B, a: builtins.list[A]) -> builtins.list[B] NameExpr(10) : builtins.list[A] -LambdaExpr(11) : def (A) -> B +LambdaExpr(11) : def (x: A) -> B MemberExpr(11) : B NameExpr(11) : A @@ -1212,7 +1212,7 @@ f( [builtins fixtures/list.pyi] [out] NameExpr(8) : Overload(def (x: builtins.int, f: def (builtins.int) -> builtins.int), def (x: builtins.str, f: def (builtins.str) -> builtins.str)) -LambdaExpr(9) : def (builtins.int) -> builtins.int +LambdaExpr(9) : def (x: builtins.int) -> builtins.int NameExpr(9) : builtins.int [case testExportOverloadArgTypeNested] @@ -1231,10 +1231,10 @@ f( lambda x: x) [builtins fixtures/list.pyi] [out] -LambdaExpr(9) : def (builtins.int) -> builtins.int -LambdaExpr(10) : def (builtins.int) -> builtins.int -LambdaExpr(12) : def (builtins.str) -> builtins.str -LambdaExpr(13) : def (builtins.str) -> builtins.str +LambdaExpr(9) : def (y: builtins.int) -> builtins.int +LambdaExpr(10) : def (x: builtins.int) -> builtins.int +LambdaExpr(12) : def (y: builtins.str) -> builtins.str +LambdaExpr(13) : def (x: builtins.str) -> builtins.str -- TODO -- diff --git a/test-requirements.txt b/test-requirements.txt index 42c8a08a2b5d..6f7bec0375ad 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,22 +1,16 @@ -r mypy-requirements.txt -r build-requirements.txt attrs>=18.0 -black==23.3.0 # must match version in .pre-commit-config.yaml +black==23.7.0 # must match version in .pre-commit-config.yaml filelock>=3.3.0 -flake8==6.0.0; python_version >= "3.8" # must match version in .pre-commit-config.yaml -flake8-bugbear==23.3.23; python_version >= "3.8" # must match version in .pre-commit-config.yaml -flake8-noqa==1.3.1; python_version >= "3.8" # must match version in .pre-commit-config.yaml -isort[colors]==5.12.0; python_version >= "3.8" # must match version in .pre-commit-config.yaml -lxml>=4.9.1; (python_version<'3.11' or sys_platform!='win32') and python_version<'3.12' +# lxml 4.9.3 switched to manylinux_2_28, the wheel builder still uses manylinux2014 +lxml>=4.9.1,<4.9.3; (python_version<'3.11' or sys_platform!='win32') and python_version<'3.12' pre-commit pre-commit-hooks==4.4.0 psutil>=4.0 -# pytest 6.2.3 does not support Python 3.10 -pytest>=6.2.4 +pytest>=7.4.0 pytest-xdist>=1.34.0 -pytest-forked>=1.3.0,<2.0.0 pytest-cov>=2.10.0 -py>=1.5.2 +ruff==0.0.280 # must match version in .pre-commit-config.yaml setuptools>=65.5.1 -six -tomli>=1.1.0 +tomli>=1.1.0 # needed even on py311+ so the self check passes with --python-version 3.7 diff --git a/tox.ini b/tox.ini index 6d64cebaec6d..a809c4d2c570 100644 --- a/tox.ini +++ b/tox.ini @@ -2,10 +2,10 @@ minversion = 4.4.4 skip_missing_interpreters = {env:TOX_SKIP_MISSING_INTERPRETERS:True} envlist = - py37, py38, py39, py310, + py311, docs, lint, type, @@ -33,14 +33,17 @@ commands = [testenv:docs] description = invoke sphinx-build to build the HTML docs +passenv = + VERIFY_MYPY_ERROR_CODES deps = 
-rdocs/requirements-docs.txt commands = - sphinx-build -d "{toxworkdir}/docs_doctree" docs/source "{toxworkdir}/docs_out" --color -W -bhtml {posargs} + sphinx-build -n -d "{toxworkdir}/docs_doctree" docs/source "{toxworkdir}/docs_out" --color -W -bhtml {posargs} python -c 'import pathlib; print("documentation available under file://\{0\}".format(pathlib.Path(r"{toxworkdir}") / "docs_out" / "index.html"))' [testenv:lint] description = check the code style skip_install = true +deps = pre-commit commands = pre-commit run --all-files --show-diff-on-failure [testenv:type] @@ -50,5 +53,5 @@ passenv = MYPY_FORCE_COLOR MYPY_FORCE_TERMINAL_WIDTH commands = - python -m mypy --config-file mypy_self_check.ini -p mypy -p mypyc - python -m mypy --config-file mypy_self_check.ini misc --exclude misc/fix_annotate.py --exclude misc/async_matrix.py --exclude misc/sync-typeshed.py + python runtests.py self + python -m mypy --config-file mypy_self_check.ini misc --exclude misc/sync-typeshed.py
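
Note (not part of the diff above): the new stubgen test cases earlier in this patch — testTypeVarPreserved and testIncludeDocstrings / testIgnoreDocstrings — pin down ParamSpec/TypeVarTuple preservation and the --include-docstrings flag. As a rough, hedged illustration of the behavior those cases encode (the module name example.py and its contents are hypothetical, chosen only for this sketch), a source file like the one below is expected to keep its type-variable declarations in the generated example.pyi, and to keep its docstrings when stubgen is run with --include-docstrings:

# Hypothetical example.py -- illustrative only, not part of this patch.
from typing import TypeVar

from typing_extensions import ParamSpec, TypeVarTuple

T = TypeVar("T")          # plain TypeVars were already preserved by stubgen
P = ParamSpec("P")        # ParamSpec is expected to be preserved (testTypeVarPreserved)
Ts = TypeVarTuple("Ts")   # TypeVarTuple likewise, re-exported via typing_extensions


def greet(name: str) -> str:
    """Return a greeting for *name*.

    With ``stubgen --include-docstrings example.py`` this docstring should be
    carried over into the generated stub; without the flag the stubbed body
    collapses to ``...`` (see testIncludeDocstrings vs. testIgnoreDocstrings).
    """
    return f"Hello, {name}!"

The expected-output sections of those test cases remain the authoritative description of the behavior; this sketch only restates their intent in runnable form.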