diff --git a/.circleci/config.yml b/.circleci/config.yml
index 92d9093b..49eac3a8 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -3,11 +3,17 @@ version: 2.1
executors:
python:
parameters:
- version:
+ pyversion:
+ type: string
+ pgversion:
type: string
+ default: "16.0"
+ debiandist:
+ type: string
+ default: "bullseye"
docker:
- - image: python:<< parameters.version >>-buster
- - image: postgres:13.0
+ - image: python:<< parameters.pyversion >>-<< parameters.debiandist >>
+ - image: postgres:<< parameters.pgversion >>
environment:
POSTGRES_DB: 'psqlextra'
POSTGRES_USER: 'psqlextra'
@@ -22,7 +28,7 @@ commands:
steps:
- run:
name: Install packages
- command: apt-get update && apt-get install -y --no-install-recommends postgresql-client-11 libpq-dev build-essential git
+ command: apt-get update && apt-get install -y --no-install-recommends postgresql-client libpq-dev build-essential git
- run:
name: Install Python packages
@@ -39,71 +45,79 @@ commands:
command: tox --listenvs | grep ^py<< parameters.pyversion >> | circleci tests split | xargs -n 1 tox -e
environment:
DATABASE_URL: 'postgres://psqlextra:psqlextra@localhost:5432/psqlextra'
+ DATABASE_IN_CONTAINER: 'true'
+ TOX_TESTENV_PASSENV: 'DATABASE_URL DATABASE_IN_CONTAINER'
jobs:
test-python36:
executor:
name: python
- version: "3.6"
+ pyversion: "3.6"
+ pgversion: "13.0"
steps:
- checkout
- install-dependencies:
- extra: test
+ extra: dev, test
- run-tests:
pyversion: 36
test-python37:
executor:
name: python
- version: "3.7"
+ pyversion: "3.7"
+ pgversion: "13.0"
steps:
- checkout
- install-dependencies:
- extra: test
+ extra: dev, test
- run-tests:
pyversion: 37
test-python38:
executor:
name: python
- version: "3.8"
+ pyversion: "3.8"
+ pgversion: "13.0"
steps:
- checkout
- install-dependencies:
- extra: test
+ extra: dev, test
- run-tests:
pyversion: 38
test-python39:
executor:
name: python
- version: "3.9"
+ pyversion: "3.9"
+ pgversion: "13.0"
steps:
- checkout
- install-dependencies:
- extra: test
+ extra: dev, test
- run-tests:
pyversion: 39
test-python310:
executor:
name: python
- version: "3.10"
+ pyversion: "3.10"
+ pgversion: "16.0"
steps:
- checkout
- install-dependencies:
- extra: test
+ extra: dev, test
- run-tests:
pyversion: 310
test-python311:
executor:
name: python
- version: "3.11"
+ pyversion: "3.11"
+ pgversion: "16.0"
steps:
- checkout
- install-dependencies:
- extra: test
+ extra: dev, test, test-report
- run-tests:
pyversion: 311
- store_test_results:
@@ -112,22 +126,48 @@ jobs:
name: Upload coverage report
command: coveralls
+ test-python312:
+ executor:
+ name: python
+ pyversion: "3.12"
+ pgversion: "16.0"
+ debiandist: "bullseye"
+ steps:
+ - checkout
+ - install-dependencies:
+ extra: dev, test
+ - run-tests:
+ pyversion: 312
+
+ test-python313:
+ executor:
+ name: python
+ pyversion: "3.13"
+ pgversion: "16.0"
+ debiandist: "bullseye"
+ steps:
+ - checkout
+ - install-dependencies:
+ extra: dev, test
+ - run-tests:
+ pyversion: 313
+
analysis:
executor:
name: python
- version: "3.9"
+ pyversion: "3.11"
steps:
- checkout
- install-dependencies:
- extra: analysis, test
+ extra: dev, analysis, test
- run:
name: Verify
- command: python setup.py verify
+ command: poe verify
publish:
executor:
name: python
- version: "3.9"
+ pyversion: "3.11"
steps:
- checkout
- install-dependencies:
@@ -188,6 +228,18 @@ workflows:
only: /.*/
branches:
only: /.*/
+ - test-python312:
+ filters:
+ tags:
+ only: /.*/
+ branches:
+ only: /.*/
+ - test-python313:
+ filters:
+ tags:
+ only: /.*/
+ branches:
+ only: /.*/
- analysis:
filters:
tags:
diff --git a/.gitignore b/.gitignore
index 97ebaa67..52805a88 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,6 +15,7 @@ reports/
*.egg-info/
pip-wheel-metadata/
dist/
+build/
# Ignore stupid .DS_Store
.DS_Store
@@ -28,3 +29,6 @@ dist/
# Ignore PyCharm / IntelliJ files
.idea/
+build/
+.python-version
+docker-compose.yml
\ No newline at end of file
diff --git a/README.md b/README.md
index 17037d87..603fef82 100644
--- a/README.md
+++ b/README.md
@@ -8,8 +8,8 @@
| :memo: | **License** | [](http://doge.mit-license.org) |
| :package: | **PyPi** | [](https://pypi.python.org/pypi/django-postgres-extra) |
| :four_leaf_clover: | **Code coverage** | [](https://coveralls.io/github/SectorLabs/django-postgres-extra?branch=master) |
-| | **Django Versions** | 2.0, 2.1, 2.2, 3.0, 3.1, 3.2, 4.0, 4.1, 4.2, 5.0 |
-| | **Python Versions** | 3.6, 3.7, 3.8, 3.9, 3.10, 3.11 |
+| | **Django Versions** | 2.0, 2.1, 2.2, 3.0, 3.1, 3.2, 4.0, 4.1, 4.2, 5.0, 5.1, 5.2 |
+| | **Python Versions** | 3.6, 3.7, 3.8, 3.9, 3.10, 3.11, 3.12, 3.13 |
| | **Psycopg Versions** | 2, 3 |
| :book: | **Documentation** | [Read The Docs](https://django-postgres-extra.readthedocs.io/en/master/) |
| :warning: | **Upgrade** | [Upgrade from v1.x](https://django-postgres-extra.readthedocs.io/en/master/major_releases.html#new-features)
@@ -31,37 +31,48 @@ With seamless we mean that any features we add will work truly seamlessly. You s
[See the full list](http://django-postgres-extra.readthedocs.io/#features)
-* **Native upserts**
+* **Conflict handling (atomic upsert)**
- * Single query
- * Concurrency safe
- * With bulk support (single query)
+ Adds support for PostgreSQL's `ON CONFLICT` syntax for inserts. Supports `DO UPDATE` and `DO NOTHING`. Single-statement, atomic, concurrency-safe upserts, with support for conditional updates. A short example follows this list.
-* **Extended support for HStoreField**
+* **Table partitioning**
- * Unique constraints
- * Null constraints
- * Select individual keys using ``.values()`` or ``.values_list()``
+ Adds support for PostgreSQL 11.x declarative table partitioning. Integrated into Django migrations. Supports all types of partitioning. Includes a command to automatically create time-based partitions.
-* **PostgreSQL 11.x declarative table partitioning**
+* **Views & materialized views**
- * Supports both range and list partitioning
+ Adds support for creating views & materialized views as any other model. Integrated into Django migrations.
-* **Faster deletes**
+* **Locking models & tables**
- * Truncate tables (with cascade)
+ Support for explicit table-level locks.
-* **Indexes**
+* **Creating/dropping schemas**
- * Conditional unique index.
- * Case sensitive unique index.
+ Support for managing PostgreSQL schemas.
+
+* **Truncating tables**
+
+ Support for ``TRUNCATE TABLE`` statements (including cascading).
+
+For Django 3.1 and older:
+
+* **Conditional unique index**
+* **Case insensitive index**
+
+For Django 2.2 and older:
+
+* **Unique index**
+* **HStore unique and required constraints on specific HStore keys**
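+
+As a quick taste of the conflict handling feature, here is a minimal sketch of a single-statement upsert (assuming a model with a unique `name` field):
+
+    from django.db import models
+    from psqlextra.models import PostgresModel
+
+    class MyModel(PostgresModel):
+        name = models.CharField(max_length=255, unique=True)
+        rating = models.IntegerField()
+
+    # Inserts the row, or updates `rating` if a row with this `name` already exists.
+    MyModel.objects.upsert(
+        conflict_target=["name"],
+        fields={"name": "beer", "rating": 10},
+    )
+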
## Working with the code
### Prerequisites
-* PostgreSQL 10 or newer.
-* Django 2.0 or newer (including 3.x, 4.x).
-* Python 3.6 or newer.
+* PostgreSQL 14 or newer.
+* Django 5.x or newer.
+* Python 3.11 or newer.
+
+These requirements apply to local development only; CI for code analysis etc. runs against these versions. Tests pass on all documented Python, Django and PostgreSQL versions. Linting, formatting and type-checking might not work on other Python and/or Django versions.
### Getting started
@@ -86,16 +97,16 @@ With seamless we mean that any features we add will work truly seamlessly. You s
4. Install the development/test dependencies:
- λ pip install .[test] .[analysis]
+ λ pip install -r requirements-test.txt
5. Run the tests:
- λ tox
+ λ poe test
6. Run the benchmarks:
- λ py.test -c pytest-benchmark.ini
+ λ poe benchmark
7. Auto-format code, sort imports and auto-fix linting errors:
- λ python setup.py fix
+ λ poe fix
diff --git a/docs/source/deletion.rst b/docs/source/deletion.rst
index c27cdcb6..9308594c 100644
--- a/docs/source/deletion.rst
+++ b/docs/source/deletion.rst
@@ -48,3 +48,28 @@ By default, Postgres will raise an error if any other table is referencing one o
MyModel.objects.truncate(cascade=True)
print(MyModel1.objects.count()) # zero records left
print(MyModel2.objects.count()) # zero records left
+
+
+Restart identity
+****************
+
+When ``restart_identity=True`` is specified, any sequences on the table are restarted.
+
+.. code-block:: python
+
+ from django.db import models
+ from psqlextra.models import PostgresModel
+
+ class MyModel(PostgresModel):
+ pass
+
+ mymodel = MyModel.objects.create()
+ assert mymodel.id == 1
+
+ MyModel.objects.truncate(restart_identity=True) # table is empty after this
+ print(MyModel.objects.count()) # zero records left
+
+ # Create a new row, it should get ID 1 again because
+ # the sequence got restarted.
+ mymodel = MyModel.objects.create()
+ assert mymodel.id == 1
diff --git a/docs/source/hstore.rst b/docs/source/hstore.rst
index 6dc22304..401b40ee 100644
--- a/docs/source/hstore.rst
+++ b/docs/source/hstore.rst
@@ -15,6 +15,10 @@ Constraints
Unique
******
+.. warning::
+
+ In Django 2.2 or newer, you might want to use :class:`~django.db.models.UniqueConstraint` instead.
+
The ``uniqueness`` constraint can be added on one or more `hstore`_ keys, similar to how a ``UNIQUE`` constraint can be added to a column. Setting this option causes unique indexes to be created on the specified keys.
You can specify a ``list`` of strings to specify the keys that must be marked as unique:
@@ -44,6 +48,10 @@ In the example above, ``key1`` and ``key2`` must unique **together**, and ``key3
Required
********
+.. warning::
+
+ In Django 2.2 or newer, you might want to use :class:`~django.db.models.CheckConstraint` instead.
+
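+   A minimal sketch of that alternative, assuming an ``HStoreField`` named ``attributes`` whose ``en`` key must always be set:
+
+   .. code-block:: python
+
+      from django.contrib.postgres.fields import HStoreField
+      from django.db import models
+      from django.db.models import Q
+
+      class MyModel(models.Model):
+          attributes = HStoreField()
+
+          class Meta:
+              constraints = [
+                  # Rejects rows where the "en" key is missing.
+                  models.CheckConstraint(
+                      check=Q(attributes__has_key="en"),
+                      name="mymodel_attributes_en_required",
+                  ),
+              ]
+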
The ``required`` option can be added to ensure that the specified `hstore`_ keys are set for every row. This is similar to a ``NOT NULL`` constraint on a column. You can specify a list of `hstore`_ keys that are required:
.. code-block:: python
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 1959016e..3fddca56 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -13,36 +13,47 @@ Explore the documentation to learn about all features:
* :ref:`Conflict handling `
- Adds support for PostgreSQL's ``ON CONFLICT`` syntax for inserts. Supports for ``DO UPDATE`` and ``DO NOTHING``. In other words; single statement, atomic, concurrency safe upserts.
+ Adds support for PostgreSQL's ``ON CONFLICT`` syntax for inserts. Supports ``DO UPDATE`` and ``DO NOTHING``. Single statement, atomic, concurrency safe upserts. Supports conditional updates as well.
-* :ref:`HStore `
+* :ref:`Table partitioning `
- Built on top Django's built-in support for `hstore`_ fields. Adds support for indices on keys and unique/required constraints. All of these features integrate well with Django's migrations sytem.
+ Adds support for PostgreSQL 11.x declarative table partitioning. Integrated into Django migrations. Supports all types of partitioning. Includes a command to automatically create time-based partitions.
-* :ref:`Partial unique index `
+* :ref:`Views & materialized views `
- Partial (unique) index that only applies when a certain condition is true.
+ Adds support for creating views & materialized views as any other model. Integrated into Django migrations.
-* :ref:`Case insensitive index `
+* :ref:`Locking models & tables `
- Case insensitive index, allows searching a column and ignoring the casing.
+ Support for explicit table-level locks.
-* :ref:`Table partitioning `
+* :ref:`Creating/dropping schemas `
- Adds support for PostgreSQL 11.x declarative table partitioning.
+ Support for managing Postgres schemas.
* :ref:`Truncating tables `
Support for ``TRUNCATE TABLE`` statements (including cascading).
-* :ref:`Locking models & tables `
+For Django 3.1 and older:
- Support for explicit table-level locks.
+* :ref:`Partial unique index `
+ Partial (unique) index that only applies when a certain condition is true.
-* :ref:`Creating/dropping schemas `
+* :ref:`Case insensitive index `
- Support for managing Postgres schemas.
+ Case insensitive index, allows searching a column and ignoring the casing.
+
+For Django 2.2 and older:
+
+* :ref:`Unique index `
+
+ Unique indices that can span more than one field.
+
+* :ref:`HStore key unique & required constraint `
+
+ Add unique and required constraints in specific hstore keys.
.. toctree::
@@ -56,6 +67,7 @@ Explore the documentation to learn about all features:
conflict_handling
deletion
table_partitioning
+ views
expressions
annotations
locking
diff --git a/docs/source/indexes.rst b/docs/source/indexes.rst
index 622d1ce6..236b6976 100644
--- a/docs/source/indexes.rst
+++ b/docs/source/indexes.rst
@@ -7,6 +7,11 @@ Indexes
Unique Index
-----------------------------
+
+.. warning::
+
+ In Django 2.2 or newer, you might want to use :class:`~django.db.models.UniqueConstraint` instead.
+
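+   A minimal sketch of that alternative:
+
+   .. code-block:: python
+
+      from django.db import models
+
+      class MyModel(models.Model):
+          name = models.TextField()
+
+          class Meta:
+              constraints = [
+                  models.UniqueConstraint(
+                      fields=["name"],
+                      name="mymodel_name_unique",
+                  ),
+              ]
+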
The :class:`~psqlextra.indexes.UniqueIndex` lets you create a unique index. Normally Django only allows you to create unique indexes by specifying ``unique=True`` on the model field.
Although it can be used on any Django model, it is most useful on views and materialized views where ``unique=True`` does not work.
@@ -32,13 +37,14 @@ Although it can be used on any Django model, it is most useful on views and mate
Conditional Unique Index
------------------------
-The :class:`~psqlextra.indexes.ConditionalUniqueIndex` lets you create partial unique indexes in case you ever need :attr:`~django:django.db.models.Options.unique_together` constraints
-on nullable columns.
.. warning::
In Django 3.1 or newer, you might want to use :attr:`~django.db.models.indexes.condition` instead.
+The :class:`~psqlextra.indexes.ConditionalUniqueIndex` lets you create partial unique indexes in case you ever need :attr:`~django:django.db.models.Options.unique_together` constraints
+on nullable columns.
+
Before:
.. code-block:: python
@@ -83,6 +89,11 @@ After:
Case Insensitive Unique Index
-----------------------------
+
+.. warning::
+
+ In Django 3.2 or newer, you might want to use a functional index with :class:`~django.db.models.functions.Lower` instead.
+
The :class:`~psqlextra.indexes.CaseInsensitiveUniqueIndex` lets you create an index that ignores the casing for the specified field(s).
This makes the field(s) behave more like a text field in MySQL.
diff --git a/docs/source/snippets/postgres_doc_links.rst b/docs/source/snippets/postgres_doc_links.rst
index fe0f4d76..537d056d 100644
--- a/docs/source/snippets/postgres_doc_links.rst
+++ b/docs/source/snippets/postgres_doc_links.rst
@@ -3,3 +3,7 @@
.. _hstore: https://www.postgresql.org/docs/11/hstore.html
.. _PostgreSQL Declarative Table Partitioning: https://www.postgresql.org/docs/current/ddl-partitioning.html#DDL-PARTITIONING-DECLARATIVE
.. _Explicit table-level locks: https://www.postgresql.org/docs/current/explicit-locking.html#LOCKING-TABLES
+.. _PostgreSQL Views: https://www.postgresql.org/docs/current/sql-createview.html
+.. _PostgreSQL Materialized Views: https://www.postgresql.org/docs/current/sql-creatematerializedview.html
+.. _PostgreSQL Refresh Materialized Views: https://www.postgresql.org/docs/current/sql-refreshmaterializedview.html
+.. _PostgreSQL Table Partitioning Limitations: https://www.postgresql.org/docs/current/ddl-partitioning.html#DDL-PARTITIONING-DECLARATIVE-LIMITATIONS
diff --git a/docs/source/table_partitioning.rst b/docs/source/table_partitioning.rst
index 1bb5ba6f..1c36db0a 100644
--- a/docs/source/table_partitioning.rst
+++ b/docs/source/table_partitioning.rst
@@ -2,12 +2,13 @@
.. warning::
- Table partitioning is a relatively new and advanded PostgreSQL feature. It has plenty of ways to shoot yourself in the foot with.
+ Table partitioning is an advanced PostgreSQL feature. It offers plenty of ways to shoot yourself in the foot.
We HIGHLY RECOMMEND you only use this feature if you're already deeply familiar with table partitioning and aware of its advantages and disadvantages.
Do study the PostgreSQL documentation carefully.
+
.. _table_partitioning_page:
@@ -22,11 +23,62 @@ The following partitioning methods are available:
* ``PARTITION BY LIST``
* ``PARTITION BY HASH``
-.. note::
+Known limitations
+-----------------
+
+Foreign keys
+~~~~~~~~~~~~
+There is no support for foreign keys **to** partitioned models. Even in Django 5.2, which introduced :class:`~django:django.db.models.CompositePrimaryKey`, foreign keys to partitioned models remain unsupported. See: https://code.djangoproject.com/ticket/36034
+
+Foreign keys **on** a partitioned model to other, non-partitioned models are always supported.
+
+PostgreSQL 10.x
+~~~~~~~~~~~~~~~
+Although table partitioning is available in PostgreSQL 10.x, it is highly recommended you use PostgreSQL 11.x. Table partitioning got a major upgrade in PostgreSQL 11.x.
+
+PostgreSQL 10.x does not support creating foreign keys to/from partitioned tables and does not automatically create an index across all partitions.
+
+Changing the partition key or partition method
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There is **NO SUPPORT** whatsoever for changing the partitioning key or method on a partitioned model after the initial creation.
+
+Such changes are not detected by ``python manage.py pgmakemigrations`` and there are no pre-built operations for modifying them.
+
+Transforming existing models into partitioned models
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There is **NO SUPPORT** whatsoever to transform an existing, non-partitioned model into a partitioned model.
+
+At a high-level, you have the following options to do this:
+
+1. Drop the model first and re-create it as a partitioned model according to the documentation.
+
+ .. warning::
+
+ Blindly doing this causes the original table & data to be lost.
+
+2. Craft a custom migration to use the original table as a default partition (a sketch follows below).
+
+ Migration #1: Rename the original table so its name is suffixed with ``_default``.
+
+ Migration #2: Create the partitioned model with the old name.
+
+ Migration #3: Attach the original (renamed) table as the default partition.
- Although table partitioning is available in PostgreSQL 10.x, it is highly recommended you use PostgresSQL 11.x. Table partitioning got a major upgrade in PostgreSQL 11.x.
+ Migration #4: Create more partitions and/or move data out of the default partition.
- PostgreSQL 10.x does not support creating foreign keys to/from partitioned tables and does not automatically create an index across all partitions.
+ .. warning::
+
+ This is not an officially supported flow. Be extremely cautious to avoid
+ data loss.
+
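+A rough sketch of what migrations #1 and #3 could look like, assuming a model backed by a table named ``mymodel`` (the app label, migration names and raw SQL are illustrative; there are no pre-built operations for this flow):
+
+.. code-block:: python
+
+    from django.db import migrations
+
+    class Migration(migrations.Migration):
+        dependencies = [("myapp", "0001_initial")]
+
+        operations = [
+            # Migration #1: move the original table out of the way so that
+            # the partitioned model can be created under the old name.
+            migrations.RunSQL(
+                sql='ALTER TABLE "mymodel" RENAME TO "mymodel_default"',
+                reverse_sql='ALTER TABLE "mymodel_default" RENAME TO "mymodel"',
+            ),
+        ]
+
+    # Migration #3 would then attach the renamed table as the default partition:
+    #
+    #   ALTER TABLE "mymodel" ATTACH PARTITION "mymodel_default" DEFAULT;
+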
+Lock-free and/or concurrency safe operations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There is **NO SUPPORT** whatsoever to create/attach partitions and move data between partitions in a lock-free and concurrency safe manner.
+
+Most operations require ``AccessExclusiveLock`` and **will** block reads/writes. Be extremely cautious in production environments and study the locks associated with each SQL operation before proceeding.
Creating partitioned tables
@@ -59,6 +111,71 @@ Inherit your model from :class:`psqlextra.models.PostgresPartitionedModel` and d
name = models.TextField()
timestamp = models.DateTimeField()
+Primary key
+~~~~~~~~~~~
+
+PostgreSQL demands that the primary key either is the same as the partitioning key or includes it. See `PostgreSQL Table Partitioning Limitations`_.
+
+**In Django older than 5.2, the behavior is as follows:**
+
+ - If the primary key is the same as the partitioning key, standard Django behavior applies.
+
+ - If the primary key is not the exact same as the partitioning key or the partitioning key consists of more than one field:
+
+ An implicit composite primary key (not visible from Django) is created.
+
+**In Django 5.2 and newer, the behavior is as follows:**
+
+ - If no explicit primary key is defined, a :class:`~django:django.db.models.CompositePrimaryKey` is created automatically that includes an auto-incrementing ``id`` field and the partitioning keys.
+
+ - If an explicit :class:`~django:django.db.models.CompositePrimaryKey` is specified, no modifications are made to it and it is your responsibility to make sure the partitioning keys are part of the primary key.
+
+Django 5.2 examples
+*******************
+
+Custom composite primary key
+""""""""""""""""""""""""""""
+
+.. code-block:: python
+
+ from django.db import models
+
+ from psqlextra.types import PostgresPartitioningMethod
+ from psqlextra.models import PostgresPartitionedModel
+
+ class MyModel(PostgresPartitionedModel):
+ class PartitioningMeta:
+ method = PostgresPartitioningMethod.RANGE
+ key = ["timestamp"]
+
+ # WARNING: This overrides the default primary key that includes an auto-incrementing `id` field.
+ pk = models.CompositePrimaryKey("name", "timestamp")
+
+ name = models.TextField()
+ timestamp = models.DateTimeField()
+
+
+Custom composite primary key with auto-incrementing ID
+""""""""""""""""""""""""""""""""""""""""""""""""""""""
+
+.. code-block:: python
+
+ from django.db import models
+
+ from psqlextra.types import PostgresPartitioningMethod
+ from psqlextra.models import PostgresPartitionedModel
+
+ class MyModel(PostgresPartitionedModel):
+ class PartitioningMeta:
+ method = PostgresPartitioningMethod.RANGE
+ key = ["timestamp"]
+
+ id = models.AutoField(primary_key=True)
+ pk = models.CompositePrimaryKey("id", "timestamp")
+
+ name = models.TextField()
+ timestamp = models.DateTimeField()
+
Generating a migration
**********************
@@ -101,7 +218,8 @@ Command-line options
Long flag Short flag Default Description
==================== ============= ================ ==================================================================================================== === === === === === ===
``--yes`` ``-y`` ``False`` Specifies yes to all questions. You will NOT be asked for confirmation before partition deletion.
- ``--using`` ``-u`` ``'default'`` Optional name of the database connection to use.
+ ``--using`` ``-u`` ``'default'`` Optional name of the database connection to use.
+ ``--model-names`` ``-m`` ``None`` Optional list of model names for which to manage partitions.
``--skip-create`` ``False`` Whether to skip creating partitions.
``--skip-delete`` ``False`` Whether to skip deleting partitions.
@@ -174,6 +292,17 @@ Time-based partitioning
count=12,
),
),
+
+ # 24 partitions ahead, each partition is 1 hour, for a total of 24 hours, starting at hour 0 of the current day.
+ # Old partitions are never deleted; `max_age` is not set.
+ # Partitions will be named `[table_name]_[year]_[month]_[day]_[hour (24h)]:00:00`.
+ PostgresPartitioningConfig(
+ model=MyPartitionedModel,
+ strategy=PostgresCurrentTimePartitioningStrategy(
+ size=PostgresTimePartitionSize(hours=1),
+ count=24,
+ ),
+ ),
])
diff --git a/docs/source/views.rst b/docs/source/views.rst
new file mode 100644
index 00000000..ea6bfcb0
--- /dev/null
+++ b/docs/source/views.rst
@@ -0,0 +1,197 @@
+.. include:: ./snippets/postgres_doc_links.rst
+
+.. _views_page:
+
+
+Views & materialized views
+==========================
+
+:class:`~psqlextra.models.PostgresViewModel` and :class:`~psqlextra.models.PostgresMaterializedViewModel` add support for `PostgreSQL Views`_ and `PostgreSQL Materialized Views`_.
+
+.. note::
+
+ You can create indices and constraints on (materialized) views just like you would on normal PostgreSQL tables. This is fully supported.
+
+
+Known limitations
+-----------------
+
+Changing view query
+*******************
+
+There is **NO SUPPORT** whatsoever for changing the backing query of a view after the initial creation.
+
+Such changes are not detected by ``python manage.py pgmakemigrations`` and there are no pre-built operations for modifying them.
+
+
+Creating a (materialized) view
+------------------------------
+
+Views are declared like regular Django models with a special base class and an extra option to specify the query backing the view. Once declared, they behave like regular Django models with the exception that you cannot write to them.
+
+Declaring the model
+*******************
+
+.. warning::
+
+ All fields returned by the backing query must be declared as Django fields. Fields returned by the query that aren't declared as Django fields still become
+ part of the view, but will not be visible from Django.
+
+With a queryset
+~~~~~~~~~~~~~~~
+
+.. code-block:: python
+
+ from django.db import models
+
+ from psqlextra.models import PostgresViewModel
+
+
+ class MyView(PostgresViewModel):
+ name = models.TextField()
+ somefk__name = models.TextField()
+
+ class Meta:
+ indexes = [models.Index(fields=["name"])]
+
+ class ViewMeta:
+ query = SomeOtherModel.objects.values('id', 'name', 'somefk__name')
+
+ class MyMaterializedView(PostgresMaterializedViewModel):
+ name = models.TextField()
+ somefk__name = models.TextField()
+
+ class Meta:
+ indexes = [models.Index(fields=["name"])]
+
+ class ViewMeta:
+ query = SomeOtherModel.objects.values('id', 'name', 'somefk__name')
+
+With raw SQL
+~~~~~~~~~~~~
+
+Any raw SQL can be used as the backing query for a view. Specify a tuple to pass the values for placeholders.
+
+.. code-block:: python
+
+ from django.db import models
+
+ from psqlextra.models import PostgresViewModel
+
+
+ class MyView(PostgresViewModel):
+ name = models.TextField()
+ somefk__name = models.TextField()
+
+ class Meta:
+ indexes = [models.Index(fields=["name"])]
+
+ class ViewMeta:
+ query = "SELECT id, somefk.name AS somefk__name FROM mytable INNER JOIN somefk ON somefk.id = mytable.somefk_id"
+
+ class MyMaterializedView(PostgresMaterializedViewModel):
+ name = models.TextField()
+ somefk__name = models.TextField()
+
+ class Meta:
+ indexes = [models.Index(fields=["name"])]
+
+ class ViewMeta:
+ query = ("SELECT id, somefk.name AS somefk__name FROM mytable INNER JOIN somefk ON somefk.id = mytable.somefk_id WHERE id > %s", 1)
+
+
+With a callable
+~~~~~~~~~~~~~~~
+
+A callable can be used when your query depends on settings or other variables that aren't available when the model class is defined. The callable can return raw SQL, raw SQL with params or a queryset.
+
+.. code-block:: python
+
+ from django.conf import settings
+ from django.db import models
+
+ from psqlextra.models import PostgresViewModel
+
+ def _generate_query():
+ return ("SELECT * FROM sometable WHERE app_name = %s", settings.APP_NAME)
+
+ def _build_query():
+ return SomeTable.objects.filter(app_name=settings.APP_NAME)
+
+
+ class MyView(PostgresViewModel):
+ name = models.TextField()
+ somefk__name = models.TextField()
+
+ class ViewMeta:
+ query = _generate_query
+
+ class MyMaterializedView(PostgresMaterializedViewModel):
+ name = models.TextField()
+ somefk__name = models.TextField()
+
+ class ViewMeta:
+ query = _build_query
+
+
+Generating a migration
+**********************
+Run the following command to automatically generate a migration:
+
+.. code-block:: bash
+
+ python manage.py pgmakemigrations
+
+This will generate a migration that creates the view with the specified query as the base.
+
+.. warning::
+
+ Always use ``python manage.py pgmakemigrations`` for view models.
+
+ The model must be created by the :class:`~psqlextra.backend.migrations.operations.PostgresCreateViewModel` or :class:`~psqlextra.backend.migrations.operations.PostgresCreateMaterializedViewModel` operation.
+
+ Do not use the standard ``python manage.py makemigrations`` command for view models. Django will issue a standard :class:`~django:django.db.migrations.operations.CreateModel` operation. Doing this will not create a view and all subsequent operations will fail.
+
+
+Refreshing a materialized view
+------------------------------
+
+Make sure to read the PostgreSQL documentation on refreshing materialized views for caveats: `PostgreSQL Refresh Materialized Views`_.
+
+.. code-block:: python
+
+ # Takes an AccessExclusive lock; reads block until the view is re-filled
+ MyViewModel.refresh()
+
+ # Allows concurrent reads; the call still blocks until the view is re-filled.
+ # Warning: Only works if the view was refreshed at least once before.
+ MyViewModel.refresh(concurrently=True)
+
+
+Creating a materialized view without data
+-----------------------------------------
+
+.. warning::
+
+ You cannot query your materialized view until it has been refreshed at least once. After creating the materialized view without data, you must execute a refresh at some point. The first refresh cannot be ``CONCURRENTLY`` (PostgreSQL restriction).
+
+By default, the migration creates the materialized view and executes the first refresh. If you want to avoid this, pass the ``with_data=False`` flag in the :class:`~psqlextra.backend.migrations.operations.PostgresCreateMaterializedViewModel` operation in your generated migration.
+
+.. code-block:: python
+
+ from django.db import migrations, models
+
+ from psqlextra.backend.migrations.operations import PostgresCreateMaterializedViewModel
+
+ class Migration(migrations.Migration):
+ operations = [
+ PostgresCreateMaterializedViewModel(
+ name="myview",
+ fields=[...],
+ options={...},
+ view_options={
+ "query": ...
+ },
+ # Not the default; creates the materialized view `WITH NO DATA`
+ with_data=False,
+ )
+ ]
diff --git a/psqlextra/backend/base.py b/psqlextra/backend/base.py
index 5c788a05..58222dd7 100644
--- a/psqlextra/backend/base.py
+++ b/psqlextra/backend/base.py
@@ -2,7 +2,12 @@
from typing import TYPE_CHECKING
+from django import VERSION
from django.conf import settings
+from django.contrib.postgres.signals import (
+ get_hstore_oids,
+ register_type_handlers,
+)
from django.db import ProgrammingError
from . import base_impl
@@ -41,6 +46,9 @@ class DatabaseWrapper(Wrapper):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
+ if VERSION >= (5, 0):
+ return
+
# Some base back-ends such as the PostGIS back-end don't properly
# set `ops_class` and `introspection_class` and initialize these
# classes themselves.
@@ -94,3 +102,22 @@ def prepare_database(self):
"or add the extension manually.",
exc_info=True,
)
+ return
+
+ # Clear old (non-existent), stale oids.
+ get_hstore_oids.cache_clear()
+
+ # Verify that we (and Django) can find the OIDs
+ # for hstore.
+ oids, _ = get_hstore_oids(self.alias)
+ if not oids:
+ logger.warning(
+ '"hstore" extension was created, but we cannot find the OIDs '
+ "in the database. Something went wrong.",
+ )
+ return
+
+ # We must trigger Django into registering the type handlers now
+ # so that any subsequent code can properly use the newly
+ # registered types.
+ register_type_handlers(self)
diff --git a/psqlextra/backend/introspection.py b/psqlextra/backend/introspection.py
index bd775779..a9106bdc 100644
--- a/psqlextra/backend/introspection.py
+++ b/psqlextra/backend/introspection.py
@@ -93,6 +93,8 @@ def get_partitioned_tables(
pg_class
ON
pg_class.oid = pg_partitioned_table.partrelid
+ ORDER BY
+ pg_partitioned_table.partrelid
"""
)
@@ -151,6 +153,9 @@ def get_partitions(
pg_description.objoid = child.oid
WHERE
parent.relname = %s
+ ORDER BY
+ child.oid,
+ child.relname
"""
cursor.execute(sql, (table_name,))
@@ -196,6 +201,9 @@ def get_partition_key(self, cursor, table_name: str) -> List[str]:
AND ordinal_position = pt.column_index
WHERE
table_name = %s
+ ORDER BY
+ col.ordinal_position,
+ col.column_name
"""
cursor.execute(sql, (table_name,))
@@ -213,6 +221,9 @@ def get_schema_list(self, cursor) -> List[str]:
schema_name
FROM
information_schema.schemata
+ ORDER BY
+ schema_name,
+ catalog_name
""",
tuple(),
)
diff --git a/psqlextra/backend/migrations/operations/create_materialized_view_model.py b/psqlextra/backend/migrations/operations/create_materialized_view_model.py
index ce1028d6..9ca2320f 100644
--- a/psqlextra/backend/migrations/operations/create_materialized_view_model.py
+++ b/psqlextra/backend/migrations/operations/create_materialized_view_model.py
@@ -23,10 +23,13 @@ def __init__(
view_options={},
bases=None,
managers=None,
+ *,
+ with_data: bool = True,
):
super().__init__(name, fields, options, bases, managers)
self.view_options = view_options or {}
+ self.with_data = with_data
def state_forwards(self, app_label, state):
state.add_model(
@@ -46,7 +49,9 @@ def database_forwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
- schema_editor.create_materialized_view_model(model)
+ schema_editor.create_materialized_view_model(
+ model, with_data=self.with_data
+ )
def database_backwards(
self, app_label, schema_editor, from_state, to_state
@@ -63,6 +68,9 @@ def deconstruct(self):
if self.view_options:
kwargs["view_options"] = self.view_options
+ if self.with_data is False:
+ kwargs["with_data"] = self.with_data
+
return name, args, kwargs
def describe(self):
diff --git a/psqlextra/backend/migrations/patched_autodetector.py b/psqlextra/backend/migrations/patched_autodetector.py
index e5ba8938..07f9e528 100644
--- a/psqlextra/backend/migrations/patched_autodetector.py
+++ b/psqlextra/backend/migrations/patched_autodetector.py
@@ -37,8 +37,8 @@
class AddOperationHandler:
"""Handler for when operations are being added to a new migration.
- This is where we intercept operations such as
- :see:CreateModel to replace it with our own.
+ This is where we intercept operations such as :see:CreateModel to
+ replace it with our own.
"""
def __init__(self, autodetector, app_label, args, kwargs):
diff --git a/psqlextra/backend/schema.py b/psqlextra/backend/schema.py
index 28e9211a..b8dd0a76 100644
--- a/psqlextra/backend/schema.py
+++ b/psqlextra/backend/schema.py
@@ -61,9 +61,12 @@ class PostgresSchemaEditor(SchemaEditor):
sql_create_view = "CREATE VIEW %s AS (%s)"
sql_replace_view = "CREATE OR REPLACE VIEW %s AS (%s)"
sql_drop_view = "DROP VIEW IF EXISTS %s"
- sql_create_materialized_view = (
+ sql_create_materialized_view_with_data = (
"CREATE MATERIALIZED VIEW %s AS (%s) WITH DATA"
)
+ sql_create_materialized_view_without_data = (
+ "CREATE MATERIALIZED VIEW %s AS (%s) WITH NO DATA"
+ )
sql_drop_materialized_view = "DROP MATERIALIZED VIEW %s"
sql_refresh_materialized_view = "REFRESH MATERIALIZED VIEW %s"
sql_refresh_materialized_view_concurrently = (
@@ -250,10 +253,11 @@ def clone_model_constraints_and_indexes_to_schema(
model, tuple(), model._meta.unique_together
)
- if model._meta.index_together:
- self.alter_index_together(
- model, tuple(), model._meta.index_together
- )
+ if django.VERSION < (5, 1):
+ if model._meta.index_together:
+ self.alter_index_together(
+ model, tuple(), model._meta.index_together
+ )
for field in model._meta.local_concrete_fields: # type: ignore[attr-defined]
# Django creates primary keys later added to the model with
@@ -547,19 +551,28 @@ def delete_view_model(self, model: Type[Model]) -> None:
sql = self.sql_drop_view % self.quote_name(model._meta.db_table)
self.execute(sql)
- def create_materialized_view_model(self, model: Type[Model]) -> None:
+ def create_materialized_view_model(
+ self, model: Type[Model], *, with_data: bool = True
+ ) -> None:
"""Creates a new materialized view model."""
- self._create_view_model(self.sql_create_materialized_view, model)
+ if with_data:
+ self._create_view_model(
+ self.sql_create_materialized_view_with_data, model
+ )
+ else:
+ self._create_view_model(
+ self.sql_create_materialized_view_without_data, model
+ )
def replace_materialized_view_model(self, model: Type[Model]) -> None:
"""Replaces a materialized view with a newer version.
This is used to alter the backing query of a materialized view.
- Replacing a materialized view is a lot trickier than a normal view.
- For normal views we can use `CREATE OR REPLACE VIEW`, but for
- materialized views, we have to create the new view, copy all
+ Replacing a materialized view is a lot trickier than a normal
+ view. For normal views we can use `CREATE OR REPLACE VIEW`, but
+ for materialized views, we have to create the new view, copy all
indexes and constraints and drop the old one.
This operation is atomic as it runs in a transaction.
@@ -570,7 +583,7 @@ def replace_materialized_view_model(self, model: Type[Model]) -> None:
cursor, model._meta.db_table
)
- with transaction.atomic():
+ with transaction.atomic(using=self.connection.alias):
self.delete_materialized_view_model(model)
self.create_materialized_view_model(model)
@@ -604,21 +617,54 @@ def create_partitioned_model(self, model: Type[Model]) -> None:
self.quote_name(field_name) for field_name in meta.key
)
+ pk_field = model._meta.pk
+ has_composite_pk = self._is_composite_primary_key(pk_field)
+
# create a composite key that includes the partitioning key
- sql = sql.replace(" PRIMARY KEY", "")
- if model._meta.pk and model._meta.pk.name not in meta.key:
- sql = sql[:-1] + ", PRIMARY KEY (%s, %s))" % (
- self.quote_name(model._meta.pk.name),
- partitioning_key_sql,
+ # if the user didn't already define one
+ if not has_composite_pk:
+ inline_pk_sql = self._create_primary_key_inline_sql(model, pk_field)
+ inline_tablespace_sql = (
+ self._create_primary_key_inline_tablespace_sql(model, pk_field)
)
- else:
- sql = sql[:-1] + ", PRIMARY KEY (%s))" % (partitioning_key_sql,)
+
+ sql = sql.replace(inline_pk_sql, "")
+
+ if (
+ not self._is_virtual_primary_key(pk_field)
+ and pk_field
+ and pk_field.name not in meta.key
+ ):
+ last_brace_idx = sql.rfind(")")
+ sql = (
+ sql[:last_brace_idx]
+ + f", PRIMARY KEY (%s, %s){inline_tablespace_sql}"
+ % (
+ self.quote_name(pk_field.name),
+ partitioning_key_sql,
+ )
+ + sql[last_brace_idx:]
+ )
+ else:
+ last_brace_idx = sql.rfind(")")
+ sql = (
+ sql[:last_brace_idx]
+ + f", PRIMARY KEY (%s){inline_tablespace_sql}"
+ % (partitioning_key_sql,)
+ + sql[last_brace_idx:]
+ )
# extend the standard CREATE TABLE statement with
# 'PARTITION BY ...'
- sql += self.sql_partition_by % (
- meta.method.upper(),
- partitioning_key_sql,
+ last_brace_idx = sql.rfind(")") + 1
+ sql = (
+ sql[:last_brace_idx]
+ + self.sql_partition_by
+ % (
+ meta.method.upper(),
+ partitioning_key_sql,
+ )
+ + sql[last_brace_idx:]
)
self.execute(sql, params)
@@ -673,7 +719,7 @@ def add_range_partition(
"%s",
)
- with transaction.atomic():
+ with transaction.atomic(using=self.connection.alias):
self.execute(sql, (from_values, to_values))
if comment:
@@ -716,7 +762,7 @@ def add_list_partition(
",".join(["%s" for _ in range(len(values))]),
)
- with transaction.atomic():
+ with transaction.atomic(using=self.connection.alias):
self.execute(sql, values)
if comment:
@@ -762,7 +808,7 @@ def add_hash_partition(
"%s",
)
- with transaction.atomic():
+ with transaction.atomic(using=self.connection.alias):
self.execute(sql, (modulus, remainder))
if comment:
@@ -799,7 +845,7 @@ def add_default_partition(
self.quote_name(model._meta.db_table),
)
- with transaction.atomic():
+ with transaction.atomic(using=self.connection.alias):
self.execute(sql)
if comment:
@@ -1045,7 +1091,7 @@ def _partitioning_properties_for_model(model: Type[Model]):
% (model.__name__, meta.method)
)
- if not isinstance(meta.key, list):
+ if not isinstance(meta.key, (list, tuple)):
raise ImproperlyConfigured(
(
"Model '%s' is not properly configured to be partitioned."
@@ -1073,6 +1119,72 @@ def _partitioning_properties_for_model(model: Type[Model]):
def create_partition_table_name(self, model: Type[Model], name: str) -> str:
return "%s_%s" % (model._meta.db_table.lower(), name.lower())
+ def _create_primary_key_inline_sql(
+ self, model: Type[Model], pk_field: Optional[Field]
+ ) -> str:
+ pk_field = model._meta.pk
+ if not pk_field:
+ return ""
+
+ tablespace_sql = self._create_primary_key_inline_tablespace_sql(
+ model, pk_field
+ )
+
+ if self._is_virtual_primary_key(pk_field):
+ return ""
+
+ pk_sql = " PRIMARY KEY" if pk_field else ""
+ if tablespace_sql:
+ pk_sql += tablespace_sql
+
+ return pk_sql
+
+ def _create_primary_key_inline_tablespace_sql(
+ self, model: Type[Model], pk_field: Optional[Field]
+ ) -> str:
+ tablespace = (pk_field.db_tablespace if pk_field else None) or model._meta.db_tablespace # type: ignore [attr-defined]
+ return (
+ " " + self.connection.ops.tablespace_sql(tablespace, inline=True)
+ if tablespace
+ else ""
+ )
+
+ def _is_composite_primary_key(self, field: Optional[Field]) -> bool:
+ """Checks whether the specified field is a composite primary key.
+
+ This needs to be wrapped because composite primary keys are only
+ natively supported in Django 5.2 and newer.
+ """
+
+ if not field:
+ return False
+
+ try:
+ from django.db.models.fields.composite import CompositePrimaryKey
+
+ return isinstance(field, CompositePrimaryKey)
+ except ImportError:
+ return False
+
+ def _is_virtual_primary_key(self, field: Optional[Field]) -> bool:
+ """Gets whether the declared primary key is a virtual field that
+ doesn't construct any real column in the DB.
+
+ It is pseudo-standard to create virtual fields by declaring
+ a field with no DB type. CompositePrimaryKey in Django
+ 5.2 and newer uses this. Some third-party packages use
+ the same technique.
+
+ ManyToManyField was the first to actually use this.
+ """
+
+ if not field:
+ return True
+
+ pk_db_params = field.db_parameters(connection=self.connection)
+ pk_db_type = pk_db_params["type"] if pk_db_params else None
+ return not bool(pk_db_type)
+
def _clone_model_field(self, field: Field, **overrides) -> Field:
"""Clones the specified model field and overrides its kwargs with the
specified overrides.
diff --git a/psqlextra/contrib/README.md b/psqlextra/contrib/README.md
new file mode 100644
index 00000000..296194e6
--- /dev/null
+++ b/psqlextra/contrib/README.md
@@ -0,0 +1,5 @@
+# psqlextra.contrib
+
+This module contains an arbitrary collection of utilities and snippets that build on top of the core functionality provided by django-postgres-extra.
+
+This collection is UNTESTED, UNSUPPORTED and UNDOCUMENTED. It is provided here only as inspiration. Use at your own risk.
diff --git a/psqlextra/contrib/__init__.py b/psqlextra/contrib/__init__.py
new file mode 100644
index 00000000..97794eb0
--- /dev/null
+++ b/psqlextra/contrib/__init__.py
@@ -0,0 +1,11 @@
+from .model_data_migrator import (
+    PostgresModelDataMigrator,
+    PostgresModelDataMigratorState,
+)
+from .static_row import (
+    StaticRowQuery,
+    StaticRowQueryCompiler,
+    StaticRowQuerySet,
+)
+from .transaction import no_transaction
+
+__all__ = [
+    "PostgresModelDataMigrator",
+    "PostgresModelDataMigratorState",
+    "StaticRowQuery",
+    "StaticRowQueryCompiler",
+    "StaticRowQuerySet",
+    "no_transaction",
+]
diff --git a/psqlextra/contrib/expressions.py b/psqlextra/contrib/expressions.py
new file mode 100644
index 00000000..dfc57f75
--- /dev/null
+++ b/psqlextra/contrib/expressions.py
@@ -0,0 +1,47 @@
+from django.db import models
+from django.db.models.expressions import CombinedExpression, Func
+
+
+class Equals(CombinedExpression):
+ """Expression that constructs `{lhs} = {rhs}`.
+
+ left-hand side is an aliased field not known to Django.
+ left-hand side is a aliased field not known to Django.
+ """
+
+ connector: str = "="
+
+ def __init__(self, lhs, rhs) -> None:
+ super().__init__(
+ lhs, self.connector, rhs, output_field=models.BooleanField()
+ )
+
+
+class Is(Equals):
+ """Expression that constructs `{lhs} IS {rhs}`."""
+
+ connector: str = "IS"
+
+
+class GreaterThen(Equals):
+ """Expression that constructs `{lhs} > {rhs}`."""
+
+ connector: str = ">"
+
+
+class LowerThenOrEqual(Equals):
+ """Expression that constructs `{lhs} <= {rhs}`."""
+
+ connector: str = "<="
+
+
+class And(Equals):
+ """Expression that constructs `{lhs} AND {rhs}`."""
+
+ connector: str = "AND"
+
+
+class Bool(Func):
+ """Cast to a boolean."""
+
+ function = "BOOL"
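+
+
+# Example usage (a sketch): combine these with `F` and `Value` to annotate a
+# boolean comparison. `MyView` and its `score` column are hypothetical.
+#
+#   from django.db.models import F, Value
+#
+#   MyView.objects.annotate(
+#       is_positive=GreaterThen(F("score"), Value(0)),
+#   )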
diff --git a/psqlextra/contrib/model_data_migrator.py b/psqlextra/contrib/model_data_migrator.py
new file mode 100644
index 00000000..35a2dcd3
--- /dev/null
+++ b/psqlextra/contrib/model_data_migrator.py
@@ -0,0 +1,352 @@
+# mypy: disable-error-code="attr-defined"
+
+import json
+import os
+import time
+
+from abc import abstractmethod
+from contextlib import contextmanager
+from dataclasses import dataclass
+from datetime import timedelta
+from typing import Any, Dict, Type
+
+from django.db import DEFAULT_DB_ALIAS, connections, models, transaction
+
+from psqlextra.locking import PostgresTableLockMode, postgres_lock_model
+from psqlextra.schema import PostgresSchema
+from psqlextra.settings import (
+ postgres_prepend_local_search_path,
+ postgres_set_local,
+)
+
+from .transaction import no_transaction
+
+
+@dataclass
+class PostgresModelDataMigratorState:
+ id: str
+ work_schema: PostgresSchema
+ backup_schema: PostgresSchema
+ default_schema: PostgresSchema
+ storage_settings: Dict[str, Any]
+
+
+class PostgresModelDataMigrator:
+ """Helps altering/moving large amounts of data in a table quickly without
+ interruptions.
+
+ In simple terms: This class temporarily drops all indices
+ and constraints from a table to speed up writes.
+
+ In complicated terms:
+
+ 1. Create copy of the table without indices or constraints
+ in a separate schema.
+
+ The clone is made in a separate schema so that there
+ are no naming conflicts and there is no need to rename
+ anything.
+
+ 2. Allow the caller to fill the copy.
+
+ This will be an order of magnitude faster because
+ there are no indices to build or constraints to
+ statisfy. You are responsible for making sure the
+ satisfy. You are responsible for making sure the
+ data is OK and will satisfy the constraints when
+
+ 3. Add the indices and constraints to the table.
+
+ This takes time, but it's still a lot faster than
+ the indices being built incrementally.
+
+ 4. Allow the caller to clean up the copied table.
+
+ With the indices back in place, filtering the copied
+ table should be fast. Perfect time to clean up
+ some data.
+
+ 5. Vacuum+Analyze the table.
+
+ Vacuuming ensures we don't risk transaction ID
+ wrap-around and analyzing ensures up-to-date
+ statistics.
+
+ 6. Start a transaction.
+
+ 7. Lock the real table in EXCLUSIVE mode.
+
+ This blocks writes or modifications to the table,
+ but does not block readers.
+
+ 8. Allow the caller to move some data from the real table
+ into the copied one.
+
+ This is the perfect time to copy any data that was
+ written to the real table since the migration process
+ began. Since the original table is locked, you can
+ be sure no more rows are being added or modified.
+
+ 9. Move the original table into a backup schema.
+
+ This allows it to be quickly restored manually
+ if the migration is broken in any way.
+
+ 10. Move the copied table in place of the real one.
+
+ 11. Commit the transaction, which releases the lock.
+
+ The process is very similar to how pg_repack rewrites
+ an entire table without long-running locks on the table.
+
+ Attributes:
+ model: The model to migrate.
+ using: Optional name of the database connection to use.
+ operation_timeout: Maximum amount of time a single statement
+ can take.
+ """
+
+ model: Type[models.Model]
+ using: str = DEFAULT_DB_ALIAS
+ operation_timeout: timedelta
+
+ def __init__(self, logger) -> None:
+ self.logger = logger
+ self.connection = connections[self.using]
+ self.schema_editor = self.connection.schema_editor(atomic=False)
+
+ @abstractmethod
+ def fill_cloned_table_lockless(
+ self, work_schema: PostgresSchema, default_schema: PostgresSchema
+ ) -> None:
+ """Moment to fill the cloned table with data."""
+
+ @abstractmethod
+ def clean_cloned_table(
+ self, work_schema: PostgresSchema, default_schema: PostgresSchema
+ ) -> None:
+ """Moment to clean the filled table after it has indices and validated
+ data."""
+
+ @abstractmethod
+ def fill_cloned_table_locked(
+ self, work_schema: PostgresSchema, default_schema: PostgresSchema
+ ) -> None:
+ """Moment to do final cleaning while the original table is locked for
+ writing."""
+
+ @no_transaction(
+ why="The transaction would be too big and some statements cannot be run in a transaction."
+ )
+ def migrate(self) -> PostgresModelDataMigratorState:
+ start_time = time.time()
+
+ with self.atomic():
+ with self.connection.cursor() as cursor:
+ storage_settings = (
+ self.connection.introspection.get_storage_settings(
+ cursor, self.table_name
+ )
+ )
+
+ state = PostgresModelDataMigratorState(
+ id=os.urandom(4).hex(),
+ work_schema=PostgresSchema.create_random(
+ f"migrate_{self.table_name}", using=self.using
+ ),
+ backup_schema=PostgresSchema.create_random(
+ f"backup_{self.table_name}", using=self.using
+ ),
+ default_schema=PostgresSchema.default,
+ storage_settings=storage_settings,
+ )
+
+ logger = self.logger.bind(id=state.id)
+ logger.info(
+ f"Starting migration of {self.table_name}",
+ data=json.dumps(
+ {
+ "work_schema": state.work_schema.name,
+ "backup_schema": state.backup_schema.name,
+ "default_schema": state.default_schema.name,
+ "storage_settings": state.storage_settings,
+ }
+ ),
+ )
+
+ count = self.model.objects.using(self.using).count()
+ logger.info(f"Found {count} records in {self.table_name}")
+
+ phases = [
+ (self._migrate_phase_1, "cloning and filling table"),
+ (self._migrate_phase_2, "adding constraints and indexes"),
+ (self._migrate_phase_3, "cleaning up and vacuuming"),
+ (self._migrate_phase_4, "swapping"),
+ ]
+
+ for index, (phase, description) in enumerate(phases):
+ phase_start_time = time.time()
+ logger.info(
+ f"Starting phase #{index + 1} of migrating {self.table_name}: {description}"
+ )
+ phase(state)
+ logger.info(
+ f"Finished phase #{index + 1} of migrating {self.table_name}: {description}",
+ task_time=time.time() - phase_start_time,
+ )
+
+ state.work_schema.delete(cascade=True, using=self.using)
+
+ logger.info(
+ f"Finished migrating {self.table_name}",
+ task_time=time.time() - start_time,
+ )
+
+ return state
+
+ def _migrate_phase_1(self, state: PostgresModelDataMigratorState) -> None:
+ """Clone the table without constraints or indices."""
+
+ with self.atomic():
+ self.schema_editor.clone_model_structure_to_schema(
+ self.model, schema_name=state.work_schema.name
+ )
+
+ # Disable auto-vacuum on the cloned table to prevent
+ # it from consuming excessive resources _while_ we're
+ # writing to it. We're running this manually before
+ # we turn it back on in the last phase.
+ with postgres_prepend_local_search_path(
+ [state.work_schema.name], using=self.using
+ ):
+ self.schema_editor.alter_model_storage_setting(
+ self.model, "autovacuum_enabled", "false"
+ )
+
+ # Let the derived class fill our cloned table
+ self.fill_cloned_table_lockless(state.work_schema, state.default_schema)
+
+ def _migrate_phase_2(self, state: PostgresModelDataMigratorState) -> None:
+ """Add indices and constraints to the cloned table."""
+
+ # Add indices and constraints to the temporary table
+ # This could be sped up by increasing `maintenance_work_mem`
+ # and `max_parallel_workers_per_gather`, but we won't as
+ # it'll consume more I/O, potentially disturbing normal traffic.
+ with self.atomic():
+ self.schema_editor.clone_model_constraints_and_indexes_to_schema(
+ self.model, schema_name=state.work_schema.name
+ )
+
+ # Validate foreign keys
+ #
+ # The foreign keys have been added in NOT VALID mode so they
+ # only validate new rows. Validate the existing rows.
+ #
+ # This is a two-step process to avoid an AccessExclusiveLock
+ # on the referenced tables.
+ with self.atomic():
+ self.schema_editor.clone_model_foreign_keys_to_schema(
+ self.model, schema_name=state.work_schema.name
+ )
+
+ def _migrate_phase_3(self, state: PostgresModelDataMigratorState) -> None:
+ """Clean & finalize the cloned table."""
+
+ # Let the derived class do some clean up on the temporary
+ # table now that we have indices and constraints.
+ with self.atomic():
+ self.clean_cloned_table(state.work_schema, state.default_schema)
+
+ # Finalize the copy by vacuuming+analyzing it
+ #
+ # VACUUM: There should not be much bloat since the table
+ # is new, but the clean up phase might have generated some.
+ #
+ # We mostly VACUUM to reset the transaction ID and prevent
+ # transaction ID wraparound.
+ #
+ # ANALYZE: The table went from 0 to being filled, by running ANALYZE,
+ # we update the statistics, allowing the query planner to
+ # make good decisions.
+ with postgres_prepend_local_search_path(
+ [state.work_schema.name], using=self.using
+ ):
+ self.schema_editor.vacuum_model(self.model, analyze=True)
+
+ # Re-enable autovacuum on the cloned table
+ with postgres_prepend_local_search_path(
+ [state.work_schema.name], using=self.using
+ ):
+ autovacuum_enabled = state.storage_settings.get(
+ "autovacuum_enabled"
+ )
+ if autovacuum_enabled:
+ self.schema_editor.alter_model_storage_setting(
+ self.model, "autovacuum_enabled", autovacuum_enabled
+ )
+ else:
+ self.schema_editor.reset_model_storage_setting(
+ self.model, "autovacuum_enabled"
+ )
+
+ def _migrate_phase_4(self, state: PostgresModelDataMigratorState) -> None:
+ """Replace the original table with the cloned one."""
+
+ with self.atomic():
+ # Lock the original table for writing so that the caller
+ # is given a chance to do last-minute moving of data.
+ postgres_lock_model(
+ self.model, PostgresTableLockMode.EXCLUSIVE, using=self.using
+ )
+
+ # Let derived class finalize the temporary table while the
+ # original is locked. Not much work should happen here.
+ self.fill_cloned_table_locked(
+ state.work_schema, state.default_schema
+ )
+
+ # Move the original table into the backup schema.
+ # Disable autovacuum on it so we don't waste resources
+ # keeping it clean.
+ self.schema_editor.alter_model_storage_setting(
+ self.model, "autovacuum_enabled", "false"
+ )
+ self.schema_editor.alter_model_schema(
+ self.model, state.backup_schema.name
+ )
+
+ # Move the cloned table in place of the original
+ with postgres_prepend_local_search_path(
+ [state.work_schema.name], using=self.using
+ ):
+ self.schema_editor.alter_model_schema(
+ self.model, state.default_schema.name
+ )
+
+ @property
+ def model_name(self) -> str:
+ return self.model.__name__
+
+ @property
+ def table_name(self) -> str:
+ return self.model._meta.db_table
+
+ @contextmanager
+ def atomic(self):
+ """Creates an atomic transaction with run-time parameters tuned for a
+ live migration.
+
+ - Statement/idle timeout set to prevent runaway queries
+ from continuing long after the migrator was killed.
+ - No parallel workers, to keep I/O under control.
+ """
+
+ with transaction.atomic(durable=True, using=self.using):
+ with postgres_set_local(
+ statement_timeout=f"{self.operation_timeout.total_seconds()}s",
+ idle_in_transaction_session_timeout=f"{self.operation_timeout.total_seconds()}s",
+ max_parallel_workers_per_gather=0,
+ using=self.using,
+ ):
+ yield
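+
+
+# Example usage (a sketch, assuming a concrete model `MyModel` and a
+# structlog-style logger):
+#
+#   class MyModelMigrator(PostgresModelDataMigrator):
+#       model = MyModel
+#       operation_timeout = timedelta(hours=1)
+#
+#       def fill_cloned_table_lockless(self, work_schema, default_schema):
+#           ...  # bulk-copy/transform the majority of the data here
+#
+#       def clean_cloned_table(self, work_schema, default_schema):
+#           ...  # delete/fix rows now that indices exist
+#
+#       def fill_cloned_table_locked(self, work_schema, default_schema):
+#           ...  # copy rows written since the migration started
+#
+#   MyModelMigrator(logger).migrate()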
diff --git a/psqlextra/contrib/static_row.py b/psqlextra/contrib/static_row.py
new file mode 100644
index 00000000..a89905a1
--- /dev/null
+++ b/psqlextra/contrib/static_row.py
@@ -0,0 +1,97 @@
+from typing import Any, List, Optional, Tuple, Type, TypeVar, cast
+
+from django.db import DEFAULT_DB_ALIAS, connections, models
+from django.db.models.expressions import Value
+from django.db.models.query import RawQuerySet
+from django.db.models.sql import Query
+from django.db.models.sql.compiler import SQLCompiler
+
+TModel = TypeVar("TModel", bound=models.Model)
+
+
+class StaticRowQueryCompiler(SQLCompiler):
+ has_extra_select = False
+
+ def as_sql(self, *args, **kwargs):
+ cols = []
+ params = []
+
+ select, _, _ = self.get_select()
+
+ for _, (s_sql, s_params), s_alias in select:
+ cols.append(
+ "%s AS %s"
+ % (
+ s_sql,
+ self.connection.ops.quote_name(s_alias),
+ )
+ )
+
+ params.extend(s_params)
+
+ return f"SELECT {', '.join(cols)}", tuple(params)
+
+
+class StaticRowQuery(Query):
+ def __init__(
+ self, model: Type[models.Model], using: str = DEFAULT_DB_ALIAS
+ ):
+ self.using = using
+
+ super().__init__(model)
+
+ def get_columns(self):
+ return list(self.annotations.keys())
+
+ def get_compiler(
+ self, using: Optional[str] = None, connection=None, elide_empty=True
+ ):
+ using = using or self.using
+
+ compiler = StaticRowQueryCompiler(
+ self, connection or connections[using], using
+ )
+ compiler.setup_query()
+
+ return compiler
+
+ def __iter__(self):
+ compiler = self.get_compiler()
+
+ cursor = compiler.connection.cursor()
+ cursor.execute(*compiler.as_sql())
+
+ return iter(cursor)
+
+
+class StaticRowQuerySet(RawQuerySet):
+ """Query set that compiles queries that don't select from anything and have
+ their values hard-coded.
+
+ Example:
+
+ >>> SELECT 'mystring' AS something, -1 AS somethingelse;
+
+ This is used when you want to add some rows to a result
+ set using UNION in SQL.
+ """
+
+ def __init__(
+ self,
+ model: Type[models.Model],
+ row: List[Tuple[str, Value]],
+ using: str = DEFAULT_DB_ALIAS,
+ ) -> None:
+ query = StaticRowQuery(model, using)
+ query.default_cols = False
+ query.annotations = dict(row)
+
+ sql, params = query.sql_with_params()
+
+ # cast(Tuple[Any], params) because `RawQuerySet.__init__` is mistyped
+ super().__init__(
+ raw_query=sql,
+ model=model,
+ query=query,
+ params=cast(Tuple[Any], params),
+ )
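+
+
+# Example usage (a sketch; `MyModel` is hypothetical): build a single
+# hard-coded row whose SQL can be UNION'ed with a regular queryset's SQL:
+#
+#   from django.db.models.expressions import Value
+#
+#   row_qs = StaticRowQuerySet(
+#       MyModel,
+#       row=[("id", Value(-1)), ("name", Value("static"))],
+#   )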
diff --git a/psqlextra/contrib/transaction.py b/psqlextra/contrib/transaction.py
new file mode 100644
index 00000000..796246c7
--- /dev/null
+++ b/psqlextra/contrib/transaction.py
@@ -0,0 +1,33 @@
+from contextlib import contextmanager
+from typing import Optional
+
+from django.conf import settings
+from django.core.exceptions import SuspiciousOperation
+from django.db import DEFAULT_DB_ALIAS, connections
+
+
+def _is_in_test():
+ return (
+ getattr(settings, "TEST_MODE", False)
+ or getattr(settings, "TESTING", False)
+ or getattr(settings, "TEST", False)
+ )
+
+
+@contextmanager
+def no_transaction(*, why: str, using: Optional[str] = None):
+ """Prevents a method or a block from running in a database transaction."""
+
+ # During tests, allow one level of transaction.atomic(..) nesting
+ # because tests themselves run in a transaction. If there's only
+ # one level of nesting, it's from the test itself and the code
+ # would actually run without a transaction outside the test.
+
+ connection = connections[using or DEFAULT_DB_ALIAS]
+
+ if connection.in_atomic_block and not (
+ _is_in_test() and len(connection.savepoint_ids) <= 1
+ ):
+ raise SuspiciousOperation(f"Unexpected database transaction: {why}")
+
+ yield
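A hedged sketch of a call site for no_transaction (the function below is hypothetical): guard work that must not run inside an enclosing transaction, such as statements that cannot execute in a transaction block.

    from psqlextra.contrib.transaction import no_transaction

    def rebuild_indexes_concurrently(using: str = "default") -> None:
        # Raises SuspiciousOperation if an enclosing transaction.atomic()
        # block is active (beyond the single level allowed during tests).
        with no_transaction(
            why="CREATE INDEX CONCURRENTLY cannot run in a transaction",
            using=using,
        ):
            ...  # non-transactional work goes here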
diff --git a/psqlextra/error.py b/psqlextra/error.py
index b3a5cf83..5be8c37c 100644
--- a/psqlextra/error.py
+++ b/psqlextra/error.py
@@ -1,4 +1,4 @@
-from typing import TYPE_CHECKING, Optional, Type, Union
+from typing import TYPE_CHECKING, Optional, Type, Union, cast
from django import db
@@ -38,7 +38,7 @@ def extract_postgres_error(
):
return None
- return error.__cause__
+ return cast(Union["_Psycopg2Error", "_Psycopg3Error"], error.__cause__)
def extract_postgres_error_code(error: db.Error) -> Optional[str]:
diff --git a/psqlextra/introspect/models.py b/psqlextra/introspect/models.py
index 61a478dd..e160bcaf 100644
--- a/psqlextra/introspect/models.py
+++ b/psqlextra/introspect/models.py
@@ -7,6 +7,7 @@
Optional,
Type,
TypeVar,
+ Union,
cast,
)
@@ -115,9 +116,10 @@ def models_from_cursor(
)
for index, related_field_name in enumerate(related_fields):
- related_model = model._meta.get_field(
- related_field_name
- ).related_model
+ related_model = cast(
+ Union[Type[Model], None],
+ model._meta.get_field(related_field_name).related_model,
+ )
if not related_model:
continue
diff --git a/psqlextra/management/commands/pgpartition.py b/psqlextra/management/commands/pgpartition.py
index 8a6fa636..ca621662 100644
--- a/psqlextra/management/commands/pgpartition.py
+++ b/psqlextra/management/commands/pgpartition.py
@@ -1,6 +1,6 @@
import sys
-from typing import Optional
+from typing import List, Optional
from django.conf import settings
from django.core.management.base import BaseCommand
@@ -37,10 +37,18 @@ def add_arguments(self, parser):
parser.add_argument(
"--using",
"-u",
- help="Optional name of the database connection to use.",
+ help="Name of the database connection to use.",
default="default",
)
+ parser.add_argument(
+ "--model-names",
+ "-m",
+ nargs="+",
+ help="A list of model names for which to partition.",
+ default=None,
+ )
+
parser.add_argument(
"--skip-create",
action="https://github.com/store_true",
@@ -64,13 +72,17 @@ def handle( # type: ignore[override]
using: Optional[str],
skip_create: bool,
skip_delete: bool,
+ model_names: Optional[List[str]] = None,
*args,
**kwargs,
):
partitioning_manager = self._partitioning_manager()
plan = partitioning_manager.plan(
- skip_create=skip_create, skip_delete=skip_delete, using=using
+ skip_create=skip_create,
+ skip_delete=skip_delete,
+ model_names=model_names,
+ using=using,
)
creations_count = len(plan.creations)
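For illustration, the new filter invoked programmatically via Django's call_command (the model names are made up; `yes=True` is assumed to map to the command's --yes option):

    from django.core.management import call_command

    # Plan and apply partitions only for the named models, skipping
    # the confirmation prompt.
    call_command(
        "pgpartition",
        model_names=["Measurement", "AuditLog"],
        yes=True,
    )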
diff --git a/psqlextra/manager/manager.py b/psqlextra/manager/manager.py
index 0931b38a..ee1eb58b 100644
--- a/psqlextra/manager/manager.py
+++ b/psqlextra/manager/manager.py
@@ -37,7 +37,10 @@ def __init__(self, *args, **kwargs):
)
def truncate(
- self, cascade: bool = False, using: Optional[str] = None
+ self,
+ cascade: bool = False,
+ restart_identity: bool = False,
+ using: Optional[str] = None,
) -> None:
"""Truncates this model/table using the TRUNCATE statement.
@@ -51,14 +54,19 @@ def truncate(
False, an error will be raised if there
are rows in other tables referencing
the rows you're trying to delete.
+ restart_identity:
+ Automatically restart sequences owned by
+ columns of the truncated table(s).
"""
connection = connections[using or "default"]
table_name = connection.ops.quote_name(self.model._meta.db_table)
with connection.cursor() as cursor:
- sql = "TRUNCATE TABLE %s" % table_name
+ sql = f"TRUNCATE TABLE {table_name}"
+ # RESTART IDENTITY must precede CASCADE in PostgreSQL's TRUNCATE syntax.
+ if restart_identity:
+ sql += " RESTART IDENTITY"
if cascade:
sql += " CASCADE"
cursor.execute(sql)
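A usage sketch of the two options together (`MyModel` is hypothetical); this issues TRUNCATE TABLE "mymodel" RESTART IDENTITY CASCADE:

    # Empty the table, reset sequences owned by its columns, and cascade
    # to tables referencing it.
    MyModel.objects.truncate(cascade=True, restart_identity=True)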
diff --git a/psqlextra/models/partitioned.py b/psqlextra/models/partitioned.py
index f0115367..3a206775 100644
--- a/psqlextra/models/partitioned.py
+++ b/psqlextra/models/partitioned.py
@@ -1,6 +1,11 @@
-from typing import Iterable
+from typing import Iterable, List, Optional, Tuple
+import django
+
+from django.core.exceptions import ImproperlyConfigured
+from django.db import models
from django.db.models.base import ModelBase
+from django.db.models.options import Options
from psqlextra.types import PostgresPartitioningMethod
@@ -20,19 +25,203 @@ class PostgresPartitionedModelMeta(ModelBase):
default_key: Iterable[str] = []
def __new__(cls, name, bases, attrs, **kwargs):
- new_class = super().__new__(cls, name, bases, attrs, **kwargs)
- meta_class = attrs.pop("PartitioningMeta", None)
+ partitioning_meta_class = attrs.pop("PartitioningMeta", None)
+
+ partitioning_method = getattr(partitioning_meta_class, "method", None)
+ partitioning_key = getattr(partitioning_meta_class, "key", None)
- method = getattr(meta_class, "method", None)
- key = getattr(meta_class, "key", None)
+ if django.VERSION >= (5, 2):
+ for base in bases:
+ cls._delete_auto_created_fields(base)
+
+ cls._create_primary_key(attrs, partitioning_key)
patitioning_meta = PostgresPartitionedModelOptions(
- method=method or cls.default_method, key=key or cls.default_key
+ method=partitioning_method or cls.default_method,
+ key=partitioning_key or cls.default_key,
)
+ new_class = super().__new__(cls, name, bases, attrs, **kwargs)
new_class.add_to_class("_partitioning_meta", patitioning_meta)
return new_class
+ @classmethod
+ def _create_primary_key(
+ cls, attrs, partitioning_key: Optional[List[str]]
+ ) -> None:
+ from django.db.models.fields.composite import CompositePrimaryKey
+
+ # Find any existing primary key the user might have declared.
+ #
+ # If it is a composite primary key, we will do nothing and
+ # keep it as it is. You're on your own.
+ pk = cls._find_primary_key(attrs)
+ if pk and isinstance(pk[1], CompositePrimaryKey):
+ return
+
+ # Create an `id` field (auto-incrementing) if there is no
+ # primary key yet.
+ #
+ # This matches standard Django behavior.
+ if not pk:
+ attrs["id"] = attrs.get("id") or cls._create_auto_field(attrs)
+ pk_fields = ["id"]
+ else:
+ pk_fields = [pk[0]]
+
+ partitioning_keys = (
+ partitioning_key
+ if isinstance(partitioning_key, list)
+ else list(filter(None, [partitioning_key]))
+ )
+
+ unique_pk_fields = set(pk_fields + (partitioning_keys or []))
+ if len(unique_pk_fields) <= 1:
+ if "id" in attrs:
+ attrs["id"].primary_key = True
+ return
+
+ # You might have done something like this:
+ #
+ # id = models.AutoField(primary_key=True)
+ # pk = CompositePrimaryKey("id", "timestamp")
+ #
+ # The `primary_key` attribute has to be removed
+ # from the `id` field in the example above to
+ # avoid having two primary keys.
+ #
+ # Without this, the generated schema will
+ # have two primary keys, which is an error.
+ for field in attrs.values():
+ is_pk = getattr(field, "primary_key", False)
+ if is_pk:
+ field.primary_key = False
+
+ auto_generated_pk = CompositePrimaryKey(*sorted(unique_pk_fields))
+ attrs["pk"] = auto_generated_pk
+
+ @classmethod
+ def _create_auto_field(cls, attrs):
+ app_label = attrs.get("app_label")
+ meta_class = attrs.get("Meta", None)
+
+ pk_class = Options(meta_class, app_label)._get_default_pk_class()
+ return pk_class(verbose_name="ID", auto_created=True)
+
+ @classmethod
+ def _find_primary_key(cls, attrs) -> Optional[Tuple[str, models.Field]]:
+ """Gets the field that has been marked by the user as the primary key
+ field for this model.
+
+ This is quite complex because Django allows a variety of options:
+
+ 1. No PK at all. In this case, Django generates one named `id`
+ as an auto-increment integer (AutoField).
+
+ 2. One field that has `primary_key=True`. Any field can have
+ this attribute, but Django would error if there were more.
+
+ 3. One field named `pk`.
+
+ 4. One field that has `primary_key=True` and a field that
+ is of type `CompositePrimaryKey` that includes that
+ field.
+
+ Since a table can only have one primary key, our goal here
+ is to find the field (if any) that is going to become
+ the primary key of the table.
+
+ Our logic is straightforward:
+
+ 1. If there is a `CompositePrimaryKey`, that field becomes the primary key.
+
+ 2. If there is a field with `primary_key=True`, that field becomes the primary key.
+
+ 3. There is no primary key.
+ """
+
+ from django.db.models.fields.composite import CompositePrimaryKey
+
+ fields = {
+ name: value
+ for name, value in attrs.items()
+ if isinstance(value, models.Field)
+ }
+
+ fields_marked_as_pk = {
+ name: value for name, value in fields.items() if value.primary_key
+ }
+
+ # We cannot let the user define a field named `pk` that is not already a
+ # CompositePrimaryKey, because when we generate a primary key, we want to name it `pk`.
+ field_named_pk = attrs.get("pk")
+ if field_named_pk and not field_named_pk.primary_key:
+ raise ImproperlyConfigured(
+ "You cannot define a field named `pk` that is not a primary key."
+ )
+
+ if field_named_pk:
+ if not isinstance(field_named_pk, CompositePrimaryKey):
+ raise ImproperlyConfigured(
+ "You cannot define a field named `pk` that is not a composite primary key on a partitioned model. Either make `pk` a CompositePrimaryKey or rename it."
+ )
+
+ return ("pk", field_named_pk)
+
+ if not fields_marked_as_pk:
+ return None
+
+ # Make sure the user didn't define N primary keys. Django would also warn
+ # about this.
+ #
+ # One exception is a set up such as:
+ #
+ # >>> id = models.AutoField(primary_key=True)
+ # >>> timestamp = models.DateTimeField()
+ # >>> pk = models.CompositePrimaryKey("id", "timestamp")
+ #
+ # In this case, both `id` and `pk` are marked as primary key. Django
+ # allows this and just ignores the `primary_key=True` attribute
+ # on all the other fields except the composite one.
+ #
+ # We also handle this as expected and treat the CompositePrimaryKey
+ # as the primary key.
+ sorted_fields_marked_as_pk = sorted(
+ list(fields_marked_as_pk.items()),
+ key=lambda pair: 0
+ if isinstance(pair[1], CompositePrimaryKey)
+ else 1,
+ )
+ if len(sorted_fields_marked_as_pk[1:]) > 1:
+ raise ImproperlyConfigured(
+ "You cannot mark more than one fields as a primary key."
+ )
+
+ return sorted_fields_marked_as_pk[0]
+
+ @classmethod
+ def _delete_auto_created_fields(cls, model: models.Model):
+ """Base classes might be injecting an auto-generated `id` field before
+ we even have the chance of doing this ourselves.
+
+ Delete any auto-generated fields from the base class so that we
+ can declare our own. If there is no auto-generated field, one
+ will be added anyway by our own logic.
+ """
+
+ fields = model._meta.local_fields + model._meta.local_many_to_many
+ for field in fields:
+ auto_created = getattr(field, "auto_created", False)
+ if auto_created:
+ if field in model._meta.local_fields:
+ model._meta.local_fields.remove(field)
+
+ if field in model._meta.fields:
+ model._meta.fields.remove(field) # type: ignore [attr-defined]
+
+ if hasattr(model, field.name):
+ delattr(model, field.name)
+
class PostgresPartitionedModel(
PostgresModel, metaclass=PostgresPartitionedModelMeta
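A hedged sketch of what the metaclass produces on Django 5.2+ for a typical partitioned model (the model and its fields are hypothetical): the partitioning key is folded into an auto-generated CompositePrimaryKey named `pk`.

    from django.db import models

    from psqlextra.models import PostgresPartitionedModel
    from psqlextra.types import PostgresPartitioningMethod

    class Measurement(PostgresPartitionedModel):
        class PartitioningMeta:
            method = PostgresPartitioningMethod.RANGE
            key = ["timestamp"]

        timestamp = models.DateTimeField()
        value = models.FloatField()

    # Roughly equivalent to declaring by hand:
    #   id = <default auto field>
    #   pk = models.CompositePrimaryKey("id", "timestamp")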
diff --git a/psqlextra/models/view.py b/psqlextra/models/view.py
index b19f88c8..d24ed5a0 100644
--- a/psqlextra/models/view.py
+++ b/psqlextra/models/view.py
@@ -54,8 +54,8 @@ def _view_query_as_sql_with_params(
When copying the meta options from the model, we convert any
from the above to a raw SQL query with bind parameters. We do
- this is because it is what the SQL driver understands and
- we can easily serialize it into a migration.
+ this because it is what the SQL driver understands and we can
+ easily serialize it into a migration.
"""
# might be a callable to support delayed imports
diff --git a/psqlextra/partitioning/current_time_strategy.py b/psqlextra/partitioning/current_time_strategy.py
index 114a1aaf..795f60ca 100644
--- a/psqlextra/partitioning/current_time_strategy.py
+++ b/psqlextra/partitioning/current_time_strategy.py
@@ -16,7 +16,8 @@ class PostgresCurrentTimePartitioningStrategy(
All buckets will be equal in size and start at the start of the
unit. With monthly partitioning, partitions start on the 1st and
- with weekly partitioning, partitions start on monday.
+ with weekly partitioning, partitions start on Monday, and with
+ hourly partitioning, partitions start at 00:00.
"""
def __init__(
diff --git a/psqlextra/partitioning/manager.py b/psqlextra/partitioning/manager.py
index 074cc1c6..01bac3b4 100644
--- a/psqlextra/partitioning/manager.py
+++ b/psqlextra/partitioning/manager.py
@@ -25,6 +25,7 @@ def plan(
self,
skip_create: bool = False,
skip_delete: bool = False,
+ model_names: Optional[List[str]] = None,
using: Optional[str] = None,
) -> PostgresPartitioningPlan:
"""Plans which partitions should be deleted/created.
@@ -38,6 +39,10 @@ def plan(
If set to True, no partitions will be marked
for deletion, regardless of the configuration.
+ model_names:
+ Optionally, only plan for the models with
+ the specified names.
+
using:
Optional name of the database connection to use.
@@ -48,7 +53,19 @@ def plan(
model_plans = []
+ normalized_model_names = (
+ [model_name.lower().strip() for model_name in model_names]
+ if model_names
+ else []
+ )
+
for config in self.configs:
+ if (
+ model_names
+ and config.model.__name__.lower() not in normalized_model_names
+ ):
+ continue
+
model_plan = self._plan_for_config(
config,
skip_create=skip_create,
diff --git a/psqlextra/partitioning/plan.py b/psqlextra/partitioning/plan.py
index 3fcac44d..301b4241 100644
--- a/psqlextra/partitioning/plan.py
+++ b/psqlextra/partitioning/plan.py
@@ -54,7 +54,7 @@ def apply(self, using: Optional[str]) -> None:
def print(self) -> None:
"""Prints this model plan to the terminal in a readable format."""
- print(f"{self.config.model.__name__}:")
+ print(f"{self.config.model.__name__}: ")
for partition in self.deletions:
print(" - %s" % partition.name())
diff --git a/psqlextra/partitioning/shorthands.py b/psqlextra/partitioning/shorthands.py
index 30175273..f263e362 100644
--- a/psqlextra/partitioning/shorthands.py
+++ b/psqlextra/partitioning/shorthands.py
@@ -16,6 +16,7 @@ def partition_by_current_time(
months: Optional[int] = None,
weeks: Optional[int] = None,
days: Optional[int] = None,
+ hours: Optional[int] = None,
max_age: Optional[relativedelta] = None,
name_format: Optional[str] = None,
) -> PostgresPartitioningConfig:
@@ -43,6 +44,9 @@ def partition_by_current_time(
days:
The amount of days each partition should contain.
+ hours:
+ The amount of hours each partition should contain.
+
max_age:
The maximum age of a partition (calculated from the
start of the partition).
@@ -56,7 +60,7 @@ def partition_by_current_time(
"""
size = PostgresTimePartitionSize(
- years=years, months=months, weeks=weeks, days=days
+ years=years, months=months, weeks=weeks, days=days, hours=hours
)
return PostgresPartitioningConfig(
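As an illustration of the new hours parameter, a partitioning config sketch (`Measurement` is hypothetical and the `count` argument is assumed from the existing shorthand signature):

    from dateutil.relativedelta import relativedelta

    from psqlextra.partitioning import partition_by_current_time

    config = partition_by_current_time(
        model=Measurement,  # hypothetical partitioned model
        hours=4,  # one partition per 4 hours
        count=12,  # partitions to create ahead (assumed argument)
        max_age=relativedelta(days=3),  # delete partitions older than this
    )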
diff --git a/psqlextra/partitioning/time_partition.py b/psqlextra/partitioning/time_partition.py
index 3c8a4d87..64a8cf8d 100644
--- a/psqlextra/partitioning/time_partition.py
+++ b/psqlextra/partitioning/time_partition.py
@@ -20,6 +20,7 @@ class PostgresTimePartition(PostgresRangePartition):
PostgresTimePartitionUnit.MONTHS: "%Y_%b",
PostgresTimePartitionUnit.WEEKS: "%Y_week_%W",
PostgresTimePartitionUnit.DAYS: "%Y_%b_%d",
+ PostgresTimePartitionUnit.HOURS: "%Y_%b_%d_%H:00:00",
}
def __init__(
@@ -31,8 +32,8 @@ def __init__(
end_datetime = start_datetime + size.as_delta()
super().__init__(
- from_values=start_datetime.strftime("%Y-%m-%d"),
- to_values=end_datetime.strftime("%Y-%m-%d"),
+ from_values=start_datetime.strftime("%Y-%m-%d %H:00:00"),
+ to_values=end_datetime.strftime("%Y-%m-%d %H:00:00"),
)
self.size = size
diff --git a/psqlextra/partitioning/time_partition_size.py b/psqlextra/partitioning/time_partition_size.py
index 3d013bcd..b8231ddc 100644
--- a/psqlextra/partitioning/time_partition_size.py
+++ b/psqlextra/partitioning/time_partition_size.py
@@ -1,6 +1,6 @@
import enum
-from datetime import date, datetime
+from datetime import date, datetime, timedelta, timezone
from typing import Optional, Union
from dateutil.relativedelta import relativedelta
@@ -13,6 +13,10 @@ class PostgresTimePartitionUnit(enum.Enum):
MONTHS = "months"
WEEKS = "weeks"
DAYS = "days"
+ HOURS = "hours"
+
+
+UNIX_EPOCH = datetime(1970, 1, 1, tzinfo=timezone.utc)
class PostgresTimePartitionSize:
@@ -20,6 +24,7 @@ class PostgresTimePartitionSize:
unit: PostgresTimePartitionUnit
value: int
+ anchor: datetime
def __init__(
self,
@@ -27,8 +32,10 @@ def __init__(
months: Optional[int] = None,
weeks: Optional[int] = None,
days: Optional[int] = None,
+ hours: Optional[int] = None,
+ anchor: datetime = UNIX_EPOCH,
) -> None:
- sizes = [years, months, weeks, days]
+ sizes = [years, months, weeks, days, hours]
if not any(sizes):
raise PostgresPartitioningError("Partition cannot be 0 in size.")
@@ -38,6 +45,7 @@ def __init__(
"Partition can only have on size unit."
)
+ self.anchor = anchor
if years:
self.unit = PostgresTimePartitionUnit.YEARS
self.value = years
@@ -50,6 +58,9 @@ def __init__(
elif days:
self.unit = PostgresTimePartitionUnit.DAYS
self.value = days
+ elif hours:
+ self.unit = PostgresTimePartitionUnit.HOURS
+ self.value = hours
else:
raise PostgresPartitioningError(
"Unsupported time partitioning unit"
@@ -68,6 +79,9 @@ def as_delta(self) -> relativedelta:
if self.unit == PostgresTimePartitionUnit.DAYS:
return relativedelta(days=self.value)
+ if self.unit == PostgresTimePartitionUnit.HOURS:
+ return relativedelta(hours=self.value)
+
raise PostgresPartitioningError(
"Unsupported time partitioning unit: %s" % self.unit
)
@@ -82,11 +96,21 @@ def start(self, dt: datetime) -> datetime:
if self.unit == PostgresTimePartitionUnit.WEEKS:
return self._ensure_datetime(dt - relativedelta(days=dt.weekday()))
- return self._ensure_datetime(dt)
+ if self.unit == PostgresTimePartitionUnit.DAYS:
+ diff_days = (dt - self.anchor).days
+ partition_index = diff_days // self.value
+ start = self.anchor + timedelta(days=partition_index * self.value)
+ return self._ensure_datetime(start)
+
+ if self.unit == PostgresTimePartitionUnit.HOURS:
+ return self._ensure_datetime(dt.replace(hour=0))
+
+ raise PostgresPartitioningError("Unsupported time partitioning unit: %s" % self.unit)
@staticmethod
def _ensure_datetime(dt: Union[date, datetime]) -> datetime:
- return datetime(year=dt.year, month=dt.month, day=dt.day)
+ hour = dt.hour if isinstance(dt, datetime) else 0
+ return datetime(year=dt.year, month=dt.month, day=dt.day, hour=hour)
def __repr__(self) -> str:
return "PostgresTimePartitionSize<%s, %s>" % (self.unit, self.value)
diff --git a/psqlextra/query.py b/psqlextra/query.py
index 65a20c50..ca1d2226 100644
--- a/psqlextra/query.py
+++ b/psqlextra/query.py
@@ -41,6 +41,14 @@
QuerySetBase = QuerySet
+def peek_iterator(iterable):
+ """Advances the iterator to check whether it yields anything, returning
+ None if it is empty and the full list of its elements otherwise."""
+
+ try:
+ first = next(iterable)
+ except StopIteration:
+ return None
+ return list(chain([first], iterable))
+
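+# For example (illustrative):
+#
+#   peek_iterator(iter([])) -> None
+#   peek_iterator(iter([1, 2])) -> [1, 2]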
+
class PostgresQuerySet(QuerySetBase, Generic[TModel]):
"""Adds support for PostgreSQL specifics."""
@@ -65,9 +73,9 @@ def annotate(self, **annotations) -> "Self": # type: ignore[valid-type, overrid
name of an existing field on the model as the alias name. This
version of the function does allow that.
- This is done by temporarily renaming the fields in order to avoid the
- check for conflicts that the base class does.
- We rename all fields instead of the ones that already exist because
+ This is done by temporarily renaming the fields in order to
+ avoid the check for conflicts that the base class does. We
+ rename all fields instead of the ones that already exist because
the annotations are stored in an OrderedDict. Renaming only the
conflicts will mess up the order.
"""
@@ -174,11 +182,12 @@ def bulk_insert(
A list of either the dicts of the rows inserted, including the pk or
the models of the rows inserted with defaults for any fields not specified
"""
+ if rows is None:
+ return []
- def is_empty(r):
- return all([False for _ in r])
+ rows = peek_iterator(iter(rows))
- if not rows or is_empty(rows):
+ if not rows:
return []
if not self.conflict_target and not self.conflict_action:
@@ -527,19 +536,22 @@ def _build_insert_compiler(
compiler = query.get_compiler(using)
return compiler
- def _is_magical_field(self, model_instance, field, is_insert: bool):
- """Verifies whether this field is gonna modify something on its own.
-
- "Magical" means that a field modifies the field value
- during the pre_save.
+ def _pre_save_field(
+ self,
+ model_instance: models.Model,
+ field: models.Field,
+ *,
+ is_insert: bool
+ ):
+ """Pre-saves the model and gets whether the :see:pre_save method makes
+ any modifications to the field value.
Arguments:
model_instance:
The model instance the field is defined on.
field:
- The field to get of whether the field is
- magical.
+ The field to pre-save.
is_insert:
Pretend whether this is an insert?
@@ -585,11 +597,12 @@ def _get_upsert_fields(self, kwargs):
and include them in the list of insert/update fields.
"""
- model_instance = self.model(**kwargs)
insert_fields = []
update_values = {}
- for field in model_instance._meta.local_concrete_fields:
+ insert_model_instance = self.model(**kwargs)
+ update_model_instance = self.model(**kwargs)
+ for field in insert_model_instance._meta.local_concrete_fields:
has_default = field.default != NOT_PROVIDED
if field.name in kwargs or field.column in kwargs:
insert_fields.append(field)
@@ -607,10 +620,14 @@ def _get_upsert_fields(self, kwargs):
update_values[field.name] = ExcludedCol(field)
continue
- if self._is_magical_field(model_instance, field, is_insert=True):
+ if self._pre_save_field(
+ insert_model_instance, field, is_insert=True
+ ):
insert_fields.append(field)
- if self._is_magical_field(model_instance, field, is_insert=False):
+ if self._pre_save_field(
+ update_model_instance, field, is_insert=False
+ ):
update_values[field.name] = ExcludedCol(field)
return insert_fields, update_values
diff --git a/psqlextra/settings.py b/psqlextra/settings.py
index 6f75c779..b6061766 100644
--- a/psqlextra/settings.py
+++ b/psqlextra/settings.py
@@ -16,7 +16,8 @@ def postgres_set_local(
The effect is undone when the context manager exits.
- See https://www.postgresql.org/docs/current/runtime-config-client.html
+ See
+ https://www.postgresql.org/docs/current/runtime-config-client.html
for an overview of all available options.
"""
diff --git a/psqlextra/sql.py b/psqlextra/sql.py
index b2655088..750287c5 100644
--- a/psqlextra/sql.py
+++ b/psqlextra/sql.py
@@ -1,4 +1,5 @@
from collections import OrderedDict
+from collections.abc import Iterable
from typing import Any, Dict, List, Optional, Tuple, Union
import django
@@ -7,6 +8,7 @@
from django.db import connections, models
from django.db.models import Expression, sql
from django.db.models.constants import LOOKUP_SEP
+from django.db.models.expressions import Ref
from .compiler import PostgresInsertOnConflictCompiler
from .compiler import SQLUpdateCompiler as PostgresUpdateCompiler
@@ -68,12 +70,24 @@ def rename_annotations(self, annotations) -> None:
# and a list in Django 5.x and newer.
# https://github.com/django/django/commit/d6b6e5d0fd4e6b6d0183b4cf6e4bd4f9afc7bf67
if isinstance(self.annotation_select_mask, set):
- self.annotation_select_mask.discard(old_name)
- self.annotation_select_mask.add(new_name)
+ updated_annotation_select_mask = set(
+ self.annotation_select_mask
+ )
+ updated_annotation_select_mask.discard(old_name)
+ updated_annotation_select_mask.add(new_name)
+ self.set_annotation_mask(updated_annotation_select_mask)
elif isinstance(self.annotation_select_mask, list):
self.annotation_select_mask.remove(old_name)
self.annotation_select_mask.append(new_name)
+ if isinstance(self.group_by, Iterable):
+ for statement in self.group_by:
+ if not isinstance(statement, Ref):
+ continue
+
+ if statement.refs in annotations: # type: ignore[attr-defined]
+ statement.refs = annotations[statement.refs] # type: ignore[attr-defined]
+
self.annotations.clear()
self.annotations.update(new_annotations)
diff --git a/pyproject.toml b/pyproject.toml
index fb35b3b4..a68f344f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -6,7 +6,7 @@ exclude = '''
| .env
| env
| venv
- | tests/snapshots
+ | tests/__snapshots__
)/
)
'''
@@ -19,9 +19,68 @@ exclude = "(env|build|dist|migrations)"
[[tool.mypy.overrides]]
module = [
- "psycopg.*"
+ "psycopg.*",
+ "django.db.models.fields.composite"
]
ignore_missing_imports = true
[tool.django-stubs]
django_settings_module = "settings"
+
+[tool.poe.tasks]
+_autoflake = "python3 -m autoflake --remove-all -i -r setup.py psqlextra tests"
+_autopep8 = "autopep8 -i -r setup.py psqlextra tests"
+_isort_setup_py = "isort setup.py"
+_isort_psqlextra = "isort psqlextra"
+_isort_tests = "isort tests"
+_isort_verify_setup_py = "isort -c setup.py"
+_isort_verify_psqlextra = "isort -c psqlextra"
+_isort_verify_tests = "isort -c tests"
+
+[tool.poe.tasks.lint]
+cmd = "python3 -m flake8 --builtin=__version__ setup.py psqlextra tests"
+help = "Lints all the code."
+
+[tool.poe.tasks.lint_fix]
+sequence = ["_autoflake", "_autopep8"]
+help = "Auto-fixes linter errors."
+
+[tool.poe.tasks.lint_types]
+cmd = "mypy --package psqlextra --pretty --show-error-codes"
+help = "Type-checks the code."
+
+[tool.poe.tasks.format]
+cmd = "black setup.py psqlextra tests"
+help = "Auto-formats the code."
+
+[tool.poe.tasks.format_verify]
+cmd = "black --check setup.py psqlextra tests"
+help = "Verifies that the code was formatted properly."
+
+[tool.poe.tasks.format_docstrings]
+cmd = "docformatter -r -i ."
+help = "Auto-formats doc strings."
+
+[tool.poe.tasks.format_docstrings_verify]
+cmd = "docformatter -r -c ."
+help = "Verifies all doc strings are properly formatted."
+
+[tool.poe.tasks.sort_imports]
+sequence = ["_isort_setup_py", "_isort_psqlextra", "_isort_tests"]
+help = "Auto-sorts the imports."
+
+[tool.poe.tasks.sort_imports_verify]
+sequence = ["_isort_verify_setup_py", "_isort_verify_psqlextra", "_isort_verify_tests"]
+help = "Verifies that the imports are properly sorted."
+
+[tool.poe.tasks.fix]
+sequence = ["format", "format_docstrings", "sort_imports", "lint_fix", "lint", "lint_types"]
+help = "Automatically format code and fix linting errors."
+
+[tool.poe.tasks.verify]
+sequence = ["format_verify", "format_docstrings_verify", "sort_imports_verify", "lint", "lint_types"]
+help = "Automatically format code and fix linting errors."
+
+[tool.poe.tasks.test]
+cmd = "pytest --cov=psqlextra --cov-report=term --cov-report=xml:reports/xml --cov-report=html:reports/html --junitxml=reports/junit/tests.xml --reuse-db -vv"
+help = "Runs all the tests."
diff --git a/requirements-all.txt b/requirements-all.txt
index d4ca40f0..c7ae18d2 100644
--- a/requirements-all.txt
+++ b/requirements-all.txt
@@ -1,4 +1,6 @@
-e .
+-e .[dev]
-e .[test]
+-e .[test-report]
-e .[analysis]
-e .[docs]
diff --git a/requirements-test.txt b/requirements-test.txt
new file mode 100644
index 00000000..bd31d78f
--- /dev/null
+++ b/requirements-test.txt
@@ -0,0 +1,3 @@
+-e .
+-e .[dev]
+-e .[test]
diff --git a/settings.py b/settings.py
index ed0d0f98..2a5e0fac 100644
--- a/settings.py
+++ b/settings.py
@@ -1,4 +1,20 @@
-import dj_database_url
+import os
+
+from urllib.parse import urlparse
+
+
+def _parse_db_url(/service/url: str):
+ parsed_url = urlparse(url)
+
+ return {
+ 'ENGINE': 'django.db.backends.postgresql',
+ 'NAME': (parsed_url.path or '').strip('/') or "postgres",
+ 'HOST': parsed_url.hostname or None,
+ 'PORT': parsed_url.port or None,
+ 'USER': parsed_url.username or None,
+ 'PASSWORD': parsed_url.password or None,
+ }
+
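+# For example (hypothetical URL):
+#
+#   _parse_db_url('/service/postgres://user:secret@db:5432/psqlextra') returns
+#   {'ENGINE': 'django.db.backends.postgresql', 'NAME': 'psqlextra',
+#    'HOST': 'db', 'PORT': 5432, 'USER': 'user', 'PASSWORD': 'secret'}
+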
DEBUG = True
TEMPLATE_DEBUG = True
@@ -8,10 +24,10 @@
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
DATABASES = {
- 'default': dj_database_url.config(default='postgres:///psqlextra'),
+ 'default': _parse_db_url(https://github.com/os.environ.get('DATABASE_URL',%20'postgres:///psqlextra')),
}
-DATABASES['default']['ENGINE'] = 'psqlextra.backend'
+DATABASES['default']['ENGINE'] = 'tests.psqlextra_test_backend'
LANGUAGE_CODE = 'en'
LANGUAGES = (
@@ -24,3 +40,8 @@
'psqlextra',
'tests',
)
+
+USE_TZ = True
+TIME_ZONE = 'UTC'
+
+DATABASE_IN_CONTAINER = os.environ.get('DATABASE_IN_CONTAINER') == 'true'
diff --git a/setup.cfg b/setup.cfg
index 65713eaa..ecb84153 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -9,4 +9,3 @@ lines_between_types=1
include_trailing_comma=True
known_third_party=pytest,freezegun
float_to_top=true
-skip_glob=tests/snapshots/*.py
diff --git a/setup.py b/setup.py
index c3431e27..047d3613 100644
--- a/setup.py
+++ b/setup.py
@@ -1,35 +1,9 @@
-import distutils.cmd
import os
-import subprocess
from setuptools import find_packages, setup
exec(open("psqlextra/_version.py").read())
-
-class BaseCommand(distutils.cmd.Command):
- user_options = []
-
- def initialize_options(self):
- pass
-
- def finalize_options(self):
- pass
-
-
-def create_command(text, commands):
- """Creates a custom setup.py command."""
-
- class CustomCommand(BaseCommand):
- description = text
-
- def run(self):
- for cmd in commands:
- subprocess.check_call(cmd)
-
- return CustomCommand
-
-
with open(
os.path.join(os.path.dirname(__file__), "README.md"), encoding="utf-8"
) as readme:
@@ -38,7 +12,7 @@ def run(self):
setup(
name="django-postgres-extra",
- version=__version__,
+ version=__version__, # noqa
packages=find_packages(exclude=["tests"]),
package_data={"psqlextra": ["py.typed"]},
include_package_data=True,
@@ -49,7 +23,16 @@ def run(self):
url="https://github.com/SectorLabs/django-postgres-extra",
author="Sector Labs",
author_email="open-source@sectorlabs.ro",
- keywords=["django", "postgres", "extra", "hstore", "ltree"],
+ keywords=[
+ "django",
+ "postgres",
+ "extra",
+ "hstore",
+ "upsert",
+ "partioning",
+ "materialized",
+ "view",
+ ],
classifiers=[
"Environment :: Web Environment",
"Framework :: Django",
@@ -72,148 +55,62 @@ def run(self):
"python-dateutil>=2.8.0,<=3.0.0",
],
extras_require={
+ # Python 3.6 - Python 3.13
':python_version <= "3.6"': ["dataclasses"],
- "docs": ["Sphinx==2.2.0", "sphinx-rtd-theme==0.4.3", "docutils<0.18"],
+ "dev": [
+ "poethepoet==0.34.0; python_version >= '3.9'",
+ "poethepoet==0.30.0; python_version >= '3.8' and python_version < '3.9'",
+ "poethepoet==0.19.0; python_version >= '3.7' and python_version < '3.8'",
+ "poethepoet==0.13.1; python_version >= '3.6' and python_version < '3.7'",
+ ],
"test": [
- "psycopg2>=2.8.4,<3.0.0",
- "dj-database-url==0.5.0",
- "pytest==6.2.5",
- "pytest-benchmark==3.4.1",
- "pytest-django==4.4.0",
- "pytest-cov==3.0.0",
- "pytest-lazy-fixture==0.6.3",
- "pytest-freezegun==0.4.2",
- "tox==3.24.4",
- "freezegun==1.1.0",
- "coveralls==3.3.0",
- "snapshottest==0.6.0",
+ "psycopg2==2.9.10; python_version >= '3.8'",
+ "psycopg2==2.9.9; python_version >= '3.7' and python_version < '3.8'",
+ "psycopg2==2.9.8; python_version >= '3.6' and python_version < '3.7'",
+ "types-psycopg2==2.9.21.20250516; python_version >= '3.9'",
+ "types-psycopg2==2.9.8; python_version >= '3.6' and python_version < '3.9'",
+ "pytest==8.4.0; python_version > '3.8'",
+ "pytest==7.0.1; python_version <= '3.8'",
+ "pytest-benchmark==5.1.0; python_version > '3.8'",
+ "pytest-benchmark==3.4.1; python_version <= '3.8'",
+ "pytest-django==4.11.1; python_version > '3.7'",
+ "pytest-django==4.5.2; python_version <= '3.7'",
+ "pytest-cov==6.1.1; python_version > '3.8'",
+ "pytest-cov==4.0.0; python_version <= '3.8'",
+ "coverage==7.8.2; python_version > '3.8'",
+ "coverage==7.6.1; python_version >= '3.8' and python_version <= '3.8'",
+ "coverage==6.2; python_version <= '3.7'",
+ "tox==4.26.0; python_version > '3.8'",
+ "tox==3.28.0; python_version <= '3.8'",
+ "freezegun==1.5.2; python_version > '3.7'",
+ "freezegun==1.2.2; python_version <= '3.7'",
+ "syrupy==4.9.1; python_version >= '3.9'",
+ "syrupy==2.3.1; python_version <= '3.8'",
],
+ # Python 3.11+ is assumed from here on
+ "test-report": ["coveralls==4.0.1"],
"analysis": [
"black==22.3.0",
- "flake8==4.0.1",
- "autoflake==1.4",
- "autopep8==1.6.0",
- "isort==5.10.0",
- "docformatter==1.4",
- "mypy==1.2.0; python_version > '3.6'",
- "mypy==0.971; python_version <= '3.6'",
- "django-stubs==1.16.0; python_version > '3.6'",
- "django-stubs==1.9.0; python_version <= '3.6'",
- "typing-extensions==4.5.0; python_version > '3.6'",
- "typing-extensions==4.1.0; python_version <= '3.6'",
- "types-dj-database-url==1.3.0.0",
- "types-psycopg2==2.9.21.9",
- "types-python-dateutil==2.8.19.12",
+ "flake8==7.2.0",
+ "autoflake==2.3.1",
+ "autopep8==2.3.2",
+ "isort==6.0.1",
+ "docformatter==1.7.7",
+ "mypy==1.16.0",
+ "django-stubs==4.2.7",
+ "typing-extensions==4.14.0",
+ "types-dj-database-url==1.3.0.4",
+ "types-python-dateutil==2.9.0.20250516",
+ ],
+ "docs": [
+ "Sphinx==8.2.3",
+ "sphinx-rtd-theme==3.0.2",
+ "docutils==0.21.2",
+ "Jinja2==3.1.6",
],
"publish": [
"build==0.7.0",
"twine==3.7.1",
],
},
- cmdclass={
- "lint": create_command(
- "Lints the code",
- [
- [
- "flake8",
- "--builtin=__version__",
- "setup.py",
- "psqlextra",
- "tests",
- ]
- ],
- ),
- "lint_fix": create_command(
- "Lints the code",
- [
- [
- "autoflake",
- "--remove-all",
- "-i",
- "-r",
- "setup.py",
- "psqlextra",
- "tests",
- ],
- ["autopep8", "-i", "-r", "setup.py", "psqlextra", "tests"],
- ],
- ),
- "lint_types": create_command(
- "Type-checks the code",
- [
- [
- "mypy",
- "--package",
- "psqlextra",
- "--pretty",
- "--show-error-codes",
- ],
- ],
- ),
- "format": create_command(
- "Formats the code", [["black", "setup.py", "psqlextra", "tests"]]
- ),
- "format_verify": create_command(
- "Checks if the code is auto-formatted",
- [["black", "--check", "setup.py", "psqlextra", "tests"]],
- ),
- "format_docstrings": create_command(
- "Auto-formats doc strings", [["docformatter", "-r", "-i", "."]]
- ),
- "format_docstrings_verify": create_command(
- "Verifies that doc strings are properly formatted",
- [["docformatter", "-r", "-c", "."]],
- ),
- "sort_imports": create_command(
- "Automatically sorts imports",
- [
- ["isort", "setup.py"],
- ["isort", "psqlextra"],
- ["isort", "tests"],
- ],
- ),
- "sort_imports_verify": create_command(
- "Verifies all imports are properly sorted.",
- [
- ["isort", "-c", "setup.py"],
- ["isort", "-c", "psqlextra"],
- ["isort", "-c", "tests"],
- ],
- ),
- "fix": create_command(
- "Automatically format code and fix linting errors",
- [
- ["python", "setup.py", "format"],
- ["python", "setup.py", "format_docstrings"],
- ["python", "setup.py", "sort_imports"],
- ["python", "setup.py", "lint_fix"],
- ["python", "setup.py", "lint"],
- ["python", "setup.py", "lint_types"],
- ],
- ),
- "verify": create_command(
- "Verifies whether the code is auto-formatted and has no linting errors",
- [
- ["python", "setup.py", "format_verify"],
- ["python", "setup.py", "format_docstrings_verify"],
- ["python", "setup.py", "sort_imports_verify"],
- ["python", "setup.py", "lint"],
- ["python", "setup.py", "lint_types"],
- ],
- ),
- "test": create_command(
- "Runs all the tests",
- [
- [
- "pytest",
- "--cov=psqlextra",
- "--cov-report=term",
- "--cov-report=xml:reports/xml",
- "--cov-report=html:reports/html",
- "--junitxml=reports/junit/tests.xml",
- "--reuse-db",
- ]
- ],
- ),
- },
)
diff --git a/tests/__snapshots__/test_management_command_partition/test_management_command_partition_auto_confirm[y].json b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_auto_confirm[y].json
new file mode 100644
index 00000000..664538ac
--- /dev/null
+++ b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_auto_confirm[y].json
@@ -0,0 +1 @@
+"test: \n - tobedeleted\n + tobecreated\n\n1 partitions will be deleted\n1 partitions will be created\nOperations applied.\n"
diff --git a/tests/__snapshots__/test_management_command_partition/test_management_command_partition_auto_confirm[yes].json b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_auto_confirm[yes].json
new file mode 100644
index 00000000..664538ac
--- /dev/null
+++ b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_auto_confirm[yes].json
@@ -0,0 +1 @@
+"test: \n - tobedeleted\n + tobecreated\n\n1 partitions will be deleted\n1 partitions will be created\nOperations applied.\n"
diff --git a/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[capital_n].json b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[capital_n].json
new file mode 100644
index 00000000..f1c2aa68
--- /dev/null
+++ b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[capital_n].json
@@ -0,0 +1 @@
+"test: \n - tobedeleted\n + tobecreated\n\n1 partitions will be deleted\n1 partitions will be created\nDo you want to proceed? (y/N) Operation aborted.\n"
diff --git a/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[capital_no].json b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[capital_no].json
new file mode 100644
index 00000000..f1c2aa68
--- /dev/null
+++ b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[capital_no].json
@@ -0,0 +1 @@
+"test: \n - tobedeleted\n + tobecreated\n\n1 partitions will be deleted\n1 partitions will be created\nDo you want to proceed? (y/N) Operation aborted.\n"
diff --git a/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[n].json b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[n].json
new file mode 100644
index 00000000..f1c2aa68
--- /dev/null
+++ b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[n].json
@@ -0,0 +1 @@
+"test: \n - tobedeleted\n + tobecreated\n\n1 partitions will be deleted\n1 partitions will be created\nDo you want to proceed? (y/N) Operation aborted.\n"
diff --git a/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[no].json b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[no].json
new file mode 100644
index 00000000..f1c2aa68
--- /dev/null
+++ b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[no].json
@@ -0,0 +1 @@
+"test: \n - tobedeleted\n + tobecreated\n\n1 partitions will be deleted\n1 partitions will be created\nDo you want to proceed? (y/N) Operation aborted.\n"
diff --git a/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[title_no].json b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[title_no].json
new file mode 100644
index 00000000..f1c2aa68
--- /dev/null
+++ b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[title_no].json
@@ -0,0 +1 @@
+"test: \n - tobedeleted\n + tobecreated\n\n1 partitions will be deleted\n1 partitions will be created\nDo you want to proceed? (y/N) Operation aborted.\n"
diff --git a/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_yes[capital_y].json b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_yes[capital_y].json
new file mode 100644
index 00000000..530f6bdb
--- /dev/null
+++ b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_yes[capital_y].json
@@ -0,0 +1 @@
+"test: \n - tobedeleted\n + tobecreated\n\n1 partitions will be deleted\n1 partitions will be created\nDo you want to proceed? (y/N) Operations applied.\n"
diff --git a/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_yes[capital_yes].json b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_yes[capital_yes].json
new file mode 100644
index 00000000..530f6bdb
--- /dev/null
+++ b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_yes[capital_yes].json
@@ -0,0 +1 @@
+"test: \n - tobedeleted\n + tobecreated\n\n1 partitions will be deleted\n1 partitions will be created\nDo you want to proceed? (y/N) Operations applied.\n"
diff --git a/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_yes[y].json b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_yes[y].json
new file mode 100644
index 00000000..530f6bdb
--- /dev/null
+++ b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_yes[y].json
@@ -0,0 +1 @@
+"test: \n - tobedeleted\n + tobecreated\n\n1 partitions will be deleted\n1 partitions will be created\nDo you want to proceed? (y/N) Operations applied.\n"
diff --git a/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_yes[yes].json b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_yes[yes].json
new file mode 100644
index 00000000..530f6bdb
--- /dev/null
+++ b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_yes[yes].json
@@ -0,0 +1 @@
+"test: \n - tobedeleted\n + tobecreated\n\n1 partitions will be deleted\n1 partitions will be created\nDo you want to proceed? (y/N) Operations applied.\n"
diff --git a/tests/__snapshots__/test_management_command_partition/test_management_command_partition_dry_run[d].json b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_dry_run[d].json
new file mode 100644
index 00000000..6b67fa96
--- /dev/null
+++ b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_dry_run[d].json
@@ -0,0 +1 @@
+"test: \n - tobedeleted\n + tobecreated\n\n1 partitions will be deleted\n1 partitions will be created\n"
diff --git a/tests/__snapshots__/test_management_command_partition/test_management_command_partition_dry_run[dry].json b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_dry_run[dry].json
new file mode 100644
index 00000000..6b67fa96
--- /dev/null
+++ b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_dry_run[dry].json
@@ -0,0 +1 @@
+"test: \n - tobedeleted\n + tobecreated\n\n1 partitions will be deleted\n1 partitions will be created\n"
diff --git a/tests/conftest.py b/tests/conftest.py
index 387edd3b..9620d123 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,10 +1,62 @@
+import tempfile
+import uuid
+
import pytest
+from django.conf import settings
from django.contrib.postgres.signals import register_type_handlers
from django.db import connection
from .fake_model import define_fake_app
+custom_tablespace_name = f"psqlextra-tblspace-tests-{str(uuid.uuid4())[:8]}"
+
+
+@pytest.fixture
+def custom_tablespace():
+ """Gets the name of a custom tablespace that is not the default to be used
+ for tests that need to assert functionality that depends on custom
+ tablespaces.
+
+ A single custom tablespace is used. Nothing should persist in the
+ tablespace because each test runs in a transaction that is rolled
+ back.
+ """
+
+ return custom_tablespace_name
+
+
+@pytest.fixture(scope="session")
+def django_db_setup(django_db_setup, django_db_blocker):
+ """Extend default pytest-django DB set up to create a single, custom
+ tablespace to be used by tests that need to test functionality that depends
+ on custom tablespaces."""
+
+ with django_db_blocker.unblock():
+ qn = connection.ops.quote_name
+ with tempfile.TemporaryDirectory() as temp_dir:
+ with connection.cursor() as cursor:
+ # If the database is remote, like in a CI environment, make
+ # sure the temporary directory exists in the container
+ # that PostgreSQL is running in.
+ #
+ # Note that this typically only works in CI environments
+ # where we have full control to execute arbitrary commands.
+ if settings.DATABASE_IN_CONTAINER:
+ cursor.execute(
+ f"COPY (select 1) TO PROGRAM 'mkdir --mode=777 -p {temp_dir}'"
+ )
+
+ cursor.execute(
+ f"CREATE TABLESPACE {qn(custom_tablespace_name)} LOCATION %s",
+ (temp_dir,),
+ )
+
+ yield
+
+ with connection.cursor() as cursor:
+ cursor.execute(f"DROP TABLESPACE {qn(custom_tablespace_name)}")
+
@pytest.fixture(scope="function", autouse=True)
def database_access(db):
diff --git a/tests/snapshots/__init__.py b/tests/psqlextra_test_backend/__init__.py
similarity index 100%
rename from tests/snapshots/__init__.py
rename to tests/psqlextra_test_backend/__init__.py
diff --git a/tests/psqlextra_test_backend/base.py b/tests/psqlextra_test_backend/base.py
new file mode 100644
index 00000000..0961a2bc
--- /dev/null
+++ b/tests/psqlextra_test_backend/base.py
@@ -0,0 +1,23 @@
+from datetime import timezone
+
+import django
+
+from django.conf import settings
+
+from psqlextra.backend.base import DatabaseWrapper as PSQLExtraDatabaseWrapper
+
+
+class DatabaseWrapper(PSQLExtraDatabaseWrapper):
+ # Works around a compatibility issue between Django <3.1 and psycopg2 2.9
+ # in combination with USE_TZ
+ #
+ # See: https://github.com/psycopg/psycopg2/issues/1293#issuecomment-862835147
+ if django.VERSION < (3, 1):
+
+ def create_cursor(self, name=None):
+ cursor = super().create_cursor(name)
+ cursor.tzinfo_factory = (
+ lambda offset: timezone.utc if settings.USE_TZ else None
+ )
+
+ return cursor
diff --git a/tests/snapshots/snap_test_management_command_partition.py b/tests/snapshots/snap_test_management_command_partition.py
deleted file mode 100644
index 1cac2227..00000000
--- a/tests/snapshots/snap_test_management_command_partition.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# -*- coding: utf-8 -*-
-# snapshottest: v1 - https://goo.gl/zC4yUc
-from __future__ import unicode_literals
-
-from snapshottest import GenericRepr, Snapshot
-
-
-snapshots = Snapshot()
-
-snapshots['test_management_command_partition_auto_confirm[--yes] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nOperations applied.\\n', err='')")
-
-snapshots['test_management_command_partition_auto_confirm[-y] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nOperations applied.\\n', err='')")
-
-snapshots['test_management_command_partition_confirm_no[NO] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operation aborted.\\n', err='')")
-
-snapshots['test_management_command_partition_confirm_no[N] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operation aborted.\\n', err='')")
-
-snapshots['test_management_command_partition_confirm_no[No] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operation aborted.\\n', err='')")
-
-snapshots['test_management_command_partition_confirm_no[n] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operation aborted.\\n', err='')")
-
-snapshots['test_management_command_partition_confirm_no[no] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operation aborted.\\n', err='')")
-
-snapshots['test_management_command_partition_confirm_yes[YES] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operations applied.\\n', err='')")
-
-snapshots['test_management_command_partition_confirm_yes[Y] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operations applied.\\n', err='')")
-
-snapshots['test_management_command_partition_confirm_yes[y] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operations applied.\\n', err='')")
-
-snapshots['test_management_command_partition_confirm_yes[yes] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operations applied.\\n', err='')")
-
-snapshots['test_management_command_partition_dry_run[--dry] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\n', err='')")
-
-snapshots['test_management_command_partition_dry_run[-d] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\n', err='')")
diff --git a/tests/test_introspect.py b/tests/test_introspect.py
index 5e5a9ffc..bf50d3f1 100644
--- a/tests/test_introspect.py
+++ b/tests/test_introspect.py
@@ -1,4 +1,5 @@
import django
+import freezegun
import pytest
from django.contrib.postgres.fields import ArrayField
@@ -51,13 +52,14 @@ def mocked_model_foreign_keys(
@pytest.fixture
-def mocked_model_varying_fields_instance(freezer, mocked_model_varying_fields):
- return mocked_model_varying_fields.objects.create(
- title="hello world",
- updated_at=timezone.now(),
- content={"a": 1},
- items=["a", "b"],
- )
+def mocked_model_varying_fields_instance(mocked_model_varying_fields):
+ with freezegun.freeze_time("2020-1-1 12:00:00.0"):
+ return mocked_model_varying_fields.objects.create(
+ title="hello world",
+ updated_at=timezone.now(),
+ content={"a": 1},
+ items=["a", "b"],
+ )
@pytest.fixture
@@ -78,17 +80,22 @@ def models_from_cursor_wrapper_single():
reason=django_31_skip_reason,
)
@pytest.mark.parametrize(
- "models_from_cursor_wrapper",
+ "models_from_cursor_wrapper_name",
[
- pytest.lazy_fixture("models_from_cursor_wrapper_multiple"),
- pytest.lazy_fixture("models_from_cursor_wrapper_single"),
+ "models_from_cursor_wrapper_multiple",
+ "models_from_cursor_wrapper_single",
],
)
def test_models_from_cursor_applies_converters(
+ request,
mocked_model_varying_fields,
mocked_model_varying_fields_instance,
- models_from_cursor_wrapper,
+ models_from_cursor_wrapper_name,
):
+ models_from_cursor_wrapper = request.getfixturevalue(
+ models_from_cursor_wrapper_name
+ )
+
with connection.cursor() as cursor:
cursor.execute(
*mocked_model_varying_fields.objects.all().query.sql_with_params()
@@ -114,17 +121,22 @@ def test_models_from_cursor_applies_converters(
reason=django_31_skip_reason,
)
@pytest.mark.parametrize(
- "models_from_cursor_wrapper",
+ "models_from_cursor_wrapper_name",
[
- pytest.lazy_fixture("models_from_cursor_wrapper_multiple"),
- pytest.lazy_fixture("models_from_cursor_wrapper_single"),
+ "models_from_cursor_wrapper_multiple",
+ "models_from_cursor_wrapper_single",
],
)
def test_models_from_cursor_handles_field_order(
+ request,
mocked_model_varying_fields,
mocked_model_varying_fields_instance,
- models_from_cursor_wrapper,
+ models_from_cursor_wrapper_name,
):
+ models_from_cursor_wrapper = request.getfixturevalue(
+ models_from_cursor_wrapper_name
+ )
+
with connection.cursor() as cursor:
cursor.execute(
f'SELECT content, items, id, title, updated_at FROM "{mocked_model_varying_fields._meta.db_table}"',
@@ -151,17 +163,22 @@ def test_models_from_cursor_handles_field_order(
reason=django_31_skip_reason,
)
@pytest.mark.parametrize(
- "models_from_cursor_wrapper",
+ "models_from_cursor_wrapper_name",
[
- pytest.lazy_fixture("models_from_cursor_wrapper_multiple"),
- pytest.lazy_fixture("models_from_cursor_wrapper_single"),
+ "models_from_cursor_wrapper_multiple",
+ "models_from_cursor_wrapper_single",
],
)
def test_models_from_cursor_handles_partial_fields(
+ request,
mocked_model_varying_fields,
mocked_model_varying_fields_instance,
- models_from_cursor_wrapper,
+ models_from_cursor_wrapper_name,
):
+ models_from_cursor_wrapper = request.getfixturevalue(
+ models_from_cursor_wrapper_name
+ )
+
with connection.cursor() as cursor:
cursor.execute(
f'SELECT id FROM "{mocked_model_varying_fields._meta.db_table}"',
@@ -183,15 +200,19 @@ def test_models_from_cursor_handles_partial_fields(
reason=django_31_skip_reason,
)
@pytest.mark.parametrize(
- "models_from_cursor_wrapper",
+ "models_from_cursor_wrapper_name",
[
- pytest.lazy_fixture("models_from_cursor_wrapper_multiple"),
- pytest.lazy_fixture("models_from_cursor_wrapper_single"),
+ "models_from_cursor_wrapper_multiple",
+ "models_from_cursor_wrapper_single",
],
)
def test_models_from_cursor_handles_null(
- mocked_model_varying_fields, models_from_cursor_wrapper
+ request, mocked_model_varying_fields, models_from_cursor_wrapper_name
):
+ models_from_cursor_wrapper = request.getfixturevalue(
+ models_from_cursor_wrapper_name
+ )
+
instance = mocked_model_varying_fields.objects.create()
with connection.cursor() as cursor:
@@ -214,17 +235,22 @@ def test_models_from_cursor_handles_null(
reason=django_31_skip_reason,
)
@pytest.mark.parametrize(
- "models_from_cursor_wrapper",
+ "models_from_cursor_wrapper_name",
[
- pytest.lazy_fixture("models_from_cursor_wrapper_multiple"),
- pytest.lazy_fixture("models_from_cursor_wrapper_single"),
+ "models_from_cursor_wrapper_multiple",
+ "models_from_cursor_wrapper_single",
],
)
def test_models_from_cursor_foreign_key(
+ request,
mocked_model_single_field,
mocked_model_foreign_keys,
- models_from_cursor_wrapper,
+ models_from_cursor_wrapper_name,
):
+ models_from_cursor_wrapper = request.getfixturevalue(
+ models_from_cursor_wrapper_name
+ )
+
instance = mocked_model_foreign_keys.objects.create(
varying_fields=None,
single_field=mocked_model_single_field.objects.create(name="test"),
@@ -254,18 +280,23 @@ def test_models_from_cursor_foreign_key(
reason=django_31_skip_reason,
)
@pytest.mark.parametrize(
- "models_from_cursor_wrapper",
+ "models_from_cursor_wrapper_name",
[
- pytest.lazy_fixture("models_from_cursor_wrapper_multiple"),
- pytest.lazy_fixture("models_from_cursor_wrapper_single"),
+ "models_from_cursor_wrapper_multiple",
+ "models_from_cursor_wrapper_single",
],
)
def test_models_from_cursor_related_fields(
+ request,
mocked_model_varying_fields,
mocked_model_single_field,
mocked_model_foreign_keys,
- models_from_cursor_wrapper,
+ models_from_cursor_wrapper_name,
):
+ models_from_cursor_wrapper = request.getfixturevalue(
+ models_from_cursor_wrapper_name
+ )
+
instance = mocked_model_foreign_keys.objects.create(
varying_fields=mocked_model_varying_fields.objects.create(
title="test", updated_at=timezone.now()
@@ -321,21 +352,26 @@ def test_models_from_cursor_related_fields(
reason=django_31_skip_reason,
)
@pytest.mark.parametrize(
- "models_from_cursor_wrapper",
+ "models_from_cursor_wrapper_name",
[
- pytest.lazy_fixture("models_from_cursor_wrapper_multiple"),
- pytest.lazy_fixture("models_from_cursor_wrapper_single"),
+ "models_from_cursor_wrapper_multiple",
+ "models_from_cursor_wrapper_single",
],
)
@pytest.mark.parametrize(
"selected", [True, False], ids=["selected", "not_selected"]
)
def test_models_from_cursor_related_fields_optional(
+ request,
mocked_model_varying_fields,
mocked_model_foreign_keys,
- models_from_cursor_wrapper,
+ models_from_cursor_wrapper_name,
selected,
):
+ models_from_cursor_wrapper = request.getfixturevalue(
+ models_from_cursor_wrapper_name
+ )
+
instance = mocked_model_foreign_keys.objects.create(
varying_fields=mocked_model_varying_fields.objects.create(
title="test", updated_at=timezone.now()
diff --git a/tests/test_make_migrations.py b/tests/test_make_migrations.py
index 6f63a0d6..a843b6eb 100644
--- a/tests/test_make_migrations.py
+++ b/tests/test_make_migrations.py
@@ -208,7 +208,9 @@ def test_make_migration_field_operations_view_models(
def test_autodetect_fk_issue(fake_app, method):
"""Test whether Django can perform ForeignKey optimization.
- Fixes https://github.com/SectorLabs/django-postgres-extra/issues/123 for Django >= 2.2
+ Fixes
+ https://github.com/SectorLabs/django-postgres-extra/issues/123
+ for Django >= 2.2
"""
meta_options = {"app_label": fake_app.name}
partitioning_options = {"method": method, "key": "artist_id"}
diff --git a/tests/test_management_command_partition.py b/tests/test_management_command_partition.py
index 6e305fb9..c621cf15 100644
--- a/tests/test_management_command_partition.py
+++ b/tests/test_management_command_partition.py
@@ -6,6 +6,7 @@
from django.db import models
from django.test import override_settings
+from syrupy.extensions.json import JSONSnapshotExtension
from psqlextra.backend.introspection import (
PostgresIntrospectedPartitionTable,
@@ -20,6 +21,11 @@
from .fake_model import define_fake_partitioned_model
+@pytest.fixture
+def snapshot(snapshot):
+ return snapshot.use_extension(JSONSnapshotExtension)
+
+
@pytest.fixture
def fake_strategy():
strategy = create_autospec(PostgresPartitioningStrategy)
@@ -88,12 +94,12 @@ def _run(*args):
command.add_arguments(parser)
command.handle(**vars(parser.parse_args(args)))
- return capsys.readouterr()
+ return capsys.readouterr().out
return _run
-@pytest.mark.parametrize("args", ["-d", "--dry"])
+@pytest.mark.parametrize("args", ["-d", "--dry"], ids=["d", "dry"])
def test_management_command_partition_dry_run(
args, snapshot, run, fake_model, fake_partitioning_manager
):
@@ -101,7 +107,7 @@ def test_management_command_partition_dry_run(
create/delete partitions."""
config = fake_partitioning_manager.find_config_for_model(fake_model)
- snapshot.assert_match(run(args))
+ assert run(args) == snapshot()
config.strategy.createable_partition.create.assert_not_called()
config.strategy.createable_partition.delete.assert_not_called()
@@ -109,7 +115,7 @@ def test_management_command_partition_dry_run(
config.strategy.deleteable_partition.delete.assert_not_called()
-@pytest.mark.parametrize("args", ["-y", "--yes"])
+@pytest.mark.parametrize("args", ["-y", "--yes"], ids=["y", "yes"])
def test_management_command_partition_auto_confirm(
args, snapshot, run, fake_model, fake_partitioning_manager
):
@@ -117,7 +123,7 @@ def test_management_command_partition_auto_confirm(
creating/deleting partitions."""
config = fake_partitioning_manager.find_config_for_model(fake_model)
- snapshot.assert_match(run(args))
+ assert run(args) == snapshot
config.strategy.createable_partition.create.assert_called_once()
config.strategy.createable_partition.delete.assert_not_called()
@@ -125,7 +131,11 @@ def test_management_command_partition_auto_confirm(
config.strategy.deleteable_partition.delete.assert_called_once()
-@pytest.mark.parametrize("answer", ["y", "Y", "yes", "YES"])
+@pytest.mark.parametrize(
+ "answer",
+ ["y", "Y", "yes", "YES"],
+ ids=["y", "capital_y", "yes", "capital_yes"],
+)
def test_management_command_partition_confirm_yes(
answer, monkeypatch, snapshot, run, fake_model, fake_partitioning_manager
):
@@ -135,7 +145,7 @@ def test_management_command_partition_confirm_yes(
config = fake_partitioning_manager.find_config_for_model(fake_model)
monkeypatch.setattr("builtins.input", lambda _: answer)
- snapshot.assert_match(run())
+ assert run() == snapshot
config.strategy.createable_partition.create.assert_called_once()
config.strategy.createable_partition.delete.assert_not_called()
@@ -143,7 +153,11 @@ def test_management_command_partition_confirm_yes(
config.strategy.deleteable_partition.delete.assert_called_once()
-@pytest.mark.parametrize("answer", ["n", "N", "no", "No", "NO"])
+@pytest.mark.parametrize(
+ "answer",
+ ["n", "N", "no", "No", "NO"],
+ ids=["n", "capital_n", "no", "title_no", "capital_no"],
+)
def test_management_command_partition_confirm_no(
answer, monkeypatch, snapshot, run, fake_model, fake_partitioning_manager
):
@@ -153,7 +167,7 @@ def test_management_command_partition_confirm_no(
config = fake_partitioning_manager.find_config_for_model(fake_model)
monkeypatch.setattr("builtins.input", lambda _: answer)
- snapshot.assert_match(run())
+ assert run() == snapshot
config.strategy.createable_partition.create.assert_not_called()
config.strategy.createable_partition.delete.assert_not_called()
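
The snapshot assertions in this file move from the old `snapshot.assert_match(...)` API to syrupy, where the `snapshot` fixture is compared with plain `==` and a fixture override switches serialization to JSON files. A minimal sketch of the syrupy workflow, assuming syrupy is installed:

    import pytest
    from syrupy.extensions.json import JSONSnapshotExtension

    @pytest.fixture
    def snapshot(snapshot):
        # Shadow syrupy's default extension so snapshots are stored
        # as .json files instead of the default .ambr format.
        return snapshot.use_extension(JSONSnapshotExtension)

    def test_report_output(snapshot):
        output = {"created": 2, "deleted": 1}
        # Run pytest with --snapshot-update once to record the snapshot;
        # subsequent runs compare against the stored file.
        assert output == snapshot
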
diff --git a/tests/test_manager.py b/tests/test_manager.py
index 0fbe2a52..f68dd20a 100644
--- a/tests/test_manager.py
+++ b/tests/test_manager.py
@@ -34,10 +34,8 @@ def test_manager_backend_set(databases):
def test_manager_backend_not_set():
- """Tests whether creating a new instance of
- :see:PostgresManager fails if no database
- has `psqlextra.backend` configured
- as its ENGINE."""
+ """Tests whether creating a new instance of :see:PostgresManager fails if
+ no database has `psqlextra.backend` configured as its ENGINE."""
with override_settings(
DATABASES={"default": {"ENGINE": "django.db.backends.postgresql"}}
diff --git a/tests/test_on_conflict.py b/tests/test_on_conflict.py
index 02eda62f..b7cf0024 100644
--- a/tests/test_on_conflict.py
+++ b/tests/test_on_conflict.py
@@ -1,4 +1,5 @@
import django
+import freezegun
import pytest
from django.core.exceptions import SuspiciousOperation
@@ -130,13 +131,16 @@ def test_on_conflict_partial_get():
}
)
- obj1 = model.objects.on_conflict(
- ["title"], ConflictAction.UPDATE
- ).insert_and_get(title="beer", purpose="for-sale")
+ with freezegun.freeze_time("2020-1-1 12:00:00.0") as fg:
+ obj1 = model.objects.on_conflict(
+ ["title"], ConflictAction.UPDATE
+ ).insert_and_get(title="beer", purpose="for-sale")
- obj2 = model.objects.on_conflict(
- ["title"], ConflictAction.UPDATE
- ).insert_and_get(title="beer")
+ fg.tick()
+
+ obj2 = model.objects.on_conflict(
+ ["title"], ConflictAction.UPDATE
+ ).insert_and_get(title="beer")
obj2.refresh_from_db()
@@ -175,11 +179,11 @@ def test_on_conflict_outdated_model(conflict_action):
"""Tests whether insert properly handles fields that are in the database
but not on the model.
- This happens if somebody manually modified the database
- to add a column that is not present in the model.
+ This happens if somebody manually modified the database to add a
+ column that is not present in the model.
- This should be handled properly by ignoring the column
- returned by the database.
+ This should be handled properly by ignoring the column returned by
+ the database.
"""
model = get_fake_model(
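
The freezegun wrapper above makes the test deterministic, presumably so any timestamp columns are guaranteed to differ between the two upserts: without it, both inserts can land within the same clock tick. `fg.tick()` advances the frozen clock between them. The mechanism in isolation:

    from datetime import datetime

    import freezegun

    with freezegun.freeze_time("2020-1-1 12:00:00") as fg:
        first = datetime.now()

        # Advance the frozen clock; the default delta is one second.
        fg.tick()

        second = datetime.now()

    assert (second - first).total_seconds() == 1.0
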
diff --git a/tests/test_on_conflict_nothing.py b/tests/test_on_conflict_nothing.py
index 78c4c5f4..92e74dfc 100644
--- a/tests/test_on_conflict_nothing.py
+++ b/tests/test_on_conflict_nothing.py
@@ -170,17 +170,26 @@ def test_on_conflict_nothing_foreign_key_by_id():
assert obj1.data == "some data"
-def test_on_conflict_nothing_duplicate_rows():
+@pytest.mark.parametrize(
+ "rows,expected_row_count",
+ [
+ ([dict(amount=1), dict(amount=1)], 1),
+ (iter([dict(amount=1), dict(amount=1)]), 1),
+ ((row for row in [dict(amount=1), dict(amount=1)]), 1),
+ ([], 0),
+ (iter([]), 0),
+ ((row for row in []), 0),
+ ],
+)
+def test_on_conflict_nothing_duplicate_rows(rows, expected_row_count):
"""Tests whether duplicate rows are filtered out when doing a insert
NOTHING and no error is raised when the list of rows contains
duplicates."""
model = get_fake_model({"amount": models.IntegerField(unique=True)})
- rows = [dict(amount=1), dict(amount=1)]
+ inserted_rows = model.objects.on_conflict(
+ ["amount"], ConflictAction.NOTHING
+ ).bulk_insert(rows)
- (
- model.objects.on_conflict(
- ["amount"], ConflictAction.NOTHING
- ).bulk_insert(rows)
- )
+ assert len(inserted_rows) == expected_row_count
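
The parametrization is broader than it looks: lists can be traversed repeatedly, but iterators and generators are single-pass, so any deduplication inside `bulk_insert` must materialize the rows before inspecting them twice. A hypothetical sketch of that concern (not the library's actual implementation):

    def deduplicate_rows(rows):
        # `rows` may be a list, an iterator, or a generator. Iterators
        # can only be consumed once, so materialize them up front.
        materialized = list(rows)

        seen = set()
        unique = []
        for row in materialized:
            key = tuple(sorted(row.items()))
            if key not in seen:
                seen.add(key)
                unique.append(row)
        return unique

    assert deduplicate_rows(iter([dict(amount=1), dict(amount=1)])) == [{"amount": 1}]
    assert deduplicate_rows([]) == []
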
diff --git a/tests/test_partitioned_model.py b/tests/test_partitioned_model.py
index 89562730..55c66516 100644
--- a/tests/test_partitioned_model.py
+++ b/tests/test_partitioned_model.py
@@ -1,7 +1,13 @@
+import django
+import pytest
+
+from django.core.exceptions import ImproperlyConfigured
+from django.db import models
+
from psqlextra.models import PostgresPartitionedModel
from psqlextra.types import PostgresPartitioningMethod
-from .fake_model import define_fake_partitioned_model
+from .fake_model import define_fake_model, define_fake_partitioned_model
def test_partitioned_model_abstract():
@@ -70,3 +76,190 @@ def test_partitioned_model_key_option_none():
model = define_fake_partitioned_model(partitioning_options=dict(key=None))
assert model._partitioning_meta.key == []
+
+
+@pytest.mark.skipif(
+ django.VERSION < (5, 2),
+ reason="Django < 5.2 doesn't implement composite primary keys",
+)
+def test_partitioned_model_custom_composite_primary_key_with_auto_field():
+ model = define_fake_partitioned_model(
+ fields={
+ "auto_id": models.AutoField(primary_key=True),
+ "my_custom_pk": models.CompositePrimaryKey("auto_id", "timestamp"),
+ "timestamp": models.DateTimeField(),
+ },
+ partitioning_options=dict(key=["timestamp"]),
+ )
+
+ assert isinstance(model._meta.pk, models.CompositePrimaryKey)
+ assert model._meta.pk.name == "my_custom_pk"
+ assert model._meta.pk.columns == ("auto_id", "timestamp")
+
+
+@pytest.mark.skipif(
+ django.VERSION < (5, 2),
+ reason="Django < 5.2 doesn't implement composite primary keys",
+)
+def test_partitioned_model_custom_composite_primary_key_with_id_field():
+ model = define_fake_partitioned_model(
+ fields={
+ "id": models.IntegerField(),
+ "my_custom_pk": models.CompositePrimaryKey("id", "timestamp"),
+ "timestamp": models.DateTimeField(),
+ },
+ partitioning_options=dict(key=["timestamp"]),
+ )
+
+ assert isinstance(model._meta.pk, models.CompositePrimaryKey)
+ assert model._meta.pk.name == "my_custom_pk"
+ assert model._meta.pk.columns == ("id", "timestamp")
+
+
+@pytest.mark.skipif(
+ django.VERSION < (5, 2),
+ reason="Django < 5.2 doesn't implement composite primary keys",
+)
+def test_partitioned_model_custom_composite_primary_key_named_id():
+ model = define_fake_partitioned_model(
+ fields={
+ "other_field": models.TextField(),
+ "id": models.CompositePrimaryKey("other_field", "timestamp"),
+ "timestamp": models.DateTimeField(),
+ },
+ partitioning_options=dict(key=["timestamp"]),
+ )
+
+ assert isinstance(model._meta.pk, models.CompositePrimaryKey)
+ assert model._meta.pk.name == "id"
+ assert model._meta.pk.columns == ("other_field", "timestamp")
+
+
+@pytest.mark.skipif(
+ django.VERSION < (5, 2),
+ reason="Django < 5.2 doesn't implement composite primary keys",
+)
+def test_partitioned_model_field_named_pk_not_composite_not_primary():
+ with pytest.raises(ImproperlyConfigured):
+ define_fake_partitioned_model(
+ fields={
+ "pk": models.TextField(),
+ "id": models.CompositePrimaryKey("other_field", "timestamp"),
+ "timestamp": models.DateTimeField(),
+ },
+ partitioning_options=dict(key=["timestamp"]),
+ )
+
+
+@pytest.mark.skipif(
+ django.VERSION < (5, 2),
+ reason="Django < 5.2 doesn't implement composite primary keys",
+)
+def test_partitioned_model_field_named_pk_not_composite():
+ with pytest.raises(ImproperlyConfigured):
+ define_fake_partitioned_model(
+ fields={
+ "pk": models.AutoField(primary_key=True),
+ "timestamp": models.DateTimeField(),
+ },
+ partitioning_options=dict(key=["timestamp"]),
+ )
+
+
+@pytest.mark.skipif(
+ django.VERSION < (5, 2),
+ reason="Django < 5.2 doesn't implement composite primary keys",
+)
+def test_partitioned_model_field_multiple_pks():
+ with pytest.raises(ImproperlyConfigured):
+ define_fake_partitioned_model(
+ fields={
+ "id": models.AutoField(primary_key=True),
+ "another_pk": models.TextField(primary_key=True),
+ "timestamp": models.DateTimeField(),
+ "real_pk": models.CompositePrimaryKey("id", "timestamp"),
+ },
+ partitioning_options=dict(key=["timestamp"]),
+ )
+
+
+@pytest.mark.skipif(
+ django.VERSION < (5, 2),
+ reason="Django < 5.2 doesn't implement composite primary keys",
+)
+def test_partitioned_model_no_pk_defined():
+ model = define_fake_partitioned_model(
+ fields={
+ "timestamp": models.DateTimeField(),
+ },
+ partitioning_options=dict(key=["timestamp"]),
+ )
+
+ assert isinstance(model._meta.pk, models.CompositePrimaryKey)
+ assert model._meta.pk.name == "pk"
+ assert model._meta.pk.columns == ("id", "timestamp")
+
+ id_field = model._meta.get_field("id")
+ assert id_field.name == "id"
+ assert id_field.column == "id"
+ assert isinstance(id_field, models.AutoField)
+ assert id_field.primary_key is False
+
+
+@pytest.mark.skipif(
+ django.VERSION < (5, 2),
+ reason="Django < 5.2 doesn't implement composite primary keys",
+)
+def test_partitioned_model_composite_primary_key():
+ model = define_fake_partitioned_model(
+ fields={
+ "id": models.AutoField(primary_key=True),
+ "pk": models.CompositePrimaryKey("id", "timestamp"),
+ "timestamp": models.DateTimeField(),
+ },
+ partitioning_options=dict(key=["timestamp"]),
+ )
+
+ assert isinstance(model._meta.pk, models.CompositePrimaryKey)
+ assert model._meta.pk.name == "pk"
+ assert model._meta.pk.columns == ("id", "timestamp")
+
+
+@pytest.mark.skipif(
+ django.VERSION < (5, 2),
+ reason="Django < 5.2 doesn't implement composite primary keys",
+)
+def test_partitioned_model_composite_primary_key_foreign_key():
+ model = define_fake_partitioned_model(
+ fields={
+ "timestamp": models.DateTimeField(),
+ },
+ partitioning_options=dict(key=["timestamp"]),
+ )
+
+ define_fake_model(
+ fields={
+ "model": models.ForeignKey(model, on_delete=models.CASCADE),
+ },
+ )
+
+
+@pytest.mark.skipif(
+ django.VERSION < (5, 2),
+ reason="Django < 5.2 doesn't implement composite primary keys",
+)
+def test_partitioned_model_custom_composite_primary_key_foreign_key():
+ model = define_fake_partitioned_model(
+ fields={
+ "id": models.TextField(primary_key=True),
+ "timestamp": models.DateTimeField(),
+ "custom": models.CompositePrimaryKey("id", "timestamp"),
+ },
+ partitioning_options=dict(key=["timestamp"]),
+ )
+
+ define_fake_model(
+ fields={
+ "model": models.ForeignKey(model, on_delete=models.CASCADE),
+ },
+ )
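
These tests cover Django 5.2's `models.CompositePrimaryKey`, a natural fit for partitioned tables because PostgreSQL requires the partitioning key to be part of the primary key. A minimal sketch of declaring one on an ordinary, hypothetical model:

    from django.db import models

    class Measurement(models.Model):
        # The composite key references existing fields by name and owns
        # no database column of its own.
        pk = models.CompositePrimaryKey("sensor_id", "timestamp")

        sensor_id = models.IntegerField()
        timestamp = models.DateTimeField()

As `test_partitioned_model_no_pk_defined` above shows, when no primary key is declared at all, the library generates a composite key named `pk` spanning an auto-created `id` column plus the partitioning key.
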
diff --git a/tests/test_partitioning_manager.py b/tests/test_partitioning_manager.py
index 979bd1af..2a8cf6b9 100644
--- a/tests/test_partitioning_manager.py
+++ b/tests/test_partitioning_manager.py
@@ -1,7 +1,8 @@
import pytest
-from django.db import models
+from django.db import connection, models
+from psqlextra.backend.schema import PostgresSchemaEditor
from psqlextra.partitioning import (
PostgresPartitioningError,
PostgresPartitioningManager,
@@ -49,6 +50,39 @@ def test_partitioning_manager_find_config_for_model():
assert manager.find_config_for_model(model2) == config2
+def test_partitioning_manager_plan_specific_model_names():
+ """Tests that only planning for specific models works as expected."""
+
+ model1 = define_fake_partitioned_model(
+ {"timestamp": models.DateTimeField()}, {"key": ["timestamp"]}
+ )
+
+ config1 = partition_by_current_time(model1, years=1, count=3)
+
+ model2 = define_fake_partitioned_model(
+ {"timestamp": models.DateTimeField()}, {"key": ["timestamp"]}
+ )
+
+ config2 = partition_by_current_time(model2, months=1, count=2)
+
+ schema_editor = PostgresSchemaEditor(connection)
+ schema_editor.create_partitioned_model(model1)
+ schema_editor.create_partitioned_model(model2)
+
+ manager = PostgresPartitioningManager([config1, config2])
+
+ plan = manager.plan()
+ assert len(plan.model_plans) == 2
+
+ plan = manager.plan(model_names=[model2.__name__])
+ assert len(plan.model_plans) == 1
+ assert plan.model_plans[0].config.model == model2
+
+ # make sure casing is irrelevant
+ plan = manager.plan(model_names=[model2.__name__.lower()])
+ assert len(plan.model_plans) == 1
+
+
def test_partitioning_manager_plan_not_partitioned_model():
"""Tests that the auto partitioner does not try to auto partition for non-
partitioned models/tables."""
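
The new test exercises a `model_names` filter on `PostgresPartitioningManager.plan()`, matched case-insensitively. Usage, following the test above (the model name is hypothetical):

    manager = PostgresPartitioningManager([config1, config2])

    # Plan (and later apply) partition changes for a single model only;
    # the name comparison is case-insensitive.
    plan = manager.plan(model_names=["mymodel"])
    plan.apply()
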
diff --git a/tests/test_partitioning_time.py b/tests/test_partitioning_time.py
index 9f6b5bf1..0ab0daf6 100644
--- a/tests/test_partitioning_time.py
+++ b/tests/test_partitioning_time.py
@@ -254,6 +254,99 @@ def test_partitioning_time_daily_apply():
assert table.partitions[6].name == "2019_jun_04"
+@pytest.mark.postgres_version(lt=110000)
+def test_partitioning_time_hourly_apply():
+ """Tests whether automatically creating new partitions ahead hourly works
+ as expected."""
+
+ model = define_fake_partitioned_model(
+ {"timestamp": models.DateTimeField()}, {"key": ["timestamp"]}
+ )
+
+ schema_editor = connection.schema_editor()
+ schema_editor.create_partitioned_model(model)
+
+ # create partitions for the next 4 hours (including the current)
+ with freezegun.freeze_time("2019-1-23"):
+ manager = PostgresPartitioningManager(
+ [partition_by_current_time(model, hours=1, count=4)]
+ )
+ manager.plan().apply()
+
+ table = _get_partitioned_table(model)
+ assert len(table.partitions) == 4
+ assert table.partitions[0].name == "2019_jan_23_00:00:00"
+ assert table.partitions[1].name == "2019_jan_23_01:00:00"
+ assert table.partitions[2].name == "2019_jan_23_02:00:00"
+ assert table.partitions[3].name == "2019_jan_23_03:00:00"
+
+ # re-running with count=5 should create just one additional partition
+ with freezegun.freeze_time("2019-1-23"):
+ manager = PostgresPartitioningManager(
+ [partition_by_current_time(model, hours=1, count=5)]
+ )
+ manager.plan().apply()
+
+ table = _get_partitioned_table(model)
+ assert len(table.partitions) == 5
+ assert table.partitions[4].name == "2019_jan_23_04:00:00"
+
+ # it's june now, we want to partition two hours ahead
+ with freezegun.freeze_time("2019-06-03"):
+ manager = PostgresPartitioningManager(
+ [partition_by_current_time(model, hours=1, count=2)]
+ )
+ manager.plan().apply()
+
+ table = _get_partitioned_table(model)
+ assert len(table.partitions) == 7
+ assert table.partitions[5].name == "2019_jun_03_00:00:00"
+ assert table.partitions[6].name == "2019_jun_03_01:00:00"
+
+
+@pytest.mark.postgres_version(lt=110000)
+def test_partitioning_time_consistent_daily_apply():
+ """Ensures that automatic daily partition creation is consistent and
+ aligned when the partition size spans multiple days (e.g., days > 1)."""
+
+ model = define_fake_partitioned_model(
+ {"timestamp": models.DateTimeField()}, {"key": ["timestamp"]}
+ )
+
+ schema_editor = connection.schema_editor()
+ schema_editor.create_partitioned_model(model)
+
+ with freezegun.freeze_time("2025-06-20"):
+ manager = PostgresPartitioningManager(
+ [partition_by_current_time(model, days=5, count=3)]
+ )
+ manager.plan().apply()
+
+ table = _get_partitioned_table(model)
+ assert len(table.partitions) == 3
+
+ # Partitions are aligned based on the fixed anchor (Unix Epoch by default).
+ # 2025-06-20 falls within the partition starting at 2025-06-16,
+ # since it's the most recent multiple of 5 days since 1970-01-01.
+ assert table.partitions[0].name == "2025_jun_16"
+ assert table.partitions[1].name == "2025_jun_21"
+ assert table.partitions[2].name == "2025_jun_26"
+
+ # re-running it another day only creates the next one needed.
+ with freezegun.freeze_time("2025-06-22"):
+ manager = PostgresPartitioningManager(
+ [partition_by_current_time(model, days=5, count=3)]
+ )
+ manager.plan().apply()
+
+ table = _get_partitioned_table(model)
+ assert len(table.partitions) == 4
+ assert table.partitions[0].name == "2025_jun_16"
+ assert table.partitions[1].name == "2025_jun_21"
+ assert table.partitions[2].name == "2025_jun_26"
+ assert table.partitions[3].name == "2025_jul_01"
+
+
@pytest.mark.postgres_version(lt=110000)
def test_partitioning_time_monthly_apply_insert():
"""Tests whether automatically created monthly partitions line up
@@ -372,11 +465,52 @@ def test_partitioning_time_daily_apply_insert():
model.objects.create(timestamp=datetime.date(2019, 1, 10))
+@pytest.mark.postgres_version(lt=110000)
+def test_partitioning_time_hourly_apply_insert():
+ """Tests whether automatically created hourly partitions line up
+ perfectly."""
+
+ model = define_fake_partitioned_model(
+ {"timestamp": models.DateTimeField()}, {"key": ["timestamp"]}
+ )
+
+ schema_editor = connection.schema_editor()
+ schema_editor.create_partitioned_model(model)
+
+ # that's a monday
+ with freezegun.freeze_time("2019-1-07"):
+ manager = PostgresPartitioningManager(
+ [partition_by_current_time(model, hours=1, count=2)]
+ )
+ manager.plan().apply()
+
+ table = _get_partitioned_table(model)
+ assert len(table.partitions) == 2
+
+ model.objects.create(timestamp=datetime.datetime(2019, 1, 7, 0))
+ model.objects.create(timestamp=datetime.datetime(2019, 1, 7, 1))
+
+ with transaction.atomic():
+ with pytest.raises(IntegrityError):
+ model.objects.create(timestamp=datetime.datetime(2019, 1, 7, 2))
+ model.objects.create(timestamp=datetime.datetime(2019, 1, 7, 3))
+
+ with freezegun.freeze_time("2019-1-07"):
+ manager = PostgresPartitioningManager(
+ [partition_by_current_time(model, hours=1, count=4)]
+ )
+ manager.plan().apply()
+
+ model.objects.create(timestamp=datetime.datetime(2019, 1, 7, 2))
+ model.objects.create(timestamp=datetime.datetime(2019, 1, 7, 3))
+
+
@pytest.mark.postgres_version(lt=110000)
@pytest.mark.parametrize(
"kwargs,partition_names",
[
- (dict(days=2), ["2019_jan_01", "2019_jan_03"]),
+ (dict(days=2), ["2018_dec_31", "2019_jan_02"]),
+ (dict(hours=2), ["2019_jan_01_00:00:00", "2019_jan_01_02:00:00"]),
(dict(weeks=2), ["2018_week_53", "2019_week_02"]),
(dict(months=2), ["2019_jan", "2019_mar"]),
(dict(years=2), ["2019", "2021"]),
@@ -422,7 +556,7 @@ def test_partitioning_time_multiple(kwargs, partition_names):
dict(days=7, max_age=relativedelta(weeks=1)),
[
("2019-1-1", 6),
- ("2019-1-4", 6),
+ ("2019-1-4", 5),
("2019-1-8", 5),
("2019-1-15", 4),
("2019-1-16", 4),
@@ -450,7 +584,7 @@ def test_partitioning_time_delete(kwargs, timepoints):
with freezegun.freeze_time(timepoints[0][0]):
manager.plan().apply()
- for index, (dt, partition_count) in enumerate(timepoints):
+ for (dt, partition_count) in timepoints:
with freezegun.freeze_time(dt):
manager.plan(skip_create=True).apply()
diff --git a/tests/test_query.py b/tests/test_query.py
index 7db4beab..38d6b3cb 100644
--- a/tests/test_query.py
+++ b/tests/test_query.py
@@ -1,5 +1,8 @@
+from datetime import datetime, timezone
+
from django.db import connection, models
-from django.db.models import Case, F, Q, Value, When
+from django.db.models import Case, F, Min, Q, Value, When
+from django.db.models.functions.datetime import TruncSecond
from django.test.utils import CaptureQueriesContext, override_settings
from psqlextra.expressions import HStoreRef
@@ -96,6 +99,40 @@ def test_query_annotate_in_expression():
assert result.is_he_henk == "really henk"
+def test_query_annotate_group_by():
+ """Tests whether annotations with GROUP BY clauses are properly renamed
+ when the annotation overwrites a field name."""
+
+ model = get_fake_model(
+ {
+ "name": models.TextField(),
+ "timestamp": models.DateTimeField(null=False),
+ "value": models.IntegerField(),
+ }
+ )
+
+ timestamp = datetime(2024, 1, 1, 0, 0, 0, 0, tzinfo=timezone.utc)
+
+ model.objects.create(name="me", timestamp=timestamp, value=1)
+
+ result = (
+ model.objects.values("name")
+ .annotate(
+ timestamp=TruncSecond("timestamp", tzinfo=timezone.utc),
+ value=Min("value"),
+ )
+ .values_list(
+ "name",
+ "value",
+ "timestamp",
+ )
+ .order_by("name")
+ .first()
+ )
+
+ assert result == ("me", 1, timestamp)
+
+
def test_query_hstore_value_update_f_ref():
"""Tests whether F(..) expressions can be used in hstore values when
performing update queries."""
diff --git a/tests/test_schema.py b/tests/test_schema.py
index 7ae4a3f2..49802cf8 100644
--- a/tests/test_schema.py
+++ b/tests/test_schema.py
@@ -3,7 +3,6 @@
from django.core.exceptions import SuspiciousOperation, ValidationError
from django.db import InternalError, ProgrammingError, connection
-from psycopg2 import errorcodes
from psqlextra.error import extract_postgres_error_code
from psqlextra.schema import PostgresSchema, postgres_temporary_schema
@@ -93,7 +92,7 @@ def test_postgres_schema_delete_and_create():
schema = PostgresSchema.delete_and_create(schema.name)
pg_error = extract_postgres_error_code(exc_info.value)
- assert pg_error == errorcodes.DEPENDENT_OBJECTS_STILL_EXIST
+ assert pg_error == "2BP01" # DEPENDENT_OBJECTS_STILL_EXIST
# Verify that the schema and table still exist
assert _does_schema_exist(schema.name)
@@ -113,7 +112,7 @@ def test_postgres_schema_delete_and_create():
assert cursor.fetchone() == ("hello",)
pg_error = extract_postgres_error_code(exc_info.value)
- assert pg_error == errorcodes.UNDEFINED_TABLE
+ assert pg_error == "42P01" # UNDEFINED_TABLE
def test_postgres_schema_delete():
@@ -135,7 +134,7 @@ def test_postgres_schema_delete_not_empty():
schema.delete()
pg_error = extract_postgres_error_code(exc_info.value)
- assert pg_error == errorcodes.DEPENDENT_OBJECTS_STILL_EXIST
+ assert pg_error == "2BP01" # DEPENDENT_OBJECTS_STILL_EXIST
def test_postgres_schema_delete_cascade_not_empty():
@@ -177,7 +176,7 @@ def test_postgres_temporary_schema_not_empty():
)
pg_error = extract_postgres_error_code(exc_info.value)
- assert pg_error == errorcodes.DEPENDENT_OBJECTS_STILL_EXIST
+ assert pg_error == "2BP01" # DEPENDENT_OBJECTS_STILL_EXIST
def test_postgres_temporary_schema_not_empty_cascade():
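
Replacing the `psycopg2.errorcodes` constants with literal SQLSTATE strings removes the last psycopg2-only import from these tests, so they run unchanged under psycopg 3. The codes are defined by PostgreSQL itself, not by the driver; naming them locally keeps assertions readable:

    # SQLSTATE codes from the PostgreSQL documentation; these names are
    # local conveniences, not part of any driver API.
    DEPENDENT_OBJECTS_STILL_EXIST = "2BP01"
    UNDEFINED_TABLE = "42P01"

    pg_error = extract_postgres_error_code(exc_info.value)
    assert pg_error == DEPENDENT_OBJECTS_STILL_EXIST
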
diff --git a/tests/test_schema_editor_clone_model_to_schema.py b/tests/test_schema_editor_clone_model_to_schema.py
index c3d41917..c84e74cc 100644
--- a/tests/test_schema_editor_clone_model_to_schema.py
+++ b/tests/test_schema_editor_clone_model_to_schema.py
@@ -156,6 +156,33 @@ def fake_model_fk_target_2():
@pytest.fixture
def fake_model(fake_model_fk_target_1, fake_model_fk_target_2):
+ meta_options = {
+ "indexes": [
+ models.Index(fields=["age", "height"]),
+ models.Index(fields=["age"], name="age_index"),
+ GinIndex(fields=["nicknames"], name="nickname_index"),
+ ],
+ "constraints": [
+ models.UniqueConstraint(
+ fields=["first_name", "last_name"],
+ name="first_last_name_uniq",
+ ),
+ models.CheckConstraint(
+ check=Q(age__gt=0, height__gt=0), name="age_height_check"
+ ),
+ ],
+ "unique_together": (
+ "first_name",
+ "nicknames",
+ ),
+ }
+
+ if django.VERSION < (5, 1):
+ meta_options["index_together"] = (
+ "blob",
+ "age",
+ )
+
model = get_fake_model(
{
"first_name": models.TextField(null=True),
@@ -171,30 +198,7 @@ def fake_model(fake_model_fk_target_1, fake_model_fk_target_2):
fake_model_fk_target_2, null=True, on_delete=models.SET_NULL
),
},
- meta_options={
- "indexes": [
- models.Index(fields=["age", "height"]),
- models.Index(fields=["age"], name="age_index"),
- GinIndex(fields=["nicknames"], name="nickname_index"),
- ],
- "constraints": [
- models.UniqueConstraint(
- fields=["first_name", "last_name"],
- name="first_last_name_uniq",
- ),
- models.CheckConstraint(
- check=Q(age__gt=0, height__gt=0), name="age_height_check"
- ),
- ],
- "unique_together": (
- "first_name",
- "nicknames",
- ),
- "index_together": (
- "blob",
- "age",
- ),
- },
+ meta_options=meta_options,
)
yield model
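
Building `meta_options` ahead of time lets the fixture drop `Meta.index_together`, which was deprecated in Django 4.2 and removed in Django 5.1, without duplicating the rest of the options. The shape of the guard:

    import django
    from django.db import models

    meta_options = {
        "indexes": [models.Index(fields=["age", "height"])],
    }

    # index_together no longer exists on Django >= 5.1; use indexes there.
    if django.VERSION < (5, 1):
        meta_options["index_together"] = ("blob", "age")
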
diff --git a/tests/test_schema_editor_partitioning.py b/tests/test_schema_editor_partitioning.py
index c80efd52..d3602c0d 100644
--- a/tests/test_schema_editor_partitioning.py
+++ b/tests/test_schema_editor_partitioning.py
@@ -1,3 +1,4 @@
+import django
import pytest
from django.core.exceptions import ImproperlyConfigured
@@ -11,7 +12,14 @@
@pytest.mark.postgres_version(lt=110000)
-def test_schema_editor_create_delete_partitioned_model_range():
+@pytest.mark.parametrize(
+ "in_custom_tablespace",
+ [False, True],
+ ids=["default_tablespace", "custom_tablespace"],
+)
+def test_schema_editor_create_delete_partitioned_model_range(
+ custom_tablespace, in_custom_tablespace
+):
"""Tests whether creating a partitioned model and adding a list partition
to it using the :see:PostgresSchemaEditor works."""
@@ -21,6 +29,7 @@ def test_schema_editor_create_delete_partitioned_model_range():
model = define_fake_partitioned_model(
{"name": models.TextField(), "timestamp": models.DateTimeField()},
{"method": method, "key": key},
+ {"db_tablespace": custom_tablespace if in_custom_tablespace else None},
)
schema_editor = PostgresSchemaEditor(connection)
@@ -44,7 +53,14 @@ def test_schema_editor_create_delete_partitioned_model_range():
@pytest.mark.postgres_version(lt=110000)
-def test_schema_editor_create_delete_partitioned_model_list():
+@pytest.mark.parametrize(
+ "in_custom_tablespace",
+ [False, True],
+ ids=["default_tablespace", "custom_tablespace"],
+)
+def test_schema_editor_create_delete_partitioned_model_list(
+ custom_tablespace, in_custom_tablespace
+):
"""Tests whether creating a partitioned model and adding a range partition
to it using the :see:PostgresSchemaEditor works."""
@@ -54,6 +70,7 @@ def test_schema_editor_create_delete_partitioned_model_list():
model = define_fake_partitioned_model(
{"name": models.TextField(), "category": models.TextField()},
{"method": method, "key": key},
+ {"db_tablespace": custom_tablespace if in_custom_tablespace else None},
)
schema_editor = PostgresSchemaEditor(connection)
@@ -78,7 +95,14 @@ def test_schema_editor_create_delete_partitioned_model_list():
@pytest.mark.postgres_version(lt=110000)
@pytest.mark.parametrize("key", [["name"], ["id", "name"]])
-def test_schema_editor_create_delete_partitioned_model_hash(key):
+@pytest.mark.parametrize(
+ "in_custom_tablespace",
+ [False, True],
+ ids=["default_tablespace", "custom_tablespace"],
+)
+def test_schema_editor_create_delete_partitioned_model_hash(
+ key, custom_tablespace, in_custom_tablespace
+):
"""Tests whether creating a partitioned model and adding a hash partition
to it using the :see:PostgresSchemaEditor works."""
@@ -87,6 +111,7 @@ def test_schema_editor_create_delete_partitioned_model_hash(key):
model = define_fake_partitioned_model(
{"name": models.TextField()},
{"method": method, "key": key},
+ {"db_tablespace": custom_tablespace if in_custom_tablespace else None},
)
schema_editor = PostgresSchemaEditor(connection)
@@ -275,3 +300,114 @@ def test_schema_editor_add_default_partition(method, key):
schema_editor.delete_partition(model, "mypartition")
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert len(table.partitions) == 0
+
+
+@pytest.mark.postgres_version(lt=110000)
+@pytest.mark.parametrize(
+ "in_custom_tablespace",
+ [False, True],
+ ids=["default_tablespace", "custom_tablespace"],
+)
+def test_schema_editor_create_partitioned_custom_primary_key(
+ custom_tablespace, in_custom_tablespace
+):
+ model = define_fake_partitioned_model(
+ {
+ "custom_pk": models.IntegerField(primary_key=True),
+ "name": models.TextField(),
+ "timestamp": models.DateTimeField(),
+ },
+ {"method": PostgresPartitioningMethod.RANGE, "key": ["timestamp"]},
+ {"db_tablespace": custom_tablespace if in_custom_tablespace else None},
+ )
+
+ schema_editor = PostgresSchemaEditor(connection)
+ schema_editor.create_partitioned_model(model)
+
+ constraints = db_introspection.get_constraints(model._meta.db_table)
+ primary_key_constraint = next(
+ (
+ constraint
+ for constraint in constraints.values()
+ if constraint["primary_key"]
+ ),
+ None,
+ )
+
+ assert primary_key_constraint
+ assert primary_key_constraint["columns"] == ["custom_pk", "timestamp"]
+
+
+@pytest.mark.postgres_version(lt=110000)
+@pytest.mark.parametrize(
+ "in_custom_tablespace",
+ [False, True],
+ ids=["default_tablespace", "custom_tablespace"],
+)
+def test_schema_editor_create_partitioned_partitioning_key_is_primary_key(
+ custom_tablespace, in_custom_tablespace
+):
+ model = define_fake_partitioned_model(
+ {
+ "name": models.TextField(),
+ "timestamp": models.DateTimeField(primary_key=True),
+ },
+ {"method": PostgresPartitioningMethod.RANGE, "key": ["timestamp"]},
+ {"db_tablespace": custom_tablespace if in_custom_tablespace else None},
+ )
+
+ schema_editor = PostgresSchemaEditor(connection)
+ schema_editor.create_partitioned_model(model)
+
+ constraints = db_introspection.get_constraints(model._meta.db_table)
+ primary_key_constraint = next(
+ (
+ constraint
+ for constraint in constraints.values()
+ if constraint["primary_key"]
+ ),
+ None,
+ )
+
+ assert primary_key_constraint
+ assert primary_key_constraint["columns"] == ["timestamp"]
+
+
+@pytest.mark.skipif(
+ django.VERSION < (5, 2),
+ reason="Django < 5.2 doesn't implement composite primary keys",
+)
+@pytest.mark.postgres_version(lt=110000)
+@pytest.mark.parametrize(
+ "in_custom_tablespace",
+ [False, True],
+ ids=["default_tablespace", "custom_tablespace"],
+)
+def test_schema_editor_create_partitioned_custom_composite_primary_key(
+ custom_tablespace, in_custom_tablespace
+):
+ model = define_fake_partitioned_model(
+ {
+ "pk": models.CompositePrimaryKey("name", "timestamp"),
+ "name": models.TextField(),
+ "timestamp": models.DateTimeField(),
+ },
+ {"method": PostgresPartitioningMethod.RANGE, "key": ["timestamp"]},
+ {"db_tablespace": custom_tablespace if in_custom_tablespace else None},
+ )
+
+ schema_editor = PostgresSchemaEditor(connection)
+ schema_editor.create_partitioned_model(model)
+
+ constraints = db_introspection.get_constraints(model._meta.db_table)
+ primary_key_constraint = next(
+ (
+ constraint
+ for constraint in constraints.values()
+ if constraint["primary_key"]
+ ),
+ None,
+ )
+
+ assert primary_key_constraint
+ assert primary_key_constraint["columns"] == ["name", "timestamp"]
diff --git a/tests/test_schema_editor_view.py b/tests/test_schema_editor_view.py
index f7bf0308..ff20ef6a 100644
--- a/tests/test_schema_editor_view.py
+++ b/tests/test_schema_editor_view.py
@@ -1,6 +1,9 @@
-from django.db import connection, models
+import pytest
+
+from django.db import OperationalError, connection, models
from psqlextra.backend.schema import PostgresSchemaEditor
+from psqlextra.error import extract_postgres_error_code
from . import db_introspection
from .fake_model import (
@@ -103,6 +106,34 @@ def test_schema_editor_create_delete_materialized_view():
assert model._meta.db_table not in db_introspection.table_names(True)
+@pytest.mark.django_db(transaction=True)
+def test_schema_editor_create_materialized_view_without_data():
+ underlying_model = get_fake_model({"name": models.TextField()})
+
+ model = define_fake_materialized_view_model(
+ {"name": models.TextField()},
+ {"query": underlying_model.objects.filter(name="test1")},
+ )
+
+ underlying_model.objects.create(name="test1")
+ underlying_model.objects.create(name="test2")
+
+ schema_editor = PostgresSchemaEditor(connection)
+ schema_editor.create_materialized_view_model(model, with_data=False)
+
+ with pytest.raises(OperationalError) as exc_info:
+ list(model.objects.all())
+
+ pg_error = extract_postgres_error_code(exc_info.value)
+ assert pg_error == "55000" # OBJECT_NOT_IN_PREREQUISITE_STATE
+
+ model.refresh()
+
+ objs = list(model.objects.all())
+ assert len(objs) == 1
+ assert objs[0].name == "test1"
+
+
def test_schema_editor_replace_materialized_view():
"""Tests whether creating a materialized view and then replacing it with
another one (thus changing the backing query) works as expected."""
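
The new test relies on PostgreSQL's `WITH NO DATA` semantics: a materialized view created without data exists but is not scannable, and querying it fails with SQLSTATE 55000 (`object_not_in_prerequisite_state`) until the first refresh. The raw SQL equivalent, with hypothetical view and table names:

    from django.db import connection

    with connection.cursor() as cursor:
        cursor.execute(
            "CREATE MATERIALIZED VIEW myview AS SELECT * FROM mytable WITH NO DATA"
        )

        # SELECTing from myview at this point raises SQLSTATE 55000.

        cursor.execute("REFRESH MATERIALIZED VIEW myview")
        # The view is now populated and scannable.
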
diff --git a/tox.ini b/tox.ini
index 70a0e8ce..a5a33ed8 100644
--- a/tox.ini
+++ b/tox.ini
@@ -3,7 +3,7 @@ envlist =
{py36,py37}-dj{20,21,22,30,31,32}-psycopg{28,29}
{py38,py39,py310}-dj{21,22,30,31,32,40}-psycopg{28,29}
{py38,py39,py310,py311}-dj{41}-psycopg{28,29}
- {py310,py311}-dj{42,50}-psycopg{28,29,31}
+ {py310,py311,py312,py313}-dj{42,50,51,52}-psycopg{28,29,31,32}
[testenv]
deps =
@@ -17,11 +17,15 @@ deps =
dj41: Django~=4.1.0
dj42: Django~=4.2.0
dj50: Django~=5.0.1
+ dj51: Django~=5.1.0
+ dj52: Django~=5.2.0
psycopg28: psycopg2[binary]~=2.8
psycopg29: psycopg2[binary]~=2.9
psycopg31: psycopg[binary]~=3.1
+ psycopg32: psycopg[binary]~=3.2
+ .[dev]
.[test]
setenv =
DJANGO_SETTINGS_MODULE=settings
-passenv = DATABASE_URL
-commands = python setup.py test
+passenv = DATABASE_URL, DATABASE_IN_CONTAINER
+commands = poe test
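
With the expanded envlist, a single matrix cell can be run locally with, for example, `tox -e py313-dj52-psycopg32`. Each environment now installs the `dev` extra (which provides `poe`) alongside `test`, passes `DATABASE_IN_CONTAINER` through from the host, and invokes the suite via `poe test` instead of the removed `python setup.py test`.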