From 13b5672cfe1aed0ec10dcb0b3f4b382d22719de7 Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Tue, 18 Jun 2024 12:24:46 +0200 Subject: [PATCH 01/55] Fix automatic hstore extension creation not working on Django 4.2 or newer The following change broke the auto setup: https://github.com/django/django/commit/d3e746ace5eeea07216da97d9c3801f2fdc43223 This breaks because the call to `pscygop2.extras.register_hstore` is now conditional. Before, it would be called multiple times with empty OIDS, when eventually our auto registration would kick in and psycopg2 would fetch the OIDs itself. --- psqlextra/backend/base.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/psqlextra/backend/base.py b/psqlextra/backend/base.py index 5c788a05..c8ae73c5 100644 --- a/psqlextra/backend/base.py +++ b/psqlextra/backend/base.py @@ -3,6 +3,10 @@ from typing import TYPE_CHECKING from django.conf import settings +from django.contrib.postgres.signals import ( + get_hstore_oids, + register_type_handlers, +) from django.db import ProgrammingError from . import base_impl @@ -94,3 +98,22 @@ def prepare_database(self): "or add the extension manually.", exc_info=True, ) + return + + # Clear old (non-existent), stale oids. + get_hstore_oids.cache_clear() + + # Verify that we (and Django) can find the OIDs + # for hstore. + oids, _ = get_hstore_oids(self.alias) + if not oids: + logger.warning( + '"hstore" extension was created, but we cannot find the oids' + "in the database. Something went wrong.", + ) + return + + # We must trigger Django into registering the type handlers now + # so that any subsequent code can properly use the newly + # registered types. 
+ register_type_handlers(self) From 200f2b9e66bdfd66bfaf63590b9f8e7ccd7ebe74 Mon Sep 17 00:00:00 2001 From: seroy Date: Mon, 20 May 2024 13:06:22 +0400 Subject: [PATCH 02/55] Fix `StopIteration` in deduplication rows code when `conflict_action == ConflictAction.NOTHING` and rows parameter is iterator or generator --- psqlextra/query.py | 14 +++++++++++--- tests/test_on_conflict_nothing.py | 17 ++++++++++++----- 2 files changed, 23 insertions(+), 8 deletions(-) diff --git a/psqlextra/query.py b/psqlextra/query.py index 65a20c50..c75ce9c4 100644 --- a/psqlextra/query.py +++ b/psqlextra/query.py @@ -174,11 +174,19 @@ def bulk_insert( A list of either the dicts of the rows inserted, including the pk or the models of the rows inserted with defaults for any fields not specified """ + if rows is None: + return [] + + def peek(iterable): + try: + first = next(iterable) + except StopIteration: + return None + return list(chain([first], iterable)) - def is_empty(r): - return all([False for _ in r]) + rows = peek(iter(rows)) - if not rows or is_empty(rows): + if not rows: return [] if not self.conflict_target and not self.conflict_action: diff --git a/tests/test_on_conflict_nothing.py b/tests/test_on_conflict_nothing.py index 78c4c5f4..eb3b8a3c 100644 --- a/tests/test_on_conflict_nothing.py +++ b/tests/test_on_conflict_nothing.py @@ -179,8 +179,15 @@ def test_on_conflict_nothing_duplicate_rows(): rows = [dict(amount=1), dict(amount=1)] - ( - model.objects.on_conflict( - ["amount"], ConflictAction.NOTHING - ).bulk_insert(rows) - ) + inserted_rows = model.objects.on_conflict( + ["amount"], ConflictAction.NOTHING + ).bulk_insert(rows) + + assert len(inserted_rows) == 1 + + rows = iter([dict(amount=2), dict(amount=2)]) + inserted_rows = model.objects.on_conflict( + ["amount"], ConflictAction.NOTHING + ).bulk_insert(rows) + + assert len(inserted_rows) == 1 From 422e91f0a17c467fa9df7db8a622535726b6fd1c Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Tue, 18 Jun 2024 13:36:49 +0200 
Subject: [PATCH 03/55] Add additional tests for ON CONFLICT DO NOTHING duplicate rows filtering --- psqlextra/query.py | 17 +++++++++-------- tests/test_on_conflict_nothing.py | 24 +++++++++++++----------- 2 files changed, 22 insertions(+), 19 deletions(-) diff --git a/psqlextra/query.py b/psqlextra/query.py index c75ce9c4..6a86f18e 100644 --- a/psqlextra/query.py +++ b/psqlextra/query.py @@ -41,6 +41,14 @@ QuerySetBase = QuerySet +def peek_iterator(iterable): + try: + first = next(iterable) + except StopIteration: + return None + return list(chain([first], iterable)) + + class PostgresQuerySet(QuerySetBase, Generic[TModel]): """Adds support for PostgreSQL specifics.""" @@ -177,14 +185,7 @@ def bulk_insert( if rows is None: return [] - def peek(iterable): - try: - first = next(iterable) - except StopIteration: - return None - return list(chain([first], iterable)) - - rows = peek(iter(rows)) + rows = peek_iterator(iter(rows)) if not rows: return [] diff --git a/tests/test_on_conflict_nothing.py b/tests/test_on_conflict_nothing.py index eb3b8a3c..92e74dfc 100644 --- a/tests/test_on_conflict_nothing.py +++ b/tests/test_on_conflict_nothing.py @@ -170,24 +170,26 @@ def test_on_conflict_nothing_foreign_key_by_id(): assert obj1.data == "some data" -def test_on_conflict_nothing_duplicate_rows(): +@pytest.mark.parametrize( + "rows,expected_row_count", + [ + ([dict(amount=1), dict(amount=1)], 1), + (iter([dict(amount=1), dict(amount=1)]), 1), + ((row for row in [dict(amount=1), dict(amount=1)]), 1), + ([], 0), + (iter([]), 0), + ((row for row in []), 0), + ], +) +def test_on_conflict_nothing_duplicate_rows(rows, expected_row_count): """Tests whether duplicate rows are filtered out when doing a insert NOTHING and no error is raised when the list of rows contains duplicates.""" model = get_fake_model({"amount": models.IntegerField(unique=True)}) - rows = [dict(amount=1), dict(amount=1)] - - inserted_rows = model.objects.on_conflict( - ["amount"], ConflictAction.NOTHING - 
).bulk_insert(rows) - - assert len(inserted_rows) == 1 - - rows = iter([dict(amount=2), dict(amount=2)]) inserted_rows = model.objects.on_conflict( ["amount"], ConflictAction.NOTHING ).bulk_insert(rows) - assert len(inserted_rows) == 1 + assert len(inserted_rows) == expected_row_count From 6cb8b4f15d595b00a2fcd7e21990c004645ae0e7 Mon Sep 17 00:00:00 2001 From: Tyler Kennedy Date: Mon, 3 Jun 2024 08:52:23 -0400 Subject: [PATCH 04/55] Allow tuples as a valid type for `meta.key` Most modern linters (like `ruff`) will complain about `meta.key` being a list, as it's a mutable class variable. Allowing a tuple here appears to work fine and removes the need to override linter rules for `meta.key`. --- psqlextra/backend/schema.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/psqlextra/backend/schema.py b/psqlextra/backend/schema.py index 28e9211a..31a23414 100644 --- a/psqlextra/backend/schema.py +++ b/psqlextra/backend/schema.py @@ -1045,7 +1045,7 @@ def _partitioning_properties_for_model(model: Type[Model]): % (model.__name__, meta.method) ) - if not isinstance(meta.key, list): + if not isinstance(meta.key, (list, tuple)): raise ImproperlyConfigured( ( "Model '%s' is not properly configured to be partitioned." 
From 7aa6923964ec8352bc871c02fcbffc4b688262b9 Mon Sep 17 00:00:00 2001 From: Filippo Campi Date: Wed, 5 Jun 2024 11:45:40 +0200 Subject: [PATCH 05/55] Support of restart_identity on truncate operation --- docs/source/deletion.rst | 25 +++++++++++++++++++++++++ psqlextra/manager/manager.py | 12 ++++++++++-- 2 files changed, 35 insertions(+), 2 deletions(-) diff --git a/docs/source/deletion.rst b/docs/source/deletion.rst index c27cdcb6..9308594c 100644 --- a/docs/source/deletion.rst +++ b/docs/source/deletion.rst @@ -48,3 +48,28 @@ By default, Postgres will raise an error if any other table is referencing one o MyModel.objects.truncate(cascade=True) print(MyModel1.objects.count()) # zero records left print(MyModel2.objects.count()) # zero records left + + +Restart identity +**************** + +If specified, any sequences on the table will be restarted. + +.. code-block:: python + + from django.db import models + from psqlextra.models import PostgresModel + + class MyModel(PostgresModel): + pass + + mymodel = MyModel.objects.create() + assert mymodel.id == 1 + + MyModel.objects.truncate(restart_identity=True) # table is empty after this + print(MyModel.objects.count()) # zero records left + + # Create a new row, it should get ID 1 again because + # the sequence got restarted. + mymodel = MyModel.objects.create() + assert mymodel.id == 1 diff --git a/psqlextra/manager/manager.py b/psqlextra/manager/manager.py index 0931b38a..ee1eb58b 100644 --- a/psqlextra/manager/manager.py +++ b/psqlextra/manager/manager.py @@ -37,7 +37,10 @@ def __init__(self, *args, **kwargs): ) def truncate( - self, cascade: bool = False, using: Optional[str] = None + self, + cascade: bool = False, + restart_identity: bool = False, + using: Optional[str] = None, ) -> None: """Truncates this model/table using the TRUNCATE statement. @@ -51,14 +54,19 @@ def truncate( False, an error will be raised if there are rows in other tables referencing the rows you're trying to delete. 
+ restart_identity: + Automatically restart sequences owned by + columns of the truncated table(s). """ connection = connections[using or "default"] table_name = connection.ops.quote_name(self.model._meta.db_table) with connection.cursor() as cursor: - sql = "TRUNCATE TABLE %s" % table_name + sql = f"TRUNCATE TABLE {table_name}" if cascade: sql += " CASCADE" + if restart_identity: + sql += " RESTART IDENTITY" cursor.execute(sql) From 1fecd9bb10d2b276aa69a1ecf20c1fff4d52c5e6 Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Wed, 7 Feb 2024 08:56:02 +0100 Subject: [PATCH 06/55] Upgrade mypy and django-stubs to the latest version --- psqlextra/introspect/models.py | 8 +++++--- setup.py | 2 +- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/psqlextra/introspect/models.py b/psqlextra/introspect/models.py index 61a478dd..e160bcaf 100644 --- a/psqlextra/introspect/models.py +++ b/psqlextra/introspect/models.py @@ -7,6 +7,7 @@ Optional, Type, TypeVar, + Union, cast, ) @@ -115,9 +116,10 @@ def models_from_cursor( ) for index, related_field_name in enumerate(related_fields): - related_model = model._meta.get_field( - related_field_name - ).related_model + related_model = cast( + Union[Type[Model], None], + model._meta.get_field(related_field_name).related_model, + ) if not related_model: continue diff --git a/setup.py b/setup.py index c3431e27..918beb87 100644 --- a/setup.py +++ b/setup.py @@ -97,7 +97,7 @@ def run(self): "docformatter==1.4", "mypy==1.2.0; python_version > '3.6'", "mypy==0.971; python_version <= '3.6'", - "django-stubs==1.16.0; python_version > '3.6'", + "django-stubs==4.2.7; python_version > '3.6'", "django-stubs==1.9.0; python_version <= '3.6'", "typing-extensions==4.5.0; python_version > '3.6'", "typing-extensions==4.1.0; python_version <= '3.6'", From cf87e7a5276ce3cb89017fd00d8f7cb15993b25e Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Wed, 7 Feb 2024 08:56:06 +0100 Subject: [PATCH 07/55] Fix renamed annotations relying on group by clauses 
not working Co-authored-by: Hannes Engelhardt --- psqlextra/sql.py | 10 ++++++++++ settings.py | 3 +++ tests/test_query.py | 39 ++++++++++++++++++++++++++++++++++++++- 3 files changed, 51 insertions(+), 1 deletion(-) diff --git a/psqlextra/sql.py b/psqlextra/sql.py index b2655088..cf12d8c1 100644 --- a/psqlextra/sql.py +++ b/psqlextra/sql.py @@ -1,4 +1,5 @@ from collections import OrderedDict +from collections.abc import Iterable from typing import Any, Dict, List, Optional, Tuple, Union import django @@ -7,6 +8,7 @@ from django.db import connections, models from django.db.models import Expression, sql from django.db.models.constants import LOOKUP_SEP +from django.db.models.expressions import Ref from .compiler import PostgresInsertOnConflictCompiler from .compiler import SQLUpdateCompiler as PostgresUpdateCompiler @@ -74,6 +76,14 @@ def rename_annotations(self, annotations) -> None: self.annotation_select_mask.remove(old_name) self.annotation_select_mask.append(new_name) + if isinstance(self.group_by, Iterable): + for statement in self.group_by: + if not isinstance(statement, Ref): + continue + + if statement.refs in annotations: # type: ignore[attr-defined] + statement.refs = annotations[statement.refs] # type: ignore[attr-defined] + self.annotations.clear() self.annotations.update(new_annotations) diff --git a/settings.py b/settings.py index ed0d0f98..a78eed42 100644 --- a/settings.py +++ b/settings.py @@ -24,3 +24,6 @@ 'psqlextra', 'tests', ) + +USE_TZ = True +TIME_ZONE = 'UTC' diff --git a/tests/test_query.py b/tests/test_query.py index 7db4beab..38d6b3cb 100644 --- a/tests/test_query.py +++ b/tests/test_query.py @@ -1,5 +1,8 @@ +from datetime import datetime, timezone + from django.db import connection, models -from django.db.models import Case, F, Q, Value, When +from django.db.models import Case, F, Min, Q, Value, When +from django.db.models.functions.datetime import TruncSecond from django.test.utils import CaptureQueriesContext, override_settings from 
psqlextra.expressions import HStoreRef @@ -96,6 +99,40 @@ def test_query_annotate_in_expression(): assert result.is_he_henk == "really henk" +def test_query_annotate_group_by(): + """Tests whether annotations with GROUP BY clauses are properly renamed + when the annotation overwrites a field name.""" + + model = get_fake_model( + { + "name": models.TextField(), + "timestamp": models.DateTimeField(null=False), + "value": models.IntegerField(), + } + ) + + timestamp = datetime(2024, 1, 1, 0, 0, 0, 0, tzinfo=timezone.utc) + + model.objects.create(name="me", timestamp=timestamp, value=1) + + result = ( + model.objects.values("name") + .annotate( + timestamp=TruncSecond("timestamp", tzinfo=timezone.utc), + value=Min("value"), + ) + .values_list( + "name", + "value", + "timestamp", + ) + .order_by("name") + .first() + ) + + assert result == ("me", 1, timestamp) + + def test_query_hstore_value_update_f_ref(): """Tests whether F(..) expressions can be used in hstore values when performing update queries.""" From 7d582d92ee5eb774a8306994e319777e3a709d1a Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Tue, 18 Jun 2024 15:04:33 +0200 Subject: [PATCH 08/55] Work around psycopg2.9 + django 3.0 compatibility issue --- settings.py | 2 +- tests/psqlextra_test_backend/__init__.py | 0 tests/psqlextra_test_backend/base.py | 23 +++++++++++++++++++++++ 3 files changed, 24 insertions(+), 1 deletion(-) create mode 100644 tests/psqlextra_test_backend/__init__.py create mode 100644 tests/psqlextra_test_backend/base.py diff --git a/settings.py b/settings.py index a78eed42..7266ccb4 100644 --- a/settings.py +++ b/settings.py @@ -11,7 +11,7 @@ 'default': dj_database_url.config(default='postgres:///psqlextra'), } -DATABASES['default']['ENGINE'] = 'psqlextra.backend' +DATABASES['default']['ENGINE'] = 'tests.psqlextra_test_backend' LANGUAGE_CODE = 'en' LANGUAGES = ( diff --git a/tests/psqlextra_test_backend/__init__.py b/tests/psqlextra_test_backend/__init__.py new file mode 100644 index 
00000000..e69de29b diff --git a/tests/psqlextra_test_backend/base.py b/tests/psqlextra_test_backend/base.py new file mode 100644 index 00000000..0961a2bc --- /dev/null +++ b/tests/psqlextra_test_backend/base.py @@ -0,0 +1,23 @@ +from datetime import timezone + +import django + +from django.conf import settings + +from psqlextra.backend.base import DatabaseWrapper as PSQLExtraDatabaseWrapper + + +class DatabaseWrapper(PSQLExtraDatabaseWrapper): + # Works around the compatibility issue of Django <3.0 and psycopg2.9 + # in combination with USE_TZ + # + # See: https://github.com/psycopg/psycopg2/issues/1293#issuecomment-862835147 + if django.VERSION < (3, 1): + + def create_cursor(self, name=None): + cursor = super().create_cursor(name) + cursor.tzinfo_factory = ( + lambda offset: timezone.utc if settings.USE_TZ else None + ) + + return cursor From 8760a67a6ad8790de54b90f1edf11a60ae2da863 Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Sun, 8 Jun 2025 09:44:47 +0200 Subject: [PATCH 09/55] Fix `rename_annotation` for Django 5.2 and newer Was broken by this change: https://github.com/django/django/commit/65ad4ade74dc9208b9d686a451cd6045df0c9c3a --- psqlextra/sql.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/psqlextra/sql.py b/psqlextra/sql.py index cf12d8c1..750287c5 100644 --- a/psqlextra/sql.py +++ b/psqlextra/sql.py @@ -70,8 +70,12 @@ def rename_annotations(self, annotations) -> None: # and a list in Django 5.x and newer. 
# https://github.com/django/django/commit/d6b6e5d0fd4e6b6d0183b4cf6e4bd4f9afc7bf67 if isinstance(self.annotation_select_mask, set): - self.annotation_select_mask.discard(old_name) - self.annotation_select_mask.add(new_name) + updated_annotation_select_mask = set( + self.annotation_select_mask + ) + updated_annotation_select_mask.discard(old_name) + updated_annotation_select_mask.add(new_name) + self.set_annotation_mask(updated_annotation_select_mask) elif isinstance(self.annotation_select_mask, list): self.annotation_select_mask.remove(old_name) self.annotation_select_mask.append(new_name) From 70c0ee27b4a4e0801726f82f5154577693505a8d Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Sun, 8 Jun 2025 09:48:45 +0200 Subject: [PATCH 10/55] Account for `index_together` removal in Django 5.2 --- psqlextra/backend/schema.py | 9 ++-- ...est_schema_editor_clone_model_to_schema.py | 52 ++++++++++--------- 2 files changed, 33 insertions(+), 28 deletions(-) diff --git a/psqlextra/backend/schema.py b/psqlextra/backend/schema.py index 31a23414..81acbbb3 100644 --- a/psqlextra/backend/schema.py +++ b/psqlextra/backend/schema.py @@ -250,10 +250,11 @@ def clone_model_constraints_and_indexes_to_schema( model, tuple(), model._meta.unique_together ) - if model._meta.index_together: - self.alter_index_together( - model, tuple(), model._meta.index_together - ) + if django.VERSION < (5, 2): + if model._meta.index_together: + self.alter_index_together( + model, tuple(), model._meta.index_together + ) for field in model._meta.local_concrete_fields: # type: ignore[attr-defined] # Django creates primary keys later added to the model with diff --git a/tests/test_schema_editor_clone_model_to_schema.py b/tests/test_schema_editor_clone_model_to_schema.py index c3d41917..ef919bcd 100644 --- a/tests/test_schema_editor_clone_model_to_schema.py +++ b/tests/test_schema_editor_clone_model_to_schema.py @@ -156,6 +156,33 @@ def fake_model_fk_target_2(): @pytest.fixture def 
fake_model(fake_model_fk_target_1, fake_model_fk_target_2): + meta_options = { + "indexes": [ + models.Index(fields=["age", "height"]), + models.Index(fields=["age"], name="age_index"), + GinIndex(fields=["nicknames"], name="nickname_index"), + ], + "constraints": [ + models.UniqueConstraint( + fields=["first_name", "last_name"], + name="first_last_name_uniq", + ), + models.CheckConstraint( + check=Q(age__gt=0, height__gt=0), name="age_height_check" + ), + ], + "unique_together": ( + "first_name", + "nicknames", + ), + } + + if django.VERSION < (5, 2): + meta_options["index_together"] = ( + "blob", + "age", + ) + model = get_fake_model( { "first_name": models.TextField(null=True), @@ -171,30 +198,7 @@ def fake_model(fake_model_fk_target_1, fake_model_fk_target_2): fake_model_fk_target_2, null=True, on_delete=models.SET_NULL ), }, - meta_options={ - "indexes": [ - models.Index(fields=["age", "height"]), - models.Index(fields=["age"], name="age_index"), - GinIndex(fields=["nicknames"], name="nickname_index"), - ], - "constraints": [ - models.UniqueConstraint( - fields=["first_name", "last_name"], - name="first_last_name_uniq", - ), - models.CheckConstraint( - check=Q(age__gt=0, height__gt=0), name="age_height_check" - ), - ], - "unique_together": ( - "first_name", - "nicknames", - ), - "index_together": ( - "blob", - "age", - ), - }, + meta_options=meta_options, ) yield model From 4d70da54993b78b5bfcadcf63d0f4de8169664fe Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Sun, 8 Jun 2025 13:33:56 +0200 Subject: [PATCH 11/55] Fix an issue whether detection of update/insert fields isn't reliable This is because `pre_save` mutates the model instance. Since we call it twice (once with `is_insert=True` and once with `is_insert=False`), the second call might have a different result. This problem manifests with `DateTimeField(auto_now=True)`. It'll always be treated as an insert field, even though it it should be treated as both. 
--- psqlextra/query.py | 30 +++++++++++++++++++----------- tests/test_on_conflict.py | 16 ++++++++++------ 2 files changed, 29 insertions(+), 17 deletions(-) diff --git a/psqlextra/query.py b/psqlextra/query.py index 6a86f18e..5dd1cdb3 100644 --- a/psqlextra/query.py +++ b/psqlextra/query.py @@ -536,19 +536,22 @@ def _build_insert_compiler( compiler = query.get_compiler(using) return compiler - def _is_magical_field(self, model_instance, field, is_insert: bool): - """Verifies whether this field is gonna modify something on its own. - - "Magical" means that a field modifies the field value - during the pre_save. + def _pre_save_field( + self, + model_instance: models.Model, + field: models.Field, + *, + is_insert: bool + ): + """Pre-saves the model and gets whether the :see:pre_save method makes + any modifications to the field value. Arguments: model_instance: The model instance the field is defined on. field: - The field to get of whether the field is - magical. + The field to pre-save. is_insert: Pretend whether this is an insert? @@ -594,11 +597,12 @@ def _get_upsert_fields(self, kwargs): and include them in the list of insert/update fields. 
""" - model_instance = self.model(**kwargs) insert_fields = [] update_values = {} - for field in model_instance._meta.local_concrete_fields: + insert_model_instance = self.model(**kwargs) + update_model_instance = self.model(**kwargs) + for field in insert_model_instance._meta.local_concrete_fields: has_default = field.default != NOT_PROVIDED if field.name in kwargs or field.column in kwargs: insert_fields.append(field) @@ -616,10 +620,14 @@ def _get_upsert_fields(self, kwargs): update_values[field.name] = ExcludedCol(field) continue - if self._is_magical_field(model_instance, field, is_insert=True): + if self._pre_save_field( + insert_model_instance, field, is_insert=True + ): insert_fields.append(field) - if self._is_magical_field(model_instance, field, is_insert=False): + if self._pre_save_field( + update_model_instance, field, is_insert=False + ): update_values[field.name] = ExcludedCol(field) return insert_fields, update_values diff --git a/tests/test_on_conflict.py b/tests/test_on_conflict.py index 02eda62f..7f3f5ab8 100644 --- a/tests/test_on_conflict.py +++ b/tests/test_on_conflict.py @@ -1,4 +1,5 @@ import django +import freezegun import pytest from django.core.exceptions import SuspiciousOperation @@ -130,13 +131,16 @@ def test_on_conflict_partial_get(): } ) - obj1 = model.objects.on_conflict( - ["title"], ConflictAction.UPDATE - ).insert_and_get(title="beer", purpose="for-sale") + with freezegun.freeze_time("2020-1-1 12:00:00.0") as fg: + obj1 = model.objects.on_conflict( + ["title"], ConflictAction.UPDATE + ).insert_and_get(title="beer", purpose="for-sale") - obj2 = model.objects.on_conflict( - ["title"], ConflictAction.UPDATE - ).insert_and_get(title="beer") + fg.tick() + + obj2 = model.objects.on_conflict( + ["title"], ConflictAction.UPDATE + ).insert_and_get(title="beer") obj2.refresh_from_db() From b0883006b5e7ea95e8e86c98be2e11269127aa48 Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Sun, 8 Jun 2025 13:42:08 +0200 Subject: [PATCH 12/55] Consistent 
ordering for partition names when introspected --- psqlextra/backend/introspection.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/psqlextra/backend/introspection.py b/psqlextra/backend/introspection.py index bd775779..a9106bdc 100644 --- a/psqlextra/backend/introspection.py +++ b/psqlextra/backend/introspection.py @@ -93,6 +93,8 @@ def get_partitioned_tables( pg_class ON pg_class.oid = pg_partitioned_table.partrelid + ORDER BY + pg_partitioned_table.partrelid """ ) @@ -151,6 +153,9 @@ def get_partitions( pg_description.objoid = child.oid WHERE parent.relname = %s + ORDER BY + child.oid, + child.relname """ cursor.execute(sql, (table_name,)) @@ -196,6 +201,9 @@ def get_partition_key(self, cursor, table_name: str) -> List[str]: AND ordinal_position = pt.column_index WHERE table_name = %s + ORDER BY + col.ordinal_position, + col.column_name """ cursor.execute(sql, (table_name,)) @@ -213,6 +221,9 @@ def get_schema_list(self, cursor) -> List[str]: schema_name FROM information_schema.schemata + ORDER BY + schema_name, + catalog_name """, tuple(), ) From 4258d509ed9b3e54b3309cda3fded6912259b090 Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Sun, 8 Jun 2025 13:43:50 +0200 Subject: [PATCH 13/55] Run tests against Django 5.2 and pyscopg 3.2 --- tox.ini | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 70a0e8ce..94d7dd7c 100644 --- a/tox.ini +++ b/tox.ini @@ -3,7 +3,7 @@ envlist = {py36,py37}-dj{20,21,22,30,31,32}-psycopg{28,29} {py38,py39,py310}-dj{21,22,30,31,32,40}-psycopg{28,29} {py38,py39,py310,py311}-dj{41}-psycopg{28,29} - {py310,py311}-dj{42,50}-psycopg{28,29,31} + {py310,py311}-dj{42,50,51,52}-psycopg{28,29,31,32} [testenv] deps = @@ -17,9 +17,12 @@ deps = dj41: Django~=4.1.0 dj42: Django~=4.2.0 dj50: Django~=5.0.1 + dj51: Django~=5.1.0 + dj52: Django~=5.2.0 psycopg28: psycopg2[binary]~=2.8 psycopg29: psycopg2[binary]~=2.9 psycopg31: psycopg[binary]~=3.1 + psycopg32: psycopg[binary]~=3.2 .[test] setenv = 
DJANGO_SETTINGS_MODULE=settings From 6de1e2b20c7a41e6e40b0ca42d829b1e2bb4e50a Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Sun, 8 Jun 2025 13:45:16 +0200 Subject: [PATCH 14/55] Declare support in README for Django 5.1 and 5.2 --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 17037d87..b831ded1 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ | :memo: | **License** | [![License](https://img.shields.io/:license-mit-blue.svg)](http://doge.mit-license.org) | | :package: | **PyPi** | [![PyPi](https://badge.fury.io/py/django-postgres-extra.svg)](https://pypi.python.org/pypi/django-postgres-extra) | | :four_leaf_clover: | **Code coverage** | [![Coverage Status](https://coveralls.io/repos/github/SectorLabs/django-postgres-extra/badge.svg?branch=coveralls)](https://coveralls.io/github/SectorLabs/django-postgres-extra?branch=master) | -| | **Django Versions** | 2.0, 2.1, 2.2, 3.0, 3.1, 3.2, 4.0, 4.1, 4.2, 5.0 | +| | **Django Versions** | 2.0, 2.1, 2.2, 3.0, 3.1, 3.2, 4.0, 4.1, 4.2, 5.0, 5.1, 5.2 | | | **Python Versions** | 3.6, 3.7, 3.8, 3.9, 3.10, 3.11 | | | **Psycopg Versions** | 2, 3 | | :book: | **Documentation** | [Read The Docs](https://django-postgres-extra.readthedocs.io/en/master/) | From 5e0877c7251fb5cd41335117228a1858d17845cc Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Sun, 8 Jun 2025 13:47:48 +0200 Subject: [PATCH 15/55] Use transactions on the right database connection in schema editor Fixes #255 --- psqlextra/backend/schema.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/psqlextra/backend/schema.py b/psqlextra/backend/schema.py index 81acbbb3..5c478840 100644 --- a/psqlextra/backend/schema.py +++ b/psqlextra/backend/schema.py @@ -571,7 +571,7 @@ def replace_materialized_view_model(self, model: Type[Model]) -> None: cursor, model._meta.db_table ) - with transaction.atomic(): + with transaction.atomic(using=self.connection.alias): 
self.delete_materialized_view_model(model) self.create_materialized_view_model(model) @@ -674,7 +674,7 @@ def add_range_partition( "%s", ) - with transaction.atomic(): + with transaction.atomic(using=self.connection.alias): self.execute(sql, (from_values, to_values)) if comment: @@ -717,7 +717,7 @@ def add_list_partition( ",".join(["%s" for _ in range(len(values))]), ) - with transaction.atomic(): + with transaction.atomic(using=self.connection.alias): self.execute(sql, values) if comment: @@ -763,7 +763,7 @@ def add_hash_partition( "%s", ) - with transaction.atomic(): + with transaction.atomic(using=self.connection.alias): self.execute(sql, (modulus, remainder)) if comment: @@ -800,7 +800,7 @@ def add_default_partition( self.quote_name(model._meta.db_table), ) - with transaction.atomic(): + with transaction.atomic(using=self.connection.alias): self.execute(sql) if comment: From 7d8ec1be0777bd576a7a58fc6613126a7179c5d5 Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Sun, 8 Jun 2025 14:25:53 +0200 Subject: [PATCH 16/55] Drop direct dependency on psycopg2 in tests --- setup.py | 3 +-- tests/test_schema.py | 9 ++++----- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/setup.py b/setup.py index 918beb87..b0076fa9 100644 --- a/setup.py +++ b/setup.py @@ -75,7 +75,7 @@ def run(self): ':python_version <= "3.6"': ["dataclasses"], "docs": ["Sphinx==2.2.0", "sphinx-rtd-theme==0.4.3", "docutils<0.18"], "test": [ - "psycopg2>=2.8.4,<3.0.0", + "psycopg~=3.2", "dj-database-url==0.5.0", "pytest==6.2.5", "pytest-benchmark==3.4.1", @@ -102,7 +102,6 @@ def run(self): "typing-extensions==4.5.0; python_version > '3.6'", "typing-extensions==4.1.0; python_version <= '3.6'", "types-dj-database-url==1.3.0.0", - "types-psycopg2==2.9.21.9", "types-python-dateutil==2.8.19.12", ], "publish": [ diff --git a/tests/test_schema.py b/tests/test_schema.py index 7ae4a3f2..49802cf8 100644 --- a/tests/test_schema.py +++ b/tests/test_schema.py @@ -3,7 +3,6 @@ from django.core.exceptions 
import SuspiciousOperation, ValidationError from django.db import InternalError, ProgrammingError, connection -from psycopg2 import errorcodes from psqlextra.error import extract_postgres_error_code from psqlextra.schema import PostgresSchema, postgres_temporary_schema @@ -93,7 +92,7 @@ def test_postgres_schema_delete_and_create(): schema = PostgresSchema.delete_and_create(schema.name) pg_error = extract_postgres_error_code(exc_info.value) - assert pg_error == errorcodes.DEPENDENT_OBJECTS_STILL_EXIST + assert pg_error == "2BP01" # DEPENDENT_OBJECTS_STILL_EXIST # Verify that the schema and table still exist assert _does_schema_exist(schema.name) @@ -113,7 +112,7 @@ def test_postgres_schema_delete_and_create(): assert cursor.fetchone() == ("hello",) pg_error = extract_postgres_error_code(exc_info.value) - assert pg_error == errorcodes.UNDEFINED_TABLE + assert pg_error == "42P01" # UNDEFINED_TABLE def test_postgres_schema_delete(): @@ -135,7 +134,7 @@ def test_postgres_schema_delete_not_empty(): schema.delete() pg_error = extract_postgres_error_code(exc_info.value) - assert pg_error == errorcodes.DEPENDENT_OBJECTS_STILL_EXIST + assert pg_error == "2BP01" # DEPENDENT_OBJECTS_STILL_EXIST def test_postgres_schema_delete_cascade_not_empty(): @@ -177,7 +176,7 @@ def test_postgres_temporary_schema_not_empty(): ) pg_error = extract_postgres_error_code(exc_info.value) - assert pg_error == errorcodes.DEPENDENT_OBJECTS_STILL_EXIST + assert pg_error == "2BP01" # DEPENDENT_OBJECTS_STILL_EXIST def test_postgres_temporary_schema_not_empty_cascade(): From 38b77a65e39593c076ab3a5f0a66e5d99b0656cb Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Sun, 8 Jun 2025 14:26:31 +0200 Subject: [PATCH 17/55] Correct version constraint; `index_together` was removed in Django 5.1 https://docs.djangoproject.com/en/5.2/releases/5.1/#features-removed-in-5-1 --- psqlextra/backend/schema.py | 2 +- tests/test_schema_editor_clone_model_to_schema.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) 
diff --git a/psqlextra/backend/schema.py b/psqlextra/backend/schema.py index 5c478840..c7253549 100644 --- a/psqlextra/backend/schema.py +++ b/psqlextra/backend/schema.py @@ -250,7 +250,7 @@ def clone_model_constraints_and_indexes_to_schema( model, tuple(), model._meta.unique_together ) - if django.VERSION < (5, 2): + if django.VERSION < (5, 1): if model._meta.index_together: self.alter_index_together( model, tuple(), model._meta.index_together diff --git a/tests/test_schema_editor_clone_model_to_schema.py b/tests/test_schema_editor_clone_model_to_schema.py index ef919bcd..c84e74cc 100644 --- a/tests/test_schema_editor_clone_model_to_schema.py +++ b/tests/test_schema_editor_clone_model_to_schema.py @@ -177,7 +177,7 @@ def fake_model(fake_model_fk_target_1, fake_model_fk_target_2): ), } - if django.VERSION < (5, 2): + if django.VERSION < (5, 1): meta_options["index_together"] = ( "blob", "age", From 51a7a3757ea5e46b0c86ea8f7ec0050af6820d37 Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Sun, 8 Jun 2025 14:32:16 +0200 Subject: [PATCH 18/55] Use Postgres when testing Python >=3.10 and Django >=5.x --- .circleci/config.yml | 31 ++++++++++++++++++++----------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 92d9093b..bda61ac7 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -3,11 +3,14 @@ version: 2.1 executors: python: parameters: - version: + pyversion: + type: string + pgversion: type: string + default: "16.0" docker: - - image: python:<< parameters.version >>-buster - - image: postgres:13.0 + - image: python:<< parameters.pyversion >>-buster + - image: postgres:<< parameters.pgversion >> environment: POSTGRES_DB: 'psqlextra' POSTGRES_USER: 'psqlextra' @@ -44,7 +47,8 @@ jobs: test-python36: executor: name: python - version: "3.6" + pyversion: "3.6" + pgversion: "13.0" steps: - checkout - install-dependencies: @@ -55,7 +59,8 @@ jobs: test-python37: executor: name: python - version: "3.7" + 
pyversion: "3.7" + pgversion: "13.0" steps: - checkout - install-dependencies: @@ -66,7 +71,8 @@ jobs: test-python38: executor: name: python - version: "3.8" + pyversion: "3.8" + pgversion: "13.0" steps: - checkout - install-dependencies: @@ -77,7 +83,8 @@ jobs: test-python39: executor: name: python - version: "3.9" + pyversion: "3.9" + pgversion: "13.0" steps: - checkout - install-dependencies: @@ -88,7 +95,8 @@ jobs: test-python310: executor: name: python - version: "3.10" + pyversion: "3.10" + pgversion: "16.0" steps: - checkout - install-dependencies: @@ -99,7 +107,8 @@ jobs: test-python311: executor: name: python - version: "3.11" + pyversion: "3.11" + pgversion: "16.0" steps: - checkout - install-dependencies: @@ -115,7 +124,7 @@ jobs: analysis: executor: name: python - version: "3.9" + pyversion: "3.10" steps: - checkout - install-dependencies: @@ -127,7 +136,7 @@ jobs: publish: executor: name: python - version: "3.9" + pyversion: "3.10" steps: - checkout - install-dependencies: From fb1e2995040dd9f45cc4cab1bc004af46a17f96d Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Sun, 8 Jun 2025 14:34:00 +0200 Subject: [PATCH 19/55] Run tests against Python 3.12 --- .circleci/config.yml | 18 ++++++++++++++++++ README.md | 2 +- tox.ini | 2 +- 3 files changed, 20 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index bda61ac7..ab84856b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -115,6 +115,18 @@ jobs: extra: test - run-tests: pyversion: 311 + + test-python312: + executor: + name: python + pyversion: "3.12" + pgversion: "16.0" + steps: + - checkout + - install-dependencies: + extra: test + - run-tests: + pyversion: 312 - store_test_results: path: reports - run: @@ -197,6 +209,12 @@ workflows: only: /.*/ branches: only: /.*/ + - test-python312: + filters: + tags: + only: /.*/ + branches: + only: /.*/ - analysis: filters: tags: diff --git a/README.md b/README.md index b831ded1..745e36d6 100644 --- a/README.md +++ 
b/README.md @@ -9,7 +9,7 @@ | :package: | **PyPi** | [![PyPi](https://badge.fury.io/py/django-postgres-extra.svg)](https://pypi.python.org/pypi/django-postgres-extra) | | :four_leaf_clover: | **Code coverage** | [![Coverage Status](https://coveralls.io/repos/github/SectorLabs/django-postgres-extra/badge.svg?branch=coveralls)](https://coveralls.io/github/SectorLabs/django-postgres-extra?branch=master) | | | **Django Versions** | 2.0, 2.1, 2.2, 3.0, 3.1, 3.2, 4.0, 4.1, 4.2, 5.0, 5.1, 5.2 | -| | **Python Versions** | 3.6, 3.7, 3.8, 3.9, 3.10, 3.11 | +| | **Python Versions** | 3.6, 3.7, 3.8, 3.9, 3.10, 3.11, 3.12 | | | **Psycopg Versions** | 2, 3 | | :book: | **Documentation** | [Read The Docs](https://django-postgres-extra.readthedocs.io/en/master/) | | :warning: | **Upgrade** | [Upgrade from v1.x](https://django-postgres-extra.readthedocs.io/en/master/major_releases.html#new-features) diff --git a/tox.ini b/tox.ini index 94d7dd7c..dafb6b1a 100644 --- a/tox.ini +++ b/tox.ini @@ -3,7 +3,7 @@ envlist = {py36,py37}-dj{20,21,22,30,31,32}-psycopg{28,29} {py38,py39,py310}-dj{21,22,30,31,32,40}-psycopg{28,29} {py38,py39,py310,py311}-dj{41}-psycopg{28,29} - {py310,py311}-dj{42,50,51,52}-psycopg{28,29,31,32} + {py310,py311,py312}-dj{42,50,51,52}-psycopg{28,29,31,32} [testenv] deps = From ec542582e5fe40a7f8f8d59a4b585af51d7719d8 Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Sun, 8 Jun 2025 14:36:16 +0200 Subject: [PATCH 20/55] Run tests against Python 3.13 --- .circleci/config.yml | 22 ++++++++++++++++++++-- README.md | 2 +- tox.ini | 2 +- 3 files changed, 22 insertions(+), 4 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index ab84856b..87e0d6cd 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -127,6 +127,18 @@ jobs: extra: test - run-tests: pyversion: 312 + + test-python313: + executor: + name: python + pyversion: "3.13" + pgversion: "16.0" + steps: + - checkout + - install-dependencies: + extra: test + - run-tests: + pyversion: 313 - 
store_test_results: path: reports - run: @@ -136,7 +148,7 @@ jobs: analysis: executor: name: python - pyversion: "3.10" + pyversion: "3.11" steps: - checkout - install-dependencies: @@ -148,7 +160,7 @@ jobs: publish: executor: name: python - pyversion: "3.10" + pyversion: "3.11" steps: - checkout - install-dependencies: @@ -215,6 +227,12 @@ workflows: only: /.*/ branches: only: /.*/ + - test-python313: + filters: + tags: + only: /.*/ + branches: + only: /.*/ - analysis: filters: tags: diff --git a/README.md b/README.md index 745e36d6..eeab68d8 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ | :package: | **PyPi** | [![PyPi](https://badge.fury.io/py/django-postgres-extra.svg)](https://pypi.python.org/pypi/django-postgres-extra) | | :four_leaf_clover: | **Code coverage** | [![Coverage Status](https://coveralls.io/repos/github/SectorLabs/django-postgres-extra/badge.svg?branch=coveralls)](https://coveralls.io/github/SectorLabs/django-postgres-extra?branch=master) | | | **Django Versions** | 2.0, 2.1, 2.2, 3.0, 3.1, 3.2, 4.0, 4.1, 4.2, 5.0, 5.1, 5.2 | -| | **Python Versions** | 3.6, 3.7, 3.8, 3.9, 3.10, 3.11, 3.12 | +| | **Python Versions** | 3.6, 3.7, 3.8, 3.9, 3.10, 3.11, 3.12, 3.13 | | | **Psycopg Versions** | 2, 3 | | :book: | **Documentation** | [Read The Docs](https://django-postgres-extra.readthedocs.io/en/master/) | | :warning: | **Upgrade** | [Upgrade from v1.x](https://django-postgres-extra.readthedocs.io/en/master/major_releases.html#new-features) diff --git a/tox.ini b/tox.ini index dafb6b1a..963f9d31 100644 --- a/tox.ini +++ b/tox.ini @@ -3,7 +3,7 @@ envlist = {py36,py37}-dj{20,21,22,30,31,32}-psycopg{28,29} {py38,py39,py310}-dj{21,22,30,31,32,40}-psycopg{28,29} {py38,py39,py310,py311}-dj{41}-psycopg{28,29} - {py310,py311,py312}-dj{42,50,51,52}-psycopg{28,29,31,32} + {py310,py311,py312,py313}-dj{42,50,51,52}-psycopg{28,29,31,32} [testenv] deps = From b0abc5c2427459f55231fc8ee9dfeb47c8aa8b2d Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Sun, 8 Jun 
2025 14:53:01 +0200 Subject: [PATCH 21/55] Pick correct psycopg 3.x version based on Python version --- setup.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index b0076fa9..7ee873a3 100644 --- a/setup.py +++ b/setup.py @@ -75,7 +75,9 @@ def run(self): ':python_version <= "3.6"': ["dataclasses"], "docs": ["Sphinx==2.2.0", "sphinx-rtd-theme==0.4.3", "docutils<0.18"], "test": [ - "psycopg~=3.2", + "psycopg~=3.2; python_version >= '3.8'", + "psycopg~=3.1; python_version >= '3.7'", + "psycopg~=3.0; python_version <= '3.6'", "dj-database-url==0.5.0", "pytest==6.2.5", "pytest-benchmark==3.4.1", From 9052440c3f056372e2022a8cc893178bb1c3cd0a Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Sun, 8 Jun 2025 14:58:23 +0200 Subject: [PATCH 22/55] Make sure we don't end up with `psycopg2` and `psycopg` in tox envs --- requirements-all.txt | 2 +- setup.py | 10 +++++++--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/requirements-all.txt b/requirements-all.txt index d4ca40f0..8b6a1b6c 100644 --- a/requirements-all.txt +++ b/requirements-all.txt @@ -1,4 +1,4 @@ -e . 
--e .[test] +-e .[local] -e .[analysis] -e .[docs] diff --git a/setup.py b/setup.py index 7ee873a3..8137acf6 100644 --- a/setup.py +++ b/setup.py @@ -75,9 +75,6 @@ def run(self): ':python_version <= "3.6"': ["dataclasses"], "docs": ["Sphinx==2.2.0", "sphinx-rtd-theme==0.4.3", "docutils<0.18"], "test": [ - "psycopg~=3.2; python_version >= '3.8'", - "psycopg~=3.1; python_version >= '3.7'", - "psycopg~=3.0; python_version <= '3.6'", "dj-database-url==0.5.0", "pytest==6.2.5", "pytest-benchmark==3.4.1", @@ -90,6 +87,13 @@ def run(self): "coveralls==3.3.0", "snapshottest==0.6.0", ], + "local": [ + "Django~=4.2; python_version < '3.10'", + "Django~=5.2; python_version >= '3.10'", + "psycopg~=3.2; python_version >= '3.8'", + "psycopg~=3.1; python_version >= '3.7'", + "psycopg~=3.0; python_version <= '3.6'", + ], "analysis": [ "black==22.3.0", "flake8==4.0.1", From b5942fc315bd711d261cf6ff294f77206ebe2d01 Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Sun, 8 Jun 2025 20:21:33 +0200 Subject: [PATCH 23/55] Switch back to installing `psycopg2` for `test` You can use `psycopg3` while having `psycopg2` installed, but not the other way around. Django will use `psycopg3` when available, otherwise `psycopg2`.
--- setup.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/setup.py b/setup.py index 8137acf6..ce5cb366 100644 --- a/setup.py +++ b/setup.py @@ -75,6 +75,7 @@ def run(self): ':python_version <= "3.6"': ["dataclasses"], "docs": ["Sphinx==2.2.0", "sphinx-rtd-theme==0.4.3", "docutils<0.18"], "test": [ + "psycopg2==2.9.10", "dj-database-url==0.5.0", "pytest==6.2.5", "pytest-benchmark==3.4.1", @@ -87,13 +88,6 @@ def run(self): "coveralls==3.3.0", "snapshottest==0.6.0", ], - "local": [ - "Django~=4.2; python_version < '3.10'", - "Django~=5.2; python_version >= '3.10'", - "psycopg~=3.2; python_version >= '3.8'", - "psycopg~=3.1; python_version >= '3.7'", - "psycopg~=3.0; python_version <= '3.6'", - ], "analysis": [ "black==22.3.0", "flake8==4.0.1", From 6be7afc5dbebb2afb3643e5af8de2221dd22b647 Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Sun, 8 Jun 2025 20:36:33 +0200 Subject: [PATCH 24/55] Make psycopg2 version dependent on Python version --- setup.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index ce5cb366..f687529c 100644 --- a/setup.py +++ b/setup.py @@ -75,7 +75,9 @@ def run(self): ':python_version <= "3.6"': ["dataclasses"], "docs": ["Sphinx==2.2.0", "sphinx-rtd-theme==0.4.3", "docutils<0.18"], "test": [ - "psycopg2==2.9.10", + "psycopg2==2.9.10; python_version >= '3.8'", + "psycopg2==2.9.9; python_version >= '3.7'", + "psycopg2==2.9.8; python_version >= '3.6'", "dj-database-url==0.5.0", "pytest==6.2.5", "pytest-benchmark==3.4.1", From 5a5931cdba190a995d91677d55490d9f5212fca8 Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Sun, 8 Jun 2025 20:36:42 +0200 Subject: [PATCH 25/55] Upgrade Sphinx and related libraries to latest --- setup.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index f687529c..30cc5557 100644 --- a/setup.py +++ b/setup.py @@ -73,7 +73,12 @@ def run(self): ], extras_require={ ':python_version <= "3.6"': ["dataclasses"], - 
"docs": ["Sphinx==2.2.0", "sphinx-rtd-theme==0.4.3", "docutils<0.18"], + "docs": [ + "Sphinx==8.2.3", + "sphinx-rtd-theme==3.0.2", + "docutils==0.21.2", + "Jinja2==3.1.6", + ], "test": [ "psycopg2==2.9.10; python_version >= '3.8'", "psycopg2==2.9.9; python_version >= '3.7'", From 07b095cc955b7e02b9c1bceaebcb00e06c91932c Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Sun, 8 Jun 2025 20:36:54 +0200 Subject: [PATCH 26/55] Add missing cast for `extract_postgres_error` --- psqlextra/error.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/psqlextra/error.py b/psqlextra/error.py index b3a5cf83..5be8c37c 100644 --- a/psqlextra/error.py +++ b/psqlextra/error.py @@ -1,4 +1,4 @@ -from typing import TYPE_CHECKING, Optional, Type, Union +from typing import TYPE_CHECKING, Optional, Type, Union, cast from django import db @@ -38,7 +38,7 @@ def extract_postgres_error( ): return None - return error.__cause__ + return cast(Union["_Psycopg2Error", "_Psycopg3Error"], error.__cause__) def extract_postgres_error_code(error: db.Error) -> Optional[str]: From 3d98f8191e77c82416cb187e05ffd1ccdebb87f8 Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Sun, 8 Jun 2025 20:38:46 +0200 Subject: [PATCH 27/55] Upgrade Debian dist to `bullseye` for Python 3.12 and 3.13 --- .circleci/config.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 87e0d6cd..87488f12 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -8,8 +8,11 @@ executors: pgversion: type: string default: "16.0" + debiandist: + type: string + default: "buster" docker: - - image: python:<< parameters.pyversion >>-buster + - image: python:<< parameters.pyversion >>-<< parameters.debiandist >> - image: postgres:<< parameters.pgversion >> environment: POSTGRES_DB: 'psqlextra' @@ -121,6 +124,7 @@ jobs: name: python pyversion: "3.12" pgversion: "16.0" + debiandist: "bullseye" steps: - checkout - install-dependencies: @@ -133,6 +137,7 @@ 
jobs: name: python pyversion: "3.13" pgversion: "16.0" + debiandist: "bullseye" steps: - checkout - install-dependencies: From 3cbb8aa30eaf34977b0e3005aa5a5af71c9e035f Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Sun, 8 Jun 2025 20:42:58 +0200 Subject: [PATCH 28/55] Fix conflicting psycopg2 dependencies in older Python versions --- setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index 30cc5557..8aa2ccb9 100644 --- a/setup.py +++ b/setup.py @@ -81,8 +81,8 @@ def run(self): ], "test": [ "psycopg2==2.9.10; python_version >= '3.8'", - "psycopg2==2.9.9; python_version >= '3.7'", - "psycopg2==2.9.8; python_version >= '3.6'", + "psycopg2==2.9.9; python_version >= '3.7' and python_version < '3.8'", + "psycopg2==2.9.8; python_version >= '3.6' and python_version < '3.7'", "dj-database-url==0.5.0", "pytest==6.2.5", "pytest-benchmark==3.4.1", From b1305a932b5d3cf3d771627384d42061a6a9e7b2 Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Sun, 8 Jun 2025 20:47:29 +0200 Subject: [PATCH 29/55] Make Debian package names work for both buster & bullseye --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 87488f12..6e3b4e81 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -28,7 +28,7 @@ commands: steps: - run: name: Install packages - command: apt-get update && apt-get install -y --no-install-recommends postgresql-client-11 libpq-dev build-essential git + command: apt-get update && apt-get install -y --no-install-recommends postgresql-client libpq-dev build-essential git - run: name: Install Python packages From 35ed1d2b12716fb9a14cf1d025383b225dd94382 Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Sun, 8 Jun 2025 20:51:23 +0200 Subject: [PATCH 30/55] Adjust dependencies to work with Python 3.6 all the way to Python 3.13 --- .circleci/config.yml | 30 +-- .gitignore | 1 + README.md | 16 +- .../migrations/patched_autodetector.py | 4 
+- psqlextra/backend/schema.py | 6 +- psqlextra/models/view.py | 4 +- psqlextra/partitioning/plan.py | 2 +- psqlextra/query.py | 6 +- psqlextra/settings.py | 3 +- pyproject.toml | 60 ++++- requirements-all.txt | 4 +- requirements-test.txt | 3 + settings.py | 20 +- setup.cfg | 1 - setup.py | 217 +++++------------- ...ent_command_partition_auto_confirm[y].json | 1 + ...t_command_partition_auto_confirm[yes].json | 1 + ...mmand_partition_confirm_no[capital_n].json | 1 + ...mand_partition_confirm_no[capital_no].json | 1 + ...ement_command_partition_confirm_no[n].json | 1 + ...ment_command_partition_confirm_no[no].json | 1 + ...ommand_partition_confirm_no[title_no].json | 1 + ...mand_partition_confirm_yes[capital_y].json | 1 + ...nd_partition_confirm_yes[capital_yes].json | 1 + ...ment_command_partition_confirm_yes[y].json | 1 + ...nt_command_partition_confirm_yes[yes].json | 1 + ...nagement_command_partition_dry_run[d].json | 1 + ...gement_command_partition_dry_run[dry].json | 1 + tests/snapshots/__init__.py | 0 .../snap_test_management_command_partition.py | 34 --- tests/test_introspect.py | 106 ++++++--- tests/test_make_migrations.py | 4 +- tests/test_management_command_partition.py | 32 ++- tests/test_manager.py | 6 +- tests/test_on_conflict.py | 8 +- tox.ini | 3 +- 36 files changed, 293 insertions(+), 290 deletions(-) create mode 100644 requirements-test.txt create mode 100644 tests/__snapshots__/test_management_command_partition/test_management_command_partition_auto_confirm[y].json create mode 100644 tests/__snapshots__/test_management_command_partition/test_management_command_partition_auto_confirm[yes].json create mode 100644 tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[capital_n].json create mode 100644 tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[capital_no].json create mode 100644 
tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[n].json create mode 100644 tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[no].json create mode 100644 tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[title_no].json create mode 100644 tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_yes[capital_y].json create mode 100644 tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_yes[capital_yes].json create mode 100644 tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_yes[y].json create mode 100644 tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_yes[yes].json create mode 100644 tests/__snapshots__/test_management_command_partition/test_management_command_partition_dry_run[d].json create mode 100644 tests/__snapshots__/test_management_command_partition/test_management_command_partition_dry_run[dry].json delete mode 100644 tests/snapshots/__init__.py delete mode 100644 tests/snapshots/snap_test_management_command_partition.py diff --git a/.circleci/config.yml b/.circleci/config.yml index 6e3b4e81..a00cc61e 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -55,7 +55,7 @@ jobs: steps: - checkout - install-dependencies: - extra: test + extra: dev, test - run-tests: pyversion: 36 @@ -67,7 +67,7 @@ jobs: steps: - checkout - install-dependencies: - extra: test + extra: dev, test - run-tests: pyversion: 37 @@ -79,7 +79,7 @@ jobs: steps: - checkout - install-dependencies: - extra: test + extra: dev, test - run-tests: pyversion: 38 @@ -91,7 +91,7 @@ jobs: steps: - checkout - install-dependencies: - extra: test + extra: dev, test - run-tests: pyversion: 39 @@ -103,7 +103,7 @@ jobs: steps: - checkout - install-dependencies: - extra: test + 
extra: dev, test - run-tests: pyversion: 310 @@ -115,9 +115,14 @@ jobs: steps: - checkout - install-dependencies: - extra: test + extra: dev, test, test-report - run-tests: pyversion: 311 + - store_test_results: + path: reports + - run: + name: Upload coverage report + command: coveralls test-python312: executor: @@ -128,7 +133,7 @@ jobs: steps: - checkout - install-dependencies: - extra: test + extra: dev, test - run-tests: pyversion: 312 @@ -141,14 +146,9 @@ jobs: steps: - checkout - install-dependencies: - extra: test + extra: dev, test - run-tests: pyversion: 313 - - store_test_results: - path: reports - - run: - name: Upload coverage report - command: coveralls analysis: executor: @@ -157,10 +157,10 @@ jobs: steps: - checkout - install-dependencies: - extra: analysis, test + extra: dev, analysis, test - run: name: Verify - command: python setup.py verify + command: poe verify publish: executor: diff --git a/.gitignore b/.gitignore index 97ebaa67..63d6378d 100644 --- a/.gitignore +++ b/.gitignore @@ -15,6 +15,7 @@ reports/ *.egg-info/ pip-wheel-metadata/ dist/ +build/ # Ignore stupid .DS_Store .DS_Store diff --git a/README.md b/README.md index eeab68d8..a26731de 100644 --- a/README.md +++ b/README.md @@ -59,9 +59,11 @@ With seamless we mean that any features we add will work truly seamlessly. You s ## Working with the code ### Prerequisites -* PostgreSQL 10 or newer. -* Django 2.0 or newer (including 3.x, 4.x). -* Python 3.6 or newer. +* PostgreSQL 14 or newer. +* Django 5.x or newer. +* Python 3.11 or newer. + +These are just for local development. CI for code analysis etc runs against these. Tests will pass on all Python, Django and PostgreSQL versions documented. Linting, formatting and type-checking the code might not work on other Python and/or Django versions. ### Getting started @@ -86,16 +88,16 @@ With seamless we mean that any features we add will work truly seamlessly. You s 4. 
Install the development/test dependencies: - λ pip install .[test] .[analysis] + λ pip install -r requirements-test.txt 5. Run the tests: - λ tox + λ poe test 6. Run the benchmarks: - λ py.test -c pytest-benchmark.ini + λ poe benchmark 7. Auto-format code, sort imports and auto-fix linting errors: - λ python setup.py fix + λ poe fix diff --git a/psqlextra/backend/migrations/patched_autodetector.py b/psqlextra/backend/migrations/patched_autodetector.py index e5ba8938..07f9e528 100644 --- a/psqlextra/backend/migrations/patched_autodetector.py +++ b/psqlextra/backend/migrations/patched_autodetector.py @@ -37,8 +37,8 @@ class AddOperationHandler: """Handler for when operations are being added to a new migration. - This is where we intercept operations such as - :see:CreateModel to replace it with our own. + This is where we intercept operations such as :see:CreateModel to + replace it with our own. """ def __init__(self, autodetector, app_label, args, kwargs): diff --git a/psqlextra/backend/schema.py b/psqlextra/backend/schema.py index c7253549..22acc075 100644 --- a/psqlextra/backend/schema.py +++ b/psqlextra/backend/schema.py @@ -558,9 +558,9 @@ def replace_materialized_view_model(self, model: Type[Model]) -> None: This is used to alter the backing query of a materialized view. - Replacing a materialized view is a lot trickier than a normal view. - For normal views we can use `CREATE OR REPLACE VIEW`, but for - materialized views, we have to create the new view, copy all + Replacing a materialized view is a lot trickier than a normal + view. For normal views we can use `CREATE OR REPLACE VIEW`, but + for materialized views, we have to create the new view, copy all indexes and constraints and drop the old one. This operation is atomic as it runs in a transaction. 
diff --git a/psqlextra/models/view.py b/psqlextra/models/view.py index b19f88c8..d24ed5a0 100644 --- a/psqlextra/models/view.py +++ b/psqlextra/models/view.py @@ -54,8 +54,8 @@ def _view_query_as_sql_with_params( When copying the meta options from the model, we convert any from the above to a raw SQL query with bind parameters. We do - this is because it is what the SQL driver understands and - we can easily serialize it into a migration. + this is because it is what the SQL driver understands and we can + easily serialize it into a migration. """ # might be a callable to support delayed imports diff --git a/psqlextra/partitioning/plan.py b/psqlextra/partitioning/plan.py index 3fcac44d..301b4241 100644 --- a/psqlextra/partitioning/plan.py +++ b/psqlextra/partitioning/plan.py @@ -54,7 +54,7 @@ def apply(self, using: Optional[str]) -> None: def print(self) -> None: """Prints this model plan to the terminal in a readable format.""" - print(f"{self.config.model.__name__}:") + print(f"{self.config.model.__name__}: ") for partition in self.deletions: print(" - %s" % partition.name()) diff --git a/psqlextra/query.py b/psqlextra/query.py index 5dd1cdb3..ca1d2226 100644 --- a/psqlextra/query.py +++ b/psqlextra/query.py @@ -73,9 +73,9 @@ def annotate(self, **annotations) -> "Self": # type: ignore[valid-type, overrid name of an existing field on the model as the alias name. This version of the function does allow that. - This is done by temporarily renaming the fields in order to avoid the - check for conflicts that the base class does. - We rename all fields instead of the ones that already exist because + This is done by temporarily renaming the fields in order to + avoid the check for conflicts that the base class does. We + rename all fields instead of the ones that already exist because the annotations are stored in an OrderedDict. Renaming only the conflicts will mess up the order. 
""" diff --git a/psqlextra/settings.py b/psqlextra/settings.py index 6f75c779..b6061766 100644 --- a/psqlextra/settings.py +++ b/psqlextra/settings.py @@ -16,7 +16,8 @@ def postgres_set_local( The effect is undone when the context manager exits. - See https://www.postgresql.org/docs/current/runtime-config-client.html + See + https://www.postgresql.org/docs/current/runtime-config-client.html for an overview of all available options. """ diff --git a/pyproject.toml b/pyproject.toml index fb35b3b4..cb27ce10 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,7 +6,7 @@ exclude = ''' | .env | env | venv - | tests/snapshots + | tests/__snapshots__ )/ ) ''' @@ -25,3 +25,61 @@ ignore_missing_imports = true [tool.django-stubs] django_settings_module = "settings" + +[tool.poe.tasks] +_autoflake = "python3 -m autoflake --remove-all -i -r setup.py psqlextra tests" +_autopep8 = "autopep8 -i -r setup.py psqlextra tests" +_isort_setup_py = "isort setup.py" +_isort_psqlextra = "isort psqlextra" +_isort_tests = "isort tests" +_isort_verify_setup_py = "isort -c setup.py" +_isort_verify_psqlextra = "isort -c psqlextra" +_isort_verify_tests = "isort -c tests" + +[tool.poe.tasks.lint] +cmd = "python3 -m flake8 --builtin=__version__ setup.py psqlextra tests" +help = "Lints all the code." + +[tool.poe.tasks.lint_fix] +sequence = ["_autoflake", "_autopep8"] +help = "Auto-fixes linter errors." + +[tool.poe.tasks.lint_types] +cmd = "mypy --package psqlextra --pretty --show-error-codes" +help = "Type-checks the code." + +[tool.poe.tasks.format] +cmd = "black setup.py psqlextra tests" +help = "Auto-formats the code." + +[tool.poe.tasks.format_verify] +cmd = "black --check setup.py psqlextra tests" +help = "Verifies that the code was formatted properly." + +[tool.poe.tasks.format_docstrings] +cmd = "docformatter -r -i ." +help = "Auto-formats doc strings." + +[tool.poe.tasks.format_docstrings_verify] +cmd = "docformatter -r -c ." +help = "Verifies all doc strings are properly formatted." 
+ +[tool.poe.tasks.sort_imports] +sequence = ["_isort_setup_py", "_isort_psqlextra", "_isort_tests"] +help = "Auto-sorts the imports." + +[tool.poe.tasks.sort_imports_verify] +sequence = ["_isort_verify_setup_py", "_isort_verify_psqlextra", "_isort_verify_tests"] +help = "Verifies that the imports are properly sorted." + +[tool.poe.tasks.fix] +sequence = ["format", "format_docstrings", "sort_imports", "lint_fix", "lint", "lint_types"] +help = "Automatically format code and fix linting errors." + +[tool.poe.tasks.verify] +sequence = ["format_verify", "format_docstrings_verify", "sort_imports_verify", "lint", "lint_types"] +help = "Automatically format code and fix linting errors." + +[tool.poe.tasks.test] +cmd = "pytest --cov=psqlextra --cov-report=term --cov-report=xml:reports/xml --cov-report=html:reports/html --junitxml=reports/junit/tests.xml --reuse-db -vv" +help = "Runs all the tests." diff --git a/requirements-all.txt b/requirements-all.txt index 8b6a1b6c..c7ae18d2 100644 --- a/requirements-all.txt +++ b/requirements-all.txt @@ -1,4 +1,6 @@ -e . --e .[local] +-e .[dev] +-e .[test] +-e .[test-report] -e .[analysis] -e .[docs] diff --git a/requirements-test.txt b/requirements-test.txt new file mode 100644 index 00000000..bd31d78f --- /dev/null +++ b/requirements-test.txt @@ -0,0 +1,3 @@ +-e . 
+-e .[dev]
+-e .[test]
diff --git a/settings.py b/settings.py
index 7266ccb4..7ece1712 100644
--- a/settings.py
+++ b/settings.py
@@ -1,4 +1,20 @@
-import dj_database_url
+import os
+
+from urllib.parse import urlparse
+
+
+def _parse_db_url(/service/url: str):
+    parsed_url = urlparse(url)
+
+    return {
+        'ENGINE': 'django.db.backends.postgresql',
+        'NAME': (parsed_url.path or '').strip('/') or "postgres",
+        'HOST': parsed_url.hostname or None,
+        'PORT': parsed_url.port or None,
+        'USER': parsed_url.username or None,
+        'PASSWORD': parsed_url.password or None,
+    }
+
 
 DEBUG = True
 TEMPLATE_DEBUG = True
@@ -8,7 +24,7 @@ TEST_RUNNER = 'django.test.runner.DiscoverRunner'
 
 DATABASES = {
-    'default': dj_database_url.config(default='postgres:///psqlextra'),
+    'default': _parse_db_url(https://github.com/os.environ.get('DATABASE_URL',%20'postgres:///psqlextra')),
 }
 
 DATABASES['default']['ENGINE'] = 'tests.psqlextra_test_backend'
diff --git a/setup.cfg b/setup.cfg
index 65713eaa..ecb84153 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -9,4 +9,3 @@ lines_between_types=1
 include_trailing_comma=True
 known_third_party=pytest,freezegun
 float_to_top=true
-skip_glob=tests/snapshots/*.py
diff --git a/setup.py b/setup.py
index 8aa2ccb9..047d3613 100644
--- a/setup.py
+++ b/setup.py
@@ -1,35 +1,9 @@
-import distutils.cmd
 import os
-import subprocess
 
 from setuptools import find_packages, setup
 
 exec(open("psqlextra/_version.py").read())
 
-
-class BaseCommand(distutils.cmd.Command):
-    user_options = []
-
-    def initialize_options(self):
-        pass
-
-    def finalize_options(self):
-        pass
-
-
-def create_command(text, commands):
-    """Creates a custom setup.py command."""
-
-    class CustomCommand(BaseCommand):
-        description = text
-
-        def run(self):
-            for cmd in commands:
-                subprocess.check_call(cmd)
-
-    return CustomCommand
-
-
 with open(
     os.path.join(os.path.dirname(__file__), "README.md"), encoding="utf-8"
 ) as readme:
@@ -38,7 +12,7 @@ def run(self):
 
 setup(
     name="django-postgres-extra",
-    version=__version__,
+    version=__version__,  # noqa
     packages=find_packages(exclude=["tests"]),
     package_data={"psqlextra": ["py.typed"]},
     include_package_data=True,
@@ -49,7 +23,16 @@ def run(self):
    url="https://github.com/SectorLabs/django-postgres-extra",
     author="Sector Labs",
     author_email="open-source@sectorlabs.ro",
-    keywords=["django", "postgres", "extra", "hstore", "ltree"],
+    keywords=[
+        "django",
+        "postgres",
+        "extra",
+        "hstore",
+        "upsert",
+        "partitioning",
+        "materialized",
+        "view",
+    ],
     classifiers=[
         "Environment :: Web Environment",
         "Framework :: Django",
@@ -72,154 +55,62 @@ def run(self):
         "python-dateutil>=2.8.0,<=3.0.0",
     ],
     extras_require={
+        # Python 3.6 - Python 3.13
         ':python_version <= "3.6"': ["dataclasses"],
-        "docs": [
-            "Sphinx==8.2.3",
-            "sphinx-rtd-theme==3.0.2",
-            "docutils==0.21.2",
-            "Jinja2==3.1.6",
+        "dev": [
+            "poethepoet==0.34.0; python_version >= '3.9'",
+            "poethepoet==0.30.0; python_version >= '3.8' and python_version < '3.9'",
+            "poethepoet==0.19.0; python_version >= '3.7' and python_version < '3.8'",
+            "poethepoet==0.13.1; python_version >= '3.6' and python_version < '3.7'",
         ],
         "test": [
             "psycopg2==2.9.10; python_version >= '3.8'",
             "psycopg2==2.9.9; python_version >= '3.7' and python_version < '3.8'",
             "psycopg2==2.9.8; python_version >= '3.6' and python_version < '3.7'",
-            "dj-database-url==0.5.0",
-            "pytest==6.2.5",
-            "pytest-benchmark==3.4.1",
-            "pytest-django==4.4.0",
-            "pytest-cov==3.0.0",
-            "pytest-lazy-fixture==0.6.3",
-            "pytest-freezegun==0.4.2",
-            "tox==3.24.4",
-            "freezegun==1.1.0",
-            "coveralls==3.3.0",
-            "snapshottest==0.6.0",
+            "types-psycopg2==2.9.21.20250516; python_version >= '3.9'",
+            "types-psycopg2==2.9.8; python_version >= '3.6' and python_version < '3.9'",
+            "pytest==8.4.0; python_version > '3.8'",
+            "pytest==7.0.1; python_version <= '3.8'",
+            "pytest-benchmark==5.1.0; python_version > '3.8'",
+            "pytest-benchmark==3.4.1; python_version <= '3.8'",
+            "pytest-django==4.11.1; python_version > '3.7'",
"pytest-django==4.5.2; python_version <= '3.7'", + "pytest-cov==6.1.1; python_version > '3.8'", + "pytest-cov==4.0.0; python_version <= '3.8'", + "coverage==7.8.2; python_version > '3.8'", + "coverage==7.6.1; python_version >= '3.8' and python_version <= '3.8'", + "coverage==6.2; python_version <= '3.7'", + "tox==4.26.0; python_version > '3.8'", + "tox==3.28.0; python_version <= '3.8'", + "freezegun==1.5.2; python_version > '3.7'", + "freezegun==1.2.2; python_version <= '3.7'", + "syrupy==4.9.1; python_version >= '3.9'", + "syrupy==2.3.1; python_version <= '3.8'", ], + # Python 3.11 assumed from below + "test-report": ["coveralls==4.0.1"], "analysis": [ "black==22.3.0", - "flake8==4.0.1", - "autoflake==1.4", - "autopep8==1.6.0", - "isort==5.10.0", - "docformatter==1.4", - "mypy==1.2.0; python_version > '3.6'", - "mypy==0.971; python_version <= '3.6'", - "django-stubs==4.2.7; python_version > '3.6'", - "django-stubs==1.9.0; python_version <= '3.6'", - "typing-extensions==4.5.0; python_version > '3.6'", - "typing-extensions==4.1.0; python_version <= '3.6'", - "types-dj-database-url==1.3.0.0", - "types-python-dateutil==2.8.19.12", + "flake8==7.2.0", + "autoflake==2.3.1", + "autopep8==2.3.2", + "isort==6.0.1", + "docformatter==1.7.7", + "mypy==1.16.0", + "django-stubs==4.2.7", + "typing-extensions==4.14.0", + "types-dj-database-url==1.3.0.4", + "types-python-dateutil==2.9.0.20250516", + ], + "docs": [ + "Sphinx==8.2.3", + "sphinx-rtd-theme==3.0.2", + "docutils==0.21.2", + "Jinja2==3.1.6", ], "publish": [ "build==0.7.0", "twine==3.7.1", ], }, - cmdclass={ - "lint": create_command( - "Lints the code", - [ - [ - "flake8", - "--builtin=__version__", - "setup.py", - "psqlextra", - "tests", - ] - ], - ), - "lint_fix": create_command( - "Lints the code", - [ - [ - "autoflake", - "--remove-all", - "-i", - "-r", - "setup.py", - "psqlextra", - "tests", - ], - ["autopep8", "-i", "-r", "setup.py", "psqlextra", "tests"], - ], - ), - "lint_types": create_command( - "Type-checks the 
code", - [ - [ - "mypy", - "--package", - "psqlextra", - "--pretty", - "--show-error-codes", - ], - ], - ), - "format": create_command( - "Formats the code", [["black", "setup.py", "psqlextra", "tests"]] - ), - "format_verify": create_command( - "Checks if the code is auto-formatted", - [["black", "--check", "setup.py", "psqlextra", "tests"]], - ), - "format_docstrings": create_command( - "Auto-formats doc strings", [["docformatter", "-r", "-i", "."]] - ), - "format_docstrings_verify": create_command( - "Verifies that doc strings are properly formatted", - [["docformatter", "-r", "-c", "."]], - ), - "sort_imports": create_command( - "Automatically sorts imports", - [ - ["isort", "setup.py"], - ["isort", "psqlextra"], - ["isort", "tests"], - ], - ), - "sort_imports_verify": create_command( - "Verifies all imports are properly sorted.", - [ - ["isort", "-c", "setup.py"], - ["isort", "-c", "psqlextra"], - ["isort", "-c", "tests"], - ], - ), - "fix": create_command( - "Automatically format code and fix linting errors", - [ - ["python", "setup.py", "format"], - ["python", "setup.py", "format_docstrings"], - ["python", "setup.py", "sort_imports"], - ["python", "setup.py", "lint_fix"], - ["python", "setup.py", "lint"], - ["python", "setup.py", "lint_types"], - ], - ), - "verify": create_command( - "Verifies whether the code is auto-formatted and has no linting errors", - [ - ["python", "setup.py", "format_verify"], - ["python", "setup.py", "format_docstrings_verify"], - ["python", "setup.py", "sort_imports_verify"], - ["python", "setup.py", "lint"], - ["python", "setup.py", "lint_types"], - ], - ), - "test": create_command( - "Runs all the tests", - [ - [ - "pytest", - "--cov=psqlextra", - "--cov-report=term", - "--cov-report=xml:reports/xml", - "--cov-report=html:reports/html", - "--junitxml=reports/junit/tests.xml", - "--reuse-db", - ] - ], - ), - }, ) diff --git 
a/tests/__snapshots__/test_management_command_partition/test_management_command_partition_auto_confirm[y].json b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_auto_confirm[y].json new file mode 100644 index 00000000..664538ac --- /dev/null +++ b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_auto_confirm[y].json @@ -0,0 +1 @@ +"test: \n - tobedeleted\n + tobecreated\n\n1 partitions will be deleted\n1 partitions will be created\nOperations applied.\n" diff --git a/tests/__snapshots__/test_management_command_partition/test_management_command_partition_auto_confirm[yes].json b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_auto_confirm[yes].json new file mode 100644 index 00000000..664538ac --- /dev/null +++ b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_auto_confirm[yes].json @@ -0,0 +1 @@ +"test: \n - tobedeleted\n + tobecreated\n\n1 partitions will be deleted\n1 partitions will be created\nOperations applied.\n" diff --git a/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[capital_n].json b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[capital_n].json new file mode 100644 index 00000000..f1c2aa68 --- /dev/null +++ b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[capital_n].json @@ -0,0 +1 @@ +"test: \n - tobedeleted\n + tobecreated\n\n1 partitions will be deleted\n1 partitions will be created\nDo you want to proceed? 
(y/N) Operation aborted.\n" diff --git a/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[capital_no].json b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[capital_no].json new file mode 100644 index 00000000..f1c2aa68 --- /dev/null +++ b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[capital_no].json @@ -0,0 +1 @@ +"test: \n - tobedeleted\n + tobecreated\n\n1 partitions will be deleted\n1 partitions will be created\nDo you want to proceed? (y/N) Operation aborted.\n" diff --git a/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[n].json b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[n].json new file mode 100644 index 00000000..f1c2aa68 --- /dev/null +++ b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[n].json @@ -0,0 +1 @@ +"test: \n - tobedeleted\n + tobecreated\n\n1 partitions will be deleted\n1 partitions will be created\nDo you want to proceed? (y/N) Operation aborted.\n" diff --git a/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[no].json b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[no].json new file mode 100644 index 00000000..f1c2aa68 --- /dev/null +++ b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[no].json @@ -0,0 +1 @@ +"test: \n - tobedeleted\n + tobecreated\n\n1 partitions will be deleted\n1 partitions will be created\nDo you want to proceed? 
(y/N) Operation aborted.\n" diff --git a/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[title_no].json b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[title_no].json new file mode 100644 index 00000000..f1c2aa68 --- /dev/null +++ b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_no[title_no].json @@ -0,0 +1 @@ +"test: \n - tobedeleted\n + tobecreated\n\n1 partitions will be deleted\n1 partitions will be created\nDo you want to proceed? (y/N) Operation aborted.\n" diff --git a/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_yes[capital_y].json b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_yes[capital_y].json new file mode 100644 index 00000000..530f6bdb --- /dev/null +++ b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_yes[capital_y].json @@ -0,0 +1 @@ +"test: \n - tobedeleted\n + tobecreated\n\n1 partitions will be deleted\n1 partitions will be created\nDo you want to proceed? (y/N) Operations applied.\n" diff --git a/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_yes[capital_yes].json b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_yes[capital_yes].json new file mode 100644 index 00000000..530f6bdb --- /dev/null +++ b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_yes[capital_yes].json @@ -0,0 +1 @@ +"test: \n - tobedeleted\n + tobecreated\n\n1 partitions will be deleted\n1 partitions will be created\nDo you want to proceed? 
(y/N) Operations applied.\n" diff --git a/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_yes[y].json b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_yes[y].json new file mode 100644 index 00000000..530f6bdb --- /dev/null +++ b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_yes[y].json @@ -0,0 +1 @@ +"test: \n - tobedeleted\n + tobecreated\n\n1 partitions will be deleted\n1 partitions will be created\nDo you want to proceed? (y/N) Operations applied.\n" diff --git a/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_yes[yes].json b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_yes[yes].json new file mode 100644 index 00000000..530f6bdb --- /dev/null +++ b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_confirm_yes[yes].json @@ -0,0 +1 @@ +"test: \n - tobedeleted\n + tobecreated\n\n1 partitions will be deleted\n1 partitions will be created\nDo you want to proceed? 
(y/N) Operations applied.\n" diff --git a/tests/__snapshots__/test_management_command_partition/test_management_command_partition_dry_run[d].json b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_dry_run[d].json new file mode 100644 index 00000000..6b67fa96 --- /dev/null +++ b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_dry_run[d].json @@ -0,0 +1 @@ +"test: \n - tobedeleted\n + tobecreated\n\n1 partitions will be deleted\n1 partitions will be created\n" diff --git a/tests/__snapshots__/test_management_command_partition/test_management_command_partition_dry_run[dry].json b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_dry_run[dry].json new file mode 100644 index 00000000..6b67fa96 --- /dev/null +++ b/tests/__snapshots__/test_management_command_partition/test_management_command_partition_dry_run[dry].json @@ -0,0 +1 @@ +"test: \n - tobedeleted\n + tobecreated\n\n1 partitions will be deleted\n1 partitions will be created\n" diff --git a/tests/snapshots/__init__.py b/tests/snapshots/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/snapshots/snap_test_management_command_partition.py b/tests/snapshots/snap_test_management_command_partition.py deleted file mode 100644 index 1cac2227..00000000 --- a/tests/snapshots/snap_test_management_command_partition.py +++ /dev/null @@ -1,34 +0,0 @@ -# -*- coding: utf-8 -*- -# snapshottest: v1 - https://goo.gl/zC4yUc -from __future__ import unicode_literals - -from snapshottest import GenericRepr, Snapshot - - -snapshots = Snapshot() - -snapshots['test_management_command_partition_auto_confirm[--yes] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nOperations applied.\\n', err='')") - -snapshots['test_management_command_partition_auto_confirm[-y] 1'] = 
GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nOperations applied.\\n', err='')") - -snapshots['test_management_command_partition_confirm_no[NO] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operation aborted.\\n', err='')") - -snapshots['test_management_command_partition_confirm_no[N] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operation aborted.\\n', err='')") - -snapshots['test_management_command_partition_confirm_no[No] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operation aborted.\\n', err='')") - -snapshots['test_management_command_partition_confirm_no[n] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operation aborted.\\n', err='')") - -snapshots['test_management_command_partition_confirm_no[no] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operation aborted.\\n', err='')") - -snapshots['test_management_command_partition_confirm_yes[YES] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? 
(y/N) Operations applied.\\n', err='')") - -snapshots['test_management_command_partition_confirm_yes[Y] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operations applied.\\n', err='')") - -snapshots['test_management_command_partition_confirm_yes[y] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operations applied.\\n', err='')") - -snapshots['test_management_command_partition_confirm_yes[yes] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operations applied.\\n', err='')") - -snapshots['test_management_command_partition_dry_run[--dry] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\n', err='')") - -snapshots['test_management_command_partition_dry_run[-d] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\n', err='')") diff --git a/tests/test_introspect.py b/tests/test_introspect.py index 5e5a9ffc..bf50d3f1 100644 --- a/tests/test_introspect.py +++ b/tests/test_introspect.py @@ -1,4 +1,5 @@ import django +import freezegun import pytest from django.contrib.postgres.fields import ArrayField @@ -51,13 +52,14 @@ def mocked_model_foreign_keys( @pytest.fixture -def mocked_model_varying_fields_instance(freezer, mocked_model_varying_fields): - return mocked_model_varying_fields.objects.create( - title="hello world", - updated_at=timezone.now(), - content={"a": 1}, - items=["a", "b"], - ) +def mocked_model_varying_fields_instance(mocked_model_varying_fields): + with freezegun.freeze_time("2020-1-1 12:00:00.0"): + 
return mocked_model_varying_fields.objects.create( + title="hello world", + updated_at=timezone.now(), + content={"a": 1}, + items=["a", "b"], + ) @pytest.fixture @@ -78,17 +80,22 @@ def models_from_cursor_wrapper_single(): reason=django_31_skip_reason, ) @pytest.mark.parametrize( - "models_from_cursor_wrapper", + "models_from_cursor_wrapper_name", [ - pytest.lazy_fixture("models_from_cursor_wrapper_multiple"), - pytest.lazy_fixture("models_from_cursor_wrapper_single"), + "models_from_cursor_wrapper_multiple", + "models_from_cursor_wrapper_single", ], ) def test_models_from_cursor_applies_converters( + request, mocked_model_varying_fields, mocked_model_varying_fields_instance, - models_from_cursor_wrapper, + models_from_cursor_wrapper_name, ): + models_from_cursor_wrapper = request.getfixturevalue( + models_from_cursor_wrapper_name + ) + with connection.cursor() as cursor: cursor.execute( *mocked_model_varying_fields.objects.all().query.sql_with_params() @@ -114,17 +121,22 @@ def test_models_from_cursor_applies_converters( reason=django_31_skip_reason, ) @pytest.mark.parametrize( - "models_from_cursor_wrapper", + "models_from_cursor_wrapper_name", [ - pytest.lazy_fixture("models_from_cursor_wrapper_multiple"), - pytest.lazy_fixture("models_from_cursor_wrapper_single"), + "models_from_cursor_wrapper_multiple", + "models_from_cursor_wrapper_single", ], ) def test_models_from_cursor_handles_field_order( + request, mocked_model_varying_fields, mocked_model_varying_fields_instance, - models_from_cursor_wrapper, + models_from_cursor_wrapper_name, ): + models_from_cursor_wrapper = request.getfixturevalue( + models_from_cursor_wrapper_name + ) + with connection.cursor() as cursor: cursor.execute( f'SELECT content, items, id, title, updated_at FROM "{mocked_model_varying_fields._meta.db_table}"', @@ -151,17 +163,22 @@ def test_models_from_cursor_handles_field_order( reason=django_31_skip_reason, ) @pytest.mark.parametrize( - "models_from_cursor_wrapper", + 
"models_from_cursor_wrapper_name", [ - pytest.lazy_fixture("models_from_cursor_wrapper_multiple"), - pytest.lazy_fixture("models_from_cursor_wrapper_single"), + "models_from_cursor_wrapper_multiple", + "models_from_cursor_wrapper_single", ], ) def test_models_from_cursor_handles_partial_fields( + request, mocked_model_varying_fields, mocked_model_varying_fields_instance, - models_from_cursor_wrapper, + models_from_cursor_wrapper_name, ): + models_from_cursor_wrapper = request.getfixturevalue( + models_from_cursor_wrapper_name + ) + with connection.cursor() as cursor: cursor.execute( f'SELECT id FROM "{mocked_model_varying_fields._meta.db_table}"', @@ -183,15 +200,19 @@ def test_models_from_cursor_handles_partial_fields( reason=django_31_skip_reason, ) @pytest.mark.parametrize( - "models_from_cursor_wrapper", + "models_from_cursor_wrapper_name", [ - pytest.lazy_fixture("models_from_cursor_wrapper_multiple"), - pytest.lazy_fixture("models_from_cursor_wrapper_single"), + "models_from_cursor_wrapper_multiple", + "models_from_cursor_wrapper_single", ], ) def test_models_from_cursor_handles_null( - mocked_model_varying_fields, models_from_cursor_wrapper + request, mocked_model_varying_fields, models_from_cursor_wrapper_name ): + models_from_cursor_wrapper = request.getfixturevalue( + models_from_cursor_wrapper_name + ) + instance = mocked_model_varying_fields.objects.create() with connection.cursor() as cursor: @@ -214,17 +235,22 @@ def test_models_from_cursor_handles_null( reason=django_31_skip_reason, ) @pytest.mark.parametrize( - "models_from_cursor_wrapper", + "models_from_cursor_wrapper_name", [ - pytest.lazy_fixture("models_from_cursor_wrapper_multiple"), - pytest.lazy_fixture("models_from_cursor_wrapper_single"), + "models_from_cursor_wrapper_multiple", + "models_from_cursor_wrapper_single", ], ) def test_models_from_cursor_foreign_key( + request, mocked_model_single_field, mocked_model_foreign_keys, - models_from_cursor_wrapper, + models_from_cursor_wrapper_name, 
): + models_from_cursor_wrapper = request.getfixturevalue( + models_from_cursor_wrapper_name + ) + instance = mocked_model_foreign_keys.objects.create( varying_fields=None, single_field=mocked_model_single_field.objects.create(name="test"), @@ -254,18 +280,23 @@ def test_models_from_cursor_foreign_key( reason=django_31_skip_reason, ) @pytest.mark.parametrize( - "models_from_cursor_wrapper", + "models_from_cursor_wrapper_name", [ - pytest.lazy_fixture("models_from_cursor_wrapper_multiple"), - pytest.lazy_fixture("models_from_cursor_wrapper_single"), + "models_from_cursor_wrapper_multiple", + "models_from_cursor_wrapper_single", ], ) def test_models_from_cursor_related_fields( + request, mocked_model_varying_fields, mocked_model_single_field, mocked_model_foreign_keys, - models_from_cursor_wrapper, + models_from_cursor_wrapper_name, ): + models_from_cursor_wrapper = request.getfixturevalue( + models_from_cursor_wrapper_name + ) + instance = mocked_model_foreign_keys.objects.create( varying_fields=mocked_model_varying_fields.objects.create( title="test", updated_at=timezone.now() @@ -321,21 +352,26 @@ def test_models_from_cursor_related_fields( reason=django_31_skip_reason, ) @pytest.mark.parametrize( - "models_from_cursor_wrapper", + "models_from_cursor_wrapper_name", [ - pytest.lazy_fixture("models_from_cursor_wrapper_multiple"), - pytest.lazy_fixture("models_from_cursor_wrapper_single"), + "models_from_cursor_wrapper_multiple", + "models_from_cursor_wrapper_single", ], ) @pytest.mark.parametrize( "selected", [True, False], ids=["selected", "not_selected"] ) def test_models_from_cursor_related_fields_optional( + request, mocked_model_varying_fields, mocked_model_foreign_keys, - models_from_cursor_wrapper, + models_from_cursor_wrapper_name, selected, ): + models_from_cursor_wrapper = request.getfixturevalue( + models_from_cursor_wrapper_name + ) + instance = mocked_model_foreign_keys.objects.create( varying_fields=mocked_model_varying_fields.objects.create( 
title="test", updated_at=timezone.now() diff --git a/tests/test_make_migrations.py b/tests/test_make_migrations.py index 6f63a0d6..a843b6eb 100644 --- a/tests/test_make_migrations.py +++ b/tests/test_make_migrations.py @@ -208,7 +208,9 @@ def test_make_migration_field_operations_view_models( def test_autodetect_fk_issue(fake_app, method): """Test whether Django can perform ForeignKey optimization. - Fixes https://github.com/SectorLabs/django-postgres-extra/issues/123 for Django >= 2.2 + Fixes + https://github.com/SectorLabs/django-postgres-extra/issues/123 + for Django >= 2.2 """ meta_options = {"app_label": fake_app.name} partitioning_options = {"method": method, "key": "artist_id"} diff --git a/tests/test_management_command_partition.py b/tests/test_management_command_partition.py index 6e305fb9..c621cf15 100644 --- a/tests/test_management_command_partition.py +++ b/tests/test_management_command_partition.py @@ -6,6 +6,7 @@ from django.db import models from django.test import override_settings +from syrupy.extensions.json import JSONSnapshotExtension from psqlextra.backend.introspection import ( PostgresIntrospectedPartitionTable, @@ -20,6 +21,11 @@ from .fake_model import define_fake_partitioned_model +@pytest.fixture +def snapshot(snapshot): + return snapshot.use_extension(JSONSnapshotExtension) + + @pytest.fixture def fake_strategy(): strategy = create_autospec(PostgresPartitioningStrategy) @@ -88,12 +94,12 @@ def _run(*args): command.add_arguments(parser) command.handle(**vars(parser.parse_args(args))) - return capsys.readouterr() + return capsys.readouterr().out return _run -@pytest.mark.parametrize("args", ["-d", "--dry"]) +@pytest.mark.parametrize("args", ["-d", "--dry"], ids=["d", "dry"]) def test_management_command_partition_dry_run( args, snapshot, run, fake_model, fake_partitioning_manager ): @@ -101,7 +107,7 @@ def test_management_command_partition_dry_run( create/delete partitions.""" config = 
fake_partitioning_manager.find_config_for_model(fake_model) - snapshot.assert_match(run(args)) + assert run(args) == snapshot() config.strategy.createable_partition.create.assert_not_called() config.strategy.createable_partition.delete.assert_not_called() @@ -109,7 +115,7 @@ def test_management_command_partition_dry_run( config.strategy.deleteable_partition.delete.assert_not_called() -@pytest.mark.parametrize("args", ["-y", "--yes"]) +@pytest.mark.parametrize("args", ["-y", "--yes"], ids=["y", "yes"]) def test_management_command_partition_auto_confirm( args, snapshot, run, fake_model, fake_partitioning_manager ): @@ -117,7 +123,7 @@ def test_management_command_partition_auto_confirm( creating/deleting partitions.""" config = fake_partitioning_manager.find_config_for_model(fake_model) - snapshot.assert_match(run(args)) + assert run(args) == snapshot config.strategy.createable_partition.create.assert_called_once() config.strategy.createable_partition.delete.assert_not_called() @@ -125,7 +131,11 @@ def test_management_command_partition_auto_confirm( config.strategy.deleteable_partition.delete.assert_called_once() -@pytest.mark.parametrize("answer", ["y", "Y", "yes", "YES"]) +@pytest.mark.parametrize( + "answer", + ["y", "Y", "yes", "YES"], + ids=["y", "capital_y", "yes", "capital_yes"], +) def test_management_command_partition_confirm_yes( answer, monkeypatch, snapshot, run, fake_model, fake_partitioning_manager ): @@ -135,7 +145,7 @@ def test_management_command_partition_confirm_yes( config = fake_partitioning_manager.find_config_for_model(fake_model) monkeypatch.setattr("builtins.input", lambda _: answer) - snapshot.assert_match(run()) + assert run() == snapshot config.strategy.createable_partition.create.assert_called_once() config.strategy.createable_partition.delete.assert_not_called() @@ -143,7 +153,11 @@ def test_management_command_partition_confirm_yes( config.strategy.deleteable_partition.delete.assert_called_once() -@pytest.mark.parametrize("answer", ["n", 
"N", "no", "No", "NO"]) +@pytest.mark.parametrize( + "answer", + ["n", "N", "no", "No", "NO"], + ids=["n", "capital_n", "no", "title_no", "capital_no"], +) def test_management_command_partition_confirm_no( answer, monkeypatch, snapshot, run, fake_model, fake_partitioning_manager ): @@ -153,7 +167,7 @@ def test_management_command_partition_confirm_no( config = fake_partitioning_manager.find_config_for_model(fake_model) monkeypatch.setattr("builtins.input", lambda _: answer) - snapshot.assert_match(run()) + assert run() == snapshot config.strategy.createable_partition.create.assert_not_called() config.strategy.createable_partition.delete.assert_not_called() diff --git a/tests/test_manager.py b/tests/test_manager.py index 0fbe2a52..f68dd20a 100644 --- a/tests/test_manager.py +++ b/tests/test_manager.py @@ -34,10 +34,8 @@ def test_manager_backend_set(databases): def test_manager_backend_not_set(): - """Tests whether creating a new instance of - :see:PostgresManager fails if no database - has `psqlextra.backend` configured - as its ENGINE.""" + """Tests whether creating a new instance of :see:PostgresManager fails if + no database has `psqlextra.backend` configured as its ENGINE.""" with override_settings( DATABASES={"default": {"ENGINE": "django.db.backends.postgresql"}} diff --git a/tests/test_on_conflict.py b/tests/test_on_conflict.py index 7f3f5ab8..b7cf0024 100644 --- a/tests/test_on_conflict.py +++ b/tests/test_on_conflict.py @@ -179,11 +179,11 @@ def test_on_conflict_outdated_model(conflict_action): """Tests whether insert properly handles fields that are in the database but not on the model. - This happens if somebody manually modified the database - to add a column that is not present in the model. + This happens if somebody manually modified the database to add a + column that is not present in the model. - This should be handled properly by ignoring the column - returned by the database. 
+ This should be handled properly by ignoring the column returned by + the database. """ model = get_fake_model( diff --git a/tox.ini b/tox.ini index 963f9d31..697d1c44 100644 --- a/tox.ini +++ b/tox.ini @@ -23,8 +23,9 @@ deps = psycopg29: psycopg2[binary]~=2.9 psycopg31: psycopg[binary]~=3.1 psycopg32: psycopg[binary]~=3.2 + .[dev] .[test] setenv = DJANGO_SETTINGS_MODULE=settings passenv = DATABASE_URL -commands = python setup.py test +commands = poe test From 5b8ef9ee52fd12ef63a18fd8c1550f9611d57257 Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Mon, 9 Jun 2025 08:38:17 +0200 Subject: [PATCH 31/55] Warn in the documentation that some features are native in newer Django versions --- README.md | 35 ++++++++++++++++++++--------------- docs/source/hstore.rst | 8 ++++++++ docs/source/index.rst | 37 ++++++++++++++++++++++--------------- docs/source/indexes.rst | 15 +++++++++++++-- 4 files changed, 63 insertions(+), 32 deletions(-) diff --git a/README.md b/README.md index a26731de..74803270 100644 --- a/README.md +++ b/README.md @@ -31,30 +31,35 @@ With seamless we mean that any features we add will work truly seamlessly. You s [See the full list](http://django-postgres-extra.readthedocs.io/#features) -* **Native upserts** +* **Conflict handling (atomic upsert)** - * Single query - * Concurrency safe - * With bulk support (single query) + Add support for PostgreSQL's `ON CONFLICT` syntax for inserts. Supports `DO UPDATE` and `DO NOTHING`. Single statement, atomic and concurrency safe upserts. Supports conditional updates as well. -* **Extended support for HStoreField** +* **Table partitioning** - * Unique constraints - * Null constraints - * Select individual keys using ``.values()`` or ``.values_list()`` + Add support for PostgreSQL 11.x declarative table partitioning. Fully integrated into Django migrations. Supports all types of partitioning. Includes a command to automatically create time-based partitions. 
-* **PostgreSQL 11.x declarative table partitioning** +* **Locking models & tables** - * Supports both range and list partitioning + Support for explicit table-level locks. -* **Faster deletes** +* **Creating/dropping schemas** - * Truncate tables (with cascade) + Support for managing PostgreSQL schemas. -* **Indexes** +* **Truncating tables** - * Conditional unique index. - * Case sensitive unique index. + Support for ``TRUNCATE TABLE`` statements (including cascading). + +For Django 3.1 and older: + +* **Conditional unique index** +* **Case insensitive index** + +For Django 2.2 and older: + +* **Unique index** +* **HStore unique and required constraints on specific HStore keys** ## Working with the code ### Prerequisites diff --git a/docs/source/hstore.rst b/docs/source/hstore.rst index 6dc22304..401b40ee 100644 --- a/docs/source/hstore.rst +++ b/docs/source/hstore.rst @@ -15,6 +15,10 @@ Constraints Unique ****** +.. warning:: + + In Django 2.2 or newer, you might want to use :class:`~django.db.models.UniqueConstraint` instead. + The ``uniqueness`` constraint can be added on one or more `hstore`_ keys, similar to how a ``UNIQUE`` constraint can be added to a column. Setting this option causes unique indexes to be created on the specified keys. You can specify a ``list`` of strings to specify the keys that must be marked as unique: @@ -44,6 +48,10 @@ In the example above, ``key1`` and ``key2`` must unique **together**, and ``key3 Required ******** +.. warning:: + + In Django 2.2 or newer, you might want to use :class:`~django.db.models.CheckConstraint` instead. + The ``required`` option can be added to ensure that the specified `hstore`_ keys are set for every row. This is similar to a ``NOT NULL`` constraint on a column. You can specify a list of `hstore`_ keys that are required: .. 
code-block:: python diff --git a/docs/source/index.rst b/docs/source/index.rst index 1959016e..2ac00edb 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -13,36 +13,43 @@ Explore the documentation to learn about all features: * :ref:`Conflict handling ` - Adds support for PostgreSQL's ``ON CONFLICT`` syntax for inserts. Supports for ``DO UPDATE`` and ``DO NOTHING``. In other words; single statement, atomic, concurrency safe upserts. + Adds support for PostgreSQL's ``ON CONFLICT`` syntax for inserts. Supports for ``DO UPDATE`` and ``DO NOTHING``. Single statement, atomic, concurrency safe upserts. Supports conditional updates as well. -* :ref:`HStore ` - - Built on top Django's built-in support for `hstore`_ fields. Adds support for indices on keys and unique/required constraints. All of these features integrate well with Django's migrations sytem. +* :ref:`Table partitioning ` -* :ref:`Partial unique index ` + Add support for PostgreSQL 11.x declarative table partitioning. Fully integrated into Django migrations. Supports all types of partitioning. Includes a command to automatically create time-based partitions. - Partial (unique) index that only applies when a certain condition is true. - -* :ref:`Case insensitive index ` +* :ref:`Locking models & tables ` - Case insensitive index, allows searching a column and ignoring the casing. + Support for explicit table-level locks. -* :ref:`Table partitioning ` +* :ref:`Creating/dropping schemas ` - Adds support for PostgreSQL 11.x declarative table partitioning. + Support for managing Postgres schemas. * :ref:`Truncating tables ` Support for ``TRUNCATE TABLE`` statements (including cascading). -* :ref:`Locking models & tables ` +For Django 3.1 and older: - Support for explicit table-level locks. +* :ref:`Partial unique index ` + Partial (unique) index that only applies when a certain condition is true. 
-* :ref:`Creating/dropping schemas ` +* :ref:`Case insensitive index ` - Support for managing Postgres schemas. + Case insensitive index, allows searching a column and ignoring the casing. + +For Django 2.2 and older: + +* :ref:`Unique index ` + + Unique indices that can span more than one field. + +* :ref:`HStore key unique & required constraint ` + + Add unique and required constraints in specific hstore keys. .. toctree:: diff --git a/docs/source/indexes.rst b/docs/source/indexes.rst index 622d1ce6..236b6976 100644 --- a/docs/source/indexes.rst +++ b/docs/source/indexes.rst @@ -7,6 +7,11 @@ Indexes Unique Index ----------------------------- + +.. warning:: + + In Django 2.2 or newer, you might want to use :class:`~django.db.models.UniqueConstraint` instead. + The :class:`~psqlextra.indexes.UniqueIndex` lets you create a unique index. Normally Django only allows you to create unique indexes by specifying ``unique=True`` on the model field. Although it can be used on any Django model, it is most useful on views and materialized views where ``unique=True`` does not work. @@ -32,13 +37,14 @@ Although it can be used on any Django model, it is most useful on views and mate Conditional Unique Index ------------------------ -The :class:`~psqlextra.indexes.ConditionalUniqueIndex` lets you create partial unique indexes in case you ever need :attr:`~django:django.db.models.Options.unique_together` constraints -on nullable columns. .. warning:: In Django 3.1 or newer, you might want to use :attr:`~django.db.models.indexes.condition` instead. +The :class:`~psqlextra.indexes.ConditionalUniqueIndex` lets you create partial unique indexes in case you ever need :attr:`~django:django.db.models.Options.unique_together` constraints +on nullable columns. + Before: .. code-block:: python @@ -83,6 +89,11 @@ After: Case Insensitive Unique Index ----------------------------- + +.. 
warning:: + + In Django 3.2 or newer, you might want to use an index on the :class:`~django.db.models.functions.Lower` expression instead. + The :class:`~psqlextra.indexes.CaseInsensitiveUniqueIndex` lets you create an index that ignores the casing for the specified field(s). This makes the field(s) behave more like a text field in MySQL. From 939ca84c9b8f40ea76495b7b1d94b075873f590e Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Mon, 9 Jun 2025 08:41:02 +0200 Subject: [PATCH 32/55] Don't mark partitioning as a "new" feature in docs --- docs/source/table_partitioning.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/table_partitioning.rst b/docs/source/table_partitioning.rst index 1bb5ba6f..1150dc30 100644 --- a/docs/source/table_partitioning.rst +++ b/docs/source/table_partitioning.rst @@ -2,7 +2,7 @@ .. warning:: - Table partitioning is a relatively new and advanded PostgreSQL feature. It has plenty of ways to shoot yourself in the foot with. + Table partitioning is an advanced PostgreSQL feature. It has plenty of ways to shoot yourself in the foot with. We HIGHLY RECOMMEND you only use this feature if you're already deeply familiar with table partitioning and aware of its advantages and disadvantages. 
From 53fe410fbf926f45d2125710e56ca8a43e681d4b Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Mon, 9 Jun 2025 09:22:56 +0200 Subject: [PATCH 33/55] Add support for creating materialized view without data Fixes #216 --- .../create_materialized_view_model.py | 10 +++++- psqlextra/backend/schema.py | 18 ++++++++-- tests/test_schema_editor_view.py | 33 ++++++++++++++++++- 3 files changed, 56 insertions(+), 5 deletions(-) diff --git a/psqlextra/backend/migrations/operations/create_materialized_view_model.py b/psqlextra/backend/migrations/operations/create_materialized_view_model.py index ce1028d6..40ca3339 100644 --- a/psqlextra/backend/migrations/operations/create_materialized_view_model.py +++ b/psqlextra/backend/migrations/operations/create_materialized_view_model.py @@ -23,10 +23,13 @@ def __init__( view_options={}, bases=None, managers=None, + *, + with_data: bool = True, ): super().__init__(name, fields, options, bases, managers) self.view_options = view_options or {} + self.with_data = with_data def state_forwards(self, app_label, state): state.add_model( @@ -46,7 +49,9 @@ def database_forwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, model): - schema_editor.create_materialized_view_model(model) + schema_editor.create_materialized_view_model( + model, with_data=self.with_data + ) def database_backwards( self, app_label, schema_editor, from_state, to_state @@ -63,6 +68,9 @@ def deconstruct(self): if self.view_options: kwargs["view_options"] = self.view_options + if self.with_data is not False: + kwargs["with_data"] = self.with_data + return name, args, kwargs def describe(self): diff --git a/psqlextra/backend/schema.py b/psqlextra/backend/schema.py index 22acc075..74b5c49b 100644 --- a/psqlextra/backend/schema.py +++ b/psqlextra/backend/schema.py @@ -61,9 +61,12 @@ class PostgresSchemaEditor(SchemaEditor): sql_create_view = "CREATE VIEW 
%s AS (%s)" sql_replace_view = "CREATE OR REPLACE VIEW %s AS (%s)" sql_drop_view = "DROP VIEW IF EXISTS %s" - sql_create_materialized_view = ( + sql_create_materialized_view_with_data = ( "CREATE MATERIALIZED VIEW %s AS (%s) WITH DATA" ) + sql_create_materialized_view_without_data = ( + "CREATE MATERIALIZED VIEW %s AS (%s) WITH NO DATA" + ) sql_drop_materialized_view = "DROP MATERIALIZED VIEW %s" sql_refresh_materialized_view = "REFRESH MATERIALIZED VIEW %s" sql_refresh_materialized_view_concurrently = ( @@ -548,10 +551,19 @@ def delete_view_model(self, model: Type[Model]) -> None: sql = self.sql_drop_view % self.quote_name(model._meta.db_table) self.execute(sql) - def create_materialized_view_model(self, model: Type[Model]) -> None: + def create_materialized_view_model( + self, model: Type[Model], *, with_data: bool = True + ) -> None: """Creates a new materialized view model.""" - self._create_view_model(self.sql_create_materialized_view, model) + if with_data: + self._create_view_model( + self.sql_create_materialized_view_with_data, model + ) + else: + self._create_view_model( + self.sql_create_materialized_view_without_data, model + ) def replace_materialized_view_model(self, model: Type[Model]) -> None: """Replaces a materialized view with a newer version. diff --git a/tests/test_schema_editor_view.py b/tests/test_schema_editor_view.py index f7bf0308..ff20ef6a 100644 --- a/tests/test_schema_editor_view.py +++ b/tests/test_schema_editor_view.py @@ -1,6 +1,9 @@ -from django.db import connection, models +import pytest + +from django.db import OperationalError, connection, models from psqlextra.backend.schema import PostgresSchemaEditor +from psqlextra.error import extract_postgres_error_code from . 
import db_introspection from .fake_model import ( @@ -103,6 +106,34 @@ def test_schema_editor_create_delete_materialized_view(): assert model._meta.db_table not in db_introspection.table_names(True) +@pytest.mark.django_db(transaction=True) +def test_schema_editor_create_materialized_view_without_data(): + underlying_model = get_fake_model({"name": models.TextField()}) + + model = define_fake_materialized_view_model( + {"name": models.TextField()}, + {"query": underlying_model.objects.filter(name="test1")}, + ) + + underlying_model.objects.create(name="test1") + underlying_model.objects.create(name="test2") + + schema_editor = PostgresSchemaEditor(connection) + schema_editor.create_materialized_view_model(model, with_data=False) + + with pytest.raises(OperationalError) as exc_info: + list(model.objects.all()) + + pg_error = extract_postgres_error_code(exc_info.value) + assert pg_error == "55000" # OBJECT_NOT_IN_PREREQUISITE_STATE + + model.refresh() + + objs = list(model.objects.all()) + assert len(objs) == 1 + assert objs[0].name == "test1" + + def test_schema_editor_replace_materialized_view(): """Tests whether creating a materialized view and then replacing it with another one (thus changing the backing query) works as expected.""" From 6e9d4cddfd2db2cc028d85ee1d4c519387e5862e Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Mon, 9 Jun 2025 09:25:32 +0200 Subject: [PATCH 34/55] Fix some typos on feature documentation for table partitioning --- README.md | 5 +++-- docs/source/index.rst | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 74803270..56854677 100644 --- a/README.md +++ b/README.md @@ -33,11 +33,12 @@ With seamless we mean that any features we add will work truly seamlessly. You s * **Conflict handling (atomic upsert)** - Add support for PostgreSQL's `ON CONFLICT` syntax for inserts. Supports `DO UPDATE` and `DO NOTHING`. Single statement, atomic and concurrency safe upserts. 
Supports conditional updates as well. + Adds support for PostgreSQL's `ON CONFLICT` syntax for inserts. Supports `DO UPDATE` and `DO NOTHING`. Single statement, atomic and concurrency safe upserts. Supports conditional updates as well. * **Table partitioning** - Add support for PostgreSQL 11.x declarative table partitioning. Fully integrated into Django migrations. Supports all types of partitioning. Includes a command to automatically create time-based partitions. + Adds support for PostgreSQL 11.x declarative table partitioning. Fully integrated into Django migrations. Supports all types of partitioning. Includes a command to automatically create time-based partitions. + * **Locking models & tables** diff --git a/docs/source/index.rst b/docs/source/index.rst index 2ac00edb..d4248a39 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -17,7 +17,7 @@ Explore the documentation to learn about all features: * :ref:`Table partitioning ` - Add support for PostgreSQL 11.x declarative table partitioning. Fully integrated into Django migrations. Supports all types of partitioning. Includes a command to automatically create time-based partitions. + Adds support for PostgreSQL 11.x declarative table partitioning. Fully integrated into Django migrations. Supports all types of partitioning. Includes a command to automatically create time-based partitions. 
* :ref:`Locking models & tables ` From 68f22a5821076cd0e7dd1c0e0d0f8ea1832b9ba5 Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Mon, 9 Jun 2025 09:29:45 +0200 Subject: [PATCH 35/55] Add short explanation on primary/foreign keys in partitioned tables in the docs --- docs/source/table_partitioning.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/source/table_partitioning.rst b/docs/source/table_partitioning.rst index 1150dc30..0a766f9c 100644 --- a/docs/source/table_partitioning.rst +++ b/docs/source/table_partitioning.rst @@ -34,6 +34,10 @@ Creating partitioned tables Partitioned tables are declared like regular Django models with a special base class and two extra options to set the partitioning method and key. Once declared, they behave like regular Django models. +.. warning:: + + The partitioning key becomes the (composite) primary key of the table automatically. Creating foreign keys to partitioned tables can only be done in raw SQL. You can use the `django-composite-foreignkey `_ package to represent the foreign key in Django. 
+ Declaring the model ******************* From eebe518d5fe9237c6f7a0184595249df3d72cc21 Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Mon, 9 Jun 2025 09:37:55 +0200 Subject: [PATCH 36/55] Fix bug with `with_data` flag in `PostgresCreateMaterializedView` --- .../migrations/operations/create_materialized_view_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/psqlextra/backend/migrations/operations/create_materialized_view_model.py b/psqlextra/backend/migrations/operations/create_materialized_view_model.py index 40ca3339..9ca2320f 100644 --- a/psqlextra/backend/migrations/operations/create_materialized_view_model.py +++ b/psqlextra/backend/migrations/operations/create_materialized_view_model.py @@ -68,7 +68,7 @@ def deconstruct(self): if self.view_options: kwargs["view_options"] = self.view_options - if self.with_data is not False: + if self.with_data is False: kwargs["with_data"] = self.with_data return name, args, kwargs From 5dec4a2948dd903827e909d21291c1b41ce7f66e Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Mon, 9 Jun 2025 20:45:48 +0200 Subject: [PATCH 37/55] Django 5.2 CompositeForeignKey support in partitioned tables Also fixes partitioned tables in custom tablespaces. Fixes #256 --- docs/source/snippets/postgres_doc_links.rst | 1 + docs/source/table_partitioning.rst | 84 +++++++++++- psqlextra/backend/schema.py | 119 ++++++++++++++-- pyproject.toml | 3 +- tests/conftest.py | 41 ++++++ tests/test_schema_editor_partitioning.py | 142 +++++++++++++++++++- 6 files changed, 369 insertions(+), 21 deletions(-) diff --git a/docs/source/snippets/postgres_doc_links.rst b/docs/source/snippets/postgres_doc_links.rst index fe0f4d76..ef65d0c6 100644 --- a/docs/source/snippets/postgres_doc_links.rst +++ b/docs/source/snippets/postgres_doc_links.rst @@ -3,3 +3,4 @@ .. _hstore: https://www.postgresql.org/docs/11/hstore.html .. 
_PostgreSQL Declarative Table Partitioning: https://www.postgresql.org/docs/current/ddl-partitioning.html#DDL-PARTITIONING-DECLARATIVE .. _Explicit table-level locks: https://www.postgresql.org/docs/current/explicit-locking.html#LOCKING-TABLES +.. _PostgreSQL Table Partitioning Limitations: https://www.postgresql.org/docs/current/ddl-partitioning.html#DDL-PARTITIONING-DECLARATIVE-LIMITATIONS diff --git a/docs/source/table_partitioning.rst b/docs/source/table_partitioning.rst index 0a766f9c..3c5ea31b 100644 --- a/docs/source/table_partitioning.rst +++ b/docs/source/table_partitioning.rst @@ -8,6 +8,7 @@ Do study the PostgreSQL documentation carefully. + .. _table_partitioning_page: @@ -22,11 +23,57 @@ The following partitioning methods are available: * ``PARTITION BY LIST`` * ``PARTITION BY HASH`` -.. note:: +Known limitations +----------------- + +Foreign keys +~~~~~~~~~~~~ +Support for foreign keys to partitioned models is limited in Django 5.1 and older. These are only supported under specific conditions. + +For full support for foreign keys to partitioned models, use Django 5.2 or newer. Django 5.2 natively supports composite primary and foreign keys through :class:`~django:django.db.models.CompositePrimaryKey`. + +Foreign keys **on** partitioned models to other, non-partitioned models are always supported. + +PostgreSQL 10.x +~~~~~~~~~~~~~~~ +Although table partitioning is available in PostgreSQL 10.x, it is highly recommended you use PostgreSQL 11.x. Table partitioning got a major upgrade in PostgreSQL 11.x. + +PostgreSQL 10.x does not support creating foreign keys to/from partitioned tables and does not automatically create an index across all partitions. + +Transforming existing models into partitioned models +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +There is **NO SUPPORT** whatsoever to transform an existing, non-partitioned model into a partitioned model. + +At a high-level, you have the following options to do this: + +1. 
Drop the model first and re-create it as a partitioned model according to the documentation. + + .. warning:: + + Blindly doing this causes the original table & data to be lost. + +2. Craft a custom migration to use the original table as a default partition. + + Migration #1: Rename the original table to ``_default`` + + Migration #2: Create the partitioned model with the old name. + + Migration #3: Attach the original (renamed) table as the default partition. + + Migration #4: Create more partitions and/or move data from the default partition + + .. warning:: - Although table partitioning is available in PostgreSQL 10.x, it is highly recommended you use PostgresSQL 11.x. Table partitioning got a major upgrade in PostgreSQL 11.x. + This is not an officially supported flow. Be extremely cautious to avoid + data loss. - PostgreSQL 10.x does not support creating foreign keys to/from partitioned tables and does not automatically create an index across all partitions. +Lock-free and/or concurrency safe operations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +There is **NO SUPPORT** whatsoever to create/attach partitions and move data between partitions in a lock-free and concurrency safe manner. + +Most operations require ``AccessExclusiveLock`` and **will** block reads/writes. Be extremely cautious on production environments and study the associated locks with the SQL operations before proceeding. Creating partitioned tables @@ -34,10 +81,6 @@ Creating partitioned tables Partitioned tables are declared like regular Django models with a special base class and two extra options to set the partitioning method and key. Once declared, they behave like regular Django models. -.. warning:: - - The partitioning key becomes the (composite) primary key of the table automatically. Creating foreign keys to partitioned tables can only be done in raw SQL. You can use the `django-composite-foreignkey `_ package to represent the foreign key in Django. 
- Declaring the model ******************* @@ -63,6 +106,33 @@ Inherit your model from :class:`psqlextra.models.PostgresPartitionedModel` and d name = models.TextField() timestamp = models.DateTimeField() +Primary key +~~~~~~~~~~~ + +PostgreSQL demands that the partitioning key is the same as, or is part of, the primary key. See `PostgreSQL Table Partitioning Limitations`_. + +TL;DR Foreign keys don't work in Django <5.2. Use Django 5.2 or newer for proper support. + +**In Django <5.2, the behavior is as follows:** + + - If the primary key is the same as the partitioning key: + + Foreign keys to partitioned tables will work as you expect. + + - If the primary key is not the exact same as the partitioning key or the partitioning key consists of more than one field: + + An implicit composite primary key (not visible from Django) is created. + + Foreign keys to partitioned tables will **NOT** work. + +**In Django 5.2 and newer, the behavior is as follows:** + + - If no explicit primary key is defined, a :class:`~django:django.db.models.CompositePrimaryKey` is created automatically that includes an auto-incrementing ``id`` primary key field and the partitioning keys. + + - If an explicit :class:`~django:django.db.models.CompositePrimaryKey` is specified, no modifications are made to it and it is your responsibility to make sure the partitioning keys are part of the primary key. + + In Django 5.2 and newer, foreign keys to partitioned models always work. 
+ Generating a migration ********************** diff --git a/psqlextra/backend/schema.py b/psqlextra/backend/schema.py index 74b5c49b..b8dd0a76 100644 --- a/psqlextra/backend/schema.py +++ b/psqlextra/backend/schema.py @@ -617,21 +617,54 @@ def create_partitioned_model(self, model: Type[Model]) -> None: self.quote_name(field_name) for field_name in meta.key ) + pk_field = model._meta.pk + has_composite_pk = self._is_composite_primary_key(pk_field) + # create a composite key that includes the partitioning key - sql = sql.replace(" PRIMARY KEY", "") - if model._meta.pk and model._meta.pk.name not in meta.key: - sql = sql[:-1] + ", PRIMARY KEY (%s, %s))" % ( - self.quote_name(model._meta.pk.name), - partitioning_key_sql, + # if the user didn't already define one + if not has_composite_pk: + inline_pk_sql = self._create_primary_key_inline_sql(model, pk_field) + inline_tablespace_sql = ( + self._create_primary_key_inline_tablespace_sql(model, pk_field) ) - else: - sql = sql[:-1] + ", PRIMARY KEY (%s))" % (partitioning_key_sql,) + + sql = sql.replace(inline_pk_sql, "") + + if ( + not self._is_virtual_primary_key(pk_field) + and pk_field + and pk_field.name not in meta.key + ): + last_brace_idx = sql.rfind(")") + sql = ( + sql[:last_brace_idx] + + f", PRIMARY KEY (%s, %s){inline_tablespace_sql}" + % ( + self.quote_name(pk_field.name), + partitioning_key_sql, + ) + + sql[last_brace_idx:] + ) + else: + last_brace_idx = sql.rfind(")") + sql = ( + sql[:last_brace_idx] + + f", PRIMARY KEY (%s){inline_tablespace_sql}" + % (partitioning_key_sql,) + + sql[last_brace_idx:] + ) # extend the standard CREATE TABLE statement with # 'PARTITION BY ...' 
- sql += self.sql_partition_by % ( - meta.method.upper(), - partitioning_key_sql, + last_brace_idx = sql.rfind(")") + 1 + sql = ( + sql[:last_brace_idx] + + self.sql_partition_by + % ( + meta.method.upper(), + partitioning_key_sql, + ) + + sql[last_brace_idx:] ) self.execute(sql, params) @@ -1086,6 +1119,72 @@ def _partitioning_properties_for_model(model: Type[Model]): def create_partition_table_name(self, model: Type[Model], name: str) -> str: return "%s_%s" % (model._meta.db_table.lower(), name.lower()) + def _create_primary_key_inline_sql( + self, model: Type[Model], pk_field: Optional[Field] + ) -> str: + pk_field = model._meta.pk + if not pk_field: + return "" + + tablespace_sql = self._create_primary_key_inline_tablespace_sql( + model, pk_field + ) + + if self._is_virtual_primary_key(pk_field): + return "" + + pk_sql = " PRIMARY KEY" if pk_field else "" + if tablespace_sql: + pk_sql += tablespace_sql + + return pk_sql + + def _create_primary_key_inline_tablespace_sql( + self, model: Type[Model], pk_field: Optional[Field] + ) -> str: + tablespace = (pk_field.db_tablespace if pk_field else None) or model._meta.db_tablespace # type: ignore [attr-defined] + return ( + " " + self.connection.ops.tablespace_sql(tablespace, inline=True) + if tablespace + else "" + ) + + def _is_composite_primary_key(self, field: Optional[Field]) -> bool: + """Checks whether the specified field is a composite primary key. + + This needs to be wrapped because composite primary keys are only + natively supported in Django 5.2 and newer. + """ + + if not field: + return False + + try: + from django.db.models.fields.composite import CompositePrimaryKey + + return isinstance(field, CompositePrimaryKey) + except ImportError: + return False + + def _is_virtual_primary_key(self, field: Optional[Field]) -> bool: + """Gets whether the declared primary key is a virtual field that + doesn't construct any real column in the DB. 
+ + It is pseudo-standard to have virtual fields by creating + a field with no DB type. CompositePrimaryKey in Django + 5.2 and newer use this. Some third-party packages use + the same technique. + + ManyToManyFields were the first to actually use this. + """ + + if not field: + return True + + pk_db_params = field.db_parameters(connection=self.connection) + pk_db_type = pk_db_params["type"] if pk_db_params else None + return not bool(pk_db_type) + def _clone_model_field(self, field: Field, **overrides) -> Field: """Clones the specified model field and overrides its kwargs with the specified overrides. diff --git a/pyproject.toml b/pyproject.toml index cb27ce10..a68f344f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,7 +19,8 @@ exclude = "(env|build|dist|migrations)" [[tool.mypy.overrides]] module = [ - "psycopg.*" + "psycopg.*", + "django.db.models.fields.composite" ] ignore_missing_imports = true diff --git a/tests/conftest.py b/tests/conftest.py index 387edd3b..d0a379a9 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,3 +1,6 @@ +import tempfile +import uuid + import pytest from django.contrib.postgres.signals import register_type_handlers @@ -5,6 +8,44 @@ from .fake_model import define_fake_app +custom_tablespace_name = f"psqlextra-tblspace-tests-{str(uuid.uuid4())[:8]}" + + +@pytest.fixture +def custom_tablespace(): + """Gets the name of a custom tablespace that is not the default to be used + for tests that need to assert functionality that depends on custom + tablespaces. + + A single custom tablespace is used. Nothing should persist in the + tablespace because each test runs in a transaction that is rolled + back. 
+ """ + + return custom_tablespace_name + + +@pytest.fixture(scope="session") +def django_db_setup(django_db_setup, django_db_blocker): + """Extend default pytest-django DB set up to create a single, custom + tablespace to be used by tests that need to test functionality that depends + on custom tablespaces.""" + + with django_db_blocker.unblock(): + qn = connection.ops.quote_name + + with tempfile.TemporaryDirectory() as temp_dir: + with connection.cursor() as cursor: + cursor.execute( + f"CREATE TABLESPACE {qn(custom_tablespace_name)} LOCATION %s", + (temp_dir,), + ) + + yield + + with connection.cursor() as cursor: + cursor.execute(f"DROP TABLESPACE {qn(custom_tablespace_name)}") + @pytest.fixture(scope="function", autouse=True) def database_access(db): diff --git a/tests/test_schema_editor_partitioning.py b/tests/test_schema_editor_partitioning.py index c80efd52..d3602c0d 100644 --- a/tests/test_schema_editor_partitioning.py +++ b/tests/test_schema_editor_partitioning.py @@ -1,3 +1,4 @@ +import django import pytest from django.core.exceptions import ImproperlyConfigured @@ -11,7 +12,14 @@ @pytest.mark.postgres_version(lt=110000) -def test_schema_editor_create_delete_partitioned_model_range(): +@pytest.mark.parametrize( + "in_custom_tablespace", + [False, True], + ids=["default_tablespace", "custom_tablespace"], +) +def test_schema_editor_create_delete_partitioned_model_range( + custom_tablespace, in_custom_tablespace +): """Tests whether creating a partitioned model and adding a list partition to it using the :see:PostgresSchemaEditor works.""" @@ -21,6 +29,7 @@ def test_schema_editor_create_delete_partitioned_model_range(): model = define_fake_partitioned_model( {"name": models.TextField(), "timestamp": models.DateTimeField()}, {"method": method, "key": key}, + {"db_tablespace": custom_tablespace if in_custom_tablespace else None}, ) schema_editor = PostgresSchemaEditor(connection) @@ -44,7 +53,14 @@ def 
test_schema_editor_create_delete_partitioned_model_range(): @pytest.mark.postgres_version(lt=110000) -def test_schema_editor_create_delete_partitioned_model_list(): +@pytest.mark.parametrize( + "in_custom_tablespace", + [False, True], + ids=["default_tablespace", "custom_tablespace"], +) +def test_schema_editor_create_delete_partitioned_model_list( + custom_tablespace, in_custom_tablespace +): """Tests whether creating a partitioned model and adding a range partition to it using the :see:PostgresSchemaEditor works.""" @@ -54,6 +70,7 @@ def test_schema_editor_create_delete_partitioned_model_list(): model = define_fake_partitioned_model( {"name": models.TextField(), "category": models.TextField()}, {"method": method, "key": key}, + {"db_tablespace": custom_tablespace if in_custom_tablespace else None}, ) schema_editor = PostgresSchemaEditor(connection) @@ -78,7 +95,14 @@ def test_schema_editor_create_delete_partitioned_model_list(): @pytest.mark.postgres_version(lt=110000) @pytest.mark.parametrize("key", [["name"], ["id", "name"]]) -def test_schema_editor_create_delete_partitioned_model_hash(key): +@pytest.mark.parametrize( + "in_custom_tablespace", + [False, True], + ids=["default_tablespace", "custom_tablespace"], +) +def test_schema_editor_create_delete_partitioned_model_hash( + key, custom_tablespace, in_custom_tablespace +): """Tests whether creating a partitioned model and adding a hash partition to it using the :see:PostgresSchemaEditor works.""" @@ -87,6 +111,7 @@ def test_schema_editor_create_delete_partitioned_model_hash(key): model = define_fake_partitioned_model( {"name": models.TextField()}, {"method": method, "key": key}, + {"db_tablespace": custom_tablespace if in_custom_tablespace else None}, ) schema_editor = PostgresSchemaEditor(connection) @@ -275,3 +300,114 @@ def test_schema_editor_add_default_partition(method, key): schema_editor.delete_partition(model, "mypartition") table = db_introspection.get_partitioned_table(model._meta.db_table) assert 
len(table.partitions) == 0 + + +@pytest.mark.postgres_version(lt=110000) +@pytest.mark.parametrize( + "in_custom_tablespace", + [False, True], + ids=["default_tablespace", "custom_tablespace"], +) +def test_schema_editor_create_partitioned_custom_primary_key( + custom_tablespace, in_custom_tablespace +): + model = define_fake_partitioned_model( + { + "custom_pk": models.IntegerField(primary_key=True), + "name": models.TextField(), + "timestamp": models.DateTimeField(), + }, + {"method": PostgresPartitioningMethod.RANGE, "key": ["timestamp"]}, + {"db_tablespace": custom_tablespace if in_custom_tablespace else None}, + ) + + schema_editor = PostgresSchemaEditor(connection) + schema_editor.create_partitioned_model(model) + + constraints = db_introspection.get_constraints(model._meta.db_table) + primary_key_constraint = next( + ( + constraint + for constraint in constraints.values() + if constraint["primary_key"] + ), + None, + ) + + assert primary_key_constraint + assert primary_key_constraint["columns"] == ["custom_pk", "timestamp"] + + +@pytest.mark.postgres_version(lt=110000) +@pytest.mark.parametrize( + "in_custom_tablespace", + [False, True], + ids=["default_tablespace", "custom_tablespace"], +) +def test_schema_editor_create_partitioned_partioning_key_is_primary_key( + custom_tablespace, in_custom_tablespace +): + model = define_fake_partitioned_model( + { + "name": models.TextField(), + "timestamp": models.DateTimeField(primary_key=True), + }, + {"method": PostgresPartitioningMethod.RANGE, "key": ["timestamp"]}, + {"db_tablespace": custom_tablespace if in_custom_tablespace else None}, + ) + + schema_editor = PostgresSchemaEditor(connection) + schema_editor.create_partitioned_model(model) + + constraints = db_introspection.get_constraints(model._meta.db_table) + primary_key_constraint = next( + ( + constraint + for constraint in constraints.values() + if constraint["primary_key"] + ), + None, + ) + + assert primary_key_constraint + assert 
primary_key_constraint["columns"] == ["timestamp"] + + +@pytest.mark.skipif( + django.VERSION < (5, 2), + reason="Django < 5.2 doesn't implement composite primary keys", +) +@pytest.mark.postgres_version(lt=110000) +@pytest.mark.parametrize( + "in_custom_tablespace", + [False, True], + ids=["default_tablespace", "custom_tablespace"], +) +def test_schema_editor_create_partitioned_custom_composite_primary_key( + custom_tablespace, in_custom_tablespace +): + model = define_fake_partitioned_model( + { + "pk": models.CompositePrimaryKey("name", "timestamp"), + "name": models.TextField(), + "timestamp": models.DateTimeField(), + }, + {"method": PostgresPartitioningMethod.RANGE, "key": ["timestamp"]}, + {"db_tablespace": custom_tablespace if in_custom_tablespace else None}, + ) + + schema_editor = PostgresSchemaEditor(connection) + schema_editor.create_partitioned_model(model) + + constraints = db_introspection.get_constraints(model._meta.db_table) + primary_key_constraint = next( + ( + constraint + for constraint in constraints.values() + if constraint["primary_key"] + ), + None, + ) + + assert primary_key_constraint + assert primary_key_constraint["columns"] == ["name", "timestamp"] From a5cde0a5e595126f298050798f9a2480082f8449 Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Mon, 9 Jun 2025 20:58:14 +0200 Subject: [PATCH 38/55] Allow partition manager to plan for a subset of models Fixes #252 --- docs/source/table_partitioning.rst | 3 +- psqlextra/management/commands/pgpartition.py | 18 ++++++++-- psqlextra/partitioning/manager.py | 17 +++++++++ tests/test_partitioning_manager.py | 36 +++++++++++++++++++- 4 files changed, 69 insertions(+), 5 deletions(-) diff --git a/docs/source/table_partitioning.rst b/docs/source/table_partitioning.rst index 3c5ea31b..e06451ab 100644 --- a/docs/source/table_partitioning.rst +++ b/docs/source/table_partitioning.rst @@ -175,7 +175,8 @@ Command-line options Long flag Short flag Default Description ==================== ============= 
================ ==================================================================================================== === === === === === === ``--yes`` ``-y`` ``False`` Specifies yes to all questions. You will NOT be asked for confirmation before partition deletion. - ``--using`` ``-u`` ``'default'`` Optional name of the database connection to use. + ``--using`` ``-u`` ``'default'`` Optionally, name of the database connection to use. + ``--model-names`` ``-m`` ``None`` Optionally, a list of model names to partition for. ``--skip-create`` ``False`` Whether to skip creating partitions. ``--skip-delete`` ``False`` Whether to skip deleting partitions. diff --git a/psqlextra/management/commands/pgpartition.py b/psqlextra/management/commands/pgpartition.py index 8a6fa636..ca621662 100644 --- a/psqlextra/management/commands/pgpartition.py +++ b/psqlextra/management/commands/pgpartition.py @@ -1,6 +1,6 @@ import sys -from typing import Optional +from typing import List, Optional from django.conf import settings from django.core.management.base import BaseCommand @@ -37,10 +37,18 @@ def add_arguments(self, parser): parser.add_argument( "--using", "-u", - help="Optional name of the database connection to use.", + help="Name of the database connection to use.", default="default", ) + parser.add_argument( + "--model-names", + "-m", + nargs="+", + help="A list of model names for which to partition.", + default=None, + ) + parser.add_argument( "--skip-create", action="https://github.com/store_true", @@ -64,13 +72,17 @@ def handle( # type: ignore[override] using: Optional[str], skip_create: bool, skip_delete: bool, + model_names: Optional[List[str]] = None, *args, **kwargs, ): partitioning_manager = self._partitioning_manager() plan = partitioning_manager.plan( - skip_create=skip_create, skip_delete=skip_delete, using=using + skip_create=skip_create, + skip_delete=skip_delete, + model_names=model_names, + using=using, ) creations_count = len(plan.creations) diff --git 
a/psqlextra/partitioning/manager.py b/psqlextra/partitioning/manager.py index 074cc1c6..01bac3b4 100644 --- a/psqlextra/partitioning/manager.py +++ b/psqlextra/partitioning/manager.py @@ -25,6 +25,7 @@ def plan( self, skip_create: bool = False, skip_delete: bool = False, + model_names: Optional[List[str]] = None, using: Optional[str] = None, ) -> PostgresPartitioningPlan: """Plans which partitions should be deleted/created. @@ -38,6 +39,10 @@ def plan( If set to True, no partitions will be marked for deletion, regardless of the configuration. + model_names: + Optionally, only plan for the models with + the specified name. + using: Optional name of the database connection to use. @@ -48,7 +53,19 @@ def plan( model_plans = [] + normalized_model_names = ( + [model_name.lower().strip() for model_name in model_names] + if model_names + else [] + ) + for config in self.configs: + if ( + model_names + and config.model.__name__.lower() not in normalized_model_names + ): + continue + model_plan = self._plan_for_config( config, skip_create=skip_create, diff --git a/tests/test_partitioning_manager.py b/tests/test_partitioning_manager.py index 979bd1af..2a8cf6b9 100644 --- a/tests/test_partitioning_manager.py +++ b/tests/test_partitioning_manager.py @@ -1,7 +1,8 @@ import pytest -from django.db import models +from django.db import connection, models +from psqlextra.backend.schema import PostgresSchemaEditor from psqlextra.partitioning import ( PostgresPartitioningError, PostgresPartitioningManager, @@ -49,6 +50,39 @@ def test_partitioning_manager_find_config_for_model(): assert manager.find_config_for_model(model2) == config2 +def test_partitioning_manager_plan_specific_model_names(): + """Tests that only planning for specific models works as expected.""" + + model1 = define_fake_partitioned_model( + {"timestamp": models.DateTimeField()}, {"key": ["timestamp"]} + ) + + config1 = partition_by_current_time(model1, years=1, count=3) + + model2 = define_fake_partitioned_model( + 
{"timestamp": models.DateTimeField()}, {"key": ["timestamp"]} + ) + + config2 = partition_by_current_time(model2, months=1, count=2) + + schema_editor = PostgresSchemaEditor(connection) + schema_editor.create_partitioned_model(model1) + schema_editor.create_partitioned_model(model2) + + manager = PostgresPartitioningManager([config1, config2]) + + plan = manager.plan() + assert len(plan.model_plans) == 2 + + plan = manager.plan(model_names=[model2.__name__]) + assert len(plan.model_plans) == 1 + assert plan.model_plans[0].config.model == model2 + + # make sure casing is irrelevant + plan = manager.plan(model_names=[model2.__name__.lower()]) + assert len(plan.model_plans) == 1 + + def test_partitioning_manager_plan_not_partitioned_model(): """Tests that the auto partitioner does not try to auto partition for non- partitioned models/tables.""" From 7bbb89f56bab3cce7c04ae3f8c36b451ee74f9c6 Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Mon, 9 Jun 2025 21:17:21 +0200 Subject: [PATCH 39/55] Document views & materialized views support --- README.md | 3 + docs/source/index.rst | 5 + docs/source/snippets/postgres_doc_links.rst | 3 + docs/source/table_partitioning.rst | 7 + docs/source/views.rst | 197 ++++++++++++++++++++ 5 files changed, 215 insertions(+) create mode 100644 docs/source/views.rst diff --git a/README.md b/README.md index 56854677..1731b9ca 100644 --- a/README.md +++ b/README.md @@ -39,6 +39,9 @@ With seamless we mean that any features we add will work truly seamlessly. You s Adds support for PostgreSQL 11.x declarative table partitioning. Fully integrated into Django migrations. Supports all types of partitioning. Includes a command to automatically create time-based partitions. +* **Views & materialized views** + + Adds support for creating views & materialized views as any other model. Fully integrated into Django migrations. 
* **Locking models & tables** diff --git a/docs/source/index.rst b/docs/source/index.rst index d4248a39..0decf42c 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -19,6 +19,10 @@ Explore the documentation to learn about all features: Adds support for PostgreSQL 11.x declarative table partitioning. Fully integrated into Django migrations. Supports all types of partitioning. Includes a command to automatically create time-based partitions. +* :ref:`Views & materialized views ` + + Adds support for creating views & materialized views as any other model. Fully integrated into Django migrations. + * :ref:`Locking models & tables ` Support for explicit table-level locks. @@ -63,6 +67,7 @@ For Django 2.2 and older: conflict_handling deletion table_partitioning + views expressions annotations locking diff --git a/docs/source/snippets/postgres_doc_links.rst b/docs/source/snippets/postgres_doc_links.rst index ef65d0c6..537d056d 100644 --- a/docs/source/snippets/postgres_doc_links.rst +++ b/docs/source/snippets/postgres_doc_links.rst @@ -3,4 +3,7 @@ .. _hstore: https://www.postgresql.org/docs/11/hstore.html .. _PostgreSQL Declarative Table Partitioning: https://www.postgresql.org/docs/current/ddl-partitioning.html#DDL-PARTITIONING-DECLARATIVE .. _Explicit table-level locks: https://www.postgresql.org/docs/current/explicit-locking.html#LOCKING-TABLES +.. _PostgreSQL Views: https://www.postgresql.org/docs/current/sql-createview.html +.. _PostgreSQL Materialized Views: https://www.postgresql.org/docs/current/sql-creatematerializedview.html +.. _PostgreSQL Refresh Materialized Views: https://www.postgresql.org/docs/current/sql-refreshmaterializedview.html .. 
_PostgreSQL Table Partitioning Limitations: https://www.postgresql.org/docs/current/ddl-partitioning.html#DDL-PARTITIONING-DECLARATIVE-LIMITATIONS diff --git a/docs/source/table_partitioning.rst b/docs/source/table_partitioning.rst index e06451ab..0e72e3b9 100644 --- a/docs/source/table_partitioning.rst +++ b/docs/source/table_partitioning.rst @@ -40,6 +40,13 @@ Although table partitioning is available in PostgreSQL 10.x, it is highly recomm PostgreSQL 10.x does not support creating foreign keys to/from partitioned tables and does not automatically create an index across all partitions. +Changing the partition key or partition method +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +There is **NO SUPPORT** whatsoever for changing the partitioning key or method on a partitioned model after the initial creation. + +Such changes are not detected by ``python manage.py pgmakemigrations`` and there are no pre-built operations for modifying them. + Transforming existing models into partitioned models ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/docs/source/views.rst b/docs/source/views.rst new file mode 100644 index 00000000..ea6bfcb0 --- /dev/null +++ b/docs/source/views.rst @@ -0,0 +1,197 @@ +.. include:: ./snippets/postgres_doc_links.rst + +.. _views_page: + + +Views & materialized views +========================== + +:class:`~psqlextra.models.PostgresViewModel` and :class:`~psqlextra.models.PostgresMaterializedViewModel` add support for `PostgreSQL Views`_ and `PostgreSQL Materialized Views`_. + +.. note:: + + You can create indices and constraints on (materialized) views just like you would on normal PostgreSQL tables. This is fully supported. + + +Known limitations +----------------- + +Changing view query +******************* + +There is **NO SUPPORT** whatsoever for changing the backing query of a view after the initial creation.
+ +Such changes are not detected by ``python manage.py pgmakemigrations`` and there are no pre-built operations for modifying them. + + +Creating a (materialized) view +------------------------------ + +Views are declared like regular Django models with a special base class and an extra option to specify the query backing the view. Once declared, they behave like regular Django models with the exception that you cannot write to them. + +Declaring the model +******************* + +.. warning:: + + All fields returned by the backing query must be declared as Django fields. Fields that are returned by the query that aren't declared as Django fields become + part of the view, but will not be visible from Django. + +With a queryset +~~~~~~~~~~~~~~~ + +.. code-block:: python + + from django.db import models + + from psqlextra.models import PostgresViewModel + + + class MyView(PostgresViewModel): + name = models.TextField() + somefk__name = models.TextField() + + class Meta: + indexes = [models.Index(fields=["name"])] + + class ViewMeta: + query = SomeOtherModel.objects.values('id', 'name', 'somefk__name') + + class MyMaterializedView(PostgresMaterializedViewModel): + name = models.TextField() + somefk__name = models.TextField() + + class Meta: + indexes = [models.Index(fields=["name"])] + + class ViewMeta: + query = SomeOtherModel.objects.values('id', 'name', 'somefk__name') + +With raw SQL +~~~~~~~~~~~~ + +Any raw SQL can be used as the backing query for a view. Specify a tuple to pass the values for placeholders. + +.. 
code-block:: python + + from django.db import models + + from psqlextra.models import PostgresViewModel + + + class MyView(PostgresViewModel): + name = models.TextField() + somefk__name = models.TextField() + + class Meta: + indexes = [models.Index(fields=["name"])] + + class ViewMeta: + query = "SELECT id, somefk.name AS somefk__name FROM mytable INNER JOIN somefk ON somefk.id = mytable.somefk_id" + + class MyMaterializedView(PostgresMaterializedViewModel): + name = models.TextField() + somefk__name = models.TextField() + + class Meta: + indexes = [models.Index(fields=["name"])] + + class ViewMeta: + query = ("SELECT id, somefk.name AS somefk__name FROM mytable INNER JOIN somefk ON somefk.id = mytable.somefk_id WHERE id > %s", 1) + + +With a callable +~~~~~~~~~~~~~~~ + +A callable can be used when your query depends on settings or other variables that aren't available at evaluation time. The callable can return raw SQL, raw SQL with params or a queryset. + +.. code-block:: python + + from django.db import models + + from psqlextra.models import PostgresViewModel + + def _generate_query(): + return ("SELECT * FROM sometable WHERE app_name = %s", settings.APP_NAME) + + def _build_query(): + return SomeTable.objects.filter(app_name=settings.APP_NAME) + + + class MyView(PostgresViewModel): + name = models.TextField() + somefk__name = models.TextField() + + class ViewMeta: + query = _generate_query + + class MyMaterializedView(PostgresMaterializedViewModel): + name = models.TextField() + somefk__name = models.TextField() + + class ViewMeta: + query = _generate_query + + +Generating a migration +********************** +Run the following command to automatically generate a migration: + +.. code-block:: bash + + python manage.py pgmakemigrations + +This will generate a migration that creates the view with the specified query as the base. + +.. warning:: + + Always use ``python manage.py pgmakemigrations`` for view models. 
+ + The model must be created by the :class:`~psqlextra.backend.migrations.operations.PostgresCreateViewModel` or :class:`~psqlextra.backend.migrations.operations.PostgresCreateMaterializedViewModel` operation. + + Do not use the standard ``python manage.py makemigrations`` command for view models. Django will issue a standard :class:`~django:django.db.migrations.operations.CreateModel` operation. Doing this will not create a view and all subsequent operations will fail. + + +Refreshing a materialized view +------------------------------ + +Make sure to read the PostgreSQL documentation on refreshing materialized views for caveats: `PostgreSQL Refresh Materialized Views`_. + +.. code-block:: python + + # Takes an AccessExclusive lock and blocks till table is re-filled + MyViewModel.refresh() + + # Allows concurrent read, does block till table is re-filled. + # Warning: Only works if the view was refreshed at least once before. + MyViewModel.refresh(concurrently=True) + + +Creating a materialized view without data +----------------------------------------- + +.. warning:: + + You cannot query your materialized view until it has been refreshed at least once. After creating the materialized view without data, you must execute a refresh at some point. The first refresh cannot be ``CONCURRENTLY`` (PostgreSQL restriction). + +By default, the migration creates the materialized view and executes the first refresh. If you want to avoid this, pass the ``with_data=False`` flag in the :class:`~psqlextra.backend.migrations.operations.PostgresCreateMaterializedViewModel` operation in your generated migration. + +.. code-block:: python + + from django.db import migrations, models + + from psqlextra.backend.migrations.operations import PostgresCreateMaterializedViewModel + + class Migration(migrations.Migration): + operations = [ + PostgresCreateMaterializedViewModel( + name="myview", + fields=[...], + options={...}, + view_options={ + "query": ... 
+ }, + # Not the default, creates materialized with `WITH NO DATA` + with_data=False, + ) + ] From ffdc9fb1ef3866f6e749be01c5b06f26afb10e29 Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Mon, 9 Jun 2025 21:19:09 +0200 Subject: [PATCH 40/55] Don't lie about "fully" integrated with Django migrations in README --- README.md | 4 ++-- docs/source/index.rst | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 1731b9ca..603fef82 100644 --- a/README.md +++ b/README.md @@ -37,11 +37,11 @@ With seamless we mean that any features we add will work truly seamlessly. You s * **Table partitioning** - Adds support for PostgreSQL 11.x declarative table partitioning. Fully integrated into Django migrations. Supports all types of partitioning. Includes a command to automatically create time-based partitions. + Adds support for PostgreSQL 11.x declarative table partitioning. Integrated into Django migrations. Supports all types of partitioning. Includes a command to automatically create time-based partitions. * **Views & materialized views** - Adds support for creating views & materialized views as any other model. Fully integrated into Django migrations. + Adds support for creating views & materialized views as any other model. Integrated into Django migrations. * **Locking models & tables** diff --git a/docs/source/index.rst b/docs/source/index.rst index 0decf42c..3fddca56 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -17,11 +17,11 @@ Explore the documentation to learn about all features: * :ref:`Table partitioning ` - Adds support for PostgreSQL 11.x declarative table partitioning. Fully integrated into Django migrations. Supports all types of partitioning. Includes a command to automatically create time-based partitions. + Adds support for PostgreSQL 11.x declarative table partitioning. Integrated into Django migrations. Supports all types of partitioning. 
Includes a command to automatically create time-based partitions. * :ref:`Views & materialized views ` - Adds support for creating views & materialized views as any other model. Fully integrated into Django migrations. + Adds support for creating views & materialized views as any other model. Integrated into Django migrations. * :ref:`Locking models & tables ` From a5bd4a6df40a45abe09b86e55f3114c45be14218 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tudor=20V=C4=83ran?= Date: Thu, 10 Jul 2025 15:41:42 +0300 Subject: [PATCH 41/55] Remove introspection and ops checks on Django >= 5.0 (#264) --- psqlextra/backend/base.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/psqlextra/backend/base.py b/psqlextra/backend/base.py index c8ae73c5..58222dd7 100644 --- a/psqlextra/backend/base.py +++ b/psqlextra/backend/base.py @@ -2,6 +2,7 @@ from typing import TYPE_CHECKING +from django import VERSION from django.conf import settings from django.contrib.postgres.signals import ( get_hstore_oids, @@ -45,6 +46,9 @@ class DatabaseWrapper(Wrapper): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) + if VERSION >= (5, 0): + return + # Some base back-ends such as the PostGIS back-end don't properly # set `ops_class` and `introspection_class` and initialize these # classes themselves. 
From 552fb271227677d6c91fa94a599a58437090cc9a Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Thu, 10 Jul 2025 21:23:24 +0200 Subject: [PATCH 42/55] Custom CompositePrimaryKey and foreign key support on partitioned models --- psqlextra/models/partitioned.py | 137 +++++++++++++++++++++- tests/test_partitioned_model.py | 195 +++++++++++++++++++++++++++++++- 2 files changed, 325 insertions(+), 7 deletions(-) diff --git a/psqlextra/models/partitioned.py b/psqlextra/models/partitioned.py index f0115367..d8331554 100644 --- a/psqlextra/models/partitioned.py +++ b/psqlextra/models/partitioned.py @@ -1,6 +1,10 @@ -from typing import Iterable +from typing import Iterable, List, Optional, Tuple +from django.core.exceptions import ImproperlyConfigured +from django.db import models from django.db.models.base import ModelBase +from django.db.models.fields.composite import CompositePrimaryKey +from django.db.models.options import Options from psqlextra.types import PostgresPartitioningMethod @@ -20,19 +24,140 @@ class PostgresPartitionedModelMeta(ModelBase): default_key: Iterable[str] = [] def __new__(cls, name, bases, attrs, **kwargs): - new_class = super().__new__(cls, name, bases, attrs, **kwargs) - meta_class = attrs.pop("PartitioningMeta", None) + partitioning_meta_class = attrs.pop("PartitioningMeta", None) + + partitioning_method = getattr(partitioning_meta_class, "method", None) + partitioning_key = getattr(partitioning_meta_class, "key", None) + special = getattr(partitioning_meta_class, "special", None) - method = getattr(meta_class, "method", None) - key = getattr(meta_class, "key", None) + if special: + cls._create_primary_key(attrs, partitioning_key) patitioning_meta = PostgresPartitionedModelOptions( - method=method or cls.default_method, key=key or cls.default_key + method=partitioning_method or cls.default_method, + key=partitioning_key or cls.default_key, ) + new_class = super().__new__(cls, name, bases, attrs, **kwargs) 
new_class.add_to_class("_partitioning_meta", patitioning_meta) return new_class + @classmethod + def _create_primary_key(cls, attrs, partitioning_key: Optional[List[str]]): + pk = cls._find_primary_key(attrs) + if pk and isinstance(pk[1], CompositePrimaryKey): + return + + if not pk: + attrs["id"] = attrs.get("id") or cls._create_auto_field(attrs) + pk_fields = ["id"] + else: + pk_fields = [pk[0]] + + unique_pk_fields = set(pk_fields + (partitioning_key or [])) + if len(unique_pk_fields) <= 1: + return + + auto_generated_pk = CompositePrimaryKey(*sorted(unique_pk_fields)) + attrs["pk"] = auto_generated_pk + + @classmethod + def _create_auto_field(cls, attrs): + app_label = attrs.get("app_label") + meta_class = attrs.get("Meta", None) + + pk_class = Options(meta_class, app_label)._get_default_pk_class() + return pk_class(verbose_name="ID", primary_key=True, auto_created=True) + + @classmethod + def _find_primary_key(cls, attrs) -> Optional[Tuple[str, models.Field]]: + """Gets the field that has been marked by the user as the primary key + field for this model. + + This is quite complex because Django allows a variety of options: + + 1. No PK at all. In this case, Django generates one named `id` + as an auto-increment integer (AutoField) + + 2. One field that has `primary_key=True`. Any field can have + this attribute, but Django would error if there were more. + + 3. One field named `pk`. + + 4. One field that has `primary_key=True` and a field that + is of type `CompositePrimaryKey` that includes that + field. + + Since a table can only have one primary key, our goal here + is to find the field (if any) that is going to become + the primary key of the table. + + Our logic is straight forward: + + 1. If there is a `CompositePrimaryKey`, that field becomes the primary key. + + 2. If there is a field with `primary_key=True`, that field becomes the primary key. + + 3. There is no primary key. 
+ """ + + fields = { + name: value + for name, value in attrs.items() + if isinstance(value, models.Field) + } + + fields_marked_as_pk = { + name: value for name, value in fields.items() if value.primary_key + } + + # We cannot let the user define a field named `pk` that is not a CompositePrimaryKey + # already because when we generate a primary key, we want to name it `pk`. + field_named_pk = attrs.get("pk") + if field_named_pk and not field_named_pk.primary_key: + raise ImproperlyConfigured( + "You cannot define a field named `pk` that is not a primary key." + ) + + if field_named_pk: + if not isinstance(field_named_pk, CompositePrimaryKey): + raise ImproperlyConfigured( + "You cannot define a field named `pk` that is not a composite primary key on a partitioned model. Either make `pk` a CompositePrimaryKey or rename it." + ) + + return ("pk", field_named_pk) + + if not fields_marked_as_pk: + return None + + # Make sure the user didn't define N primary keys. Django would also warn + # about this. + # + # One exception is a set up such as: + # + # >>> id = models.AutoField(primary_key=True) + # >>> timestamp = models.DateTimeField() + # >>> pk = models.CompositePrimaryKey("id", "timestamp") + # + # In this case, both `id` and `pk` are marked as primary key. Django + # allows this and just ignores the `primary_key=True` attribute + # on all the other fields except the composite one. + # + # We also handle this as expected and treat the CompositePrimaryKey + # as the primary key. + sorted_fields_marked_as_pk = sorted( + list(fields_marked_as_pk.items()), + key=lambda pair: 0 + if isinstance(pair[1], CompositePrimaryKey) + else 1, + ) + if len(sorted_fields_marked_as_pk[1:]) > 1: + raise ImproperlyConfigured( + "You cannot mark more than one fields as a primary key." 
+ ) + + return sorted_fields_marked_as_pk[0] + class PostgresPartitionedModel( PostgresModel, metaclass=PostgresPartitionedModelMeta diff --git a/tests/test_partitioned_model.py b/tests/test_partitioned_model.py index 89562730..f4fc3c6b 100644 --- a/tests/test_partitioned_model.py +++ b/tests/test_partitioned_model.py @@ -1,7 +1,13 @@ +import django +import pytest + +from django.core.exceptions import ImproperlyConfigured +from django.db import models + from psqlextra.models import PostgresPartitionedModel from psqlextra.types import PostgresPartitioningMethod -from .fake_model import define_fake_partitioned_model +from .fake_model import define_fake_model, define_fake_partitioned_model def test_partitioned_model_abstract(): @@ -70,3 +76,190 @@ def test_partitioned_model_key_option_none(): model = define_fake_partitioned_model(partitioning_options=dict(key=None)) assert model._partitioning_meta.key == [] + + +@pytest.mark.skipif( + django.VERSION < (5, 2), + reason="Django < 5.2 doesn't implement composite primary keys", +) +def test_partitioned_model_custom_composite_primary_key_with_auto_field(): + model = define_fake_partitioned_model( + fields={ + "auto_id": models.AutoField(), + "my_custom_pk": models.CompositePrimaryKey("auto_id", "timestamp"), + "timestamp": models.DateTimeField(), + }, + partitioning_options=dict(key=["timestamp"], special=True), + ) + + assert isinstance(model._meta.pk, models.CompositePrimaryKey) + assert model._meta.pk.name == "my_custom_pk" + assert model._meta.pk.columns == ("auto_id", "timestamp") + + +@pytest.mark.skipif( + django.VERSION < (5, 2), + reason="Django < 5.2 doesn't implement composite primary keys", +) +def test_partitioned_model_custom_composite_primary_key_with_id_field(): + model = define_fake_partitioned_model( + fields={ + "id": models.IntegerField(), + "my_custom_pk": models.CompositePrimaryKey("id", "timestamp"), + "timestamp": models.DateTimeField(), + }, + partitioning_options=dict(key=["timestamp"], 
special=True), + ) + + assert isinstance(model._meta.pk, models.CompositePrimaryKey) + assert model._meta.pk.name == "my_custom_pk" + assert model._meta.pk.columns == ("id", "timestamp") + + +@pytest.mark.skipif( + django.VERSION < (5, 2), + reason="Django < 5.2 doesn't implement composite primary keys", +) +def test_partitioned_model_custom_composite_primary_key_named_id(): + model = define_fake_partitioned_model( + fields={ + "other_field": models.TextField(), + "id": models.CompositePrimaryKey("other_field", "timestamp"), + "timestamp": models.DateTimeField(), + }, + partitioning_options=dict(key=["timestamp"], special=True), + ) + + assert isinstance(model._meta.pk, models.CompositePrimaryKey) + assert model._meta.pk.name == "id" + assert model._meta.pk.columns == ("other_field", "timestamp") + + +@pytest.mark.skipif( + django.VERSION < (5, 2), + reason="Django < 5.2 doesn't implement composite primary keys", +) +def test_partitioned_model_field_named_pk_not_composite_not_primary(): + with pytest.raises(ImproperlyConfigured): + define_fake_partitioned_model( + fields={ + "pk": models.TextField(), + "id": models.CompositePrimaryKey("other_field", "timestamp"), + "timestamp": models.DateTimeField(), + }, + partitioning_options=dict(key=["timestamp"], special=True), + ) + + +@pytest.mark.skipif( + django.VERSION < (5, 2), + reason="Django < 5.2 doesn't implement composite primary keys", +) +def test_partitioned_model_field_named_pk_not_composite(): + with pytest.raises(ImproperlyConfigured): + define_fake_partitioned_model( + fields={ + "pk": models.AutoField(primary_key=True), + "timestamp": models.DateTimeField(), + }, + partitioning_options=dict(key=["timestamp"], special=True), + ) + + +@pytest.mark.skipif( + django.VERSION < (5, 2), + reason="Django < 5.2 doesn't implement composite primary keys", +) +def test_partitioned_model_field_multiple_pks(): + with pytest.raises(ImproperlyConfigured): + define_fake_partitioned_model( + fields={ + "id": 
models.AutoField(primary_key=True), + "another_pk": models.TextField(primary_key=True), + "timestamp": models.DateTimeField(), + "real_pk": models.CompositePrimaryKey("id", "timestamp"), + }, + partitioning_options=dict(key=["timestamp"], special=True), + ) + + +@pytest.mark.skipif( + django.VERSION < (5, 2), + reason="Django < 5.2 doesn't implement composite primary keys", +) +def test_partitioned_model_no_pk_defined(): + model = define_fake_partitioned_model( + fields={ + "timestamp": models.DateTimeField(), + }, + partitioning_options=dict(key=["timestamp"], special=True), + ) + + assert isinstance(model._meta.pk, models.CompositePrimaryKey) + assert model._meta.pk.name == "pk" + assert model._meta.pk.columns == ("id", "timestamp") + + id_field = model._meta.get_field("id") + assert id_field.name == "id" + assert id_field.column == "id" + assert isinstance(id_field, models.AutoField) + assert id_field.primary_key is True + + +@pytest.mark.skipif( + django.VERSION < (5, 2), + reason="Django < 5.2 doesn't implement composite primary keys", +) +def test_partitioned_model_composite_primary_key(): + model = define_fake_partitioned_model( + fields={ + "id": models.AutoField(primary_key=True), + "pk": models.CompositePrimaryKey("id", "timestamp"), + "timestamp": models.DateTimeField(), + }, + partitioning_options=dict(key=["timestamp"], special=True), + ) + + assert isinstance(model._meta.pk, models.CompositePrimaryKey) + assert model._meta.pk.name == "pk" + assert model._meta.pk.columns == ("id", "timestamp") + + +@pytest.mark.skipif( + django.VERSION < (5, 2), + reason="Django < 5.2 doesn't implement composite primary keys", +) +def test_partitioned_model_composite_primary_key_foreign_key(): + model = define_fake_partitioned_model( + fields={ + "timestamp": models.DateTimeField(), + }, + partitioning_options=dict(key=["timestamp"], special=True), + ) + + define_fake_model( + fields={ + "model": models.ForeignKey(model, on_delete=models.CASCADE), + }, + ) + + 
+@pytest.mark.skipif( + django.VERSION < (5, 2), + reason="Django < 5.2 doesn't implement composite primary keys", +) +def test_partitioned_model_custom_composite_primary_key_foreign_key(): + model = define_fake_partitioned_model( + fields={ + "id": models.TextField(primary_key=True), + "timestamp": models.DateTimeField(), + "custom": models.CompositePrimaryKey("id", "timestamp"), + }, + partitioning_options=dict(key=["timestamp"], special=True), + ) + + define_fake_model( + fields={ + "model": models.ForeignKey(model, on_delete=models.CASCADE), + }, + ) From 72a6070c7d7ec05f3fd54f7c26ea5e2edc8adbf1 Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Fri, 11 Jul 2025 09:11:32 +0200 Subject: [PATCH 43/55] Very clearly specify that FK to partitioned models are not supported --- docs/source/table_partitioning.rst | 58 ++++++++++++++++++++++++------ 1 file changed, 47 insertions(+), 11 deletions(-) diff --git a/docs/source/table_partitioning.rst b/docs/source/table_partitioning.rst index 0e72e3b9..9194c411 100644 --- a/docs/source/table_partitioning.rst +++ b/docs/source/table_partitioning.rst @@ -28,9 +28,7 @@ Known limitations Foreign keys ~~~~~~~~~~~~ -Support for foreign keys to partitioned models is limited in Django 5.1 and older. These are only suported under specific conditions. - -For full support for foreign keys to partitioned models, use Django 5.2 or newer. Django 5.2 supports composite primary and foreign keys native through :class:`~django:django.db.models.CompositePrimaryKey` to support. +There is no support for foreign keys **to** partitioned models. Even in Django 5.2 with the introduction of :class:`~django:django.db.models.CompositePrimaryKey`, there is no support for foreign keys. See: https://code.djangoproject.com/ticket/36034 Foreing keys **on** a partitioned models to other, non-partitioned models are always supported. @@ -118,27 +116,65 @@ Primary key PostgreSQL demands that the primary key is the same or is part of the partitioning key. 
See `PostgreSQL Table Partitioning Limitations`_. -TL;DR Foreign keys don't work in Django <5.2. Use Django 5.2 or newer for proper support. - **In Django <5.2, the behavior is as following:** - - If the primary key is the same as the partitioning key: - - Foreign keys to partitioned tables will work as you expect. + - If the primary key is the same as the partitioning key, standard Django behavior applies. - If the primary key is not the exact same as the partitioning key or the partitioning key consists of more than one field: An implicit composite primary key (not visible from Django) is created. - Foreign keys to partitioned tables will **NOT** work. - **In Django >5.2, the behavior is as following:** - If no explicit primary key is defined, a :class:`~django:django.db.models.CompositePrimaryKey` is created automatically that includes an auto-incrementing `id` primary key field and the partitioning keys. - If an explicit :class:`~django:django.db.models.CompositePrimaryKey` is specified, no modifications are made to it and it is your responsibility to make sure the partitioning keys are part of the primary key. - In Django 5.2 and newer, foreign keys to partitioned models always work. +Django 5.2 examples +******************* + +Custom composite primary key +"""""""""""""""""""""""""""" + +.. code-block:: python + + from django.db import models + + from psqlextra.types import PostgresPartitioningMethod + from psqlextra.models import PostgresPartitionedModel + + class MyModel(PostgresPartitionedModel): + class PartitioningMeta: + method = PostgresPartitioningMethod.RANGE + key = ["timestamp"] + + # WARNING: This overrides default primary key that includes a auto-increment `id` field. + pk = models.CompositePrimaryKey("name", "timestamp") + + name = models.TextField() + timestamp = models.DateTimeField() + + +Custom composite primary key with auto-incrementing ID +"""""""""""""""""""""""""""""""""""""""""""""""""""""" + +.. 
code-block:: python + + from django.db import models + + from psqlextra.types import PostgresPartitioningMethod + from psqlextra.models import PostgresPartitionedModel + + class MyModel(PostgresPartitionedModel): + class PartitioningMeta: + method = PostgresPartitioningMethod.RANGE + key = ["timestamp"] + + id = models.AutoField(primary_key=True) + pk = models.CompositePrimaryKey("id", "timestamp") + + name = models.TextField() + timestamp = models.DateTimeField() Generating a migration From b450c69cb11ecd023df38270c554ceb5a64f109b Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Fri, 11 Jul 2025 09:13:53 +0200 Subject: [PATCH 44/55] Make sure temp dir for tablespace tests exists --- tests/conftest.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/conftest.py b/tests/conftest.py index d0a379a9..03d01ab3 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,3 +1,4 @@ +import os import tempfile import uuid @@ -35,6 +36,9 @@ def django_db_setup(django_db_setup, django_db_blocker): qn = connection.ops.quote_name with tempfile.TemporaryDirectory() as temp_dir: + if not os.path.exists(temp_dir): + os.makedirs(temp_dir) + with connection.cursor() as cursor: cursor.execute( f"CREATE TABLESPACE {qn(custom_tablespace_name)} LOCATION %s", From 3a6229ce3dc73075095d0b1cf0ec29a6ced94f2f Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Sun, 5 Oct 2025 21:52:25 +0200 Subject: [PATCH 45/55] Finalize `CompositePrimaryKey` support on `PostgresPartitionedModel` --- psqlextra/models/partitioned.py | 76 ++++++++++++++++++++++++++++++--- tests/test_partitioned_model.py | 24 +++++------ 2 files changed, 82 insertions(+), 18 deletions(-) diff --git a/psqlextra/models/partitioned.py b/psqlextra/models/partitioned.py index d8331554..3a206775 100644 --- a/psqlextra/models/partitioned.py +++ b/psqlextra/models/partitioned.py @@ -1,9 +1,10 @@ from typing import Iterable, List, Optional, Tuple +import django + from django.core.exceptions import ImproperlyConfigured from 
django.db import models from django.db.models.base import ModelBase -from django.db.models.fields.composite import CompositePrimaryKey from django.db.models.options import Options from psqlextra.types import PostgresPartitioningMethod @@ -28,9 +29,11 @@ def __new__(cls, name, bases, attrs, **kwargs): partitioning_method = getattr(partitioning_meta_class, "method", None) partitioning_key = getattr(partitioning_meta_class, "key", None) - special = getattr(partitioning_meta_class, "special", None) - if special: + if django.VERSION >= (5, 2): + for base in bases: + cls._delete_auto_created_fields(base) + cls._create_primary_key(attrs, partitioning_key) patitioning_meta = PostgresPartitionedModelOptions( @@ -43,21 +46,57 @@ def __new__(cls, name, bases, attrs, **kwargs): return new_class @classmethod - def _create_primary_key(cls, attrs, partitioning_key: Optional[List[str]]): + def _create_primary_key( + cls, attrs, partitioning_key: Optional[List[str]] + ) -> None: + from django.db.models.fields.composite import CompositePrimaryKey + + # Find any existing primary key the user might have declared. + # + # If it is a composite primary key, we will do nothing and + # keep it as it is. You're on your own. pk = cls._find_primary_key(attrs) if pk and isinstance(pk[1], CompositePrimaryKey): return + # Create an `id` field (auto-incrementing) if there is no + # primary key yet. + # + # This matches standard Django behavior. 
if not pk: attrs["id"] = attrs.get("id") or cls._create_auto_field(attrs) pk_fields = ["id"] else: pk_fields = [pk[0]] - unique_pk_fields = set(pk_fields + (partitioning_key or [])) + partitioning_keys = ( + partitioning_key + if isinstance(partitioning_key, list) + else list(filter(None, [partitioning_key])) + ) + + unique_pk_fields = set(pk_fields + (partitioning_keys or [])) if len(unique_pk_fields) <= 1: + if "id" in attrs: + attrs["id"].primary_key = True return + # You might have done something like this: + # + # id = models.AutoField(primary_key=True) + # pk = CompositePrimaryKey("id", "timestamp") + # + # The `primary_key` attribute has to be removed + # from the `id` field in the example above to + # avoid having two primary keys. + # + # Without this, the generated schema will + # have two primary keys, which is an error. + for field in attrs.values(): + is_pk = getattr(field, "primary_key", False) + if is_pk: + field.primary_key = False + auto_generated_pk = CompositePrimaryKey(*sorted(unique_pk_fields)) attrs["pk"] = auto_generated_pk @@ -67,7 +106,7 @@ def _create_auto_field(cls, attrs): meta_class = attrs.get("Meta", None) pk_class = Options(meta_class, app_label)._get_default_pk_class() - return pk_class(verbose_name="ID", primary_key=True, auto_created=True) + return pk_class(verbose_name="ID", auto_created=True) @classmethod def _find_primary_key(cls, attrs) -> Optional[Tuple[str, models.Field]]: @@ -101,6 +140,8 @@ def _find_primary_key(cls, attrs) -> Optional[Tuple[str, models.Field]]: 3. There is no primary key. 
""" + from django.db.models.fields.composite import CompositePrimaryKey + fields = { name: value for name, value in attrs.items() @@ -158,6 +199,29 @@ def _find_primary_key(cls, attrs) -> Optional[Tuple[str, models.Field]]: return sorted_fields_marked_as_pk[0] + @classmethod + def _delete_auto_created_fields(cls, model: models.Model): + """Base classes might be injecting an auto-generated `id` field before + we even have the chance of doing this ourselves. + + Delete any auto generated fields from the base class so that we + can declare our own. If there is no auto-generated field, one + will be added anyways by our own logic + """ + + fields = model._meta.local_fields + model._meta.local_many_to_many + for field in fields: + auto_created = getattr(field, "auto_created", False) + if auto_created: + if field in model._meta.local_fields: + model._meta.local_fields.remove(field) + + if field in model._meta.fields: + model._meta.fields.remove(field) # type: ignore [attr-defined] + + if hasattr(model, field.name): + delattr(model, field.name) + class PostgresPartitionedModel( PostgresModel, metaclass=PostgresPartitionedModelMeta diff --git a/tests/test_partitioned_model.py b/tests/test_partitioned_model.py index f4fc3c6b..55c66516 100644 --- a/tests/test_partitioned_model.py +++ b/tests/test_partitioned_model.py @@ -85,11 +85,11 @@ def test_partitioned_model_key_option_none(): def test_partitioned_model_custom_composite_primary_key_with_auto_field(): model = define_fake_partitioned_model( fields={ - "auto_id": models.AutoField(), + "auto_id": models.AutoField(primary_key=True), "my_custom_pk": models.CompositePrimaryKey("auto_id", "timestamp"), "timestamp": models.DateTimeField(), }, - partitioning_options=dict(key=["timestamp"], special=True), + partitioning_options=dict(key=["timestamp"]), ) assert isinstance(model._meta.pk, models.CompositePrimaryKey) @@ -108,7 +108,7 @@ def test_partitioned_model_custom_composite_primary_key_with_id_field(): "my_custom_pk": 
models.CompositePrimaryKey("id", "timestamp"), "timestamp": models.DateTimeField(), }, - partitioning_options=dict(key=["timestamp"], special=True), + partitioning_options=dict(key=["timestamp"]), ) assert isinstance(model._meta.pk, models.CompositePrimaryKey) @@ -127,7 +127,7 @@ def test_partitioned_model_custom_composite_primary_key_named_id(): "id": models.CompositePrimaryKey("other_field", "timestamp"), "timestamp": models.DateTimeField(), }, - partitioning_options=dict(key=["timestamp"], special=True), + partitioning_options=dict(key=["timestamp"]), ) assert isinstance(model._meta.pk, models.CompositePrimaryKey) @@ -147,7 +147,7 @@ def test_partitioned_model_field_named_pk_not_composite_not_primary(): "id": models.CompositePrimaryKey("other_field", "timestamp"), "timestamp": models.DateTimeField(), }, - partitioning_options=dict(key=["timestamp"], special=True), + partitioning_options=dict(key=["timestamp"]), ) @@ -162,7 +162,7 @@ def test_partitioned_model_field_named_pk_not_composite(): "pk": models.AutoField(primary_key=True), "timestamp": models.DateTimeField(), }, - partitioning_options=dict(key=["timestamp"], special=True), + partitioning_options=dict(key=["timestamp"]), ) @@ -179,7 +179,7 @@ def test_partitioned_model_field_multiple_pks(): "timestamp": models.DateTimeField(), "real_pk": models.CompositePrimaryKey("id", "timestamp"), }, - partitioning_options=dict(key=["timestamp"], special=True), + partitioning_options=dict(key=["timestamp"]), ) @@ -192,7 +192,7 @@ def test_partitioned_model_no_pk_defined(): fields={ "timestamp": models.DateTimeField(), }, - partitioning_options=dict(key=["timestamp"], special=True), + partitioning_options=dict(key=["timestamp"]), ) assert isinstance(model._meta.pk, models.CompositePrimaryKey) @@ -203,7 +203,7 @@ def test_partitioned_model_no_pk_defined(): assert id_field.name == "id" assert id_field.column == "id" assert isinstance(id_field, models.AutoField) - assert id_field.primary_key is True + assert 
id_field.primary_key is False @pytest.mark.skipif( @@ -217,7 +217,7 @@ def test_partitioned_model_composite_primary_key(): "pk": models.CompositePrimaryKey("id", "timestamp"), "timestamp": models.DateTimeField(), }, - partitioning_options=dict(key=["timestamp"], special=True), + partitioning_options=dict(key=["timestamp"]), ) assert isinstance(model._meta.pk, models.CompositePrimaryKey) @@ -234,7 +234,7 @@ def test_partitioned_model_composite_primary_key_foreign_key(): fields={ "timestamp": models.DateTimeField(), }, - partitioning_options=dict(key=["timestamp"], special=True), + partitioning_options=dict(key=["timestamp"]), ) define_fake_model( @@ -255,7 +255,7 @@ def test_partitioned_model_custom_composite_primary_key_foreign_key(): "timestamp": models.DateTimeField(), "custom": models.CompositePrimaryKey("id", "timestamp"), }, - partitioning_options=dict(key=["timestamp"], special=True), + partitioning_options=dict(key=["timestamp"]), ) define_fake_model( From d454875cbc203fc8ddc58d4507e0ca911a0fc664 Mon Sep 17 00:00:00 2001 From: Walison Filipe Date: Sun, 5 Oct 2025 16:53:30 -0300 Subject: [PATCH 46/55] Make multi-day partitions deterministic and aligned (#263) --- psqlextra/partitioning/time_partition_size.py | 13 ++++- tests/test_partitioning_time.py | 49 +++++++++++++++++-- 2 files changed, 57 insertions(+), 5 deletions(-) diff --git a/psqlextra/partitioning/time_partition_size.py b/psqlextra/partitioning/time_partition_size.py index 3d013bcd..46ef3691 100644 --- a/psqlextra/partitioning/time_partition_size.py +++ b/psqlextra/partitioning/time_partition_size.py @@ -1,6 +1,6 @@ import enum -from datetime import date, datetime +from datetime import date, datetime, timedelta, timezone from typing import Optional, Union from dateutil.relativedelta import relativedelta @@ -15,11 +15,15 @@ class PostgresTimePartitionUnit(enum.Enum): DAYS = "days" +UNIX_EPOCH = datetime(1970, 1, 1, tzinfo=timezone.utc) + + class PostgresTimePartitionSize: """Size of a time-based 
range partition table.""" unit: PostgresTimePartitionUnit value: int + anchor: datetime def __init__( self, @@ -27,6 +31,7 @@ def __init__( months: Optional[int] = None, weeks: Optional[int] = None, days: Optional[int] = None, + anchor: datetime = UNIX_EPOCH ) -> None: sizes = [years, months, weeks, days] @@ -38,6 +43,7 @@ def __init__( "Partition can only have on size unit." ) + self.anchor = anchor if years: self.unit = PostgresTimePartitionUnit.YEARS self.value = years @@ -82,7 +88,10 @@ def start(self, dt: datetime) -> datetime: if self.unit == PostgresTimePartitionUnit.WEEKS: return self._ensure_datetime(dt - relativedelta(days=dt.weekday())) - return self._ensure_datetime(dt) + diff_days = (dt - self.anchor).days + partition_index = diff_days // self.value + start = self.anchor + timedelta(days=partition_index * self.value) + return self._ensure_datetime(start) @staticmethod def _ensure_datetime(dt: Union[date, datetime]) -> datetime: diff --git a/tests/test_partitioning_time.py b/tests/test_partitioning_time.py index 9f6b5bf1..6d190b3d 100644 --- a/tests/test_partitioning_time.py +++ b/tests/test_partitioning_time.py @@ -254,6 +254,49 @@ def test_partitioning_time_daily_apply(): assert table.partitions[6].name == "2019_jun_04" +@pytest.mark.postgres_version(lt=110000) +def test_partitioning_time_consistent_daily_apply(): + """Ensures that automatic daily partition creation is consistent and aligned + when the partition size spans multiple days (e.g., days > 1)""" + + model = define_fake_partitioned_model( + {"timestamp": models.DateTimeField()}, {"key": ["timestamp"]} + ) + + schema_editor = connection.schema_editor() + schema_editor.create_partitioned_model(model) + + with freezegun.freeze_time("2025-06-20"): + manager = PostgresPartitioningManager( + [partition_by_current_time(model, days=5, count=3)] + ) + manager.plan().apply() + + table = _get_partitioned_table(model) + assert len(table.partitions) == 3 + + # Partitions are aligned based on the fixed 
anchor (Unix Epoch by default). + # 2025-06-20 falls within the partition starting at 2025-06-16, + # since it's the most recent multiple of 5 days since 1970-01-01. + assert table.partitions[0].name == "2025_jun_16" + assert table.partitions[1].name == "2025_jun_21" + assert table.partitions[2].name == "2025_jun_26" + + # re-running it another day only creates the next one needed. + with freezegun.freeze_time("2025-06-22"): + manager = PostgresPartitioningManager( + [partition_by_current_time(model, days=5, count=3)] + ) + manager.plan().apply() + + table = _get_partitioned_table(model) + assert len(table.partitions) == 4 + assert table.partitions[0].name == "2025_jun_16" + assert table.partitions[1].name == "2025_jun_21" + assert table.partitions[2].name == "2025_jun_26" + assert table.partitions[3].name == "2025_jul_01" + + @pytest.mark.postgres_version(lt=110000) def test_partitioning_time_monthly_apply_insert(): """Tests whether automatically created monthly partitions line up @@ -376,7 +419,7 @@ def test_partitioning_time_daily_apply_insert(): @pytest.mark.parametrize( "kwargs,partition_names", [ - (dict(days=2), ["2019_jan_01", "2019_jan_03"]), + (dict(days=2), ["2018_dec_31", "2019_jan_02"]), (dict(weeks=2), ["2018_week_53", "2019_week_02"]), (dict(months=2), ["2019_jan", "2019_mar"]), (dict(years=2), ["2019", "2021"]), @@ -422,7 +465,7 @@ def test_partitioning_time_multiple(kwargs, partition_names): dict(days=7, max_age=relativedelta(weeks=1)), [ ("2019-1-1", 6), - ("2019-1-4", 6), + ("2019-1-4", 5), ("2019-1-8", 5), ("2019-1-15", 4), ("2019-1-16", 4), @@ -450,7 +493,7 @@ def test_partitioning_time_delete(kwargs, timepoints): with freezegun.freeze_time(timepoints[0][0]): manager.plan().apply() - for index, (dt, partition_count) in enumerate(timepoints): + for (dt, partition_count) in timepoints: with freezegun.freeze_time(dt): manager.plan(skip_create=True).apply() From 182ff726ccd8d390be44f6cff8895f822f15680a Mon Sep 17 00:00:00 2001 From: Swen Kooij 
Date: Sun, 5 Oct 2025 21:56:43 +0200 Subject: [PATCH 47/55] Re-format code after merging partition anchor support --- psqlextra/partitioning/time_partition_size.py | 2 +- tests/test_partitioning_time.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/psqlextra/partitioning/time_partition_size.py b/psqlextra/partitioning/time_partition_size.py index 46ef3691..bb0c7e7b 100644 --- a/psqlextra/partitioning/time_partition_size.py +++ b/psqlextra/partitioning/time_partition_size.py @@ -31,7 +31,7 @@ def __init__( months: Optional[int] = None, weeks: Optional[int] = None, days: Optional[int] = None, - anchor: datetime = UNIX_EPOCH + anchor: datetime = UNIX_EPOCH, ) -> None: sizes = [years, months, weeks, days] diff --git a/tests/test_partitioning_time.py b/tests/test_partitioning_time.py index 6d190b3d..1e6e21a0 100644 --- a/tests/test_partitioning_time.py +++ b/tests/test_partitioning_time.py @@ -256,8 +256,8 @@ def test_partitioning_time_daily_apply(): @pytest.mark.postgres_version(lt=110000) def test_partitioning_time_consistent_daily_apply(): - """Ensures that automatic daily partition creation is consistent and aligned - when the partition size spans multiple days (e.g., days > 1)""" + """Ensures that automatic daily partition creation is consistent and + aligned when the partition size spans multiple days (e.g., days > 1)""" model = define_fake_partitioned_model( {"timestamp": models.DateTimeField()}, {"key": ["timestamp"]} From 67f20301d0038221d8cc1a921424f5a9537957ee Mon Sep 17 00:00:00 2001 From: Stuart Leitch Date: Thu, 5 Oct 2023 15:02:23 +0100 Subject: [PATCH 48/55] Add in Hourly Partition Support --- .gitignore | 3 + docs/source/table_partitioning.rst | 11 +++ .../partitioning/current_time_strategy.py | 3 +- psqlextra/partitioning/shorthands.py | 6 +- psqlextra/partitioning/time_partition.py | 5 +- psqlextra/partitioning/time_partition_size.py | 27 ++++-- tests/test_partitioning_time.py | 91 +++++++++++++++++++ 7 files changed, 136 
insertions(+), 10 deletions(-) diff --git a/.gitignore b/.gitignore index 63d6378d..52805a88 100644 --- a/.gitignore +++ b/.gitignore @@ -29,3 +29,6 @@ build/ # Ignore PyCharm / IntelliJ files .idea/ +build/ +.python-version +docker-compose.yml \ No newline at end of file diff --git a/docs/source/table_partitioning.rst b/docs/source/table_partitioning.rst index 9194c411..1c36db0a 100644 --- a/docs/source/table_partitioning.rst +++ b/docs/source/table_partitioning.rst @@ -292,6 +292,17 @@ Time-based partitioning count=12, ), ), + + # 24 partitions ahead, each partition is 1 hour, for a total of 24 hours. Starting with hour 0 of current day + # old partitions are never deleted, `max_age` is not set + # partitions will be named `[table_name]_[year]_[month]_[month day number]_[hour (24h)]:00:00`. + PostgresPartitioningConfig( + model=MyPartitionedModel, + strategy=PostgresCurrentTimePartitioningStrategy( + size=PostgresTimePartitionSize(hours=1), + count=24, + ), + ), ]) diff --git a/psqlextra/partitioning/current_time_strategy.py b/psqlextra/partitioning/current_time_strategy.py index 114a1aaf..795f60ca 100644 --- a/psqlextra/partitioning/current_time_strategy.py +++ b/psqlextra/partitioning/current_time_strategy.py @@ -16,7 +16,8 @@ class PostgresCurrentTimePartitioningStrategy( All buckets will be equal in size and start at the start of the unit. With monthly partitioning, partitions start on the 1st and - with weekly partitioning, partitions start on monday. + with weekly partitioning, partitions start on monday, with hourly + partitioning, partitions start at 00:00. 
""" def __init__( diff --git a/psqlextra/partitioning/shorthands.py b/psqlextra/partitioning/shorthands.py index 30175273..f263e362 100644 --- a/psqlextra/partitioning/shorthands.py +++ b/psqlextra/partitioning/shorthands.py @@ -16,6 +16,7 @@ def partition_by_current_time( months: Optional[int] = None, weeks: Optional[int] = None, days: Optional[int] = None, + hours: Optional[int] = None, max_age: Optional[relativedelta] = None, name_format: Optional[str] = None, ) -> PostgresPartitioningConfig: @@ -43,6 +44,9 @@ def partition_by_current_time( days: The amount of days each partition should contain. + hours: + The amount of hours each partition should contain. + max_age: The maximum age of a partition (calculated from the start of the partition). @@ -56,7 +60,7 @@ def partition_by_current_time( """ size = PostgresTimePartitionSize( - years=years, months=months, weeks=weeks, days=days + years=years, months=months, weeks=weeks, days=days, hours=hours ) return PostgresPartitioningConfig( diff --git a/psqlextra/partitioning/time_partition.py b/psqlextra/partitioning/time_partition.py index 3c8a4d87..64a8cf8d 100644 --- a/psqlextra/partitioning/time_partition.py +++ b/psqlextra/partitioning/time_partition.py @@ -20,6 +20,7 @@ class PostgresTimePartition(PostgresRangePartition): PostgresTimePartitionUnit.MONTHS: "%Y_%b", PostgresTimePartitionUnit.WEEKS: "%Y_week_%W", PostgresTimePartitionUnit.DAYS: "%Y_%b_%d", + PostgresTimePartitionUnit.HOURS: "%Y_%b_%d_%H:00:00", } def __init__( @@ -31,8 +32,8 @@ def __init__( end_datetime = start_datetime + size.as_delta() super().__init__( - from_values=start_datetime.strftime("%Y-%m-%d"), - to_values=end_datetime.strftime("%Y-%m-%d"), + from_values=start_datetime.strftime("%Y-%m-%d %H:00:00"), + to_values=end_datetime.strftime("%Y-%m-%d %H:00:00"), ) self.size = size diff --git a/psqlextra/partitioning/time_partition_size.py b/psqlextra/partitioning/time_partition_size.py index bb0c7e7b..b8231ddc 100644 --- 
a/psqlextra/partitioning/time_partition_size.py +++ b/psqlextra/partitioning/time_partition_size.py @@ -13,6 +13,7 @@ class PostgresTimePartitionUnit(enum.Enum): MONTHS = "months" WEEKS = "weeks" DAYS = "days" + HOURS = "hours" UNIX_EPOCH = datetime(1970, 1, 1, tzinfo=timezone.utc) @@ -31,9 +32,10 @@ def __init__( months: Optional[int] = None, weeks: Optional[int] = None, days: Optional[int] = None, + hours: Optional[int] = None, anchor: datetime = UNIX_EPOCH, ) -> None: - sizes = [years, months, weeks, days] + sizes = [years, months, weeks, days, hours] if not any(sizes): raise PostgresPartitioningError("Partition cannot be 0 in size.") @@ -56,6 +58,9 @@ def __init__( elif days: self.unit = PostgresTimePartitionUnit.DAYS self.value = days + elif hours: + self.unit = PostgresTimePartitionUnit.HOURS + self.value = hours else: raise PostgresPartitioningError( "Unsupported time partitioning unit" @@ -74,6 +79,9 @@ def as_delta(self) -> relativedelta: if self.unit == PostgresTimePartitionUnit.DAYS: return relativedelta(days=self.value) + if self.unit == PostgresTimePartitionUnit.HOURS: + return relativedelta(hours=self.value) + raise PostgresPartitioningError( "Unsupported time partitioning unit: %s" % self.unit ) @@ -88,14 +96,21 @@ def start(self, dt: datetime) -> datetime: if self.unit == PostgresTimePartitionUnit.WEEKS: return self._ensure_datetime(dt - relativedelta(days=dt.weekday())) - diff_days = (dt - self.anchor).days - partition_index = diff_days // self.value - start = self.anchor + timedelta(days=partition_index * self.value) - return self._ensure_datetime(start) + if self.unit == PostgresTimePartitionUnit.DAYS: + diff_days = (dt - self.anchor).days + partition_index = diff_days // self.value + start = self.anchor + timedelta(days=partition_index * self.value) + return self._ensure_datetime(start) + + if self.unit == PostgresTimePartitionUnit.HOURS: + return self._ensure_datetime(dt.replace(hour=0)) + + raise ValueError("Unknown unit") @staticmethod def 
_ensure_datetime(dt: Union[date, datetime]) -> datetime: - return datetime(year=dt.year, month=dt.month, day=dt.day) + hour = dt.hour if isinstance(dt, datetime) else 0 + return datetime(year=dt.year, month=dt.month, day=dt.day, hour=hour) def __repr__(self) -> str: return "PostgresTimePartitionSize<%s, %s>" % (self.unit, self.value) diff --git a/tests/test_partitioning_time.py b/tests/test_partitioning_time.py index 1e6e21a0..0ab0daf6 100644 --- a/tests/test_partitioning_time.py +++ b/tests/test_partitioning_time.py @@ -254,6 +254,56 @@ def test_partitioning_time_daily_apply(): assert table.partitions[6].name == "2019_jun_04" +@pytest.mark.postgres_version(lt=110000) +def test_partitioning_time_hourly_apply(): + """Tests whether automatically creating new partitions ahead hourly works + as expected.""" + + model = define_fake_partitioned_model( + {"timestamp": models.DateTimeField()}, {"key": ["timestamp"]} + ) + + schema_editor = connection.schema_editor() + schema_editor.create_partitioned_model(model) + + # create partitions for the next 4 hours (including the current) + with freezegun.freeze_time("2019-1-23"): + manager = PostgresPartitioningManager( + [partition_by_current_time(model, hours=1, count=4)] + ) + manager.plan().apply() + + table = _get_partitioned_table(model) + assert len(table.partitions) == 4 + assert table.partitions[0].name == "2019_jan_23_00:00:00" + assert table.partitions[1].name == "2019_jan_23_01:00:00" + assert table.partitions[2].name == "2019_jan_23_02:00:00" + assert table.partitions[3].name == "2019_jan_23_03:00:00" + + # re-running it with 5, should just create one additional partition + with freezegun.freeze_time("2019-1-23"): + manager = PostgresPartitioningManager( + [partition_by_current_time(model, hours=1, count=5)] + ) + manager.plan().apply() + + table = _get_partitioned_table(model) + assert len(table.partitions) == 5 + assert table.partitions[4].name == "2019_jan_23_04:00:00" + + # it's june now, we want to partition two 
hours ahead + with freezegun.freeze_time("2019-06-03"): + manager = PostgresPartitioningManager( + [partition_by_current_time(model, hours=1, count=2)] + ) + manager.plan().apply() + + table = _get_partitioned_table(model) + assert len(table.partitions) == 7 + assert table.partitions[5].name == "2019_jun_03_00:00:00" + assert table.partitions[6].name == "2019_jun_03_01:00:00" + + @pytest.mark.postgres_version(lt=110000) def test_partitioning_time_consistent_daily_apply(): """Ensures that automatic daily partition creation is consistent and @@ -415,11 +465,52 @@ def test_partitioning_time_daily_apply_insert(): model.objects.create(timestamp=datetime.date(2019, 1, 10)) +@pytest.mark.postgres_version(lt=110000) +def test_partitioning_time_hourly_apply_insert(): + """Tests whether automatically created hourly partitions line up + perfectly.""" + + model = define_fake_partitioned_model( + {"timestamp": models.DateTimeField()}, {"key": ["timestamp"]} + ) + + schema_editor = connection.schema_editor() + schema_editor.create_partitioned_model(model) + + # that's a monday + with freezegun.freeze_time("2019-1-07"): + manager = PostgresPartitioningManager( + [partition_by_current_time(model, hours=1, count=2)] + ) + manager.plan().apply() + + table = _get_partitioned_table(model) + assert len(table.partitions) == 2 + + model.objects.create(timestamp=datetime.datetime(2019, 1, 7, 0)) + model.objects.create(timestamp=datetime.datetime(2019, 1, 7, 1)) + + with transaction.atomic(): + with pytest.raises(IntegrityError): + model.objects.create(timestamp=datetime.datetime(2019, 1, 7, 2)) + model.objects.create(timestamp=datetime.datetime(2019, 1, 7, 3)) + + with freezegun.freeze_time("2019-1-07"): + manager = PostgresPartitioningManager( + [partition_by_current_time(model, hours=1, count=4)] + ) + manager.plan().apply() + + model.objects.create(timestamp=datetime.datetime(2019, 1, 7, 2)) + model.objects.create(timestamp=datetime.datetime(2019, 1, 7, 3)) + + 
@pytest.mark.postgres_version(lt=110000) @pytest.mark.parametrize( "kwargs,partition_names", [ (dict(days=2), ["2018_dec_31", "2019_jan_02"]), + (dict(hours=2), ["2019_jan_01_00:00:00", "2019_jan_01_02:00:00"]), (dict(weeks=2), ["2018_week_53", "2019_week_02"]), (dict(months=2), ["2019_jan", "2019_mar"]), (dict(years=2), ["2019", "2021"]), From 97413d4a028449b6ce06d4304d4c366c51155153 Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Sun, 5 Oct 2025 22:58:41 +0200 Subject: [PATCH 49/55] Fix temporary tablespace directory not existing in Postgres container on CI --- tests/conftest.py | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 03d01ab3..2df5e273 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,9 +1,9 @@ -import os import tempfile import uuid import pytest +from django.conf import settings from django.contrib.postgres.signals import register_type_handlers from django.db import connection @@ -35,11 +35,24 @@ def django_db_setup(django_db_setup, django_db_blocker): with django_db_blocker.unblock(): qn = connection.ops.quote_name - with tempfile.TemporaryDirectory() as temp_dir: - if not os.path.exists(temp_dir): - os.makedirs(temp_dir) + db_hostname = settings.DATABASES[connection.alias]["HOST"] + with tempfile.TemporaryDirectory() as temp_dir: with connection.cursor() as cursor: + # If the database is remote, like in a CI environment, make + # sure the temporary directory exists in the container + # that PostgreSQL is running in. + # + # Note that this only typically works in CI environments + # where we have utter control to execute arbitrary commands. 
+ if db_hostname and db_hostname not in ( + "127.0.0.1", + "localhost", + ): + cursor.execute( + f"COPY (select 1) TO PROGRAM 'mkdir --mode=777 -p {temp_dir}'" + ) + cursor.execute( f"CREATE TABLESPACE {qn(custom_tablespace_name)} LOCATION %s", (temp_dir,), From 663ecf63075a26d28b69c626f66a686d85653d8f Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Sun, 5 Oct 2025 23:16:27 +0200 Subject: [PATCH 50/55] Move Docker images to bullseye --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index a00cc61e..4549c360 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -10,7 +10,7 @@ executors: default: "16.0" debiandist: type: string - default: "buster" + default: "bullseye" docker: - image: python:<< parameters.pyversion >>-<< parameters.debiandist >> - image: postgres:<< parameters.pgversion >> From 37d67d759986a77058b9cda43046138c5a8e9db1 Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Sun, 5 Oct 2025 23:17:13 +0200 Subject: [PATCH 51/55] Add contrib module with utilities that have been helpful over the years --- psqlextra/contrib/README.md | 5 + psqlextra/contrib/__init__.py | 11 + psqlextra/contrib/expressions.py | 47 +++ psqlextra/contrib/model_data_migrator.py | 352 +++++++++++++++++++++++ psqlextra/contrib/static_row.py | 97 +++++++ psqlextra/contrib/transaction.py | 33 +++ 6 files changed, 545 insertions(+) create mode 100644 psqlextra/contrib/README.md create mode 100644 psqlextra/contrib/__init__.py create mode 100644 psqlextra/contrib/expressions.py create mode 100644 psqlextra/contrib/model_data_migrator.py create mode 100644 psqlextra/contrib/static_row.py create mode 100644 psqlextra/contrib/transaction.py diff --git a/psqlextra/contrib/README.md b/psqlextra/contrib/README.md new file mode 100644 index 00000000..296194e6 --- /dev/null +++ b/psqlextra/contrib/README.md @@ -0,0 +1,5 @@ +# psqlextra.contrib + +This module contains an arbitrary collection of utilities 
and snippets that build on top of core functionality provided by django-postgres-extra. + +This collection is UNTESTED, UNSUPPORTED and UNDOCUMENTED. They are only provided here as an inspiration. Use at your own risk. diff --git a/psqlextra/contrib/__init__.py b/psqlextra/contrib/__init__.py new file mode 100644 index 00000000..97794eb0 --- /dev/null +++ b/psqlextra/contrib/__init__.py @@ -0,0 +1,11 @@ +from .model_data_migrator import PostgresModelDataMigrator +from .static_row import StaticRowQueryCompiler, StaticRowQuerySet +from .transaction import no_transaction + +__all__ = [ + "PostgresModelDataMigrator", + "PostgresModelDataMigratorState", "StaticRowQuery", + "StaticRowQueryCompiler", + "StaticRowQuerySet", + "no_transaction", +] diff --git a/psqlextra/contrib/expressions.py b/psqlextra/contrib/expressions.py new file mode 100644 index 00000000..dfc57f75 --- /dev/null +++ b/psqlextra/contrib/expressions.py @@ -0,0 +1,47 @@ +from django.db import models +from django.db.models.expressions import CombinedExpression, Func + + +class Equals(CombinedExpression): + """Expression that constructs `{lhs} = {rhs}`. + + Used as an alternative to Django's `Q` object when the + left-hand side is an aliased field not known to Django. 
+ """ + + connector: str = "=" + + def __init__(self, lhs, rhs) -> None: + super().__init__( + lhs, self.connector, rhs, output_field=models.BooleanField() + ) + + +class Is(Equals): + """Expression that constructs `{lhs} IS {rhs}`.""" + + connector: str = "IS" + + +class GreaterThen(Equals): + """Expression that constructs `{lhs} > {rhs}`.""" + + connector: str = ">" + + +class LowerThenOrEqual(Equals): + """Expression that constructs `{lhs} <= {rhs}`.""" + + connector: str = "<=" + + +class And(Equals): + """Expression that constructs `{lhs} AND {rhs}`.""" + + connector: str = "AND" + + +class Bool(Func): + """Cast to a boolean.""" + + function = "BOOL" diff --git a/psqlextra/contrib/model_data_migrator.py b/psqlextra/contrib/model_data_migrator.py new file mode 100644 index 00000000..35a2dcd3 --- /dev/null +++ b/psqlextra/contrib/model_data_migrator.py @@ -0,0 +1,352 @@ +# mypy: disable-error-code="attr-defined" + +import json +import os +import time + +from abc import abstractmethod +from contextlib import contextmanager +from dataclasses import dataclass +from datetime import timedelta +from typing import Any, Dict, Type + +from django.db import DEFAULT_DB_ALIAS, connections, models, transaction + +from psqlextra.locking import PostgresTableLockMode, postgres_lock_model +from psqlextra.schema import PostgresSchema +from psqlextra.settings import ( + postgres_prepend_local_search_path, + postgres_set_local, +) + +from .transaction import no_transaction + + +@dataclass +class PostgresModelDataMigratorState: + id: str + work_schema: PostgresSchema + backup_schema: PostgresSchema + default_schema: PostgresSchema + storage_settings: Dict[str, Any] + + +class PostgresModelDataMigrator: + """Helps altering/moving large amounts of data in a table quickly without + interruptions. + + In simple terms: This class temporarily drops all indices + and constraints from a table to speed up writes. + + In complicated terms: + + 1. 
Create copy of the table without indices or constraints
+       in a separate schema.
+
+       The clone is made in a separate schema so that there
+       are no naming conflicts and there is no need to rename
+       anything.
+
+    2. Allow the caller to fill the copy.
+
+       This will be an order of magnitude faster because
+       there are no indices to build or constraints to
+       satisfy. You are responsible for making sure the
+       data is ok and will satisfy the constraints when
+       they come back.
+
+    3. Add the indices and constraints to the table.
+
+       This takes time, but it's still a lot faster than
+       the indices being built incrementally.
+
+    4. Allow the caller to clean up the copied table.
+
+       With the indices back in place, filtering the copied
+       table should be fast. Perfect time to clean up
+       some data.
+
+    5. Vacuum+Analyze the table.
+
+       Vacuuming ensures we don't risk transaction ID
+       wrap-around and analyzing ensures up-to-date
+       statistics.
+
+    6. Start a transaction.
+
+    7. Lock the real table in EXCLUSIVE mode.
+
+       This blocks writes or modifications to the table,
+       but does not block readers.
+
+    8. Allow the caller to move some data from the real table
+       into the copied one.
+
+       This is the perfect time to copy any data that was
+       written to the real table since the migration process
+       began. Since the original table is locked, you can
+       be sure no more rows are being added or modified.
+
+    9. Move the original table into a backup schema.
+
+       This allows it to be quickly restored manually
+       if the migration is broken in any way.
+
+    10. Move the copied table in place of the real one.
+
+    11. Commit the transaction, which releases the lock.
+
+    The process is very similar to how pg_repack rewrites
+    an entire table without long-running locks on the table.
+
+    Attributes:
+        model: The model to migrate.
+        using: Optional name of the database connection to use.
+        operation_timeout: Maximum amount of time a single statement
+            can take.
+ """ + + model: Type[models.Model] + using: str = DEFAULT_DB_ALIAS + operation_timeout: timedelta + + def __init__(self, logger) -> None: + self.logger = logger + self.connection = connections[self.using] + self.schema_editor = self.connection.schema_editor(atomic=False) + + @abstractmethod + def fill_cloned_table_lockless( + self, work_schema: PostgresSchema, default_schema: PostgresSchema + ) -> None: + """Moment to fill the cloned table with data.""" + + @abstractmethod + def clean_cloned_table( + self, work_schema: PostgresSchema, default_schema: PostgresSchema + ) -> None: + """Moment to clean the filled table after it has indices and validated + data.""" + + @abstractmethod + def fill_cloned_table_locked( + self, work_schema: PostgresSchema, default_schema: PostgresSchema + ) -> None: + """Moment to do final cleaning while the original table is locked for + writing.""" + + @no_transaction( + why="The transaction would be too big and some statements cannot be run in a transaction." 
+ ) + def migrate(self) -> PostgresModelDataMigratorState: + start_time = time.time() + + with self.atomic(): + with self.connection.cursor() as cursor: + storage_settings = ( + self.connection.introspection.get_storage_settings( + cursor, self.table_name + ) + ) + + state = PostgresModelDataMigratorState( + id=os.urandom(4).hex(), + work_schema=PostgresSchema.create_random( + f"migrate_{self.table_name}", using=self.using + ), + backup_schema=PostgresSchema.create_random( + f"backup_{self.table_name}", using=self.using + ), + default_schema=PostgresSchema.default, + storage_settings=storage_settings, + ) + + logger = self.logger.bind(id=state.id) + logger.info( + f"Starting migration of {self.table_name}", + data=json.dumps( + { + "work_schema": state.work_schema.name, + "backup_schema": state.backup_schema.name, + "default_schema": state.default_schema.name, + "storage_settings": state.storage_settings, + } + ), + ) + + count = self.model.objects.using(self.using).count() + logger.info(f"Found {count} records in {self.table_name}") + + phases = [ + (self._migrate_phase_1, "cloning and filling table"), + (self._migrate_phase_2, "adding constraints and indexes"), + (self._migrate_phase_3, "cleaning up and vacuuming"), + (self._migrate_phase_4, "swapping"), + ] + + for index, (phase, description) in enumerate(phases): + phase_start_time = time.time() + logger.info( + f"Starting phase #{index + 1} of migrating {self.table_name}: {description}" + ) + phase(state) + logger.info( + f"Finished phase #{index + 1} of migrating {self.table_name}: {description}", + task_time=time.time() - phase_start_time, + ) + + state.work_schema.delete(cascade=True, using=self.using) + + logger.info( + f"Finished migrating {self.table_name}", + task_time=time.time() - start_time, + ) + + return state + + def _migrate_phase_1(self, state: PostgresModelDataMigratorState) -> None: + """Clone the table without constraints or indices.""" + + with self.atomic(): + 
self.schema_editor.clone_model_structure_to_schema(
+                self.model, schema_name=state.work_schema.name
+            )
+
+        # Disable auto-vacuum on the cloned table to prevent
+        # it from consuming excessive resources _while_ we're
+        # writing to it. We're running this manually before
+        # we turn it back on in the last phase.
+        with postgres_prepend_local_search_path(
+            [state.work_schema.name], using=self.using
+        ):
+            self.schema_editor.alter_model_storage_setting(
+                self.model, "autovacuum_enabled", "false"
+            )
+
+        # Let the derived class fill our cloned table
+        self.fill_cloned_table_lockless(state.work_schema, state.default_schema)
+
+    def _migrate_phase_2(self, state: PostgresModelDataMigratorState) -> None:
+        """Add indices and constraints to the cloned table."""
+
+        # Add indices and constraints to the temporary table
+        # This could be sped up by increasing `maintenance_work_mem`
+        # and `max_parallel_workers_per_gather`, but we won't as
+        # it'll consume more I/O, potentially disturbing normal traffic.
+        with self.atomic():
+            self.schema_editor.clone_model_constraints_and_indexes_to_schema(
+                self.model, schema_name=state.work_schema.name
+            )
+
+        # Validate foreign keys
+        #
+        # The foreign keys have been added in NOT VALID mode so they
+        # only validate new rows. Validate the existing rows.
+        #
+        # This is a two-step process to avoid an AccessExclusiveLock
+        # on the referenced tables.
+        with self.atomic():
+            self.schema_editor.clone_model_foreign_keys_to_schema(
+                self.model, schema_name=state.work_schema.name
+            )
+
+    def _migrate_phase_3(self, state: PostgresModelDataMigratorState) -> None:
+        """Clean & finalize the cloned table."""
+
+        # Let the derived class do some clean up on the temporary
+        # table now that we have indices and constraints.
+ with self.atomic(): + self.clean_cloned_table(state.work_schema, state.default_schema) + + # Finalize the copy by vacuuming+analyzing it + # + # VACUUM: There should not be much bloat since the table + # is new, but the clean up phase might have generated some. + # + # We mostly VACUUM to reset the transaction ID and prevent + # transaction ID wraparound. + # + # ANALYZE: The table went from 0 to being filled, by running ANALYZE, + # we update the statistics, allowing the query planner to + # make good decisions. + with postgres_prepend_local_search_path( + [state.work_schema.name], using=self.using + ): + self.schema_editor.vacuum_model(self.model, analyze=True) + + # Re-enable autovacuum on the cloned table + with postgres_prepend_local_search_path( + [state.work_schema.name], using=self.using + ): + autovacuum_enabled = state.storage_settings.get( + "autovacuum_enabled" + ) + if autovacuum_enabled: + self.schema_editor.alter_model_storage_setting( + self.model, "autovacuum_enabled", autovacuum_enabled + ) + else: + self.schema_editor.reset_model_storage_setting( + self.model, "autovacuum_enabled" + ) + + def _migrate_phase_4(self, state: PostgresModelDataMigratorState) -> None: + """Replace the original table with the cloned one.""" + + with self.atomic(): + # Lock the original table for writing so that the caller + # is given a chance to do last-minute moving of data. + postgres_lock_model( + self.model, PostgresTableLockMode.EXCLUSIVE, using=self.using + ) + + # Let derived class finalize the temporary table while the + # original is locked. Not much work should happen here. + self.fill_cloned_table_locked( + state.work_schema, state.default_schema + ) + + # Move the original table into the backup schema. + # Disable autovacuum on it so we don't waste resources + # keeping it clean. 
+ self.schema_editor.alter_model_storage_setting( + self.model, "autovacuum_enabled", "false" + ) + self.schema_editor.alter_model_schema( + self.model, state.backup_schema.name + ) + + # Move the cloned table in place of the original + with postgres_prepend_local_search_path( + [state.work_schema.name], using=self.using + ): + self.schema_editor.alter_model_schema( + self.model, state.default_schema.name + ) + + @property + def model_name(self) -> str: + return self.model.__name__ + + @property + def table_name(self) -> str: + return self.model._meta.db_table + + @contextmanager + def atomic(self): + """Creates a atomic transaction with run-time parameters tuned for a + live migration. + + - Statement/idle timeout set to prevent runaway queries + from continuing long after the migrator was killed. + - No parallel works to keep I/O under control. + """ + + with transaction.atomic(durable=True, using=self.using): + with postgres_set_local( + statement_timeout=f"{self.operation_timeout.total_seconds()}s", + idle_in_transaction_session_timeout=f"{self.operation_timeout.total_seconds()}s", + max_parallel_workers_per_gather=0, + using=self.using, + ): + yield diff --git a/psqlextra/contrib/static_row.py b/psqlextra/contrib/static_row.py new file mode 100644 index 00000000..a89905a1 --- /dev/null +++ b/psqlextra/contrib/static_row.py @@ -0,0 +1,97 @@ +from typing import Any, List, Optional, Tuple, Type, TypeVar, cast + +from django.db import DEFAULT_DB_ALIAS, connections, models +from django.db.models.expressions import Value +from django.db.models.query import RawQuerySet +from django.db.models.sql import Query +from django.db.models.sql.compiler import SQLCompiler + +TModel = TypeVar("TModel", bound=models.Model) + + +class StaticRowQueryCompiler(SQLCompiler): + has_extra_select = False + + def as_sql(self, *args, **kwargs): + cols = [] + params = [] + + select, _, _ = self.get_select() + + for _, (s_sql, s_params), s_alias in select: + cols.append( + "%s AS %s" + % ( 
+ s_sql, + self.connection.ops.quote_name(s_alias), + ) + ) + + params.extend(s_params) + + return f"SELECT {', '.join(cols)}", tuple(params) + + +class StaticRowQuery(Query): + def __init__( + self, model: Type[models.Model], using: str = DEFAULT_DB_ALIAS + ): + self.using = using + + super().__init__(model) + + def get_columns(self): + return list(self.annotations.keys()) + + def get_compiler( + self, using: Optional[str] = None, connection=None, elide_empty=True + ): + using = using or self.using + + compiler = StaticRowQueryCompiler( + self, connection or connections[using], using + ) + compiler.setup_query() + + return compiler + + def __iter__(self): + compiler = self.get_compiler() + + cursor = compiler.connection.cursor() + cursor.execute(*compiler.as_sql()) + + return iter(cursor) + + +class StaticRowQuerySet(RawQuerySet): + """Query set that compiles queries that don't select from anything and have + their values hard-coded. + + Example: + + >>> SELECT 'mystring' AS something, -1 AS somethingelse; + + This is used when you want to add some rows to a result + set using UNION in SQL. 
+ """ + + def __init__( + self, + model: Type[models.Model], + row: List[Tuple[str, Value]], + using: str = DEFAULT_DB_ALIAS, + ) -> None: + query = StaticRowQuery(model, using) + query.default_cols = False + query.annotations = dict(row) + + sql, params = query.sql_with_params() + + # cast(Tuple[Any], params) because `RawQuerySet.__init_` is mistyped + super().__init__( + raw_query=sql, + model=model, + query=query, + params=cast(Tuple[Any], params), + ) diff --git a/psqlextra/contrib/transaction.py b/psqlextra/contrib/transaction.py new file mode 100644 index 00000000..796246c7 --- /dev/null +++ b/psqlextra/contrib/transaction.py @@ -0,0 +1,33 @@ +from contextlib import contextmanager +from typing import Optional + +from django.conf import settings +from django.core.exceptions import SuspiciousOperation +from django.db import DEFAULT_DB_ALIAS, connections + + +def _is_in_test(): + return ( + getattr(settings, "TEST_MODE", False) + or getattr(settings, "TESTING", False) + or getattr(settings, "TEST", False) + ) + + +@contextmanager +def no_transaction(*, why: str, using: Optional[str] = None): + """Prevents a method or a block from running in a database transaction.""" + + # During tests, allow one level of transaction.atomic(..) nesting + # because tests themselves run in a transaction. If there's only + # one level of nesting, it's from the test itself and the code + # would actually run without a transaction outside the test. 
+ + connection = connections[using or DEFAULT_DB_ALIAS] + + if connection.in_atomic_block and not ( + _is_in_test() and len(connection.savepoint_ids) <= 1 + ): + raise SuspiciousOperation(f"Unexpected database transaction: {why}") + + yield From 7eccfd3e6fa75ddde68fd57143e013a7ca509e28 Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Sun, 5 Oct 2025 23:25:44 +0200 Subject: [PATCH 52/55] Add additional check to try to distingush between running tests on CI vs locally --- tests/conftest.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 2df5e273..25996fab 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -35,6 +35,7 @@ def django_db_setup(django_db_setup, django_db_blocker): with django_db_blocker.unblock(): qn = connection.ops.quote_name + db_user = settings.DATABASES[connection.alias]["USER"] db_hostname = settings.DATABASES[connection.alias]["HOST"] with tempfile.TemporaryDirectory() as temp_dir: @@ -45,9 +46,13 @@ def django_db_setup(django_db_setup, django_db_blocker): # # Note that this only typically works in CI environments # where we have utter control to execute arbitary commands. 
- if db_hostname and db_hostname not in ( - "127.0.0.1", - "localhost", + if db_user or ( + db_hostname + and db_hostname + not in ( + "127.0.0.1", + "localhost", + ) ): cursor.execute( f"COPY (select 1) TO PROGRAM 'mkdir --mode=777 -p {temp_dir}'" From f59417abaf066b65ce673124a718223244fb83b4 Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Sun, 5 Oct 2025 23:27:28 +0200 Subject: [PATCH 53/55] Use explicit flag to detect database in container during tests --- .circleci/config.yml | 1 + settings.py | 2 ++ tests/conftest.py | 13 +------------ 3 files changed, 4 insertions(+), 12 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 4549c360..c926b2c5 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -45,6 +45,7 @@ commands: command: tox --listenvs | grep ^py<< parameters.pyversion >> | circleci tests split | xargs -n 1 tox -e environment: DATABASE_URL: 'postgres://psqlextra:psqlextra@localhost:5432/psqlextra' + DATABASE_IN_CONTAINER: 'true' jobs: test-python36: diff --git a/settings.py b/settings.py index 7ece1712..2a5e0fac 100644 --- a/settings.py +++ b/settings.py @@ -43,3 +43,5 @@ def _parse_db_url(/service/url: str): USE_TZ = True TIME_ZONE = 'UTC' + +DATABASE_IN_CONTAINER = os.environ.get('DATABASE_IN_CONTAINER') == 'true' diff --git a/tests/conftest.py b/tests/conftest.py index 25996fab..9620d123 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -34,10 +34,6 @@ def django_db_setup(django_db_setup, django_db_blocker): with django_db_blocker.unblock(): qn = connection.ops.quote_name - - db_user = settings.DATABASES[connection.alias]["USER"] - db_hostname = settings.DATABASES[connection.alias]["HOST"] - with tempfile.TemporaryDirectory() as temp_dir: with connection.cursor() as cursor: # If the database is remote, like in a CI environment, make @@ -46,14 +42,7 @@ def django_db_setup(django_db_setup, django_db_blocker): # # Note that this only typically works in CI environments # where we have utter control to execute 
arbitary commands. - if db_user or ( - db_hostname - and db_hostname - not in ( - "127.0.0.1", - "localhost", - ) - ): + if settings.DATABASE_IN_CONTAINER: cursor.execute( f"COPY (select 1) TO PROGRAM 'mkdir --mode=777 -p {temp_dir}'" ) From d7cd98e4f87125941ea120475b86fee763c6f04a Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Sun, 5 Oct 2025 23:30:44 +0200 Subject: [PATCH 54/55] Pass through `DATABASE_IN_CONTAINER` in tox --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 697d1c44..a5a33ed8 100644 --- a/tox.ini +++ b/tox.ini @@ -27,5 +27,5 @@ deps = .[test] setenv = DJANGO_SETTINGS_MODULE=settings -passenv = DATABASE_URL +passenv = DATABASE_URL, DATABASE_IN_CONTAINER commands = poe test From 5b1f9f759bae6166e62cf57d898e5b578bba9b13 Mon Sep 17 00:00:00 2001 From: Swen Kooij Date: Sat, 18 Oct 2025 22:07:10 +0200 Subject: [PATCH 55/55] Make tox v3.x pass `DATABASE_URL` into env --- .circleci/config.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index c926b2c5..49eac3a8 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -46,6 +46,7 @@ commands: environment: DATABASE_URL: 'postgres://psqlextra:psqlextra@localhost:5432/psqlextra' DATABASE_IN_CONTAINER: 'true' + TOX_TESTENV_PASSENV: 'DATABASE_URL DATABASE_IN_CONTAINER' jobs: test-python36: