From 46208923446651a58c49115d17b4de6da4934c71 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Wed, 16 Feb 2022 23:15:08 +0200 Subject: [PATCH 001/108] Fix bogus log message when starting from a cleanly shut down state. In commit 70e81861fa to split xlog.c, I moved the startup code that updates the state in the control file and prints out the "database system was not properly shut down" message to the log, but I accidentally removed the "if (InRecovery)" check around it. As a result, that message was printed even if the system was cleanly shut down, also during 'initdb'. Discussion: https://www.postgresql.org/message-id/3357075.1645031062@sss.pgh.pa.us --- src/backend/access/transam/xlogrecovery.c | 118 ++++++++++++---------- 1 file changed, 62 insertions(+), 56 deletions(-) diff --git a/src/backend/access/transam/xlogrecovery.c b/src/backend/access/transam/xlogrecovery.c index d5269ede80..f9f212680b 100644 --- a/src/backend/access/transam/xlogrecovery.c +++ b/src/backend/access/transam/xlogrecovery.c @@ -840,69 +840,75 @@ InitWalRecovery(ControlFileData *ControlFile, bool *wasShutdown_ptr, } /* - * Update pg_control to show that we are recovering and to show the - * selected checkpoint as the place we are starting from. We also mark - * pg_control with any minimum recovery stop point obtained from a backup - * history file. + * If recovery is needed, update our in-memory copy of pg_control to show + * that we are recovering and to show the selected checkpoint as the place + * we are starting from. We also mark pg_control with any minimum recovery + * stop point obtained from a backup history file. + * + * We don't write the changes to disk yet, though. Only do that after + * initializing various subsystems. */ - if (InArchiveRecovery) - { - ControlFile->state = DB_IN_ARCHIVE_RECOVERY; - } - else + if (InRecovery) { - ereport(LOG, - (errmsg("database system was not properly shut down; " - "automatic recovery in progress"))); - if (recoveryTargetTLI > ControlFile->checkPointCopy.ThisTimeLineID) + if (InArchiveRecovery) + { + ControlFile->state = DB_IN_ARCHIVE_RECOVERY; + } + else + { ereport(LOG, - (errmsg("crash recovery starts in timeline %u " - "and has target timeline %u", - ControlFile->checkPointCopy.ThisTimeLineID, - recoveryTargetTLI))); - ControlFile->state = DB_IN_CRASH_RECOVERY; - } - ControlFile->checkPoint = CheckPointLoc; - ControlFile->checkPointCopy = checkPoint; - if (InArchiveRecovery) - { - /* initialize minRecoveryPoint if not set yet */ - if (ControlFile->minRecoveryPoint < checkPoint.redo) + (errmsg("database system was not properly shut down; " + "automatic recovery in progress"))); + if (recoveryTargetTLI > ControlFile->checkPointCopy.ThisTimeLineID) + ereport(LOG, + (errmsg("crash recovery starts in timeline %u " + "and has target timeline %u", + ControlFile->checkPointCopy.ThisTimeLineID, + recoveryTargetTLI))); + ControlFile->state = DB_IN_CRASH_RECOVERY; + } + ControlFile->checkPoint = CheckPointLoc; + ControlFile->checkPointCopy = checkPoint; + if (InArchiveRecovery) { - ControlFile->minRecoveryPoint = checkPoint.redo; - ControlFile->minRecoveryPointTLI = checkPoint.ThisTimeLineID; + /* initialize minRecoveryPoint if not set yet */ + if (ControlFile->minRecoveryPoint < checkPoint.redo) + { + ControlFile->minRecoveryPoint = checkPoint.redo; + ControlFile->minRecoveryPointTLI = checkPoint.ThisTimeLineID; + } } - } - - /* - * Set backupStartPoint if we're starting recovery from a base backup. 
- * - * Also set backupEndPoint and use minRecoveryPoint as the backup end - * location if we're starting recovery from a base backup which was taken - * from a standby. In this case, the database system status in pg_control - * must indicate that the database was already in recovery. Usually that - * will be DB_IN_ARCHIVE_RECOVERY but also can be - * DB_SHUTDOWNED_IN_RECOVERY if recovery previously was interrupted before - * reaching this point; e.g. because restore_command or primary_conninfo - * were faulty. - * - * Any other state indicates that the backup somehow became corrupted and - * we can't sensibly continue with recovery. - */ - if (haveBackupLabel) - { - ControlFile->backupStartPoint = checkPoint.redo; - ControlFile->backupEndRequired = backupEndRequired; - if (backupFromStandby) + /* + * Set backupStartPoint if we're starting recovery from a base backup. + * + * Also set backupEndPoint and use minRecoveryPoint as the backup end + * location if we're starting recovery from a base backup which was + * taken from a standby. In this case, the database system status in + * pg_control must indicate that the database was already in recovery. + * Usually that will be DB_IN_ARCHIVE_RECOVERY but also can be + * DB_SHUTDOWNED_IN_RECOVERY if recovery previously was interrupted + * before reaching this point; e.g. because restore_command or + * primary_conninfo were faulty. + * + * Any other state indicates that the backup somehow became corrupted + * and we can't sensibly continue with recovery. + */ + if (haveBackupLabel) { - if (dbstate_at_startup != DB_IN_ARCHIVE_RECOVERY && - dbstate_at_startup != DB_SHUTDOWNED_IN_RECOVERY) - ereport(FATAL, - (errmsg("backup_label contains data inconsistent with control file"), - errhint("This means that the backup is corrupted and you will " - "have to use another backup for recovery."))); - ControlFile->backupEndPoint = ControlFile->minRecoveryPoint; + ControlFile->backupStartPoint = checkPoint.redo; + ControlFile->backupEndRequired = backupEndRequired; + + if (backupFromStandby) + { + if (dbstate_at_startup != DB_IN_ARCHIVE_RECOVERY && + dbstate_at_startup != DB_SHUTDOWNED_IN_RECOVERY) + ereport(FATAL, + (errmsg("backup_label contains data inconsistent with control file"), + errhint("This means that the backup is corrupted and you will " + "have to use another backup for recovery."))); + ControlFile->backupEndPoint = ControlFile->minRecoveryPoint; + } } } From d61a361d1aef1231db61162d99b635b89c73169d Mon Sep 17 00:00:00 2001 From: Michael Paquier Date: Thu, 17 Feb 2022 09:52:02 +0900 Subject: [PATCH 002/108] Remove all traces of tuplestore_donestoring() in the C code This routine is a no-op since dd04e95 from 2003, with a macro kept around for compatibility purposes. This has led to the same code patterns being copy-pasted around for no effect, sometimes in confusing ways like in pg_logical_slot_get_changes_guts() from logical.c where the code was actually incorrect. This issue has been discussed on two different threads recently, so rather than living with this legacy, remove any uses of this routine in the C code to simplify things. The compatibility macro is kept to avoid breaking any out-of-core modules that depend on it. 
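For reference, a minimal sketch of the materialize-mode SRF pattern that the touched call sites follow (the function name and column here are illustrative, not taken from any of the patched files); filling the tuplestore and handing it back through ReturnSetInfo is all that is required, with no finalization step:

#include "postgres.h"
#include "funcapi.h"
#include "miscadmin.h"
#include "nodes/execnodes.h"
#include "utils/tuplestore.h"

PG_FUNCTION_INFO_V1(my_srf);

Datum
my_srf(PG_FUNCTION_ARGS)
{
	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
	TupleDesc	tupdesc;
	Tuplestorestate *tupstore;
	Datum		values[1];
	bool		nulls[1] = {false};

	/* Mode checks and the switch to the per-query memory context omitted. */
	if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
		elog(ERROR, "return type must be a row type");
	tupstore = tuplestore_begin_heap(true, false, work_mem);

	/* Store one result row. */
	values[0] = Int32GetDatum(42);
	tuplestore_putvalues(tupstore, tupdesc, values, nulls);

	/* No tuplestore_donestoring() call here: it has long been a no-op. */
	rsinfo->returnMode = SFRM_Materialize;
	rsinfo->setResult = tupstore;
	rsinfo->setDesc = tupdesc;
	return (Datum) 0;
}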
Reported-by: Tatsuhito Kasahara, Justin Pryzby Author: Tatsuhito Kasahara Discussion: https://postgr.es/m/20211217200419.GQ17618@telsasoft.com Discussion: https://postgr.es/m/CAP0=ZVJeeYfAeRfmzqAF2Lumdiv4S4FewyBnZd4DPTrsSQKJKw@mail.gmail.com --- contrib/dblink/dblink.c | 5 ----- contrib/pageinspect/brinfuncs.c | 2 -- contrib/pg_stat_statements/pg_stat_statements.c | 2 -- contrib/postgres_fdw/connection.c | 7 ------- contrib/tablefunc/tablefunc.c | 2 -- contrib/xml2/xpath.c | 2 -- src/backend/access/transam/xlogfuncs.c | 1 - src/backend/commands/event_trigger.c | 6 ------ src/backend/commands/extension.c | 9 --------- src/backend/commands/prepare.c | 3 --- src/backend/foreign/foreign.c | 3 --- src/backend/replication/logical/launcher.c | 3 --- src/backend/replication/logical/logicalfuncs.c | 2 -- src/backend/replication/logical/origin.c | 2 -- src/backend/replication/slotfuncs.c | 2 -- src/backend/replication/walsender.c | 3 --- src/backend/storage/ipc/shmem.c | 2 -- src/backend/utils/adt/mcxtfuncs.c | 3 --- src/backend/utils/adt/pgstatfuncs.c | 9 --------- src/backend/utils/adt/varlena.c | 2 -- src/backend/utils/misc/guc.c | 2 -- src/backend/utils/misc/pg_config.c | 1 - src/backend/utils/mmgr/portalmem.c | 3 --- src/include/utils/tuplestore.h | 2 +- 24 files changed, 1 insertion(+), 77 deletions(-) diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c index 5a37508c4b..efc4c94301 100644 --- a/contrib/dblink/dblink.c +++ b/contrib/dblink/dblink.c @@ -1005,8 +1005,6 @@ materializeResult(FunctionCallInfo fcinfo, PGconn *conn, PGresult *res) /* clean up GUC settings, if we changed any */ restoreLocalGucs(nestlevel); - /* clean up and return the tuplestore */ - tuplestore_donestoring(tupstore); } } PG_FINALLY(); @@ -1988,9 +1986,6 @@ dblink_get_notify(PG_FUNCTION_ARGS) PQconsumeInput(conn); } - /* clean up and return the tuplestore */ - tuplestore_donestoring(tupstore); - return (Datum) 0; } diff --git a/contrib/pageinspect/brinfuncs.c b/contrib/pageinspect/brinfuncs.c index f1e64a39ef..50892b5cc2 100644 --- a/contrib/pageinspect/brinfuncs.c +++ b/contrib/pageinspect/brinfuncs.c @@ -325,9 +325,7 @@ brin_page_items(PG_FUNCTION_ARGS) break; } - /* clean up and return the tuplestore */ brin_free_desc(bdesc); - tuplestore_donestoring(tupstore); index_close(indexRel, AccessShareLock); return (Datum) 0; diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c index 082bfa8f77..9d7d0812ac 100644 --- a/contrib/pg_stat_statements/pg_stat_statements.c +++ b/contrib/pg_stat_statements/pg_stat_statements.c @@ -1803,13 +1803,11 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo, tuplestore_putvalues(tupstore, tupdesc, values, nulls); } - /* clean up and return the tuplestore */ LWLockRelease(pgss->lock); if (qbuffer) free(qbuffer); - tuplestore_donestoring(tupstore); } /* Number of output arguments (columns) for pg_stat_statements_info */ diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c index 29fcb6a76e..f753c6e232 100644 --- a/contrib/postgres_fdw/connection.c +++ b/contrib/postgres_fdw/connection.c @@ -1508,12 +1508,7 @@ postgres_fdw_get_connections(PG_FUNCTION_ARGS) /* If cache doesn't exist, we return no records */ if (!ConnectionHash) - { - /* clean up and return the tuplestore */ - tuplestore_donestoring(tupstore); - PG_RETURN_VOID(); - } hash_seq_init(&scan, ConnectionHash); while ((entry = (ConnCacheEntry *) hash_seq_search(&scan))) @@ -1578,8 +1573,6 @@ postgres_fdw_get_connections(PG_FUNCTION_ARGS) 
tuplestore_putvalues(tupstore, tupdesc, values, nulls); } - /* clean up and return the tuplestore */ - tuplestore_donestoring(tupstore); PG_RETURN_VOID(); } diff --git a/contrib/tablefunc/tablefunc.c b/contrib/tablefunc/tablefunc.c index afbbdfcf86..e308228bde 100644 --- a/contrib/tablefunc/tablefunc.c +++ b/contrib/tablefunc/tablefunc.c @@ -943,8 +943,6 @@ get_crosstab_tuplestore(char *sql, /* internal error */ elog(ERROR, "get_crosstab_tuplestore: SPI_finish() failed"); - tuplestore_donestoring(tupstore); - return tupstore; } diff --git a/contrib/xml2/xpath.c b/contrib/xml2/xpath.c index 7fdde8eb51..a2e5fb54e2 100644 --- a/contrib/xml2/xpath.c +++ b/contrib/xml2/xpath.c @@ -783,8 +783,6 @@ xpath_table(PG_FUNCTION_ARGS) pg_xml_done(xmlerrcxt, false); - tuplestore_donestoring(tupstore); - SPI_finish(); rsinfo->setResult = tupstore; diff --git a/src/backend/access/transam/xlogfuncs.c b/src/backend/access/transam/xlogfuncs.c index 2f900533cd..12e2bf4135 100644 --- a/src/backend/access/transam/xlogfuncs.c +++ b/src/backend/access/transam/xlogfuncs.c @@ -252,7 +252,6 @@ pg_stop_backup_v2(PG_FUNCTION_ARGS) values[0] = LSNGetDatum(stoppoint); tuplestore_putvalues(tupstore, tupdesc, values, nulls); - tuplestore_donestoring(tupstore); return (Datum) 0; } diff --git a/src/backend/commands/event_trigger.c b/src/backend/commands/event_trigger.c index 93c2099735..1e8587502e 100644 --- a/src/backend/commands/event_trigger.c +++ b/src/backend/commands/event_trigger.c @@ -1401,9 +1401,6 @@ pg_event_trigger_dropped_objects(PG_FUNCTION_ARGS) tuplestore_putvalues(tupstore, tupdesc, values, nulls); } - /* clean up and return the tuplestore */ - tuplestore_donestoring(tupstore); - return (Datum) 0; } @@ -2061,9 +2058,6 @@ pg_event_trigger_ddl_commands(PG_FUNCTION_ARGS) tuplestore_putvalues(tupstore, tupdesc, values, nulls); } - /* clean up and return the tuplestore */ - tuplestore_donestoring(tupstore); - PG_RETURN_VOID(); } diff --git a/src/backend/commands/extension.c b/src/backend/commands/extension.c index a2e77c418a..0e04304cb0 100644 --- a/src/backend/commands/extension.c +++ b/src/backend/commands/extension.c @@ -2021,9 +2021,6 @@ pg_available_extensions(PG_FUNCTION_ARGS) FreeDir(dir); } - /* clean up and return the tuplestore */ - tuplestore_donestoring(tupstore); - return (Datum) 0; } @@ -2112,9 +2109,6 @@ pg_available_extension_versions(PG_FUNCTION_ARGS) FreeDir(dir); } - /* clean up and return the tuplestore */ - tuplestore_donestoring(tupstore); - return (Datum) 0; } @@ -2417,9 +2411,6 @@ pg_extension_update_paths(PG_FUNCTION_ARGS) } } - /* clean up and return the tuplestore */ - tuplestore_donestoring(tupstore); - return (Datum) 0; } diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c index 206d2bbbf9..e0c985ef8b 100644 --- a/src/backend/commands/prepare.c +++ b/src/backend/commands/prepare.c @@ -778,9 +778,6 @@ pg_prepared_statement(PG_FUNCTION_ARGS) } } - /* clean up and return the tuplestore */ - tuplestore_donestoring(tupstore); - rsinfo->returnMode = SFRM_Materialize; rsinfo->setResult = tupstore; rsinfo->setDesc = tupdesc; diff --git a/src/backend/foreign/foreign.c b/src/backend/foreign/foreign.c index 294e22c78c..d910bc2fbe 100644 --- a/src/backend/foreign/foreign.c +++ b/src/backend/foreign/foreign.c @@ -555,9 +555,6 @@ deflist_to_tuplestore(ReturnSetInfo *rsinfo, List *options) tuplestore_putvalues(tupstore, tupdesc, values, nulls); } - /* clean up and return the tuplestore */ - tuplestore_donestoring(tupstore); - MemoryContextSwitchTo(oldcontext); } diff --git 
a/src/backend/replication/logical/launcher.c b/src/backend/replication/logical/launcher.c index 7b473903a6..5a68d6dead 100644 --- a/src/backend/replication/logical/launcher.c +++ b/src/backend/replication/logical/launcher.c @@ -1022,8 +1022,5 @@ pg_stat_get_subscription(PG_FUNCTION_ARGS) LWLockRelease(LogicalRepWorkerLock); - /* clean up and return the tuplestore */ - tuplestore_donestoring(tupstore); - return (Datum) 0; } diff --git a/src/backend/replication/logical/logicalfuncs.c b/src/backend/replication/logical/logicalfuncs.c index c29e82307f..3609fa7d5b 100644 --- a/src/backend/replication/logical/logicalfuncs.c +++ b/src/backend/replication/logical/logicalfuncs.c @@ -296,8 +296,6 @@ pg_logical_slot_get_changes_guts(FunctionCallInfo fcinfo, bool confirm, bool bin CHECK_FOR_INTERRUPTS(); } - tuplestore_donestoring(tupstore); - /* * Logical decoding could have clobbered CurrentResourceOwner during * transaction management, so restore the executor's value. (This is diff --git a/src/backend/replication/logical/origin.c b/src/backend/replication/logical/origin.c index e91fa93d03..76055a8a03 100644 --- a/src/backend/replication/logical/origin.c +++ b/src/backend/replication/logical/origin.c @@ -1568,8 +1568,6 @@ pg_show_replication_origin_status(PG_FUNCTION_ARGS) tuplestore_putvalues(tupstore, tupdesc, values, nulls); } - tuplestore_donestoring(tupstore); - LWLockRelease(ReplicationOriginLock); #undef REPLICATION_ORIGIN_PROGRESS_COLS diff --git a/src/backend/replication/slotfuncs.c b/src/backend/replication/slotfuncs.c index 5149ebccb0..886899afd2 100644 --- a/src/backend/replication/slotfuncs.c +++ b/src/backend/replication/slotfuncs.c @@ -436,8 +436,6 @@ pg_get_replication_slots(PG_FUNCTION_ARGS) LWLockRelease(ReplicationSlotControlLock); - tuplestore_donestoring(tupstore); - return (Datum) 0; } diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c index a1dadd4c6a..5a718b1fe9 100644 --- a/src/backend/replication/walsender.c +++ b/src/backend/replication/walsender.c @@ -3580,9 +3580,6 @@ pg_stat_get_wal_senders(PG_FUNCTION_ARGS) tuplestore_putvalues(tupstore, tupdesc, values, nulls); } - /* clean up and return the tuplestore */ - tuplestore_donestoring(tupstore); - return (Datum) 0; } diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c index c682775db4..1f023a3460 100644 --- a/src/backend/storage/ipc/shmem.c +++ b/src/backend/storage/ipc/shmem.c @@ -605,7 +605,5 @@ pg_get_shmem_allocations(PG_FUNCTION_ARGS) LWLockRelease(ShmemIndexLock); - tuplestore_donestoring(tupstore); - return (Datum) 0; } diff --git a/src/backend/utils/adt/mcxtfuncs.c b/src/backend/utils/adt/mcxtfuncs.c index 28cb9d3ff1..c7c95adf97 100644 --- a/src/backend/utils/adt/mcxtfuncs.c +++ b/src/backend/utils/adt/mcxtfuncs.c @@ -152,9 +152,6 @@ pg_get_backend_memory_contexts(PG_FUNCTION_ARGS) PutMemoryContextsStatsTupleStore(tupstore, tupdesc, TopMemoryContext, NULL, 0); - /* clean up and return the tuplestore */ - tuplestore_donestoring(tupstore); - return (Datum) 0; } diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c index 15cb17ace4..30e8dfa7c1 100644 --- a/src/backend/utils/adt/pgstatfuncs.c +++ b/src/backend/utils/adt/pgstatfuncs.c @@ -555,9 +555,6 @@ pg_stat_get_progress_info(PG_FUNCTION_ARGS) tuplestore_putvalues(tupstore, tupdesc, values, nulls); } - /* clean up and return the tuplestore */ - tuplestore_donestoring(tupstore); - return (Datum) 0; } @@ -953,9 +950,6 @@ pg_stat_get_activity(PG_FUNCTION_ARGS) break; } - /* 
clean up and return the tuplestore */ - tuplestore_donestoring(tupstore); - return (Datum) 0; } @@ -1936,9 +1930,6 @@ pg_stat_get_slru(PG_FUNCTION_ARGS) tuplestore_putvalues(tupstore, tupdesc, values, nulls); } - /* clean up and return the tuplestore */ - tuplestore_donestoring(tupstore); - return (Datum) 0; } diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c index b73cebfdb5..eda9c1e42c 100644 --- a/src/backend/utils/adt/varlena.c +++ b/src/backend/utils/adt/varlena.c @@ -4855,8 +4855,6 @@ text_to_table(PG_FUNCTION_ARGS) (void) split_text(fcinfo, &tstate); - tuplestore_donestoring(tstate.tupstore); - rsi->returnMode = SFRM_Materialize; rsi->setResult = tstate.tupstore; rsi->setDesc = tstate.tupdesc; diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index 568ac62c2a..9d0208ec98 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -10206,8 +10206,6 @@ show_all_file_settings(PG_FUNCTION_ARGS) tuplestore_putvalues(tupstore, tupdesc, values, nulls); } - tuplestore_donestoring(tupstore); - return (Datum) 0; } diff --git a/src/backend/utils/misc/pg_config.c b/src/backend/utils/misc/pg_config.c index 7a13212f99..d916d7b2c4 100644 --- a/src/backend/utils/misc/pg_config.c +++ b/src/backend/utils/misc/pg_config.c @@ -85,7 +85,6 @@ pg_config(PG_FUNCTION_ARGS) */ ReleaseTupleDesc(tupdesc); - tuplestore_donestoring(tupstore); rsinfo->setResult = tupstore; /* diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c index 236f450a2b..7885344164 100644 --- a/src/backend/utils/mmgr/portalmem.c +++ b/src/backend/utils/mmgr/portalmem.c @@ -1204,9 +1204,6 @@ pg_cursor(PG_FUNCTION_ARGS) tuplestore_putvalues(tupstore, tupdesc, values, nulls); } - /* clean up and return the tuplestore */ - tuplestore_donestoring(tupstore); - rsinfo->returnMode = SFRM_Materialize; rsinfo->setResult = tupstore; rsinfo->setDesc = tupdesc; diff --git a/src/include/utils/tuplestore.h b/src/include/utils/tuplestore.h index 399a8493cf..01716fb44e 100644 --- a/src/include/utils/tuplestore.h +++ b/src/include/utils/tuplestore.h @@ -56,7 +56,7 @@ extern void tuplestore_puttuple(Tuplestorestate *state, HeapTuple tuple); extern void tuplestore_putvalues(Tuplestorestate *state, TupleDesc tdesc, Datum *values, bool *isnull); -/* tuplestore_donestoring() used to be required, but is no longer used */ +/* Backwards compatibility macro */ #define tuplestore_donestoring(state) ((void) 0) extern int tuplestore_alloc_read_pointer(Tuplestorestate *state, int eflags); From 74388a1ac36d2f0206c5477eeddc636d7947a5a4 Mon Sep 17 00:00:00 2001 From: Peter Geoghegan Date: Wed, 16 Feb 2022 17:15:50 -0800 Subject: [PATCH 003/108] Avoid VACUUM reltuples distortion. Add a heuristic that avoids distortion in the pg_class.reltuples estimates used by VACUUM. Without the heuristic, successive manually run VACUUM commands (run against a table that is never modified after initial bulk loading) will scan the same page in each VACUUM operation. Eventually pg_class.reltuples may reach the point where one single heap page is accidentally considered highly representative of the entire table. This is likely to be completely wrong, since the last heap page typically has fewer tuples than average for the table. It's not obvious that this was a problem prior to commit 44fa8488, which made vacuumlazy.c consistently scan the last heap page (even when it is all-visible in the visibility map). 
It seems possible that there were more subtle variants of the same problem that went unnoticed for quite some time, though. Commit 44fa8488 simplified certain aspects of when and how relation truncation was considered, but it did not introduce the "scan the last page" behavior. Essentially the same behavior was introduced much earlier, in commit e8429082. It was conditioned on whether or not truncation looked promising towards the end of the initial heap pass by VACUUM until recently, which was at least somewhat protective. That doesn't seem like something that we should be relying on, though. Author: Peter Geoghegan Discussion: https://postgr.es/m/CAH2-WzkNKORurux459M64mR63Aw4Jq7MBRVcX=CvALqN3A88WA@mail.gmail.com --- src/backend/commands/vacuum.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c index b6767a5ff8..50a4a612e5 100644 --- a/src/backend/commands/vacuum.c +++ b/src/backend/commands/vacuum.c @@ -1238,6 +1238,25 @@ vac_estimate_reltuples(Relation relation, if (scanned_pages == 0) return old_rel_tuples; + /* + * When successive VACUUM commands scan the same few pages again and + * again, without anything from the table really changing, there is a risk + * that our beliefs about tuple density will gradually become distorted. + * It's particularly important to avoid becoming confused in this way due + * to vacuumlazy.c implementation details. For example, the tendency for + * our caller to always scan the last heap page should not ever cause us + * to believe that every page in the table must be just like the last + * page. + * + * We apply a heuristic to avoid these problems: if the relation is + * exactly the same size as it was at the end of the last VACUUM, and only + * a few of its pages (less than a quasi-arbitrary threshold of 2%) were + * scanned by this VACUUM, assume that reltuples has not changed at all. + */ + if (old_rel_pages == total_pages && + scanned_pages < (double) total_pages * 0.02) + return old_rel_tuples; + /* * If old density is unknown, we can't do much except scale up * scanned_tuples to match total_pages. From 8f388f6f554b113f25a53fe3237238d2c58ed1eb Mon Sep 17 00:00:00 2001 From: Peter Geoghegan Date: Wed, 16 Feb 2022 18:41:52 -0800 Subject: [PATCH 004/108] Increase hash_mem_multiplier default to 2.0. Double the default setting for hash_mem_multiplier, from 1.0 to 2.0. This setting makes hash-based executor nodes use twice the usual work_mem limit. The PostgreSQL 15 release notes should have a compatibility note about this change. Author: Peter Geoghegan Discussion: https://postgr.es/m/CAH2-Wzndc_ROk6CY-bC6p9O53q974Y0Ey4WX8jcPbuTZYM4Q3A@mail.gmail.com --- doc/src/sgml/config.sgml | 7 +++---- src/backend/utils/init/globals.c | 2 +- src/backend/utils/misc/guc.c | 2 +- src/backend/utils/misc/postgresql.conf.sample | 2 +- src/test/regress/expected/groupingsets.out | 2 ++ src/test/regress/expected/join_hash.out | 18 ++++++++++++++++++ src/test/regress/expected/memoize.out | 4 +++- src/test/regress/sql/groupingsets.sql | 2 ++ src/test/regress/sql/join_hash.sql | 18 ++++++++++++++++++ src/test/regress/sql/memoize.sql | 4 +++- 10 files changed, 52 insertions(+), 9 deletions(-) diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml index 53b361e7a9..d99bf38e67 100644 --- a/doc/src/sgml/config.sgml +++ b/doc/src/sgml/config.sgml @@ -1849,9 +1849,8 @@ include_dir 'conf.d' operations can use. The final limit is determined by multiplying work_mem by hash_mem_multiplier. 
The default value is - 1.0, which makes hash-based operations subject to the same - simple work_mem maximum as sort-based - operations. + 2.0, which makes hash-based operations use twice the usual + work_mem base amount. Consider increasing hash_mem_multiplier in @@ -1859,7 +1858,7 @@ include_dir 'conf.d' occurrence, especially when simply increasing work_mem results in memory pressure (memory pressure typically takes the form of intermittent out of - memory errors). A setting of 1.5 or 2.0 may be effective with + memory errors). The default setting of 2.0 is often effective with mixed workloads. Higher settings in the range of 2.0 - 8.0 or more may be effective in environments where work_mem has already been increased to 40MB diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c index c26a1a73df..3419c099b2 100644 --- a/src/backend/utils/init/globals.c +++ b/src/backend/utils/init/globals.c @@ -122,7 +122,7 @@ int IntervalStyle = INTSTYLE_POSTGRES; bool enableFsync = true; bool allowSystemTableMods = false; int work_mem = 4096; -double hash_mem_multiplier = 1.0; +double hash_mem_multiplier = 2.0; int maintenance_work_mem = 65536; int max_parallel_maintenance_workers = 2; diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index 9d0208ec98..01f373815e 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -3762,7 +3762,7 @@ static struct config_real ConfigureNamesReal[] = GUC_EXPLAIN }, &hash_mem_multiplier, - 1.0, 1.0, 1000.0, + 2.0, 1.0, 1000.0, NULL, NULL, NULL }, diff --git a/src/backend/utils/misc/postgresql.conf.sample b/src/backend/utils/misc/postgresql.conf.sample index 56d0bee6d9..4a094bb38b 100644 --- a/src/backend/utils/misc/postgresql.conf.sample +++ b/src/backend/utils/misc/postgresql.conf.sample @@ -136,7 +136,7 @@ # Caution: it is not advisable to set max_prepared_transactions nonzero unless # you actively intend to use prepared transactions. 
#work_mem = 4MB # min 64kB -#hash_mem_multiplier = 1.0 # 1-1000.0 multiplier on hash table work_mem +#hash_mem_multiplier = 2.0 # 1-1000.0 multiplier on hash table work_mem #maintenance_work_mem = 64MB # min 1MB #autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem #logical_decoding_work_mem = 64MB # min 64kB diff --git a/src/test/regress/expected/groupingsets.out b/src/test/regress/expected/groupingsets.out index 4c467c1b15..58a25b691a 100644 --- a/src/test/regress/expected/groupingsets.out +++ b/src/test/regress/expected/groupingsets.out @@ -1574,6 +1574,7 @@ select array(select row(v.a,s1.*) from (select two,four, count(*) from onek grou -- test the knapsack set enable_indexscan = false; +set hash_mem_multiplier = 1.0; set work_mem = '64kB'; explain (costs off) select unique1, @@ -1919,6 +1920,7 @@ select g100, g10, sum(g::numeric), count(*), max(g::text) from gs_data_1 group by cube (g1000, g100,g10); set enable_sort = true; set work_mem to default; +set hash_mem_multiplier to default; -- Compare results (select * from gs_hash_1 except select * from gs_group_1) union all diff --git a/src/test/regress/expected/join_hash.out b/src/test/regress/expected/join_hash.out index 3a91c144a2..3ec07bc1af 100644 --- a/src/test/regress/expected/join_hash.out +++ b/src/test/regress/expected/join_hash.out @@ -86,6 +86,7 @@ alter table wide set (parallel_workers = 2); savepoint settings; set local max_parallel_workers_per_gather = 0; set local work_mem = '4MB'; +set local hash_mem_multiplier = 1.0; explain (costs off) select count(*) from simple r join simple s using (id); QUERY PLAN @@ -119,6 +120,7 @@ rollback to settings; savepoint settings; set local max_parallel_workers_per_gather = 2; set local work_mem = '4MB'; +set local hash_mem_multiplier = 1.0; set local enable_parallel_hash = off; explain (costs off) select count(*) from simple r join simple s using (id); @@ -156,6 +158,7 @@ rollback to settings; savepoint settings; set local max_parallel_workers_per_gather = 2; set local work_mem = '4MB'; +set local hash_mem_multiplier = 1.0; set local enable_parallel_hash = on; explain (costs off) select count(*) from simple r join simple s using (id); @@ -196,6 +199,7 @@ rollback to settings; savepoint settings; set local max_parallel_workers_per_gather = 0; set local work_mem = '128kB'; +set local hash_mem_multiplier = 1.0; explain (costs off) select count(*) from simple r join simple s using (id); QUERY PLAN @@ -229,6 +233,7 @@ rollback to settings; savepoint settings; set local max_parallel_workers_per_gather = 2; set local work_mem = '128kB'; +set local hash_mem_multiplier = 1.0; set local enable_parallel_hash = off; explain (costs off) select count(*) from simple r join simple s using (id); @@ -266,6 +271,7 @@ rollback to settings; savepoint settings; set local max_parallel_workers_per_gather = 2; set local work_mem = '192kB'; +set local hash_mem_multiplier = 1.0; set local enable_parallel_hash = on; explain (costs off) select count(*) from simple r join simple s using (id); @@ -307,6 +313,7 @@ rollback to settings; savepoint settings; set local max_parallel_workers_per_gather = 0; set local work_mem = '128kB'; +set local hash_mem_multiplier = 1.0; explain (costs off) select count(*) FROM simple r JOIN bigger_than_it_looks s USING (id); QUERY PLAN @@ -340,6 +347,7 @@ rollback to settings; savepoint settings; set local max_parallel_workers_per_gather = 2; set local work_mem = '128kB'; +set local hash_mem_multiplier = 1.0; set local enable_parallel_hash = off; explain (costs off) 
select count(*) from simple r join bigger_than_it_looks s using (id); @@ -377,6 +385,7 @@ rollback to settings; savepoint settings; set local max_parallel_workers_per_gather = 1; set local work_mem = '192kB'; +set local hash_mem_multiplier = 1.0; set local enable_parallel_hash = on; explain (costs off) select count(*) from simple r join bigger_than_it_looks s using (id); @@ -419,6 +428,7 @@ rollback to settings; savepoint settings; set local max_parallel_workers_per_gather = 0; set local work_mem = '128kB'; +set local hash_mem_multiplier = 1.0; explain (costs off) select count(*) from simple r join extremely_skewed s using (id); QUERY PLAN @@ -451,6 +461,7 @@ rollback to settings; savepoint settings; set local max_parallel_workers_per_gather = 2; set local work_mem = '128kB'; +set local hash_mem_multiplier = 1.0; set local enable_parallel_hash = off; explain (costs off) select count(*) from simple r join extremely_skewed s using (id); @@ -486,6 +497,7 @@ rollback to settings; savepoint settings; set local max_parallel_workers_per_gather = 1; set local work_mem = '128kB'; +set local hash_mem_multiplier = 1.0; set local enable_parallel_hash = on; explain (costs off) select count(*) from simple r join extremely_skewed s using (id); @@ -523,6 +535,7 @@ rollback to settings; savepoint settings; set local max_parallel_workers_per_gather = 2; set local work_mem = '4MB'; +set local hash_mem_multiplier = 1.0; set local parallel_leader_participation = off; select * from hash_join_batches( $$ @@ -551,6 +564,7 @@ set max_parallel_workers_per_gather = 2; set enable_material = off; set enable_mergejoin = off; set work_mem = '64kB'; +set hash_mem_multiplier = 1.0; explain (costs off) select count(*) from join_foo left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss @@ -602,6 +616,7 @@ set max_parallel_workers_per_gather = 2; set enable_material = off; set enable_mergejoin = off; set work_mem = '4MB'; +set hash_mem_multiplier = 1.0; explain (costs off) select count(*) from join_foo left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss @@ -653,6 +668,7 @@ set max_parallel_workers_per_gather = 2; set enable_material = off; set enable_mergejoin = off; set work_mem = '64kB'; +set hash_mem_multiplier = 1.0; explain (costs off) select count(*) from join_foo left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss @@ -704,6 +720,7 @@ set max_parallel_workers_per_gather = 2; set enable_material = off; set enable_mergejoin = off; set work_mem = '4MB'; +set hash_mem_multiplier = 1.0; explain (costs off) select count(*) from join_foo left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss @@ -843,6 +860,7 @@ savepoint settings; set max_parallel_workers_per_gather = 2; set enable_parallel_hash = on; set work_mem = '128kB'; +set hash_mem_multiplier = 1.0; explain (costs off) select length(max(s.t)) from wide left join (select id, coalesce(t, '') || '' as t from wide) s using (id); diff --git a/src/test/regress/expected/memoize.out b/src/test/regress/expected/memoize.out index 4ca0bd1f1e..00438eb1ea 100644 --- a/src/test/regress/expected/memoize.out +++ b/src/test/regress/expected/memoize.out @@ -90,8 +90,9 @@ WHERE t1.unique1 < 1000; 1000 | 9.5000000000000000 (1 row) --- Reduce work_mem so that we see some cache evictions +-- Reduce work_mem and hash_mem_multiplier so that we see some cache evictions SET work_mem TO '64kB'; +SET hash_mem_multiplier TO 1.0; SET enable_mergejoin TO off; -- Ensure we get some evictions. 
We're unable to validate the hits and misses -- here as the number of entries that fit in the cache at once will vary @@ -238,6 +239,7 @@ WHERE unique1 < 3 RESET enable_seqscan; RESET enable_mergejoin; RESET work_mem; +RESET hash_mem_multiplier; RESET enable_bitmapscan; RESET enable_hashjoin; -- Test parallel plans with Memoize diff --git a/src/test/regress/sql/groupingsets.sql b/src/test/regress/sql/groupingsets.sql index 3944944704..473d21f6b9 100644 --- a/src/test/regress/sql/groupingsets.sql +++ b/src/test/regress/sql/groupingsets.sql @@ -424,6 +424,7 @@ select array(select row(v.a,s1.*) from (select two,four, count(*) from onek grou -- test the knapsack set enable_indexscan = false; +set hash_mem_multiplier = 1.0; set work_mem = '64kB'; explain (costs off) select unique1, @@ -519,6 +520,7 @@ from gs_data_1 group by cube (g1000, g100,g10); set enable_sort = true; set work_mem to default; +set hash_mem_multiplier to default; -- Compare results diff --git a/src/test/regress/sql/join_hash.sql b/src/test/regress/sql/join_hash.sql index 68c1a8c7b6..77dbc182d5 100644 --- a/src/test/regress/sql/join_hash.sql +++ b/src/test/regress/sql/join_hash.sql @@ -95,6 +95,7 @@ alter table wide set (parallel_workers = 2); savepoint settings; set local max_parallel_workers_per_gather = 0; set local work_mem = '4MB'; +set local hash_mem_multiplier = 1.0; explain (costs off) select count(*) from simple r join simple s using (id); select count(*) from simple r join simple s using (id); @@ -109,6 +110,7 @@ rollback to settings; savepoint settings; set local max_parallel_workers_per_gather = 2; set local work_mem = '4MB'; +set local hash_mem_multiplier = 1.0; set local enable_parallel_hash = off; explain (costs off) select count(*) from simple r join simple s using (id); @@ -124,6 +126,7 @@ rollback to settings; savepoint settings; set local max_parallel_workers_per_gather = 2; set local work_mem = '4MB'; +set local hash_mem_multiplier = 1.0; set local enable_parallel_hash = on; explain (costs off) select count(*) from simple r join simple s using (id); @@ -143,6 +146,7 @@ rollback to settings; savepoint settings; set local max_parallel_workers_per_gather = 0; set local work_mem = '128kB'; +set local hash_mem_multiplier = 1.0; explain (costs off) select count(*) from simple r join simple s using (id); select count(*) from simple r join simple s using (id); @@ -157,6 +161,7 @@ rollback to settings; savepoint settings; set local max_parallel_workers_per_gather = 2; set local work_mem = '128kB'; +set local hash_mem_multiplier = 1.0; set local enable_parallel_hash = off; explain (costs off) select count(*) from simple r join simple s using (id); @@ -172,6 +177,7 @@ rollback to settings; savepoint settings; set local max_parallel_workers_per_gather = 2; set local work_mem = '192kB'; +set local hash_mem_multiplier = 1.0; set local enable_parallel_hash = on; explain (costs off) select count(*) from simple r join simple s using (id); @@ -192,6 +198,7 @@ rollback to settings; savepoint settings; set local max_parallel_workers_per_gather = 0; set local work_mem = '128kB'; +set local hash_mem_multiplier = 1.0; explain (costs off) select count(*) FROM simple r JOIN bigger_than_it_looks s USING (id); select count(*) FROM simple r JOIN bigger_than_it_looks s USING (id); @@ -206,6 +213,7 @@ rollback to settings; savepoint settings; set local max_parallel_workers_per_gather = 2; set local work_mem = '128kB'; +set local hash_mem_multiplier = 1.0; set local enable_parallel_hash = off; explain (costs off) select count(*) from 
simple r join bigger_than_it_looks s using (id); @@ -221,6 +229,7 @@ rollback to settings; savepoint settings; set local max_parallel_workers_per_gather = 1; set local work_mem = '192kB'; +set local hash_mem_multiplier = 1.0; set local enable_parallel_hash = on; explain (costs off) select count(*) from simple r join bigger_than_it_looks s using (id); @@ -242,6 +251,7 @@ rollback to settings; savepoint settings; set local max_parallel_workers_per_gather = 0; set local work_mem = '128kB'; +set local hash_mem_multiplier = 1.0; explain (costs off) select count(*) from simple r join extremely_skewed s using (id); select count(*) from simple r join extremely_skewed s using (id); @@ -255,6 +265,7 @@ rollback to settings; savepoint settings; set local max_parallel_workers_per_gather = 2; set local work_mem = '128kB'; +set local hash_mem_multiplier = 1.0; set local enable_parallel_hash = off; explain (costs off) select count(*) from simple r join extremely_skewed s using (id); @@ -269,6 +280,7 @@ rollback to settings; savepoint settings; set local max_parallel_workers_per_gather = 1; set local work_mem = '128kB'; +set local hash_mem_multiplier = 1.0; set local enable_parallel_hash = on; explain (costs off) select count(*) from simple r join extremely_skewed s using (id); @@ -285,6 +297,7 @@ rollback to settings; savepoint settings; set local max_parallel_workers_per_gather = 2; set local work_mem = '4MB'; +set local hash_mem_multiplier = 1.0; set local parallel_leader_participation = off; select * from hash_join_batches( $$ @@ -311,6 +324,7 @@ set max_parallel_workers_per_gather = 2; set enable_material = off; set enable_mergejoin = off; set work_mem = '64kB'; +set hash_mem_multiplier = 1.0; explain (costs off) select count(*) from join_foo left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss @@ -338,6 +352,7 @@ set max_parallel_workers_per_gather = 2; set enable_material = off; set enable_mergejoin = off; set work_mem = '4MB'; +set hash_mem_multiplier = 1.0; explain (costs off) select count(*) from join_foo left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss @@ -365,6 +380,7 @@ set max_parallel_workers_per_gather = 2; set enable_material = off; set enable_mergejoin = off; set work_mem = '64kB'; +set hash_mem_multiplier = 1.0; explain (costs off) select count(*) from join_foo left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss @@ -392,6 +408,7 @@ set max_parallel_workers_per_gather = 2; set enable_material = off; set enable_mergejoin = off; set work_mem = '4MB'; +set hash_mem_multiplier = 1.0; explain (costs off) select count(*) from join_foo left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss @@ -454,6 +471,7 @@ savepoint settings; set max_parallel_workers_per_gather = 2; set enable_parallel_hash = on; set work_mem = '128kB'; +set hash_mem_multiplier = 1.0; explain (costs off) select length(max(s.t)) from wide left join (select id, coalesce(t, '') || '' as t from wide) s using (id); diff --git a/src/test/regress/sql/memoize.sql b/src/test/regress/sql/memoize.sql index c6ed5a2aa6..0979bcdf76 100644 --- a/src/test/regress/sql/memoize.sql +++ b/src/test/regress/sql/memoize.sql @@ -55,8 +55,9 @@ SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1, LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2 WHERE t1.unique1 < 1000; --- Reduce work_mem so that we see some cache evictions +-- Reduce work_mem and hash_mem_multiplier so that we see some cache evictions SET work_mem TO '64kB'; 
+SET hash_mem_multiplier TO 1.0; SET enable_mergejoin TO off; -- Ensure we get some evictions. We're unable to validate the hits and misses -- here as the number of entries that fit in the cache at once will vary @@ -126,6 +127,7 @@ WHERE unique1 < 3 RESET enable_seqscan; RESET enable_mergejoin; RESET work_mem; +RESET hash_mem_multiplier; RESET enable_bitmapscan; RESET enable_hashjoin; From 19252e8ec938bf07897c1519f367d0467a39242c Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Wed, 16 Feb 2022 22:47:35 -0800 Subject: [PATCH 005/108] plpython: Reject Python 2 during build configuration. Python 2.7 went EOL 2020-01-01 and the support for Python 2 requires a fair bit of infrastructure. Therefore we are removing Python 2 support in plpython. This patch just rejects Python 2 during configure / mkvcbuild.pl. Future commits will remove the code and infrastructure for Python 2 support and adjust more of the documentation. This way we can see the buildfarm state after the removal sooner and we can be sure that failures are due to desupporting Python 2, rather than caused by infrastructure cleanup. Reviewed-By: Peter Eisentraut Discussion: https://postgr.es/m/20211031184548.g4sxfe47n2kyi55r@alap3.anarazel.de --- config/python.m4 | 9 ++++++--- configure | 6 +++--- doc/src/sgml/install-windows.sgml | 2 +- doc/src/sgml/installation.sgml | 18 +++++------------- src/tools/msvc/Mkvcbuild.pm | 4 ++++ 5 files changed, 19 insertions(+), 20 deletions(-) diff --git a/config/python.m4 b/config/python.m4 index c7310ee37d..52f34070dd 100644 --- a/config/python.m4 +++ b/config/python.m4 @@ -9,6 +9,9 @@ # Look for Python and set the output variable 'PYTHON' if found, # fail otherwise. # +# Since we are supporting only Python 3.x, prefer python3 to plain python. If +# the latter exists at all, it very possibly points to python2. + # As the Python 3 transition happens and PEP 394 isn't updated, we # need to cater to systems that don't have unversioned "python" by # default. Some systems ship with "python3" by default and perhaps @@ -16,7 +19,7 @@ # "python2" and "python3", in which case it's reasonable to prefer the # newer version. AC_DEFUN([PGAC_PATH_PYTHON], -[PGAC_PATH_PROGS(PYTHON, [python python3 python2]) +[PGAC_PATH_PROGS(PYTHON, [python3 python]) AC_ARG_VAR(PYTHON, [Python program])dnl if test x"$PYTHON" = x""; then AC_MSG_ERROR([Python not found]) @@ -37,8 +40,8 @@ python_majorversion=`echo "$python_fullversion" | sed '[s/^\([0-9]*\).*/\1/]'` python_minorversion=`echo "$python_fullversion" | sed '[s/^[0-9]*\.\([0-9]*\).*/\1/]'` python_version=`echo "$python_fullversion" | sed '[s/^\([0-9]*\.[0-9]*\).*/\1/]'` # Reject unsupported Python versions as soon as practical. -if test "$python_majorversion" -lt 3 -a "$python_minorversion" -lt 7; then - AC_MSG_ERROR([Python version $python_version is too old (version 2.7 or later is required)]) +if test "$python_majorversion" -lt 3; then + AC_MSG_ERROR([Python version $python_version is too old (version 3 or later is required)]) fi AC_MSG_CHECKING([for Python sysconfig module]) diff --git a/configure b/configure index 9305555658..ba635a0062 100755 --- a/configure +++ b/configure @@ -10280,7 +10280,7 @@ fi if test "$with_python" = yes; then if test -z "$PYTHON"; then - for ac_prog in python python3 python2 + for ac_prog in python3 python do # Extract the first word of "$ac_prog", so it can be a program name with args. 
set dummy $ac_prog; ac_word=$2 @@ -10346,8 +10346,8 @@ python_majorversion=`echo "$python_fullversion" | sed 's/^\([0-9]*\).*/\1/'` python_minorversion=`echo "$python_fullversion" | sed 's/^[0-9]*\.\([0-9]*\).*/\1/'` python_version=`echo "$python_fullversion" | sed 's/^\([0-9]*\.[0-9]*\).*/\1/'` # Reject unsupported Python versions as soon as practical. -if test "$python_majorversion" -lt 3 -a "$python_minorversion" -lt 7; then - as_fn_error $? "Python version $python_version is too old (version 2.7 or later is required)" "$LINENO" 5 +if test "$python_majorversion" -lt 3; then + as_fn_error $? "Python version $python_version is too old (version 3 or later is required)" "$LINENO" 5 fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Python sysconfig module" >&5 diff --git a/doc/src/sgml/install-windows.sgml b/doc/src/sgml/install-windows.sgml index 30dd0c7f75..b3435eabc4 100644 --- a/doc/src/sgml/install-windows.sgml +++ b/doc/src/sgml/install-windows.sgml @@ -136,7 +136,7 @@ to specify the location of your Python installation, put the following in config.pl: -$config->{python} = 'c:\python26'; +$config->{python} = 'c:\python310'; You only need to specify those parameters that are different from what's in config_default.pl. diff --git a/doc/src/sgml/installation.sgml b/doc/src/sgml/installation.sgml index 655095f3b1..094d23c292 100644 --- a/doc/src/sgml/installation.sgml +++ b/doc/src/sgml/installation.sgml @@ -196,11 +196,7 @@ su - postgres language, you need a Python installation with the header files and the sysconfig module. The minimum - required version is Python 2.7. - Python 3 is supported if it's - version 3.2 or later; but see - - when using Python 3. + required version is Python 3.2. @@ -1868,14 +1864,10 @@ build-postgresql: PYTHON - Python interpreter program. This will be used to - determine the dependencies for building PL/Python. Also, - whether Python 2 or 3 is specified here (or otherwise - implicitly chosen) determines which variant of the PL/Python - language becomes available. See - - for more information. If this is not set, the following are probed - in this order: python python3 python2. + Python interpreter program. This will be used to determine the + dependencies for building PL/Python. If this is not set, the + following are probed in this order: + python3 python. diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm index bab81bd459..105f5c72a2 100644 --- a/src/tools/msvc/Mkvcbuild.pm +++ b/src/tools/msvc/Mkvcbuild.pm @@ -490,6 +490,10 @@ sub mkvcbuild if (!(defined($pyprefix) && defined($pyver))); my $pymajorver = substr($pyver, 0, 1); + + die "Python version $pyver is too old (version 3 or later is required)" + if int($pymajorver) < 3; + my $plpython = $solution->AddProject('plpython' . $pymajorver, 'dll', 'PLs', 'src/pl/plpython'); $plpython->AddIncludeDir($pyprefix . '/include'); From f1ac4a74dee5ac0c89612fe2ac6e48082edbec23 Mon Sep 17 00:00:00 2001 From: Andrew Dunstan Date: Thu, 17 Feb 2022 09:59:59 -0500 Subject: [PATCH 006/108] Disable perl2host() processing in TAP tests This is a preliminary step towards removing it altogether, but this lets us double check that nothing breaks in the buildfarm before we do. 
Discussion: https://postgr.es/m/0ba775a2-8aa0-0d56-d780-69427cf6f33d@dunslane.net --- src/test/perl/PostgreSQL/Test/Utils.pm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/perl/PostgreSQL/Test/Utils.pm b/src/test/perl/PostgreSQL/Test/Utils.pm index 57fcb24089..31e2b0315e 100644 --- a/src/test/perl/PostgreSQL/Test/Utils.pm +++ b/src/test/perl/PostgreSQL/Test/Utils.pm @@ -311,7 +311,7 @@ The returned path uses forward slashes but has no trailing slash. sub perl2host { my ($subject) = @_; - return $subject unless $Config{osname} eq 'msys'; + return $subject; if ($is_msys2) { # get absolute, windows type path From 138c51b72168e7b57c9edb4e9935274d3abf6bed Mon Sep 17 00:00:00 2001 From: Robert Haas Date: Thu, 17 Feb 2022 10:53:51 -0500 Subject: [PATCH 007/108] Add missing binary-upgrade guard. Commit 9a974cbcba005256a19991203583a94b4f9a21a9 arranged for pg_dumpall to preserve tablespace OIDs, but it should only do that in binary upgrade mode, not all the time. Reported by Christoph Berg. Discussion: http://postgr.es/m/YgjwrkEvNEqoz4Vm@msg.df7cb.de --- src/bin/pg_dump/pg_dumpall.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c index 10383c713f..9c9f7c6d63 100644 --- a/src/bin/pg_dump/pg_dumpall.c +++ b/src/bin/pg_dump/pg_dumpall.c @@ -1066,8 +1066,11 @@ dumpTablespaces(PGconn *conn) /* needed for buildACLCommands() */ fspcname = pg_strdup(fmtId(spcname)); - appendPQExpBufferStr(buf, "\n-- For binary upgrade, must preserve pg_tablespace oid\n"); - appendPQExpBuffer(buf, "SELECT pg_catalog.binary_upgrade_set_next_pg_tablespace_oid('%u'::pg_catalog.oid);\n", spcoid); + if (binary_upgrade) + { + appendPQExpBufferStr(buf, "\n-- For binary upgrade, must preserve pg_tablespace oid\n"); + appendPQExpBuffer(buf, "SELECT pg_catalog.binary_upgrade_set_next_pg_tablespace_oid('%u'::pg_catalog.oid);\n", spcoid); + } appendPQExpBuffer(buf, "CREATE TABLESPACE %s", fspcname); appendPQExpBuffer(buf, " OWNER %s", fmtId(spcowner)); From 62cb7427d1e491faf8612a82c2e3711a8cd65422 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Thu, 17 Feb 2022 15:03:40 -0500 Subject: [PATCH 008/108] Avoid dangling-pointer usage in pg_basebackup progress reports. Ill-considered refactoring in 23a1c6578 led to progress_filename sometimes pointing to data that had gone out of scope. The most bulletproof fix is to hang onto a copy of whatever's passed in. Compared to the work spent elsewhere per file, that's not very expensive, plus we can skip it except in verbose logging mode. Per buildfarm. Discussion: https://postgr.es/m/20220212211316.GK31460@telsasoft.com --- src/bin/pg_basebackup/pg_basebackup.c | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/src/bin/pg_basebackup/pg_basebackup.c b/src/bin/pg_basebackup/pg_basebackup.c index 0003b59615..08b07d5a06 100644 --- a/src/bin/pg_basebackup/pg_basebackup.c +++ b/src/bin/pg_basebackup/pg_basebackup.c @@ -164,7 +164,7 @@ static bool found_tablespace_dirs = false; static uint64 totalsize_kb; static uint64 totaldone; static int tablespacecount; -static const char *progress_filename; +static char *progress_filename = NULL; /* Pipe to communicate with background wal receiver process */ #ifndef WIN32 @@ -775,11 +775,22 @@ verify_dir_is_empty_or_create(char *dirname, bool *created, bool *found) /* * Callback to update our notion of the current filename. + * + * No other code should modify progress_filename! 
*/ static void progress_update_filename(const char *filename) { - progress_filename = filename; + /* We needn't maintain this variable if not doing verbose reports. */ + if (showprogress && verbose) + { + if (progress_filename) + free(progress_filename); + if (filename) + progress_filename = pg_strdup(filename); + else + progress_filename = NULL; + } } /* @@ -1258,7 +1269,7 @@ CreateBackupStreamer(char *archive_name, char *spclocation, */ if (must_parse_archive) streamer = bbstreamer_tar_archiver_new(streamer); - progress_filename = archive_filename; + progress_update_filename(archive_filename); } /* @@ -1662,7 +1673,7 @@ ReceiveTarFile(PGconn *conn, char *archive_name, char *spclocation, expect_unterminated_tarfile); state.tablespacenum = tablespacenum; ReceiveCopyData(conn, ReceiveTarCopyChunk, &state); - progress_filename = NULL; + progress_update_filename(NULL); /* * The decision as to whether we need to inject the backup manifest into @@ -2161,7 +2172,7 @@ BaseBackup(void) if (showprogress) { - progress_filename = NULL; + progress_update_filename(NULL); progress_report(PQntuples(res), true, true); }
From c476f380e296bab57fecada1ea96c86d575bf160 Mon Sep 17 00:00:00 2001 From: Amit Kapila Date: Fri, 18 Feb 2022 07:44:24 +0530 Subject: [PATCH 009/108] Fix a comment in worker.c. The comment incorrectly states that the worker gets killed during ALTER SUBSCRIPTION ... DISABLE. Remove that part of the comment. Author: Masahiko Sawada Discussion: https://postgr.es/m/CAD21AoCbEN==oH7BhP3U6WPHg3zgH6sDOeKhJjy4W2dx-qoVCw@mail.gmail.com --- src/backend/replication/logical/worker.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c index d77bb32bb9..5d9acc6173 100644 --- a/src/backend/replication/logical/worker.c +++ b/src/backend/replication/logical/worker.c @@ -2933,10 +2933,7 @@ maybe_reread_subscription(void) proc_exit(0); } - /* - * Exit if the subscription was disabled. This normally should not happen - * as the worker gets killed during ALTER SUBSCRIPTION ... DISABLE. - */ + /* Exit if the subscription was disabled. */ if (!newsub->enabled) { ereport(LOG,
From 94c49d53402240ad7ddbcae9049ff2840a54b9c6 Mon Sep 17 00:00:00 2001 From: Fujii Masao Date: Fri, 18 Feb 2022 11:38:12 +0900 Subject: [PATCH 010/108] postgres_fdw: Make postgres_fdw.application_name support more escape sequences. Commit 6e0cb3dec1 allowed postgres_fdw.application_name to include escape sequences %a (application name), %d (database name), %u (user name) and %p (pid). In addition to them, this commit makes it support the escape sequences for session ID (%c) and cluster name (%C). These are helpful to investigate where each remote transaction came from.
Author: Fujii Masao Reviewed-by: Ryohei Takahashi, Kyotaro Horiguchi Discussion: https://postgr.es/m/1041dc9a-c976-049f-9f14-e7d94c29c4b2@oss.nttdata.com --- .../postgres_fdw/expected/postgres_fdw.out | 20 +++++++++++++++++++ contrib/postgres_fdw/option.c | 6 ++++++ contrib/postgres_fdw/sql/postgres_fdw.sql | 11 ++++++++++ doc/src/sgml/postgres-fdw.sgml | 14 +++++++++++++ src/include/utils/guc.h | 2 +- 5 files changed, 52 insertions(+), 1 deletion(-) diff --git a/contrib/postgres_fdw/expected/postgres_fdw.out b/contrib/postgres_fdw/expected/postgres_fdw.out index b2e02caefe..057342083c 100644 --- a/contrib/postgres_fdw/expected/postgres_fdw.out +++ b/contrib/postgres_fdw/expected/postgres_fdw.out @@ -10910,6 +10910,26 @@ SELECT pg_terminate_backend(pid, 180000) FROM pg_stat_activity t (1 row) +-- Test %c (session ID) and %C (cluster name) escape sequences. +SET postgres_fdw.application_name TO 'fdw_%C%c'; +SELECT 1 FROM ft6 LIMIT 1; + ?column? +---------- + 1 +(1 row) + +SELECT pg_terminate_backend(pid, 180000) FROM pg_stat_activity + WHERE application_name = + substring('fdw_' || current_setting('cluster_name') || + to_hex(trunc(EXTRACT(EPOCH FROM (SELECT backend_start FROM + pg_stat_get_activity(pg_backend_pid()))))::integer) || '.' || + to_hex(pg_backend_pid()) + for current_setting('max_identifier_length')::int); + pg_terminate_backend +---------------------- + t +(1 row) + --Clean up RESET postgres_fdw.application_name; RESET debug_discard_caches; diff --git a/contrib/postgres_fdw/option.c b/contrib/postgres_fdw/option.c index fc3ce6a53a..af38e956e7 100644 --- a/contrib/postgres_fdw/option.c +++ b/contrib/postgres_fdw/option.c @@ -489,6 +489,12 @@ process_pgfdw_appname(const char *appname) case 'a': appendStringInfoString(&buf, application_name); break; + case 'c': + appendStringInfo(&buf, "%lx.%x", (long) (MyStartTime), MyProcPid); + break; + case 'C': + appendStringInfoString(&buf, cluster_name); + break; case 'd': appendStringInfoString(&buf, MyProcPort->database_name); break; diff --git a/contrib/postgres_fdw/sql/postgres_fdw.sql b/contrib/postgres_fdw/sql/postgres_fdw.sql index e050639b57..6c9f579c41 100644 --- a/contrib/postgres_fdw/sql/postgres_fdw.sql +++ b/contrib/postgres_fdw/sql/postgres_fdw.sql @@ -3501,6 +3501,17 @@ SELECT pg_terminate_backend(pid, 180000) FROM pg_stat_activity substring('fdw_' || current_setting('application_name') || CURRENT_USER || '%' for current_setting('max_identifier_length')::int); +-- Test %c (session ID) and %C (cluster name) escape sequences. +SET postgres_fdw.application_name TO 'fdw_%C%c'; +SELECT 1 FROM ft6 LIMIT 1; +SELECT pg_terminate_backend(pid, 180000) FROM pg_stat_activity + WHERE application_name = + substring('fdw_' || current_setting('cluster_name') || + to_hex(trunc(EXTRACT(EPOCH FROM (SELECT backend_start FROM + pg_stat_get_activity(pg_backend_pid()))))::integer) || '.' 
|| + to_hex(pg_backend_pid()) + for current_setting('max_identifier_length')::int); + --Clean up RESET postgres_fdw.application_name; RESET debug_discard_caches; diff --git a/doc/src/sgml/postgres-fdw.sgml b/doc/src/sgml/postgres-fdw.sgml index 7bb6e525a4..dc57fe4b0d 100644 --- a/doc/src/sgml/postgres-fdw.sgml +++ b/doc/src/sgml/postgres-fdw.sgml @@ -984,6 +984,20 @@ postgres=# SELECT postgres_fdw_disconnect_all(); %a Application name on local server + + %c + + Session ID on local server + (see for details) + + + + %C + + Cluster name in local server + (see for details) + + %u User name on local server diff --git a/src/include/utils/guc.h b/src/include/utils/guc.h index 6bb81707b0..f1bfe79feb 100644 --- a/src/include/utils/guc.h +++ b/src/include/utils/guc.h @@ -271,7 +271,7 @@ extern int temp_file_limit; extern int num_temp_buffers; -extern char *cluster_name; +extern PGDLLIMPORT char *cluster_name; extern PGDLLIMPORT char *ConfigFileName; extern char *HbaFileName; extern char *IdentFileName; From f927a6ec3ef710ad2bd7d9c63f524b7a22d7e664 Mon Sep 17 00:00:00 2001 From: Fujii Masao Date: Fri, 18 Feb 2022 12:19:10 +0900 Subject: [PATCH 011/108] Fix comment in CheckIndexCompatible(). Commit 5f173040 removed the parameter "heapRelation" from CheckIndexCompatible(), but forgot to remove the mention of it from the comment. This commit removes that unnecessary mention. Also this commit adds the missing mention of the parameter "oldId" in the comment. Author: Yugo Nagata Reviewed-by: Nathan Bossart, Fujii Masao Discussion: https://postgr.es/m/20220204014634.b39314f278ff4ae3de96e201@sraoss.co.jp --- src/backend/commands/indexcmds.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c index 560dcc87a2..cd30f15eba 100644 --- a/src/backend/commands/indexcmds.c +++ b/src/backend/commands/indexcmds.c @@ -129,7 +129,7 @@ typedef struct ReindexErrorInfo * prospective index definition, such that the existing index storage * could become the storage of the new index, avoiding a rebuild. * - * 'heapRelation': the relation the index would apply to. + * 'oldId': the OID of the existing index * 'accessMethodName': name of the AM to use. * 'attributeList': a list of IndexElem specifying columns and expressions * to index on. From de447bb8e6fbbad19f964a2d7f04c9ccc1d06903 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Thu, 17 Feb 2022 22:45:34 -0500 Subject: [PATCH 012/108] Suppress warning about stack_base_ptr with late-model GCC. GCC 12 complains that set_stack_base is storing the address of a local variable in a long-lived pointer. This is an entirely reasonable warning (indeed, it just helped us find a bug); but that behavior is intentional here. We can work around it by using __builtin_frame_address(0) instead of a specific local variable; that produces an address a dozen or so bytes different, in my testing, but we don't care about such a small difference. Maybe someday a compiler lacking that function will start to issue a similar warning, but we'll worry about that when it happens. Patch by me, per a suggestion from Andres Freund. Back-patch to v12, which is as far back as the patch will go without some pain. (Recently-established project policy would permit a back-patch as far as 9.2, but I'm disinclined to expend the work until GCC 12 is much more widespread.) 
Discussion: https://postgr.es/m/3773792.1645141467@sss.pgh.pa.us --- config/c-compiler.m4 | 22 ++++++++++++++++ configure | 40 +++++++++++++++++++++++++++++ configure.ac | 3 +++ src/backend/postmaster/postmaster.c | 2 +- src/backend/tcop/postgres.c | 20 ++++++++++----- src/backend/utils/init/miscinit.c | 11 ++++---- src/include/pg_config.h.in | 3 +++ src/tools/msvc/Solution.pm | 1 + 8 files changed, 89 insertions(+), 13 deletions(-) diff --git a/config/c-compiler.m4 b/config/c-compiler.m4 index 780e906ecc..d3562d6fee 100644 --- a/config/c-compiler.m4 +++ b/config/c-compiler.m4 @@ -381,6 +381,28 @@ fi])# PGAC_CHECK_BUILTIN_FUNC +# PGAC_CHECK_BUILTIN_FUNC_PTR +# ----------------------- +# Like PGAC_CHECK_BUILTIN_FUNC, except that the function is assumed to +# return a pointer type, and the argument(s) should be given literally. +# This handles some cases that PGAC_CHECK_BUILTIN_FUNC doesn't. +AC_DEFUN([PGAC_CHECK_BUILTIN_FUNC_PTR], +[AC_CACHE_CHECK(for $1, pgac_cv$1, +[AC_LINK_IFELSE([AC_LANG_PROGRAM([ +void * +call$1(void) +{ + return $1($2); +}], [])], +[pgac_cv$1=yes], +[pgac_cv$1=no])]) +if test x"${pgac_cv$1}" = xyes ; then +AC_DEFINE_UNQUOTED(AS_TR_CPP([HAVE$1]), 1, + [Define to 1 if your compiler understands $1.]) +fi])# PGAC_CHECK_BUILTIN_FUNC_PTR + + + # PGAC_PROG_VARCC_VARFLAGS_OPT # ---------------------------- # Given a compiler, variable name and a string, check if the compiler diff --git a/configure b/configure index ba635a0062..df72560277 100755 --- a/configure +++ b/configure @@ -15944,6 +15944,46 @@ cat >>confdefs.h <<_ACEOF #define HAVE__BUILTIN_POPCOUNT 1 _ACEOF +fi +# __builtin_frame_address may draw a diagnostic for non-constant argument, +# so it needs a different test function. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __builtin_frame_address" >&5 +$as_echo_n "checking for __builtin_frame_address... " >&6; } +if ${pgac_cv__builtin_frame_address+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +void * +call__builtin_frame_address(void) +{ + return __builtin_frame_address(0); +} +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + pgac_cv__builtin_frame_address=yes +else + pgac_cv__builtin_frame_address=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__builtin_frame_address" >&5 +$as_echo "$pgac_cv__builtin_frame_address" >&6; } +if test x"${pgac_cv__builtin_frame_address}" = xyes ; then + +cat >>confdefs.h <<_ACEOF +#define HAVE__BUILTIN_FRAME_ADDRESS 1 +_ACEOF + fi # We require 64-bit fseeko() to be available, but run this check anyway diff --git a/configure.ac b/configure.ac index 16167329fc..91a28cb50b 100644 --- a/configure.ac +++ b/configure.ac @@ -1776,6 +1776,9 @@ PGAC_CHECK_BUILTIN_FUNC([__builtin_bswap64], [long int x]) PGAC_CHECK_BUILTIN_FUNC([__builtin_clz], [unsigned int x]) PGAC_CHECK_BUILTIN_FUNC([__builtin_ctz], [unsigned int x]) PGAC_CHECK_BUILTIN_FUNC([__builtin_popcount], [unsigned int x]) +# __builtin_frame_address may draw a diagnostic for non-constant argument, +# so it needs a different test function. +PGAC_CHECK_BUILTIN_FUNC_PTR([__builtin_frame_address], [0]) # We require 64-bit fseeko() to be available, but run this check anyway # in case it finds that _LARGEFILE_SOURCE has to be #define'd for that. 
diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c index 735fed490b..80bb269599 100644 --- a/src/backend/postmaster/postmaster.c +++ b/src/backend/postmaster/postmaster.c @@ -1083,7 +1083,7 @@ PostmasterMain(int argc, char *argv[]) /* * Set reference point for stack-depth checking. */ - set_stack_base(); + (void) set_stack_base(); /* * Initialize pipe (or process handle on Windows) that allows children to diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c index 38d8b97894..3c7d08209f 100644 --- a/src/backend/tcop/postgres.c +++ b/src/backend/tcop/postgres.c @@ -129,17 +129,15 @@ static long max_stack_depth_bytes = 100 * 1024L; /* * Stack base pointer -- initialized by PostmasterMain and inherited by - * subprocesses. This is not static because old versions of PL/Java modify - * it directly. Newer versions use set_stack_base(), but we want to stay - * binary-compatible for the time being. + * subprocesses (but see also InitPostmasterChild). */ -char *stack_base_ptr = NULL; +static char *stack_base_ptr = NULL; /* * On IA64 we also have to remember the register stack base. */ #if defined(__ia64__) || defined(__ia64) -char *register_stack_base_ptr = NULL; +static char *register_stack_base_ptr = NULL; #endif /* @@ -3416,7 +3414,9 @@ ia64_get_bsp(void) pg_stack_base_t set_stack_base(void) { +#ifndef HAVE__BUILTIN_FRAME_ADDRESS char stack_base; +#endif pg_stack_base_t old; #if defined(__ia64__) || defined(__ia64) @@ -3426,8 +3426,16 @@ set_stack_base(void) old = stack_base_ptr; #endif - /* Set up reference point for stack depth checking */ + /* + * Set up reference point for stack depth checking. On recent gcc we use + * __builtin_frame_address() to avoid a warning about storing a local + * variable's address in a long-lived variable. + */ +#ifdef HAVE__BUILTIN_FRAME_ADDRESS + stack_base_ptr = __builtin_frame_address(0); +#else stack_base_ptr = &stack_base; +#endif #if defined(__ia64__) || defined(__ia64) register_stack_base_ptr = ia64_get_bsp(); #endif diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c index 0868e5a24f..bdc77af719 100644 --- a/src/backend/utils/init/miscinit.c +++ b/src/backend/utils/init/miscinit.c @@ -106,13 +106,12 @@ InitPostmasterChild(void) #endif /* - * Set reference point for stack-depth checking. We re-do that even in the - * !EXEC_BACKEND case, because there are some edge cases where processes - * are started with an alternative stack (e.g. starting bgworkers when - * running postgres using the rr debugger, as bgworkers are launched from - * signal handlers). + * Set reference point for stack-depth checking. This might seem + * redundant in !EXEC_BACKEND builds; but it's not because the postmaster + * launches its children from signal handlers, so we might be running on + * an alternative stack. */ - set_stack_base(); + (void) set_stack_base(); InitProcessGlobals(); diff --git a/src/include/pg_config.h.in b/src/include/pg_config.h.in index 28a1f0e9f0..12aac8616e 100644 --- a/src/include/pg_config.h.in +++ b/src/include/pg_config.h.in @@ -739,6 +739,9 @@ /* Define to 1 if your compiler understands __builtin_ctz. */ #undef HAVE__BUILTIN_CTZ +/* Define to 1 if your compiler understands __builtin_frame_address. */ +#undef HAVE__BUILTIN_FRAME_ADDRESS + /* Define to 1 if your compiler understands __builtin_$op_overflow. 
*/ #undef HAVE__BUILTIN_OP_OVERFLOW diff --git a/src/tools/msvc/Solution.pm b/src/tools/msvc/Solution.pm index e6f20679dc..439809fcd0 100644 --- a/src/tools/msvc/Solution.pm +++ b/src/tools/msvc/Solution.pm @@ -439,6 +439,7 @@ sub GenerateFiles HAVE__BUILTIN_CLZ => undef, HAVE__BUILTIN_CONSTANT_P => undef, HAVE__BUILTIN_CTZ => undef, + HAVE__BUILTIN_FRAME_ADDRESS => undef, HAVE__BUILTIN_OP_OVERFLOW => undef, HAVE__BUILTIN_POPCOUNT => undef, HAVE__BUILTIN_TYPES_COMPATIBLE_P => undef, From ce1e7a2f716919652c280937087b24937677f8b3 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Fri, 18 Feb 2022 11:37:27 -0500 Subject: [PATCH 013/108] Don't let libpq "event" procs break the state of PGresult objects. As currently implemented, failure of a PGEVT_RESULTCREATE callback causes the PGresult to be converted to an error result. This is intellectually inconsistent (shouldn't a failing callback likewise prevent creation of the error result? what about side-effects on the behavior seen by other event procs? why does PQfireResultCreateEvents act differently from PQgetResult?), but more importantly it destroys any promises we might wish to make about the behavior of libpq in nontrivial operating modes, such as pipeline mode. For example, it's not possible to promise that PGRES_PIPELINE_SYNC results will be returned if an event callback fails on those. With this definition, expecting applications to behave sanely in the face of possibly-failing callbacks seems like a very big lift. Hence, redefine the result of a callback failure as being simply that that event procedure won't be called any more for this PGresult (which was true already). Event procedures can still signal failure back to the application through out-of-band mechanisms, for example via their passthrough arguments. Similarly, don't let failure of a PGEVT_RESULTCOPY callback prevent PQcopyResult from succeeding. That definition allowed a misbehaving event proc to break single-row mode (our sole internal use of PQcopyResult), and it probably had equally deleterious effects for outside uses. Discussion: https://postgr.es/m/3185105.1644960083@sss.pgh.pa.us --- doc/src/sgml/libpq.sgml | 31 ++++++++++++------------ src/interfaces/libpq/fe-exec.c | 37 ++++++----------------------- src/interfaces/libpq/libpq-events.c | 14 ++++++----- 3 files changed, 31 insertions(+), 51 deletions(-) diff --git a/doc/src/sgml/libpq.sgml b/doc/src/sgml/libpq.sgml index e0ab7cd555..40c39feb7d 100644 --- a/doc/src/sgml/libpq.sgml +++ b/doc/src/sgml/libpq.sgml @@ -6831,6 +6831,7 @@ PGresult *PQcopyResult(const PGresult *src, int flags); PG_COPYRES_EVENTS specifies copying the source result's events. (But any instance data associated with the source is not copied.) + The event procedures receive PGEVT_RESULTCOPY events. @@ -7126,7 +7127,7 @@ defaultNoticeProcessor(void *arg, const char *message) , , and - PQsetResultInstanceData functions. Note that + functions. Note that unlike the pass-through pointer, instance data of a PGconn is not automatically inherited by PGresults created from it. libpq does not know what pass-through @@ -7154,7 +7155,7 @@ defaultNoticeProcessor(void *arg, const char *message) is called. It is the ideal time to initialize any instanceData an event procedure may need. Only one register event will be fired per event handler per connection. If the - event procedure fails, the registration is aborted. + event procedure fails (returns zero), the registration is cancelled. 
typedef struct @@ -7261,11 +7262,11 @@ typedef struct conn is the connection used to generate the result. This is the ideal place to initialize any instanceData that needs to be associated with the - result. If the event procedure fails, the result will be cleared and - the failure will be propagated. The event procedure must not try to - the result object for itself. When returning a - failure code, all cleanup must be performed as no - PGEVT_RESULTDESTROY event will be sent. + result. If an event procedure fails (returns zero), that event + procedure will be ignored for the remaining lifetime of the result; + that is, it will not receive PGEVT_RESULTCOPY + or PGEVT_RESULTDESTROY events for this result or + results copied from it. @@ -7295,12 +7296,12 @@ typedef struct src result is what was copied while the dest result is the copy destination. This event can be used to provide a deep copy of instanceData, - since PQcopyResult cannot do that. If the event - procedure fails, the entire copy operation will fail and the - dest result will be cleared. When returning a - failure code, all cleanup must be performed as no - PGEVT_RESULTDESTROY event will be sent for the - destination result. + since PQcopyResult cannot do that. If an event + procedure fails (returns zero), that event procedure will be + ignored for the remaining lifetime of the new result; that is, it + will not receive PGEVT_RESULTCOPY + or PGEVT_RESULTDESTROY events for that result or + results copied from it. @@ -7618,7 +7619,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) mydata *res_data = dup_mydata(conn_data); /* associate app specific data with result (copy it from conn) */ - PQsetResultInstanceData(e->result, myEventProc, res_data); + PQresultSetInstanceData(e->result, myEventProc, res_data); break; } @@ -7629,7 +7630,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) mydata *dest_data = dup_mydata(src_data); /* associate app specific data with result (copy it from a result) */ - PQsetResultInstanceData(e->dest, myEventProc, dest_data); + PQresultSetInstanceData(e->dest, myEventProc, dest_data); break; } diff --git a/src/interfaces/libpq/fe-exec.c b/src/interfaces/libpq/fe-exec.c index 9afd4d88b4..c7c48d07dc 100644 --- a/src/interfaces/libpq/fe-exec.c +++ b/src/interfaces/libpq/fe-exec.c @@ -363,19 +363,16 @@ PQcopyResult(const PGresult *src, int flags) /* Okay, trigger PGEVT_RESULTCOPY event */ for (i = 0; i < dest->nEvents; i++) { + /* We don't fire events that had some previous failure */ if (src->events[i].resultInitialized) { PGEventResultCopy evt; evt.src = src; evt.dest = dest; - if (!dest->events[i].proc(PGEVT_RESULTCOPY, &evt, - dest->events[i].passThrough)) - { - PQclear(dest); - return NULL; - } - dest->events[i].resultInitialized = true; + if (dest->events[i].proc(PGEVT_RESULTCOPY, &evt, + dest->events[i].passThrough)) + dest->events[i].resultInitialized = true; } } @@ -2124,29 +2121,9 @@ PQgetResult(PGconn *conn) break; } - if (res) - { - int i; - - for (i = 0; i < res->nEvents; i++) - { - PGEventResultCreate evt; - - evt.conn = conn; - evt.result = res; - if (!res->events[i].proc(PGEVT_RESULTCREATE, &evt, - res->events[i].passThrough)) - { - appendPQExpBuffer(&conn->errorMessage, - libpq_gettext("PGEventProc \"%s\" failed during PGEVT_RESULTCREATE event\n"), - res->events[i].name); - pqSetResultError(res, &conn->errorMessage); - res->resultStatus = PGRES_FATAL_ERROR; - break; - } - res->events[i].resultInitialized = true; - } - } + /* Time to fire PGEVT_RESULTCREATE 
events, if there are any */ + if (res && res->nEvents > 0) + (void) PQfireResultCreateEvents(conn, res); return res; } diff --git a/src/interfaces/libpq/libpq-events.c b/src/interfaces/libpq/libpq-events.c index 7754c37748..1ec86b1d64 100644 --- a/src/interfaces/libpq/libpq-events.c +++ b/src/interfaces/libpq/libpq-events.c @@ -184,6 +184,7 @@ PQresultInstanceData(const PGresult *result, PGEventProc proc) int PQfireResultCreateEvents(PGconn *conn, PGresult *res) { + int result = true; int i; if (!res) @@ -191,19 +192,20 @@ PQfireResultCreateEvents(PGconn *conn, PGresult *res) for (i = 0; i < res->nEvents; i++) { + /* It's possible event was already fired, if so don't repeat it */ if (!res->events[i].resultInitialized) { PGEventResultCreate evt; evt.conn = conn; evt.result = res; - if (!res->events[i].proc(PGEVT_RESULTCREATE, &evt, - res->events[i].passThrough)) - return false; - - res->events[i].resultInitialized = true; + if (res->events[i].proc(PGEVT_RESULTCREATE, &evt, + res->events[i].passThrough)) + res->events[i].resultInitialized = true; + else + result = false; } } - return true; + return result; } From 2e372869aa38a9d6e4552c192da4454b17e01e38 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Fri, 18 Feb 2022 11:43:04 -0500 Subject: [PATCH 014/108] Don't let libpq PGEVT_CONNRESET callbacks break a PGconn. As currently implemented, failure of a PGEVT_CONNRESET callback forces the PGconn into the CONNECTION_BAD state (without closing the socket, which is inconsistent with other failure paths), and prevents later callbacks from being called. This seems highly questionable, and indeed is questioned by comments in the source. Instead, let's just ignore the result value of PGEVT_CONNRESET calls. Like the preceding commit, this converts event callbacks into "pure observers" that cannot affect libpq's processing logic. Discussion: https://postgr.es/m/3185105.1644960083@sss.pgh.pa.us --- doc/src/sgml/libpq.sgml | 11 +++++------ src/interfaces/libpq/fe-connect.c | 28 ++++++---------------------- 2 files changed, 11 insertions(+), 28 deletions(-) diff --git a/doc/src/sgml/libpq.sgml b/doc/src/sgml/libpq.sgml index 40c39feb7d..64e17401cd 100644 --- a/doc/src/sgml/libpq.sgml +++ b/doc/src/sgml/libpq.sgml @@ -7183,12 +7183,11 @@ typedef struct The connection reset event is fired on completion of or PQresetPoll. In - both cases, the event is only fired if the reset was successful. If - the event procedure fails, the entire connection reset will fail; the - PGconn is put into - CONNECTION_BAD status and - PQresetPoll will return - PGRES_POLLING_FAILED. + both cases, the event is only fired if the reset was successful. + The return value of the event procedure is ignored + in PostgreSQL v15 and later. + With earlier versions, however, it's important to return success + (nonzero) or the connection will be aborted. typedef struct diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c index 30d6b7b377..9c9416e8ff 100644 --- a/src/interfaces/libpq/fe-connect.c +++ b/src/interfaces/libpq/fe-connect.c @@ -4276,8 +4276,7 @@ PQreset(PGconn *conn) if (connectDBStart(conn) && connectDBComplete(conn)) { /* - * Notify event procs of successful reset. We treat an event proc - * failure as disabling the connection ... good idea? + * Notify event procs of successful reset. 
*/ int i; @@ -4286,15 +4285,8 @@ PQreset(PGconn *conn) PGEventConnReset evt; evt.conn = conn; - if (!conn->events[i].proc(PGEVT_CONNRESET, &evt, - conn->events[i].passThrough)) - { - conn->status = CONNECTION_BAD; - appendPQExpBuffer(&conn->errorMessage, - libpq_gettext("PGEventProc \"%s\" failed during PGEVT_CONNRESET event\n"), - conn->events[i].name); - break; - } + (void) conn->events[i].proc(PGEVT_CONNRESET, &evt, + conn->events[i].passThrough); } } } @@ -4336,8 +4328,7 @@ PQresetPoll(PGconn *conn) if (status == PGRES_POLLING_OK) { /* - * Notify event procs of successful reset. We treat an event proc - * failure as disabling the connection ... good idea? + * Notify event procs of successful reset. */ int i; @@ -4346,15 +4337,8 @@ PQresetPoll(PGconn *conn) PGEventConnReset evt; evt.conn = conn; - if (!conn->events[i].proc(PGEVT_CONNRESET, &evt, - conn->events[i].passThrough)) - { - conn->status = CONNECTION_BAD; - appendPQExpBuffer(&conn->errorMessage, - libpq_gettext("PGEventProc \"%s\" failed during PGEVT_CONNRESET event\n"), - conn->events[i].name); - return PGRES_POLLING_FAILED; - } + (void) conn->events[i].proc(PGEVT_CONNRESET, &evt, + conn->events[i].passThrough); } } From 6c417bbcc8ff98875234ca269979fc7defde58e5 Mon Sep 17 00:00:00 2001 From: Robert Haas Date: Fri, 18 Feb 2022 13:40:31 -0500 Subject: [PATCH 015/108] Add support for building with ZSTD. This commit doesn't actually add anything that uses ZSTD; that will be done separately. It just puts the basic infrastructure into place. Jeevan Ladhe, Robert Haas, and Michael Paquier. Reviewed by Justin Pryzby and Andres Freund. Discussion: http://postgr.es/m/CA+TgmoatQKGd+8SjcV+bzvw4XaoEwminHjU83yG12+NXtQzTTQ@mail.gmail.com --- configure | 265 ++++++++++++++++++++++++++++++ configure.ac | 33 ++++ doc/src/sgml/install-windows.sgml | 9 + doc/src/sgml/installation.sgml | 9 + src/Makefile.global.in | 1 + src/include/pg_config.h.in | 6 + src/tools/msvc/Solution.pm | 13 ++ src/tools/msvc/config_default.pl | 1 + src/tools/msvc/vcregress.pl | 1 + 9 files changed, 338 insertions(+) diff --git a/configure b/configure index df72560277..ca890b8b07 100755 --- a/configure +++ b/configure @@ -650,6 +650,7 @@ CFLAGS_ARMV8_CRC32C CFLAGS_SSE42 have_win32_dbghelp LIBOBJS +ZSTD LZ4 UUID_LIBS LDAP_LIBS_BE @@ -700,6 +701,9 @@ with_gnu_ld LD LDFLAGS_SL LDFLAGS_EX +ZSTD_LIBS +ZSTD_CFLAGS +with_zstd LZ4_LIBS LZ4_CFLAGS with_lz4 @@ -869,6 +873,7 @@ with_libxslt with_system_tzdata with_zlib with_lz4 +with_zstd with_gnu_ld with_ssl with_openssl @@ -898,6 +903,8 @@ XML2_CFLAGS XML2_LIBS LZ4_CFLAGS LZ4_LIBS +ZSTD_CFLAGS +ZSTD_LIBS LDFLAGS_EX LDFLAGS_SL PERL @@ -1577,6 +1584,7 @@ Optional Packages: use system time zone data in DIR --without-zlib do not use Zlib --with-lz4 build with LZ4 support + --with-zstd build with ZSTD support --with-gnu-ld assume the C compiler uses GNU ld [default=no] --with-ssl=LIB use LIB for SSL/TLS support (openssl) --with-openssl obsolete spelling of --with-ssl=openssl @@ -1606,6 +1614,8 @@ Some influential environment variables: XML2_LIBS linker flags for XML2, overriding pkg-config LZ4_CFLAGS C compiler flags for LZ4, overriding pkg-config LZ4_LIBS linker flags for LZ4, overriding pkg-config + ZSTD_CFLAGS C compiler flags for ZSTD, overriding pkg-config + ZSTD_LIBS linker flags for ZSTD, overriding pkg-config LDFLAGS_EX extra linker flags for linking executables only LDFLAGS_SL extra linker flags for linking shared libraries only PERL Perl program @@ -9034,6 +9044,146 @@ fi done fi +# +# ZSTD +# +{ $as_echo 
"$as_me:${as_lineno-$LINENO}: checking whether to build with ZSTD support" >&5 +$as_echo_n "checking whether to build with ZSTD support... " >&6; } + + + +# Check whether --with-zstd was given. +if test "${with_zstd+set}" = set; then : + withval=$with_zstd; + case $withval in + yes) + +$as_echo "#define USE_ZSTD 1" >>confdefs.h + + ;; + no) + : + ;; + *) + as_fn_error $? "no argument expected for --with-zstd option" "$LINENO" 5 + ;; + esac + +else + with_zstd=no + +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $with_zstd" >&5 +$as_echo "$with_zstd" >&6; } + + +if test "$with_zstd" = yes; then + +pkg_failed=no +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for libzstd" >&5 +$as_echo_n "checking for libzstd... " >&6; } + +if test -n "$ZSTD_CFLAGS"; then + pkg_cv_ZSTD_CFLAGS="$ZSTD_CFLAGS" + elif test -n "$PKG_CONFIG"; then + if test -n "$PKG_CONFIG" && \ + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libzstd\""; } >&5 + ($PKG_CONFIG --exists --print-errors "libzstd") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + pkg_cv_ZSTD_CFLAGS=`$PKG_CONFIG --cflags "libzstd" 2>/dev/null` + test "x$?" != "x0" && pkg_failed=yes +else + pkg_failed=yes +fi + else + pkg_failed=untried +fi +if test -n "$ZSTD_LIBS"; then + pkg_cv_ZSTD_LIBS="$ZSTD_LIBS" + elif test -n "$PKG_CONFIG"; then + if test -n "$PKG_CONFIG" && \ + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libzstd\""; } >&5 + ($PKG_CONFIG --exists --print-errors "libzstd") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + pkg_cv_ZSTD_LIBS=`$PKG_CONFIG --libs "libzstd" 2>/dev/null` + test "x$?" != "x0" && pkg_failed=yes +else + pkg_failed=yes +fi + else + pkg_failed=untried +fi + + + +if test $pkg_failed = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then + _pkg_short_errors_supported=yes +else + _pkg_short_errors_supported=no +fi + if test $_pkg_short_errors_supported = yes; then + ZSTD_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "libzstd" 2>&1` + else + ZSTD_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "libzstd" 2>&1` + fi + # Put the nasty error message in config.log where it belongs + echo "$ZSTD_PKG_ERRORS" >&5 + + as_fn_error $? "Package requirements (libzstd) were not met: + +$ZSTD_PKG_ERRORS + +Consider adjusting the PKG_CONFIG_PATH environment variable if you +installed software in a non-standard prefix. + +Alternatively, you may set the environment variables ZSTD_CFLAGS +and ZSTD_LIBS to avoid the need to call pkg-config. +See the pkg-config man page for more details." "$LINENO" 5 +elif test $pkg_failed = untried; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "The pkg-config script could not be found or is too old. Make sure it +is in your PATH or set the PKG_CONFIG environment variable to the full +path to pkg-config. + +Alternatively, you may set the environment variables ZSTD_CFLAGS +and ZSTD_LIBS to avoid the need to call pkg-config. +See the pkg-config man page for more details. + +To get pkg-config, see . 
+See \`config.log' for more details" "$LINENO" 5; } +else + ZSTD_CFLAGS=$pkg_cv_ZSTD_CFLAGS + ZSTD_LIBS=$pkg_cv_ZSTD_LIBS + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +fi + # We only care about -I, -D, and -L switches; + # note that -lzstd will be added by AC_CHECK_LIB below. + for pgac_option in $ZSTD_CFLAGS; do + case $pgac_option in + -I*|-D*) CPPFLAGS="$CPPFLAGS $pgac_option";; + esac + done + for pgac_option in $ZSTD_LIBS; do + case $pgac_option in + -L*) LDFLAGS="$LDFLAGS $pgac_option";; + esac + done +fi # # Assignments # @@ -13130,6 +13280,56 @@ fi fi +if test "$with_zstd" = yes ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ZSTD_compress in -lzstd" >&5 +$as_echo_n "checking for ZSTD_compress in -lzstd... " >&6; } +if ${ac_cv_lib_zstd_ZSTD_compress+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lzstd $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char ZSTD_compress (); +int +main () +{ +return ZSTD_compress (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_zstd_ZSTD_compress=yes +else + ac_cv_lib_zstd_ZSTD_compress=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_zstd_ZSTD_compress" >&5 +$as_echo "$ac_cv_lib_zstd_ZSTD_compress" >&6; } +if test "x$ac_cv_lib_zstd_ZSTD_compress" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBZSTD 1 +_ACEOF + + LIBS="-lzstd $LIBS" + +else + as_fn_error $? "library 'zstd' is required for ZSTD support" "$LINENO" 5 +fi + +fi + # Note: We can test for libldap_r only after we know PTHREAD_LIBS; # also, on AIX, we may need to have openssl in LIBS for this step. if test "$with_ldap" = yes ; then @@ -13902,6 +14102,71 @@ fi done +fi + +if test -z "$ZSTD"; then + for ac_prog in zstd +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_ZSTD+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $ZSTD in + [\\/]* | ?:[\\/]*) + ac_cv_path_ZSTD="$ZSTD" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_ZSTD="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + ;; +esac +fi +ZSTD=$ac_cv_path_ZSTD +if test -n "$ZSTD"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ZSTD" >&5 +$as_echo "$ZSTD" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ZSTD" && break +done + +else + # Report the value of ZSTD in configure's output in all cases. + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ZSTD" >&5 +$as_echo_n "checking for ZSTD... 
" >&6; } + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ZSTD" >&5 +$as_echo "$ZSTD" >&6; } +fi + +if test "$with_zstd" = yes; then + ac_fn_c_check_header_mongrel "$LINENO" "zstd.h" "ac_cv_header_zstd_h" "$ac_includes_default" +if test "x$ac_cv_header_zstd_h" = xyes; then : + +else + as_fn_error $? "zstd.h header file is required for ZSTD" "$LINENO" 5 +fi + + fi if test "$with_gssapi" = yes ; then diff --git a/configure.ac b/configure.ac index 91a28cb50b..331683b336 100644 --- a/configure.ac +++ b/configure.ac @@ -1056,6 +1056,30 @@ if test "$with_lz4" = yes; then done fi +# +# ZSTD +# +AC_MSG_CHECKING([whether to build with ZSTD support]) +PGAC_ARG_BOOL(with, zstd, no, [build with ZSTD support], + [AC_DEFINE([USE_ZSTD], 1, [Define to 1 to build with ZSTD support. (--with-zstd)])]) +AC_MSG_RESULT([$with_zstd]) +AC_SUBST(with_zstd) + +if test "$with_zstd" = yes; then + PKG_CHECK_MODULES(ZSTD, libzstd) + # We only care about -I, -D, and -L switches; + # note that -lzstd will be added by AC_CHECK_LIB below. + for pgac_option in $ZSTD_CFLAGS; do + case $pgac_option in + -I*|-D*) CPPFLAGS="$CPPFLAGS $pgac_option";; + esac + done + for pgac_option in $ZSTD_LIBS; do + case $pgac_option in + -L*) LDFLAGS="$LDFLAGS $pgac_option";; + esac + done +fi # # Assignments # @@ -1325,6 +1349,10 @@ if test "$with_lz4" = yes ; then AC_CHECK_LIB(lz4, LZ4_compress_default, [], [AC_MSG_ERROR([library 'lz4' is required for LZ4 support])]) fi +if test "$with_zstd" = yes ; then + AC_CHECK_LIB(zstd, ZSTD_compress, [], [AC_MSG_ERROR([library 'zstd' is required for ZSTD support])]) +fi + # Note: We can test for libldap_r only after we know PTHREAD_LIBS; # also, on AIX, we may need to have openssl in LIBS for this step. if test "$with_ldap" = yes ; then @@ -1490,6 +1518,11 @@ if test "$with_lz4" = yes; then AC_CHECK_HEADERS(lz4.h, [], [AC_MSG_ERROR([lz4.h header file is required for LZ4])]) fi +PGAC_PATH_PROGS(ZSTD, zstd) +if test "$with_zstd" = yes; then + AC_CHECK_HEADER(zstd.h, [], [AC_MSG_ERROR([zstd.h header file is required for ZSTD])]) +fi + if test "$with_gssapi" = yes ; then AC_CHECK_HEADERS(gssapi/gssapi.h, [], [AC_CHECK_HEADERS(gssapi.h, [], [AC_MSG_ERROR([gssapi.h header file is required for GSSAPI])])]) diff --git a/doc/src/sgml/install-windows.sgml b/doc/src/sgml/install-windows.sgml index b3435eabc4..e08c9514d4 100644 --- a/doc/src/sgml/install-windows.sgml +++ b/doc/src/sgml/install-windows.sgml @@ -307,6 +307,15 @@ $ENV{MSBFLAGS}="/m"; + + ZSTD + + Required for supporting ZSTD compression + method. Binaries and source can be downloaded from + . + + + OpenSSL diff --git a/doc/src/sgml/installation.sgml b/doc/src/sgml/installation.sgml index 094d23c292..311f7f261d 100644 --- a/doc/src/sgml/installation.sgml +++ b/doc/src/sgml/installation.sgml @@ -985,6 +985,15 @@ build-postgresql: + + + + + Build with ZSTD compression support. + + + + diff --git a/src/Makefile.global.in b/src/Makefile.global.in index 9dcd54fcbd..c980444233 100644 --- a/src/Makefile.global.in +++ b/src/Makefile.global.in @@ -351,6 +351,7 @@ XGETTEXT = @XGETTEXT@ GZIP = gzip BZIP2 = bzip2 LZ4 = @LZ4@ +ZSTD = @ZSTD@ DOWNLOAD = wget -O $@ --no-use-server-timestamps #DOWNLOAD = curl -o $@ diff --git a/src/include/pg_config.h.in b/src/include/pg_config.h.in index 12aac8616e..635fbb2181 100644 --- a/src/include/pg_config.h.in +++ b/src/include/pg_config.h.in @@ -352,6 +352,9 @@ /* Define to 1 if you have the `z' library (-lz). */ #undef HAVE_LIBZ +/* Define to 1 if you have the `zstd' library (-lzstd). 
*/ +#undef HAVE_LIBZSTD + /* Define to 1 if you have the `link' function. */ #undef HAVE_LINK @@ -952,6 +955,9 @@ /* Define to select Win32-style shared memory. */ #undef USE_WIN32_SHARED_MEMORY +/* Define to 1 to build with ZSTD support. (--with-zstd) */ +#undef USE_ZSTD + /* Define to 1 if `wcstombs_l' requires . */ #undef WCSTOMBS_L_IN_XLOCALE diff --git a/src/tools/msvc/Solution.pm b/src/tools/msvc/Solution.pm index 439809fcd0..a21ea9bef9 100644 --- a/src/tools/msvc/Solution.pm +++ b/src/tools/msvc/Solution.pm @@ -311,6 +311,7 @@ sub GenerateFiles HAVE_LIBXML2 => undef, HAVE_LIBXSLT => undef, HAVE_LIBZ => $self->{options}->{zlib} ? 1 : undef, + HAVE_LIBZSTD => undef, HAVE_LINK => undef, HAVE_LOCALE_T => 1, HAVE_LONG_INT_64 => undef, @@ -507,6 +508,7 @@ sub GenerateFiles USE_UNNAMED_POSIX_SEMAPHORES => undef, USE_WIN32_SEMAPHORES => 1, USE_WIN32_SHARED_MEMORY => 1, + USE_ZSTD => undef, WCSTOMBS_L_IN_XLOCALE => undef, WORDS_BIGENDIAN => undef, XLOG_BLCKSZ => 1024 * $self->{options}->{wal_blocksize}, @@ -540,6 +542,11 @@ sub GenerateFiles $define{HAVE_LZ4_H} = 1; $define{USE_LZ4} = 1; } + if ($self->{options}->{zstd}) + { + $define{HAVE_LIBZSTD} = 1; + $define{USE_ZSTD} = 1; + } if ($self->{options}->{openssl}) { $define{USE_OPENSSL} = 1; @@ -1082,6 +1089,11 @@ sub AddProject $proj->AddIncludeDir($self->{options}->{lz4} . '\include'); $proj->AddLibrary($self->{options}->{lz4} . '\lib\liblz4.lib'); } + if ($self->{options}->{zstd}) + { + $proj->AddIncludeDir($self->{options}->{zstd} . '\include'); + $proj->AddLibrary($self->{options}->{zstd} . '\lib\libzstd.lib'); + } if ($self->{options}->{uuid}) { $proj->AddIncludeDir($self->{options}->{uuid} . '\include'); @@ -1194,6 +1206,7 @@ sub GetFakeConfigure $cfg .= ' --with-libxml' if ($self->{options}->{xml}); $cfg .= ' --with-libxslt' if ($self->{options}->{xslt}); $cfg .= ' --with-lz4' if ($self->{options}->{lz4}); + $cfg .= ' --with-zstd' if ($self->{options}->{zstd}); $cfg .= ' --with-gssapi' if ($self->{options}->{gss}); $cfg .= ' --with-icu' if ($self->{options}->{icu}); $cfg .= ' --with-tcl' if ($self->{options}->{tcl}); diff --git a/src/tools/msvc/config_default.pl b/src/tools/msvc/config_default.pl index 7a9b00be72..186849a09a 100644 --- a/src/tools/msvc/config_default.pl +++ b/src/tools/msvc/config_default.pl @@ -15,6 +15,7 @@ gss => undef, # --with-gssapi= icu => undef, # --with-icu= lz4 => undef, # --with-lz4= + zstd => undef, # --with-zstd= nls => undef, # --enable-nls= tap_tests => undef, # --enable-tap-tests tcl => undef, # --with-tcl= diff --git a/src/tools/msvc/vcregress.pl b/src/tools/msvc/vcregress.pl index a994626239..e2b0db0879 100644 --- a/src/tools/msvc/vcregress.pl +++ b/src/tools/msvc/vcregress.pl @@ -36,6 +36,7 @@ $ENV{GZIP_PROGRAM} ||= 'gzip'; $ENV{LZ4} ||= 'lz4'; $ENV{TAR} ||= 'tar'; +$ENV{ZSTD} ||= 'zstd'; # buildenv.pl is for specifying the build environment settings # it should contain lines like: From 618c16707a6d6e8f5c83ede2092975e4670201ad Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Fri, 18 Feb 2022 15:35:15 -0500 Subject: [PATCH 016/108] Rearrange libpq's error reporting to avoid duplicated error text. Since commit ffa2e4670, libpq accumulates text in conn->errorMessage across a whole query cycle. In some situations, we may report more than one error event within a cycle: the easiest case to reach is where we report a FATAL error message from the server, and then a bit later we detect loss of connection. 
Since, historically, each error PGresult bears the entire content of conn->errorMessage, this results in duplication of the FATAL message in any output that concatenates the contents of the PGresults. Accumulation in errorMessage still seems like a good idea, especially in view of the number of places that did ad-hoc error concatenation before ffa2e4670. So to fix this, let's track how much of conn->errorMessage has been read out into error PGresults, and only include new text in later PGresults. The tricky part of that is to be sure that we never discard an error PGresult once made (else we'd risk dropping some text, a problem much worse than duplication). While libpq formerly did that in some code paths, a little bit of rearrangement lets us postpone making an error PGresult at all until we are about to return it. A side benefit of that postponement is that it now becomes practical to return a dummy static PGresult in cases where we hit out-of-memory while trying to manufacture an error PGresult. This eliminates the admittedly-very-rare case where we'd return NULL from PQgetResult, indicating successful query completion, even though what actually happened was an OOM failure. Discussion: https://postgr.es/m/ab4288f8-be5c-57fb-2400-e3e857f53e46@enterprisedb.com --- .../expected/slot_creation_error.out | 1 - src/interfaces/libpq/fe-auth.c | 2 +- src/interfaces/libpq/fe-connect.c | 8 +- src/interfaces/libpq/fe-exec.c | 180 ++++++++++++++---- src/interfaces/libpq/fe-lobj.c | 17 +- src/interfaces/libpq/fe-protocol3.c | 55 ++++-- src/interfaces/libpq/libpq-int.h | 26 ++- 7 files changed, 224 insertions(+), 65 deletions(-) diff --git a/contrib/test_decoding/expected/slot_creation_error.out b/contrib/test_decoding/expected/slot_creation_error.out index 321648c339..043bdae0a2 100644 --- a/contrib/test_decoding/expected/slot_creation_error.out +++ b/contrib/test_decoding/expected/slot_creation_error.out @@ -98,7 +98,6 @@ t step s2_init: <... completed> FATAL: terminating connection due to administrator command -FATAL: terminating connection due to administrator command server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. diff --git a/src/interfaces/libpq/fe-auth.c b/src/interfaces/libpq/fe-auth.c index f8f4111fef..6fceff561b 100644 --- a/src/interfaces/libpq/fe-auth.c +++ b/src/interfaces/libpq/fe-auth.c @@ -1237,7 +1237,7 @@ PQencryptPasswordConn(PGconn *conn, const char *passwd, const char *user, if (!conn) return NULL; - resetPQExpBuffer(&conn->errorMessage); + pqClearConnErrorState(conn); /* If no algorithm was given, ask the server. */ if (algorithm == NULL) diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c index 9c9416e8ff..2a3d68b4d1 100644 --- a/src/interfaces/libpq/fe-connect.c +++ b/src/interfaces/libpq/fe-connect.c @@ -3685,7 +3685,7 @@ PQconnectPoll(PGconn *conn) * (and it seems some clients expect it to be empty after a * successful connection). */ - resetPQExpBuffer(&conn->errorMessage); + pqClearConnErrorState(conn); /* We are open for business! */ conn->status = CONNECTION_OK; @@ -4231,7 +4231,7 @@ closePGconn(PGconn *conn) /* * Close the connection, reset all transient state, flush I/O buffers. - * Note that this includes clearing conn->errorMessage; we're no longer + * Note that this includes clearing conn's error state; we're no longer * interested in any failures associated with the old connection, and we * want a clean slate for any new connection attempt. 
*/ @@ -4241,7 +4241,7 @@ closePGconn(PGconn *conn) conn->xactStatus = PQTRANS_IDLE; conn->pipelineStatus = PQ_PIPELINE_OFF; pqClearAsyncResult(conn); /* deallocate result */ - resetPQExpBuffer(&conn->errorMessage); + pqClearConnErrorState(conn); release_conn_addrinfo(conn); /* Reset all state obtained from server, too */ @@ -5236,7 +5236,7 @@ ldapServiceLookup(const char *purl, PQconninfoOption *options, * Returns 0 on success, nonzero on failure. On failure, if errorMessage * isn't null, also store an error message there. (Note: the only reason * this function and related ones don't dump core on errorMessage == NULL - * is the undocumented fact that printfPQExpBuffer does nothing when passed + * is the undocumented fact that appendPQExpBuffer does nothing when passed * a null PQExpBuffer pointer.) */ static int diff --git a/src/interfaces/libpq/fe-exec.c b/src/interfaces/libpq/fe-exec.c index c7c48d07dc..45dddaf556 100644 --- a/src/interfaces/libpq/fe-exec.c +++ b/src/interfaces/libpq/fe-exec.c @@ -44,6 +44,13 @@ char *const pgresStatus[] = { "PGRES_PIPELINE_ABORTED" }; +/* We return this if we're unable to make a PGresult at all */ +static const PGresult OOM_result = { + .resultStatus = PGRES_FATAL_ERROR, + .client_encoding = PG_SQL_ASCII, + .errMsg = "out of memory\n", +}; + /* * static state needed by PQescapeString and PQescapeBytea; initialize to * values that result in backward-compatible behavior @@ -141,6 +148,10 @@ static int pqPipelineFlush(PGconn *conn); * returns a newly allocated, initialized PGresult with given status. * If conn is not NULL and status indicates an error, the conn's * errorMessage is copied. Also, any PGEvents are copied from the conn. + * + * Note: the logic to copy the conn's errorMessage is now vestigial; + * no internal caller uses it. However, that behavior is documented for + * outside callers, so we'd better keep it. */ PGresult * PQmakeEmptyPGresult(PGconn *conn, ExecStatusType status) @@ -191,7 +202,8 @@ PQmakeEmptyPGresult(PGconn *conn, ExecStatusType status) /* non-error cases */ break; default: - pqSetResultError(result, &conn->errorMessage); + /* we intentionally do not use or modify errorReported here */ + pqSetResultError(result, &conn->errorMessage, 0); break; } @@ -235,8 +247,12 @@ PQsetResultAttrs(PGresult *res, int numAttributes, PGresAttDesc *attDescs) { int i; + /* Fail if argument is NULL or OOM_result */ + if (!res || (const PGresult *) res == &OOM_result) + return false; + /* If attrs already exist, they cannot be overwritten. */ - if (!res || res->numAttributes > 0) + if (res->numAttributes > 0) return false; /* ignore no-op request */ @@ -435,7 +451,11 @@ PQsetvalue(PGresult *res, int tup_num, int field_num, char *value, int len) PGresAttValue *attval; const char *errmsg = NULL; - /* Note that this check also protects us against null "res" */ + /* Fail if argument is NULL or OOM_result */ + if (!res || (const PGresult *) res == &OOM_result) + return false; + + /* Invalid field_num? 
*/ if (!check_field_number(res, field_num)) return false; @@ -519,6 +539,10 @@ PQsetvalue(PGresult *res, int tup_num, int field_num, char *value, int len) void * PQresultAlloc(PGresult *res, size_t nBytes) { + /* Fail if argument is NULL or OOM_result */ + if (!res || (const PGresult *) res == &OOM_result) + return NULL; + return pqResultAlloc(res, nBytes, true); } @@ -657,9 +681,12 @@ pqResultStrdup(PGresult *res, const char *str) /* * pqSetResultError - * assign a new error message to a PGresult + * + * Copy text from errorMessage buffer beginning at given offset + * (it's caller's responsibility that offset is valid) */ void -pqSetResultError(PGresult *res, PQExpBuffer errorMessage) +pqSetResultError(PGresult *res, PQExpBuffer errorMessage, int offset) { char *msg; @@ -674,7 +701,7 @@ pqSetResultError(PGresult *res, PQExpBuffer errorMessage) * at a constant "out of memory" string. */ if (!PQExpBufferBroken(errorMessage)) - msg = pqResultStrdup(res, errorMessage->data); + msg = pqResultStrdup(res, errorMessage->data + offset); else msg = NULL; if (msg) @@ -693,9 +720,14 @@ PQclear(PGresult *res) PGresult_data *block; int i; + /* As a convenience, do nothing for a NULL pointer */ if (!res) return; + /* Also, do nothing if the argument is OOM_result */ + if ((const PGresult *) res == &OOM_result) + return; + /* Close down any events we may have */ for (i = 0; i < res->nEvents; i++) { /* only send DESTROY to successfully-initialized event procs */ @@ -748,24 +780,39 @@ pqClearAsyncResult(PGconn *conn) if (conn->result) PQclear(conn->result); conn->result = NULL; + conn->error_result = false; if (conn->next_result) PQclear(conn->next_result); conn->next_result = NULL; } /* - * This subroutine deletes any existing async result, sets conn->result - * to a PGresult with status PGRES_FATAL_ERROR, and stores the current - * contents of conn->errorMessage into that result. + * pqSaveErrorResult - + * remember that we have an error condition + * + * In much of libpq, reporting an error just requires appending text to + * conn->errorMessage and returning a failure code to one's caller. + * Where returning a failure code is impractical, instead call this + * function to remember that an error needs to be reported. + * + * (It might seem that appending text to conn->errorMessage should be + * sufficient, but we can't rely on that working under out-of-memory + * conditions. The OOM hazard is also why we don't try to make a new + * PGresult right here.) */ void pqSaveErrorResult(PGconn *conn) { + /* Drop any pending result ... */ pqClearAsyncResult(conn); - conn->result = PQmakeEmptyPGresult(conn, PGRES_FATAL_ERROR); + /* ... and set flag to remember to make an error result later */ + conn->error_result = true; } /* + * pqSaveWriteError - + * report a write failure + * * As above, after appending conn->write_err_msg to whatever other error we * have. This is used when we've detected a write failure and have exhausted * our chances of reporting something else instead. @@ -792,24 +839,79 @@ pqSaveWriteError(PGconn *conn) } /* - * This subroutine prepares an async result object for return to the caller. + * pqPrepareAsyncResult - + * prepare the current async result object for return to the caller + * * If there is not already an async result object, build an error object * using whatever is in conn->errorMessage. In any case, clear the async - * result storage. + * result storage, and update our notion of how much error text has been + * returned to the application. 
*/ PGresult * pqPrepareAsyncResult(PGconn *conn) { PGresult *res; - /* - * conn->result is the PGresult to return. If it is NULL (which probably - * shouldn't happen) we assume there is an appropriate error message in - * conn->errorMessage. - */ res = conn->result; - if (!res) - res = PQmakeEmptyPGresult(conn, PGRES_FATAL_ERROR); + if (res) + { + /* + * If the pre-existing result is an ERROR (presumably something + * received from the server), assume that it represents whatever is in + * conn->errorMessage, and advance errorReported. + */ + if (res->resultStatus == PGRES_FATAL_ERROR) + conn->errorReported = conn->errorMessage.len; + } + else + { + /* + * We get here after internal-to-libpq errors. We should probably + * always have error_result = true, but if we don't, gin up some error + * text. + */ + if (!conn->error_result) + appendPQExpBufferStr(&conn->errorMessage, + libpq_gettext("no error text available\n")); + + /* Paranoia: be sure errorReported offset is sane */ + if (conn->errorReported < 0 || + conn->errorReported >= conn->errorMessage.len) + conn->errorReported = 0; + + /* + * Make a PGresult struct for the error. We temporarily lie about the + * result status, so that PQmakeEmptyPGresult doesn't uselessly copy + * all of conn->errorMessage. + */ + res = PQmakeEmptyPGresult(conn, PGRES_EMPTY_QUERY); + if (res) + { + /* + * Report whatever new error text we have, and advance + * errorReported. + */ + res->resultStatus = PGRES_FATAL_ERROR; + pqSetResultError(res, &conn->errorMessage, conn->errorReported); + conn->errorReported = conn->errorMessage.len; + } + else + { + /* + * Ouch, not enough memory for a PGresult. Fortunately, we have a + * card up our sleeve: we can use the static OOM_result. Casting + * away const here is a bit ugly, but it seems best to declare + * OOM_result as const, in hopes it will be allocated in read-only + * storage. + */ + res = unconstify(PGresult *, &OOM_result); + + /* + * Don't advance errorReported. Perhaps we'll be able to report + * the text later. + */ + } + } /* * Replace conn->result with next_result, if any. In the normal case @@ -818,6 +920,7 @@ pqPrepareAsyncResult(PGconn *conn) * it was before we created the current single-row result. */ conn->result = conn->next_result; + conn->error_result = false; /* next_result is never an error */ conn->next_result = NULL; return res; @@ -1278,7 +1381,7 @@ pqAppendCmdQueueEntry(PGconn *conn, PGcmdQueueEntry *entry) */ if (conn->asyncStatus == PGASYNC_IDLE) { - resetPQExpBuffer(&conn->errorMessage); + pqClearConnErrorState(conn); pqPipelineProcessQueue(conn); } break; @@ -1626,10 +1729,10 @@ PQsendQueryStart(PGconn *conn, bool newQuery) return false; /* - * If this is the beginning of a query cycle, reset the error buffer. + * If this is the beginning of a query cycle, reset the error state. */ if (newQuery) - resetPQExpBuffer(&conn->errorMessage); + pqClearConnErrorState(conn); /* Don't try to send if we know there's no live connection. 
*/ if (conn->status != CONNECTION_OK) @@ -1687,8 +1790,8 @@ PQsendQueryStart(PGconn *conn, bool newQuery) /* reset single-row processing mode */ conn->singleRowMode = false; - } + /* ready to send command message */ return true; } @@ -1884,7 +1987,7 @@ PQsetSingleRowMode(PGconn *conn) (conn->cmd_queue_head->queryclass != PGQUERY_SIMPLE && conn->cmd_queue_head->queryclass != PGQUERY_EXTENDED)) return 0; - if (conn->result) + if (conn->result || conn->error_result) return 0; /* OK, set flag */ @@ -2015,10 +2118,7 @@ PQgetResult(PGconn *conn) pqWait(true, false, conn) || pqReadData(conn) < 0) { - /* - * conn->errorMessage has been set by pqWait or pqReadData. We - * want to append it to any already-received error message. - */ + /* Report the error saved by pqWait or pqReadData */ pqSaveErrorResult(conn); conn->asyncStatus = PGASYNC_IDLE; return pqPrepareAsyncResult(conn); @@ -2053,7 +2153,7 @@ PQgetResult(PGconn *conn) * is the start of the results of the next query, clear any * prior error message. */ - resetPQExpBuffer(&conn->errorMessage); + pqClearConnErrorState(conn); pqPipelineProcessQueue(conn); } break; @@ -2117,7 +2217,9 @@ PQgetResult(PGconn *conn) appendPQExpBuffer(&conn->errorMessage, libpq_gettext("unexpected asyncStatus: %d\n"), (int) conn->asyncStatus); - res = PQmakeEmptyPGresult(conn, PGRES_FATAL_ERROR); + pqSaveErrorResult(conn); + conn->asyncStatus = PGASYNC_IDLE; /* try to restore valid state */ + res = pqPrepareAsyncResult(conn); break; } @@ -2268,9 +2370,9 @@ PQexecStart(PGconn *conn) } /* - * Since this is the beginning of a query cycle, reset the error buffer. + * Since this is the beginning of a query cycle, reset the error state. */ - resetPQExpBuffer(&conn->errorMessage); + pqClearConnErrorState(conn); /* * Silently discard any prior query result that application didn't eat. @@ -2825,9 +2927,9 @@ PQfn(PGconn *conn, return NULL; /* - * Since this is the beginning of a query cycle, reset the error buffer. + * Since this is the beginning of a query cycle, reset the error state. */ - resetPQExpBuffer(&conn->errorMessage); + pqClearConnErrorState(conn); if (conn->pipelineStatus != PQ_PIPELINE_OFF) { @@ -2837,7 +2939,7 @@ PQfn(PGconn *conn, } if (conn->sock == PGINVALID_SOCKET || conn->asyncStatus != PGASYNC_IDLE || - conn->result != NULL) + conn->result || conn->error_result) { appendPQExpBufferStr(&conn->errorMessage, libpq_gettext("connection in wrong state\n")); @@ -3707,9 +3809,9 @@ PQsetnonblocking(PGconn *conn, int arg) * behavior. this is ok because either they are making a transition _from_ * or _to_ blocking mode, either way we can block them. * - * Clear errorMessage in case pqFlush adds to it. + * Clear error state in case pqFlush adds to it. */ - resetPQExpBuffer(&conn->errorMessage); + pqClearConnErrorState(conn); /* if we are going from blocking to non-blocking flush here */ if (pqFlush(conn)) @@ -3901,7 +4003,7 @@ PQescapeStringConn(PGconn *conn, return 0; } - resetPQExpBuffer(&conn->errorMessage); + pqClearConnErrorState(conn); return PQescapeStringInternal(conn, to, from, length, error, conn->client_encoding, @@ -3939,7 +4041,7 @@ PQescapeInternal(PGconn *conn, const char *str, size_t len, bool as_ident) if (!conn) return NULL; - resetPQExpBuffer(&conn->errorMessage); + pqClearConnErrorState(conn); /* Scan the string for characters that must be escaped. 
*/ for (s = str; (s - str) < len && *s != '\0'; ++s) @@ -4204,7 +4306,7 @@ PQescapeByteaConn(PGconn *conn, if (!conn) return NULL; - resetPQExpBuffer(&conn->errorMessage); + pqClearConnErrorState(conn); return PQescapeByteaInternal(conn, from, from_length, to_length, conn->std_strings, diff --git a/src/interfaces/libpq/fe-lobj.c b/src/interfaces/libpq/fe-lobj.c index 48399a90cb..075a5ed85b 100644 --- a/src/interfaces/libpq/fe-lobj.c +++ b/src/interfaces/libpq/fe-lobj.c @@ -665,8 +665,8 @@ lo_import_internal(PGconn *conn, const char *filename, Oid oid) if (conn == NULL) return InvalidOid; - /* Since this is the beginning of a query cycle, reset the error buffer */ - resetPQExpBuffer(&conn->errorMessage); + /* Since this is the beginning of a query cycle, reset the error state */ + pqClearConnErrorState(conn); /* * open the file to be read in @@ -730,7 +730,8 @@ lo_import_internal(PGconn *conn, const char *filename, Oid oid) (void) lo_close(conn, lobj); (void) close(fd); /* deliberately overwrite any error from lo_close */ - printfPQExpBuffer(&conn->errorMessage, + pqClearConnErrorState(conn); + appendPQExpBuffer(&conn->errorMessage, libpq_gettext("could not read from file \"%s\": %s\n"), filename, strerror_r(save_errno, sebuf, sizeof(sebuf))); @@ -785,7 +786,8 @@ lo_export(PGconn *conn, Oid lobjId, const char *filename) (void) lo_close(conn, lobj); /* deliberately overwrite any error from lo_close */ - printfPQExpBuffer(&conn->errorMessage, + pqClearConnErrorState(conn); + appendPQExpBuffer(&conn->errorMessage, libpq_gettext("could not open file \"%s\": %s\n"), filename, strerror_r(save_errno, sebuf, sizeof(sebuf))); @@ -806,7 +808,8 @@ lo_export(PGconn *conn, Oid lobjId, const char *filename) (void) lo_close(conn, lobj); (void) close(fd); /* deliberately overwrite any error from lo_close */ - printfPQExpBuffer(&conn->errorMessage, + pqClearConnErrorState(conn); + appendPQExpBuffer(&conn->errorMessage, libpq_gettext("could not write to file \"%s\": %s\n"), filename, strerror_r(save_errno, sebuf, sizeof(sebuf))); @@ -863,8 +866,8 @@ lo_initialize(PGconn *conn) if (conn == NULL) return -1; - /* Since this is the beginning of a query cycle, reset the error buffer */ - resetPQExpBuffer(&conn->errorMessage); + /* Since this is the beginning of a query cycle, reset the error state */ + pqClearConnErrorState(conn); /* Nothing else to do if we already collected info */ if (conn->lobjfuncs != NULL) diff --git a/src/interfaces/libpq/fe-protocol3.c b/src/interfaces/libpq/fe-protocol3.c index 26dbeaed97..94b4a448b9 100644 --- a/src/interfaces/libpq/fe-protocol3.c +++ b/src/interfaces/libpq/fe-protocol3.c @@ -316,8 +316,9 @@ pqParseInput3(PGconn *conn) return; break; case 'T': /* Row Description */ - if (conn->result != NULL && - conn->result->resultStatus == PGRES_FATAL_ERROR) + if (conn->error_result || + (conn->result != NULL && + conn->result->resultStatus == PGRES_FATAL_ERROR)) { /* * We've already choked for some reason. Just discard @@ -387,8 +388,9 @@ pqParseInput3(PGconn *conn) if (getAnotherTuple(conn, msgLength)) return; } - else if (conn->result != NULL && - conn->result->resultStatus == PGRES_FATAL_ERROR) + else if (conn->error_result || + (conn->result != NULL && + conn->result->resultStatus == PGRES_FATAL_ERROR)) { /* * We've already choked for some reason. 
Just discard @@ -966,10 +968,18 @@ pqGetErrorNotice3(PGconn *conn, bool isError) */ if (isError) { - if (res) - pqSetResultError(res, &workBuf); pqClearAsyncResult(conn); /* redundant, but be safe */ - conn->result = res; + if (res) + { + pqSetResultError(res, &workBuf, 0); + conn->result = res; + } + else + { + /* Fall back to using the internal-error processing paths */ + conn->error_result = true; + } + if (PQExpBufferDataBroken(workBuf)) appendPQExpBufferStr(&conn->errorMessage, libpq_gettext("out of memory\n")); @@ -2116,10 +2126,33 @@ pqFunctionCall3(PGconn *conn, Oid fnid, continue; /* consume the message and exit */ conn->inStart += 5 + msgLength; - /* if we saved a result object (probably an error), use it */ - if (conn->result) - return pqPrepareAsyncResult(conn); - return PQmakeEmptyPGresult(conn, status); + + /* + * If we already have a result object (probably an error), use + * that. Otherwise, if we saw a function result message, + * report COMMAND_OK. Otherwise, the backend violated the + * protocol, so complain. + */ + if (!(conn->result || conn->error_result)) + { + if (status == PGRES_COMMAND_OK) + { + conn->result = PQmakeEmptyPGresult(conn, status); + if (!conn->result) + { + appendPQExpBufferStr(&conn->errorMessage, + libpq_gettext("out of memory\n")); + pqSaveErrorResult(conn); + } + } + else + { + appendPQExpBufferStr(&conn->errorMessage, + libpq_gettext("protocol error: no function result\n")); + pqSaveErrorResult(conn); + } + } + return pqPrepareAsyncResult(conn); case 'S': /* parameter status */ if (getParameterStatus(conn)) continue; diff --git a/src/interfaces/libpq/libpq-int.h b/src/interfaces/libpq/libpq-int.h index 4290553482..e0cee4b142 100644 --- a/src/interfaces/libpq/libpq-int.h +++ b/src/interfaces/libpq/libpq-int.h @@ -496,8 +496,17 @@ struct pg_conn PGdataValue *rowBuf; /* array for passing values to rowProcessor */ int rowBufLen; /* number of entries allocated in rowBuf */ - /* Status for asynchronous result construction */ + /* + * Status for asynchronous result construction. If result isn't NULL, it + * is a result being constructed or ready to return. If result is NULL + * and error_result is true, then we need to return a PGRES_FATAL_ERROR + * result, but haven't yet constructed it; text for the error has been + * appended to conn->errorMessage. (Delaying construction simplifies + * dealing with out-of-memory cases.) If next_result isn't NULL, it is a + * PGresult that will replace "result" after we return that one. + */ PGresult *result; /* result being constructed */ + bool error_result; /* do we need to make an ERROR result? */ PGresult *next_result; /* next result (used in single-row mode) */ /* Assorted state for SASL, SSL, GSS, etc */ @@ -567,8 +576,14 @@ struct pg_conn * Buffer for current error message. This is cleared at the start of any * connection attempt or query cycle; after that, all code should append * messages to it, never overwrite. + * + * In some situations we might report an error more than once in a query + * cycle. If so, errorMessage accumulates text from all the errors, and + * errorReported tracks how much we've already reported, so that the + * individual error PGresult objects don't contain duplicative text. 
*/ PQExpBufferData errorMessage; /* expansible string */ + int errorReported; /* # bytes of string already reported */ /* Buffer for receiving various parts of messages */ PQExpBufferData workBuffer; /* expansible string */ @@ -644,7 +659,7 @@ extern pgthreadlock_t pg_g_threadlock; /* === in fe-exec.c === */ -extern void pqSetResultError(PGresult *res, PQExpBuffer errorMessage); +extern void pqSetResultError(PGresult *res, PQExpBuffer errorMessage, int offset); extern void *pqResultAlloc(PGresult *res, size_t nBytes, bool isBinary); extern char *pqResultStrdup(PGresult *res, const char *str); extern void pqClearAsyncResult(PGconn *conn); @@ -830,6 +845,13 @@ extern void pqTraceOutputNoTypeByteMessage(PGconn *conn, const char *message); /* === miscellaneous macros === */ +/* + * Reset the conn's error-reporting state. + */ +#define pqClearConnErrorState(conn) \ + (resetPQExpBuffer(&(conn)->errorMessage), \ + (conn)->errorReported = 0) + /* * this is so that we can check if a connection is non-blocking internally * without the overhead of a function call From 07daca53bfcad59618a9c6fad304e380cc9d2bc1 Mon Sep 17 00:00:00 2001 From: Michael Paquier Date: Sat, 19 Feb 2022 14:58:51 +0900 Subject: [PATCH 017/108] Fix inconsistencies in SRF checks of pg_config() and string_to_table() The execution paths of those functions have been using a set of checks inconsistent with any other SRF function: - string_to_table() missed a check on expectedDesc, the tuple descriptor expected by the caller, that should never be NULL. Introduced in 66f1630. - pg_config() should check for a ReturnSetInfo, and expectedDesc cannot be NULL. Its error messages were also inconsistent. Introduced in a5c43b8. Extracted from a larger patch by the same author, in preparation for a larger patch set aimed at refactoring the way tuplestores are created and checked in SRF functions. 
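For reference, the check sequence that set-returning functions are being converged on looks like the following minimal sketch (srf_example is a hypothetical function name; only the validation half is shown, with the error texts this patch installs):

    #include "postgres.h"
    #include "fmgr.h"
    #include "funcapi.h"

    Datum
    srf_example(PG_FUNCTION_ARGS)
    {
        ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;

        /* the calling context must be able to accept a set at all */
        if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
            ereport(ERROR,
                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                     errmsg("set-valued function called in context that cannot accept a set")));

        /* materialize mode must be allowed, and a tuple descriptor supplied */
        if (!(rsinfo->allowedModes & SFRM_Materialize) ||
            rsinfo->expectedDesc == NULL)
            ereport(ERROR,
                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                     errmsg("materialize mode required, but it is not allowed in this context")));

        /* ... build a tuplestore in per-query memory and return it ... */
        return (Datum) 0;       /* placeholder; a real SRF returns its result */
    }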
Author: Melanie Plageman Reviewed-by: Justin Pryzby Discussion: https://postgr.es/m/CAAKRu_azyd1Z3W_r7Ou4sorTjRCs+PxeHw1CWJeXKofkE6TuZg@mail.gmail.com --- src/backend/utils/adt/varlena.c | 3 ++- src/backend/utils/misc/pg_config.c | 12 ++++++++---- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c index eda9c1e42c..b2003f5672 100644 --- a/src/backend/utils/adt/varlena.c +++ b/src/backend/utils/adt/varlena.c @@ -4839,7 +4839,8 @@ text_to_table(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsi->allowedModes & SFRM_Materialize)) + if (!(rsi->allowedModes & SFRM_Materialize) || + rsi->expectedDesc == NULL) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("materialize mode required, but it is not allowed in this context"))); diff --git a/src/backend/utils/misc/pg_config.c b/src/backend/utils/misc/pg_config.c index d916d7b2c4..2dc875ebfb 100644 --- a/src/backend/utils/misc/pg_config.c +++ b/src/backend/utils/misc/pg_config.c @@ -37,11 +37,15 @@ pg_config(PG_FUNCTION_ARGS) int i = 0; /* check to see if caller supports us returning a tuplestore */ - if (!rsinfo || !(rsinfo->allowedModes & SFRM_Materialize)) + if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("materialize mode required, but it is not " - "allowed in this context"))); + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("set-valued function called in context that cannot accept a set"))); + if (!(rsinfo->allowedModes & SFRM_Materialize) || + rsinfo->expectedDesc == NULL) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("materialize mode required, but it is not allowed in this context"))); per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; oldcontext = MemoryContextSwitchTo(per_query_ctx); From d7a978601d4e469f1a8f19122c049bb25fd7f096 Mon Sep 17 00:00:00 2001 From: Michael Paquier Date: Sat, 19 Feb 2022 15:06:53 +0900 Subject: [PATCH 018/108] doc: Simplify description of --with-lz4 LZ4 is used in much more areas of the system now than just WAL and table data. This commit simplifies the installation documentation of Windows and *nix by removing any details of the areas extended when building with LZ4. Author: Jeevan Ladhe Discussion: https://postgr.es/m/CANm22Cgny8AF76pitomXp603NagwKXbA4dyN2Fac4yHPebqdqg@mail.gmail.com --- doc/src/sgml/install-windows.sgml | 5 ++--- doc/src/sgml/installation.sgml | 2 -- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/doc/src/sgml/install-windows.sgml b/doc/src/sgml/install-windows.sgml index e08c9514d4..98fa6962f6 100644 --- a/doc/src/sgml/install-windows.sgml +++ b/doc/src/sgml/install-windows.sgml @@ -300,9 +300,8 @@ $ENV{MSBFLAGS}="/m"; LZ4 - Required for supporting LZ4 compression - method for compressing table or WAL data. Binaries and source can be - downloaded from + Required for supporting LZ4 compression. + Binaries and source can be downloaded from . diff --git a/doc/src/sgml/installation.sgml b/doc/src/sgml/installation.sgml index 311f7f261d..0f74252590 100644 --- a/doc/src/sgml/installation.sgml +++ b/doc/src/sgml/installation.sgml @@ -979,8 +979,6 @@ build-postgresql: Build with LZ4 compression support. - This allows the use of LZ4 for - compression of table and WAL data. 
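The next patch standardizes bitwise rotation on pg_rotate_left32()/pg_rotate_right32(). As a quick illustration of the primitive (a hypothetical standalone harness, not PostgreSQL code; valid for rotation counts of 1..31, which is how the call sites use it):

    #include <stdint.h>
    #include <stdio.h>

    /* same shape as the inline functions in pg_bitutils.h below */
    static inline uint32_t
    rotate_left32(uint32_t word, int n)
    {
        return (word << n) | (word >> (32 - n));
    }

    int
    main(void)
    {
        /* the high bit wraps around to bit 0: prints 00000003 */
        printf("%08x\n", rotate_left32(0x80000001u, 1));
        return 0;
    }

The old expressions such as "(hashkey << 1) | ((hashkey & 0x80000000) ? 1 : 0)" compute exactly this rotation; the named helper just says so directly.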
From 4b35408f1ed59dd590f683ae0f015bbaf3b84d3d Mon Sep 17 00:00:00 2001 From: John Naylor Date: Sun, 20 Feb 2022 13:22:08 +0700 Subject: [PATCH 019/108] Use bitwise rotate functions in more places There were a number of places in the code that used bespoke bit-twiddling expressions to do bitwise rotation. While we've had pg_rotate_right32() for a while now, we hadn't gotten around to standardizing on that. Do so now. Since many potential call sites look more natural with the "left" equivalent, add that function too. Reviewed by Tom Lane and Yugo Nagata Discussion: https://www.postgresql.org/message-id/CAFBsxsH7c1LC0CGZ0ADCBXLHU5-%3DKNXx-r7tHYPAW51b2HK4Qw%40mail.gmail.com --- src/backend/executor/execGrouping.c | 4 ++-- src/backend/executor/nodeHash.c | 4 ++-- src/backend/executor/nodeMemoize.c | 8 ++++---- src/backend/utils/adt/jsonb_util.c | 3 ++- src/backend/utils/adt/multirangetypes.c | 3 ++- src/backend/utils/adt/rangetypes.c | 3 ++- src/backend/utils/cache/catcache.c | 14 ++++---------- src/common/hashfn.c | 4 ++-- src/include/port/pg_bitutils.h | 10 ++++++++-- 9 files changed, 28 insertions(+), 25 deletions(-) diff --git a/src/backend/executor/execGrouping.c b/src/backend/executor/execGrouping.c index af6e9c42d8..5da4b37530 100644 --- a/src/backend/executor/execGrouping.c +++ b/src/backend/executor/execGrouping.c @@ -459,8 +459,8 @@ TupleHashTableHash_internal(struct tuplehash_hash *tb, Datum attr; bool isNull; - /* rotate hashkey left 1 bit at each step */ - hashkey = (hashkey << 1) | ((hashkey & 0x80000000) ? 1 : 0); + /* combine successive hashkeys by rotating */ + hashkey = pg_rotate_left32(hashkey, 1); attr = slot_getattr(slot, att, &isNull); diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c index 4d68a8b97b..3510a4247c 100644 --- a/src/backend/executor/nodeHash.c +++ b/src/backend/executor/nodeHash.c @@ -1840,8 +1840,8 @@ ExecHashGetHashValue(HashJoinTable hashtable, Datum keyval; bool isNull; - /* rotate hashkey left 1 bit at each step */ - hashkey = (hashkey << 1) | ((hashkey & 0x80000000) ? 1 : 0); + /* combine successive hashkeys by rotating */ + hashkey = pg_rotate_left32(hashkey, 1); /* * Get the join attribute value of the tuple diff --git a/src/backend/executor/nodeMemoize.c b/src/backend/executor/nodeMemoize.c index 55cdd5c4d9..23441e33ca 100644 --- a/src/backend/executor/nodeMemoize.c +++ b/src/backend/executor/nodeMemoize.c @@ -166,8 +166,8 @@ MemoizeHash_hash(struct memoize_hash *tb, const MemoizeKey *key) { for (int i = 0; i < numkeys; i++) { - /* rotate hashkey left 1 bit at each step */ - hashkey = (hashkey << 1) | ((hashkey & 0x80000000) ? 1 : 0); + /* combine successive hashkeys by rotating */ + hashkey = pg_rotate_left32(hashkey, 1); if (!pslot->tts_isnull[i]) /* treat nulls as having hash key 0 */ { @@ -189,8 +189,8 @@ MemoizeHash_hash(struct memoize_hash *tb, const MemoizeKey *key) for (int i = 0; i < numkeys; i++) { - /* rotate hashkey left 1 bit at each step */ - hashkey = (hashkey << 1) | ((hashkey & 0x80000000) ? 
1 : 0); + /* combine successive hashkeys by rotating */ + hashkey = pg_rotate_left32(hashkey, 1); if (!pslot->tts_isnull[i]) /* treat nulls as having hash key 0 */ { diff --git a/src/backend/utils/adt/jsonb_util.c b/src/backend/utils/adt/jsonb_util.c index 291fb722e2..60442758b3 100644 --- a/src/backend/utils/adt/jsonb_util.c +++ b/src/backend/utils/adt/jsonb_util.c @@ -18,6 +18,7 @@ #include "common/hashfn.h" #include "common/jsonapi.h" #include "miscadmin.h" +#include "port/pg_bitutils.h" #include "utils/builtins.h" #include "utils/datetime.h" #include "utils/json.h" @@ -1342,7 +1343,7 @@ JsonbHashScalarValue(const JsonbValue *scalarVal, uint32 *hash) * the previous value left 1 bit, then XOR'ing in the new * key/value/element's hash value. */ - *hash = (*hash << 1) | (*hash >> 31); + *hash = pg_rotate_left32(*hash, 1); *hash ^= tmp; } diff --git a/src/backend/utils/adt/multirangetypes.c b/src/backend/utils/adt/multirangetypes.c index 7b86421465..c474b24431 100644 --- a/src/backend/utils/adt/multirangetypes.c +++ b/src/backend/utils/adt/multirangetypes.c @@ -38,6 +38,7 @@ #include "lib/stringinfo.h" #include "libpq/pqformat.h" #include "miscadmin.h" +#include "port/pg_bitutils.h" #include "utils/builtins.h" #include "utils/lsyscache.h" #include "utils/rangetypes.h" @@ -2772,7 +2773,7 @@ hash_multirange(PG_FUNCTION_ARGS) /* Merge hashes of flags and bounds */ range_hash = hash_uint32((uint32) flags); range_hash ^= lower_hash; - range_hash = (range_hash << 1) | (range_hash >> 31); + range_hash = pg_rotate_left32(range_hash, 1); range_hash ^= upper_hash; /* diff --git a/src/backend/utils/adt/rangetypes.c b/src/backend/utils/adt/rangetypes.c index c3e6c721e6..cbff4e93d5 100644 --- a/src/backend/utils/adt/rangetypes.c +++ b/src/backend/utils/adt/rangetypes.c @@ -35,6 +35,7 @@ #include "lib/stringinfo.h" #include "libpq/pqformat.h" #include "miscadmin.h" +#include "port/pg_bitutils.h" #include "utils/builtins.h" #include "utils/date.h" #include "utils/lsyscache.h" @@ -1363,7 +1364,7 @@ hash_range(PG_FUNCTION_ARGS) /* Merge hashes of flags and bounds */ result = hash_uint32((uint32) flags); result ^= lower_hash; - result = (result << 1) | (result >> 31); + result = pg_rotate_left32(result, 1); result ^= upper_hash; PG_RETURN_INT32(result); diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c index eb83088089..ec073e1ed0 100644 --- a/src/backend/utils/cache/catcache.c +++ b/src/backend/utils/cache/catcache.c @@ -26,6 +26,7 @@ #include "catalog/pg_type.h" #include "common/hashfn.h" #include "miscadmin.h" +#include "port/pg_bitutils.h" #ifdef CATCACHE_STATS #include "storage/ipc.h" /* for on_proc_exit */ #endif @@ -281,25 +282,18 @@ CatalogCacheComputeHashValue(CatCache *cache, int nkeys, { case 4: oneHash = (cc_hashfunc[3]) (v4); - - hashValue ^= oneHash << 24; - hashValue ^= oneHash >> 8; + hashValue ^= pg_rotate_left32(oneHash, 24); /* FALLTHROUGH */ case 3: oneHash = (cc_hashfunc[2]) (v3); - - hashValue ^= oneHash << 16; - hashValue ^= oneHash >> 16; + hashValue ^= pg_rotate_left32(oneHash, 16); /* FALLTHROUGH */ case 2: oneHash = (cc_hashfunc[1]) (v2); - - hashValue ^= oneHash << 8; - hashValue ^= oneHash >> 24; + hashValue ^= pg_rotate_left32(oneHash, 8); /* FALLTHROUGH */ case 1: oneHash = (cc_hashfunc[0]) (v1); - hashValue ^= oneHash; break; default: diff --git a/src/common/hashfn.c b/src/common/hashfn.c index b7a322073d..8779575b99 100644 --- a/src/common/hashfn.c +++ b/src/common/hashfn.c @@ -24,6 +24,7 @@ #include "postgres.h" #include "common/hashfn.h" 
+#include "port/pg_bitutils.h" /* @@ -44,8 +45,7 @@ /* Get a bit mask of the bits set in non-uint32 aligned addresses */ #define UINT32_ALIGN_MASK (sizeof(uint32) - 1) -/* Rotate a uint32 value left by k bits - note multiple evaluation! */ -#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k)))) +#define rot(x,k) pg_rotate_left32(x, k) /*---------- * mix -- mix 3 32-bit values reversibly. diff --git a/src/include/port/pg_bitutils.h b/src/include/port/pg_bitutils.h index 44c74fb974..04e58cd1c4 100644 --- a/src/include/port/pg_bitutils.h +++ b/src/include/port/pg_bitutils.h @@ -285,12 +285,18 @@ extern int pg_popcount64(uint64 word); extern uint64 pg_popcount(const char *buf, int bytes); /* - * Rotate the bits of "word" to the right by n bits. + * Rotate the bits of "word" to the right/left by n bits. */ static inline uint32 pg_rotate_right32(uint32 word, int n) { - return (word >> n) | (word << (sizeof(word) * BITS_PER_BYTE - n)); + return (word >> n) | (word << (32 - n)); +} + +static inline uint32 +pg_rotate_left32(uint32 word, int n) +{ + return (word << n) | (word >> (32 - n)); } #endif /* PG_BITUTILS_H */ From 69639e2b5c12c6f1eafa9db1a6b7d16e6471ac61 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Sun, 20 Feb 2022 18:33:09 +0200 Subject: [PATCH 020/108] Fix uninitialized variable. I'm very surprised the compiler didn't warn about it. But Coverity and Valgrind did. --- src/backend/access/transam/xlog.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index ce78ac413e..0d2bd7a357 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -5286,6 +5286,8 @@ StartupXLOG(void) PerformWalRecovery(); performedWalRecovery = true; } + else + performedWalRecovery = false; /* * Finish WAL recovery. From cf12541f2bd5fd34425ecbb99f056a30ca5b7cae Mon Sep 17 00:00:00 2001 From: Andrew Dunstan Date: Fri, 18 Feb 2022 16:59:30 -0500 Subject: [PATCH 021/108] Ensure the right perl is used for TAP tests on msys In particular, perl with $Config{osname} = msys should only be used if the build target is msys (which is currently buildable but not usable). For builds targeted at native Windows, perl from the ucrt64 toolchain is suitable. Discussion: https://postgr.es/m/20220216210141.5glt5isg5qtwty4c@alap3.anarazel.de --- config/check_modules.pl | 5 +++++ configure | 1 + configure.ac | 1 + 3 files changed, 7 insertions(+) diff --git a/config/check_modules.pl b/config/check_modules.pl index cc0a7ab0e7..470c3e9c14 100644 --- a/config/check_modules.pl +++ b/config/check_modules.pl @@ -6,6 +6,7 @@ # use strict; use warnings; +use Config; use IPC::Run 0.79; @@ -19,5 +20,9 @@ diag("Test::More::VERSION: $Test::More::VERSION"); diag("Time::HiRes::VERSION: $Time::HiRes::VERSION"); +# Check that if prove is using msys perl it is for an msys target +ok(($ENV{__CONFIG_HOST_OS__} || "") eq 'msys', + "Msys perl used for correct target") + if $Config{osname} eq 'msys'; ok(1); done_testing(); diff --git a/configure b/configure index ca890b8b07..f3cb5c2b51 100755 --- a/configure +++ b/configure @@ -19758,6 +19758,7 @@ fi # installation than perl, eg on MSys, so we have to check using prove. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Perl modules required for TAP tests" >&5 $as_echo_n "checking for Perl modules required for TAP tests... " >&6; } + __CONFIG_HOST_OS__=$host_os; export __CONFIG_HOST_OS__ modulestderr=`"$PROVE" "$srcdir/config/check_modules.pl" 2>&1 >/dev/null` if test $? 
-eq 0; then # log the module version details, but don't show them interactively diff --git a/configure.ac b/configure.ac index 331683b336..19d1a80367 100644 --- a/configure.ac +++ b/configure.ac @@ -2432,6 +2432,7 @@ if test "$enable_tap_tests" = yes; then # AX_PROG_PERL_MODULES here, but prove might be part of a different Perl # installation than perl, eg on MSys, so we have to check using prove. AC_MSG_CHECKING(for Perl modules required for TAP tests) + __CONFIG_HOST_OS__=$host_os; export __CONFIG_HOST_OS__ [modulestderr=`"$PROVE" "$srcdir/config/check_modules.pl" 2>&1 >/dev/null`] if test $? -eq 0; then # log the module version details, but don't show them interactively From 95d981338b241ce1d1b2346cfe2df509bb7243ca Mon Sep 17 00:00:00 2001 From: Andrew Dunstan Date: Fri, 18 Feb 2022 17:00:03 -0500 Subject: [PATCH 022/108] Remove PostgreSQL::Test::Utils::perl2host completely Commit f1ac4a74de disabled this processing, and as nothing has broken (as expected) here we proceed to remove the routine and adjust all the call sites. Backpatch to release 10 Discussion: https://postgr.es/m/0ba775a2-8aa0-0d56-d780-69427cf6f33d@dunslane.net Discussion: https://postgr.es/m/20220125023609.5ohu3nslxgoygihl@alap3.anarazel.de --- src/bin/pg_basebackup/t/010_pg_basebackup.pl | 18 +++--- src/bin/pg_checksums/t/002_actions.pl | 1 - src/bin/pg_verifybackup/t/003_corruption.pl | 4 +- src/bin/pg_verifybackup/t/008_untar.pl | 3 +- src/bin/pgbench/t/001_pgbench_with_server.pl | 4 +- src/bin/scripts/t/090_reindexdb.pl | 1 - .../modules/test_misc/t/002_tablespace.pl | 4 +- src/test/perl/PostgreSQL/Test/Cluster.pm | 4 +- src/test/perl/PostgreSQL/Test/Utils.pm | 58 ------------------- src/test/recovery/t/014_unlogged_reinit.pl | 4 +- src/test/recovery/t/017_shm.pl | 2 +- src/test/recovery/t/018_wal_optimize.pl | 2 - .../recovery/t/025_stuck_on_old_timeline.pl | 2 +- src/test/recovery/t/027_stream_regress.pl | 4 +- src/test/ssl/t/001_ssltests.pl | 4 +- src/test/ssl/t/002_scram.pl | 2 +- src/test/ssl/t/003_sslinfo.pl | 2 +- 17 files changed, 25 insertions(+), 94 deletions(-) diff --git a/src/bin/pg_basebackup/t/010_pg_basebackup.pl b/src/bin/pg_basebackup/t/010_pg_basebackup.pl index 8c70e5b32b..75d6810d3e 100644 --- a/src/bin/pg_basebackup/t/010_pg_basebackup.pl +++ b/src/bin/pg_basebackup/t/010_pg_basebackup.pl @@ -261,13 +261,11 @@ # for the tablespace directories, which hopefully won't run afoul of # the 99 character length limit. my $sys_tempdir = PostgreSQL::Test::Utils::tempdir_short; -my $real_sys_tempdir = PostgreSQL::Test::Utils::perl2host($sys_tempdir) . "/tempdir"; -my $shorter_tempdir = $sys_tempdir . 
"/tempdir"; -dir_symlink "$tempdir", $shorter_tempdir; +my $real_sys_tempdir = "$sys_tempdir/tempdir"; +dir_symlink "$tempdir", $real_sys_tempdir; mkdir "$tempdir/tblspc1"; my $realTsDir = "$real_sys_tempdir/tblspc1"; -my $real_tempdir = PostgreSQL::Test::Utils::perl2host($tempdir); $node->safe_psql('postgres', "CREATE TABLESPACE tblspc1 LOCATION '$realTsDir';"); $node->safe_psql('postgres', @@ -346,7 +344,7 @@ foreach my $filename (@tempRelationFiles) { append_to_file( - "$shorter_tempdir/tblspc1/$tblSpc1Id/$postgresOid/$filename", + "$real_sys_tempdir/tblspc1/$tblSpc1Id/$postgresOid/$filename", 'TEMP_RELATION'); } @@ -358,7 +356,7 @@ [ @pg_basebackup_defs, '-D', "$tempdir/backup1", '-Fp', - "-T$realTsDir=$real_tempdir/tbackup/tblspc1", + "-T$realTsDir=$tempdir/tbackup/tblspc1", ], 'plain format with tablespaces succeeds with tablespace mapping'); ok(-d "$tempdir/tbackup/tblspc1", 'tablespace was relocated'); @@ -406,7 +404,7 @@ # Also remove temp relation files or tablespace drop will fail. my $filepath = - "$shorter_tempdir/tblspc1/$tblSpc1Id/$postgresOid/$filename"; + "$real_sys_tempdir/tblspc1/$tblSpc1Id/$postgresOid/$filename"; unlink($filepath) or BAIL_OUT("unable to unlink $filepath"); @@ -428,7 +426,7 @@ [ @pg_basebackup_defs, '-D', "$tempdir/backup3", '-Fp', - "-T$realTsDir=$real_tempdir/tbackup/tbl\\=spc2", + "-T$realTsDir=$tempdir/tbackup/tbl\\=spc2", ], 'mapping tablespace with = sign in path'); ok(-d "$tempdir/tbackup/tbl=spc2", 'tablespace with = sign was relocated'); @@ -517,7 +515,7 @@ [ @pg_basebackup_defs, '--target', 'blackhole', '-X', 'none' ], 'backup target blackhole'); $node->command_ok( - [ @pg_basebackup_defs, '--target', "server:$real_tempdir/backuponserver", '-X', 'none' ], + [ @pg_basebackup_defs, '--target', "server:$tempdir/backuponserver", '-X', 'none' ], 'backup target server'); ok(-f "$tempdir/backuponserver/base.tar", 'backup tar was created'); rmtree("$tempdir/backuponserver"); @@ -526,7 +524,7 @@ [qw(createuser --replication --role=pg_write_server_files backupuser)], 'create backup user'); $node->command_ok( - [ @pg_basebackup_defs, '-U', 'backupuser', '--target', "server:$real_tempdir/backuponserver", '-X', 'none' ], + [ @pg_basebackup_defs, '-U', 'backupuser', '--target', "server:$tempdir/backuponserver", '-X', 'none' ], 'backup target server'); ok(-f "$tempdir/backuponserver/base.tar", 'backup tar was created as non-superuser'); rmtree("$tempdir/backuponserver"); diff --git a/src/bin/pg_checksums/t/002_actions.pl b/src/bin/pg_checksums/t/002_actions.pl index 5563244f11..751f732451 100644 --- a/src/bin/pg_checksums/t/002_actions.pl +++ b/src/bin/pg_checksums/t/002_actions.pl @@ -207,7 +207,6 @@ sub check_relation_corruption my $basedir = $node->basedir; my $tablespace_dir = "$basedir/ts_corrupt_dir"; mkdir($tablespace_dir); -$tablespace_dir = PostgreSQL::Test::Utils::perl2host($tablespace_dir); $node->safe_psql('postgres', "CREATE TABLESPACE ts_corrupt LOCATION '$tablespace_dir';"); check_relation_corruption($node, 'corrupt2', 'ts_corrupt'); diff --git a/src/bin/pg_verifybackup/t/003_corruption.pl b/src/bin/pg_verifybackup/t/003_corruption.pl index f402d301ac..406c0c9877 100644 --- a/src/bin/pg_verifybackup/t/003_corruption.pl +++ b/src/bin/pg_verifybackup/t/003_corruption.pl @@ -18,7 +18,7 @@ # Include a user-defined tablespace in the hopes of detecting problems in that # area. 
-my $source_ts_path = PostgreSQL::Test::Utils::perl2host(PostgreSQL::Test::Utils::tempdir_short());
+my $source_ts_path = PostgreSQL::Test::Utils::tempdir_short();
 my $source_ts_prefix = $source_ts_path;
 $source_ts_prefix =~ s!(^[A-Z]:/[^/]*)/.*!$1!;
@@ -107,7 +107,7 @@
 
 	# Take a backup and check that it verifies OK.
 	my $backup_path = $primary->backup_dir . '/' . $name;
-	my $backup_ts_path = PostgreSQL::Test::Utils::perl2host(PostgreSQL::Test::Utils::tempdir_short());
+	my $backup_ts_path = PostgreSQL::Test::Utils::tempdir_short();
 	# The tablespace map parameter confuses Msys2, which tries to mangle
 	# it. Tell it not to.
 	# See https://www.msys2.org/wiki/Porting/#filesystem-namespaces
diff --git a/src/bin/pg_verifybackup/t/008_untar.pl b/src/bin/pg_verifybackup/t/008_untar.pl
index 6927ca4c74..383203d0b8 100644
--- a/src/bin/pg_verifybackup/t/008_untar.pl
+++ b/src/bin/pg_verifybackup/t/008_untar.pl
@@ -18,7 +18,6 @@
 $primary->start;
 
 my $backup_path = $primary->backup_dir . '/server-backup';
-my $real_backup_path = PostgreSQL::Test::Utils::perl2host($backup_path);
 my $extract_path = $primary->backup_dir . '/extracted-backup';
 
 my @test_configuration = (
@@ -61,7 +60,7 @@
 	# Take a server-side backup.
 	my @backup = (
 		'pg_basebackup', '--no-sync', '-cfast', '--target',
-		"server:$real_backup_path", '-Xfetch'
+		"server:$backup_path", '-Xfetch'
 	);
 	push @backup, @{$tc->{'backup_flags'}};
 	$primary->command_ok(\@backup,
diff --git a/src/bin/pgbench/t/001_pgbench_with_server.pl b/src/bin/pgbench/t/001_pgbench_with_server.pl
index 8b03900f32..f1341092fe 100644
--- a/src/bin/pgbench/t/001_pgbench_with_server.pl
+++ b/src/bin/pgbench/t/001_pgbench_with_server.pl
@@ -19,12 +19,10 @@
 # for partitioned tables.
 my $ts = $node->basedir . '/regress_pgbench_tap_1_ts_dir';
 mkdir $ts or die "cannot create directory $ts";
-# this takes care of WIN-specific path issues
-my $ets = PostgreSQL::Test::Utils::perl2host($ts);
 
 # the next commands will issue a syntax error if the path contains a "'"
 $node->safe_psql('postgres',
-	"CREATE TABLESPACE regress_pgbench_tap_1_ts LOCATION '$ets';");
+	"CREATE TABLESPACE regress_pgbench_tap_1_ts LOCATION '$ts';");
 
 # Test concurrent OID generation via pg_enum_oid_index. This indirectly
 # exercises LWLock and spinlock concurrency.
diff --git a/src/bin/scripts/t/090_reindexdb.pl b/src/bin/scripts/t/090_reindexdb.pl
index 70cd7606dd..398fc4e6bb 100644
--- a/src/bin/scripts/t/090_reindexdb.pl
+++ b/src/bin/scripts/t/090_reindexdb.pl
@@ -21,7 +21,6 @@
 # Create a tablespace for testing.
 my $tbspace_path = $node->basedir . '/regress_reindex_tbspace';
 mkdir $tbspace_path or die "cannot create directory $tbspace_path";
-$tbspace_path = PostgreSQL::Test::Utils::perl2host($tbspace_path);
 my $tbspace_name = 'reindex_tbspace';
 $node->safe_psql('postgres',
 	"CREATE TABLESPACE $tbspace_name LOCATION '$tbspace_path';");
diff --git a/src/test/modules/test_misc/t/002_tablespace.pl b/src/test/modules/test_misc/t/002_tablespace.pl
index 6fea419bb8..04e54394c1 100644
--- a/src/test/modules/test_misc/t/002_tablespace.pl
+++ b/src/test/modules/test_misc/t/002_tablespace.pl
@@ -14,10 +14,10 @@
 # Create a couple of directories to use as tablespaces.
my $basedir = $node->basedir(); -my $TS1_LOCATION = PostgreSQL::Test::Utils::perl2host("$basedir/ts1"); +my $TS1_LOCATION = "$basedir/ts1"; $TS1_LOCATION =~ s/\/\.\//\//g; # collapse foo/./bar to foo/bar mkdir($TS1_LOCATION); -my $TS2_LOCATION = PostgreSQL::Test::Utils::perl2host("$basedir/ts2"); +my $TS2_LOCATION = "$basedir/ts2"; $TS2_LOCATION =~ s/\/\.\//\//g; mkdir($TS2_LOCATION); diff --git a/src/test/perl/PostgreSQL/Test/Cluster.pm b/src/test/perl/PostgreSQL/Test/Cluster.pm index ed70eff374..702b4c2b1c 100644 --- a/src/test/perl/PostgreSQL/Test/Cluster.pm +++ b/src/test/perl/PostgreSQL/Test/Cluster.pm @@ -1076,7 +1076,7 @@ primary_conninfo='$root_connstr' sub enable_restoring { my ($self, $root_node, $standby) = @_; - my $path = PostgreSQL::Test::Utils::perl2host($root_node->archive_dir); + my $path = $root_node->archive_dir; my $name = $self->name; print "### Enabling WAL restore for node \"$name\"\n"; @@ -1144,7 +1144,7 @@ sub set_standby_mode sub enable_archiving { my ($self) = @_; - my $path = PostgreSQL::Test::Utils::perl2host($self->archive_dir); + my $path = $self->archive_dir; my $name = $self->name; print "### Enabling WAL archiving for node \"$name\"\n"; diff --git a/src/test/perl/PostgreSQL/Test/Utils.pm b/src/test/perl/PostgreSQL/Test/Utils.pm index 31e2b0315e..fc8ca74194 100644 --- a/src/test/perl/PostgreSQL/Test/Utils.pm +++ b/src/test/perl/PostgreSQL/Test/Utils.pm @@ -24,7 +24,6 @@ PostgreSQL::Test::Utils - helper module for writing PostgreSQL's C tests. # Miscellanea print "on Windows" if $PostgreSQL::Test::Utils::windows_os; - my $path = PostgreSQL::Test::Utils::perl2host($backup_dir); ok(check_mode_recursive($stream_dir, 0700, 0600), "check stream dir permissions"); PostgreSQL::Test::Utils::system_log('pg_ctl', 'kill', 'QUIT', $slow_pid); @@ -297,61 +296,6 @@ sub tempdir_short =pod -=item perl2host() - -Translate a virtual file name to a host file name. Currently, this is a no-op -except for the case of Perl=msys and host=mingw32. The subject need not -exist, but its parent or grandparent directory must exist unless cygpath is -available. - -The returned path uses forward slashes but has no trailing slash. - -=cut - -sub perl2host -{ - my ($subject) = @_; - return $subject; - if ($is_msys2) - { - # get absolute, windows type path - my $path = qx{cygpath -a -m "$subject"}; - if (!$?) - { - chomp $path; - $path =~ s!/$!!; - return $path if $path; - } - # fall through if this didn't work. - } - my $here = cwd; - my $leaf; - if (chdir $subject) - { - $leaf = ''; - } - else - { - $leaf = '/' . basename $subject; - my $parent = dirname $subject; - if (!chdir $parent) - { - $leaf = '/' . basename($parent) . $leaf; - $parent = dirname $parent; - chdir $parent or die "could not chdir \"$parent\": $!"; - } - } - - # this odd way of calling 'pwd -W' is the only way that seems to work. - my $dir = qx{sh -c "pwd -W"}; - chomp $dir; - $dir =~ s!/$!!; - chdir $here; - return $dir . 
$leaf; -} - -=pod - =item has_wal_read_bug() Returns true if $tmp_check is subject to a sparc64+ext4 bug that causes WAL @@ -727,8 +671,6 @@ sub dir_symlink my $newname = shift; if ($windows_os) { - $oldname = perl2host($oldname); - $newname = perl2host($newname); $oldname =~ s,/,\\,g; $newname =~ s,/,\\,g; my $cmd = qq{mklink /j "$newname" "$oldname"}; diff --git a/src/test/recovery/t/014_unlogged_reinit.pl b/src/test/recovery/t/014_unlogged_reinit.pl index da77c1211f..f3199fbd2e 100644 --- a/src/test/recovery/t/014_unlogged_reinit.pl +++ b/src/test/recovery/t/014_unlogged_reinit.pl @@ -33,9 +33,7 @@ my $tablespaceDir = PostgreSQL::Test::Utils::tempdir; -my $realTSDir = PostgreSQL::Test::Utils::perl2host($tablespaceDir); - -$node->safe_psql('postgres', "CREATE TABLESPACE ts1 LOCATION '$realTSDir'"); +$node->safe_psql('postgres', "CREATE TABLESPACE ts1 LOCATION '$tablespaceDir'"); $node->safe_psql('postgres', 'CREATE UNLOGGED TABLE ts1_unlogged (id int) TABLESPACE ts1'); diff --git a/src/test/recovery/t/017_shm.pl b/src/test/recovery/t/017_shm.pl index 678a252165..88f9e2b9cd 100644 --- a/src/test/recovery/t/017_shm.pl +++ b/src/test/recovery/t/017_shm.pl @@ -112,7 +112,7 @@ sub log_ipcs $gnat->start; log_ipcs(); -my $regress_shlib = PostgreSQL::Test::Utils::perl2host($ENV{REGRESS_SHLIB}); +my $regress_shlib = $ENV{REGRESS_SHLIB}; $gnat->safe_psql('postgres', <basedir . '/tablespace_other'; mkdir($tablespace_dir); - $tablespace_dir = PostgreSQL::Test::Utils::perl2host($tablespace_dir); my $result; # Test redo of CREATE TABLESPACE. @@ -152,7 +151,6 @@ sub run_wal_optimize $copy_file, qq(20000,30000 20001,30001 20002,30002)); - $copy_file = PostgreSQL::Test::Utils::perl2host($copy_file); # Test truncation with inserted tuples using both INSERT and COPY. Tuples # inserted after the truncation should be seen. diff --git a/src/test/recovery/t/025_stuck_on_old_timeline.pl b/src/test/recovery/t/025_stuck_on_old_timeline.pl index d113c8cc9c..fd821242e8 100644 --- a/src/test/recovery/t/025_stuck_on_old_timeline.pl +++ b/src/test/recovery/t/025_stuck_on_old_timeline.pl @@ -28,7 +28,7 @@ # Note: consistent use of forward slashes here avoids any escaping problems # that arise from use of backslashes. That means we need to double-quote all # the paths in the archive_command -my $perlbin = PostgreSQL::Test::Utils::perl2host($^X); +my $perlbin = $^X; $perlbin =~ s!\\!/!g if $PostgreSQL::Test::Utils::windows_os; my $archivedir_primary = $node_primary->archive_dir; $archivedir_primary =~ s!\\!/!g if $PostgreSQL::Test::Utils::windows_os; diff --git a/src/test/recovery/t/027_stream_regress.pl b/src/test/recovery/t/027_stream_regress.pl index 4f82a54f93..c40951b7ba 100644 --- a/src/test/recovery/t/027_stream_regress.pl +++ b/src/test/recovery/t/027_stream_regress.pl @@ -48,8 +48,8 @@ 'max_standby_streaming_delay = 600s'); $node_standby_1->start; -my $dlpath = PostgreSQL::Test::Utils::perl2host(dirname($ENV{REGRESS_SHLIB})); -my $outputdir = PostgreSQL::Test::Utils::perl2host($PostgreSQL::Test::Utils::tmp_check); +my $dlpath = dirname($ENV{REGRESS_SHLIB}); +my $outputdir = $PostgreSQL::Test::Utils::tmp_check; # Run the regression tests against the primary. 
my $extra_opts = $ENV{EXTRA_REGRESS_OPTS} || ""; diff --git a/src/test/ssl/t/001_ssltests.pl b/src/test/ssl/t/001_ssltests.pl index b8f8b65a8f..5c5b16fbe7 100644 --- a/src/test/ssl/t/001_ssltests.pl +++ b/src/test/ssl/t/001_ssltests.pl @@ -51,7 +51,7 @@ "couldn't copy ssl/$keyfile to $cert_tempdir/$keyfile for permissions change: $!"; chmod 0600, "$cert_tempdir/$keyfile" or die "failed to change permissions on $cert_tempdir/$keyfile: $!"; - $key{$keyfile} = PostgreSQL::Test::Utils::perl2host("$cert_tempdir/$keyfile"); + $key{$keyfile} = "$cert_tempdir/$keyfile"; $key{$keyfile} =~ s!\\!/!g if $PostgreSQL::Test::Utils::windows_os; } @@ -63,7 +63,7 @@ "couldn't copy ssl/client_key to $cert_tempdir/client_wrongperms.key for permission change: $!"; chmod 0644, "$cert_tempdir/client_wrongperms.key" or die "failed to change permissions on $cert_tempdir/client_wrongperms.key: $!"; -$key{'client_wrongperms.key'} = PostgreSQL::Test::Utils::perl2host("$cert_tempdir/client_wrongperms.key"); +$key{'client_wrongperms.key'} = "$cert_tempdir/client_wrongperms.key"; $key{'client_wrongperms.key'} =~ s!\\!/!g if $PostgreSQL::Test::Utils::windows_os; #### Set up the server. diff --git a/src/test/ssl/t/002_scram.pl b/src/test/ssl/t/002_scram.pl index 41d231c55d..4decd7a506 100644 --- a/src/test/ssl/t/002_scram.pl +++ b/src/test/ssl/t/002_scram.pl @@ -94,7 +94,7 @@ # be used in a different test, so the name of this temporary client key # is chosen here to be unique. my $cert_tempdir = PostgreSQL::Test::Utils::tempdir(); -my $client_tmp_key = PostgreSQL::Test::Utils::perl2host("$cert_tempdir/client_scram.key"); +my $client_tmp_key = "$cert_tempdir/client_scram.key"; copy("ssl/client.key", "$cert_tempdir/client_scram.key") or die "couldn't copy ssl/client_key to $cert_tempdir/client_scram.key for permission change: $!"; diff --git a/src/test/ssl/t/003_sslinfo.pl b/src/test/ssl/t/003_sslinfo.pl index f008ea6594..95742081f3 100644 --- a/src/test/ssl/t/003_sslinfo.pl +++ b/src/test/ssl/t/003_sslinfo.pl @@ -34,7 +34,7 @@ # The client's private key must not be world-readable, so take a copy # of the key stored in the code tree and update its permissions. my $cert_tempdir = PostgreSQL::Test::Utils::tempdir(); -my $client_tmp_key = PostgreSQL::Test::Utils::perl2host("$cert_tempdir/client_ext.key"); +my $client_tmp_key = "$cert_tempdir/client_ext.key"; copy("ssl/client_ext.key", "$cert_tempdir/client_ext.key") or die "couldn't copy ssl/client_ext.key to $cert_tempdir/client_ext.key for permissions change: $!"; From 1c6d4629394d1b696b4e47ab4c501752e8c974e7 Mon Sep 17 00:00:00 2001 From: Andrew Dunstan Date: Sun, 20 Feb 2022 11:47:56 -0500 Subject: [PATCH 023/108] Remove most msys special processing in TAP tests Following migration of Windows buildfarm members running TAP tests to use of ucrt64 perl for those tests, special processing for msys perl is no longer necessary and so is removed. 
Backpatch to release 10 Discussion: https://postgr.es/m/c65a8781-77ac-ea95-d185-6db291e1baeb@dunslane.net --- src/bin/pg_ctl/t/001_start_stop.pl | 11 +---------- src/bin/pg_rewind/t/RewindTest.pm | 1 - src/test/perl/PostgreSQL/Test/Cluster.pm | 15 +-------------- src/test/perl/PostgreSQL/Test/Utils.pm | 6 ------ src/test/recovery/t/021_row_visibility.pl | 3 --- src/test/recovery/t/cp_history_files | 7 ------- 6 files changed, 2 insertions(+), 41 deletions(-) diff --git a/src/bin/pg_ctl/t/001_start_stop.pl b/src/bin/pg_ctl/t/001_start_stop.pl index 7d3fbc3f6a..3b45390ced 100644 --- a/src/bin/pg_ctl/t/001_start_stop.pl +++ b/src/bin/pg_ctl/t/001_start_stop.pl @@ -47,16 +47,7 @@ 'pg_ctl', 'start', '-D', "$tempdir/data", '-l', "$PostgreSQL::Test::Utils::log_path/001_start_stop_server.log" ]; -if ($Config{osname} ne 'msys') -{ - command_like($ctlcmd, qr/done.*server started/s, 'pg_ctl start'); -} -else -{ - - # use the version of command_like that doesn't hang on Msys here - command_like_safe($ctlcmd, qr/done.*server started/s, 'pg_ctl start'); -} +command_like($ctlcmd, qr/done.*server started/s, 'pg_ctl start'); # sleep here is because Windows builds can't check postmaster.pid exactly, # so they may mistake a pre-existing postmaster.pid for one created by the diff --git a/src/bin/pg_rewind/t/RewindTest.pm b/src/bin/pg_rewind/t/RewindTest.pm index 60e3234788..2fedc626cc 100644 --- a/src/bin/pg_rewind/t/RewindTest.pm +++ b/src/bin/pg_rewind/t/RewindTest.pm @@ -115,7 +115,6 @@ sub check_query } else { - $stdout =~ s/\r\n/\n/g if $Config{osname} eq 'msys'; is($stdout, $expected_stdout, "$test_name: query result matches"); } return; diff --git a/src/test/perl/PostgreSQL/Test/Cluster.pm b/src/test/perl/PostgreSQL/Test/Cluster.pm index 702b4c2b1c..be05845248 100644 --- a/src/test/perl/PostgreSQL/Test/Cluster.pm +++ b/src/test/perl/PostgreSQL/Test/Cluster.pm @@ -904,9 +904,7 @@ sub kill9 local %ENV = $self->_get_env(); print "### Killing node \"$name\" using signal 9\n"; - # kill(9, ...) fails under msys Perl 5.8.8, so fall back on pg_ctl. - kill(9, $self->{_pid}) - or PostgreSQL::Test::Utils::system_or_bail('pg_ctl', 'kill', 'KILL', $self->{_pid}); + kill(9, $self->{_pid}); $self->{_pid} = undef; return; } @@ -1845,19 +1843,13 @@ sub psql } }; - # Note: on Windows, IPC::Run seems to convert \r\n to \n in program output - # if we're using native Perl, but not if we're using MSys Perl. So do it - # by hand in the latter case, here and elsewhere. 
- if (defined $$stdout) { - $$stdout =~ s/\r\n/\n/g if $Config{osname} eq 'msys'; chomp $$stdout; } if (defined $$stderr) { - $$stderr =~ s/\r\n/\n/g if $Config{osname} eq 'msys'; chomp $$stderr; } @@ -2337,9 +2329,7 @@ sub poll_query_until my $result = IPC::Run::run $cmd, '<', \$query, '>', \$stdout, '2>', \$stderr; - $stdout =~ s/\r\n/\n/g if $Config{osname} eq 'msys'; chomp($stdout); - $stderr =~ s/\r\n/\n/g if $Config{osname} eq 'msys'; chomp($stderr); if ($stdout eq $expected && $stderr eq '') @@ -2849,9 +2839,6 @@ sub pg_recvlogical_upto } }; - $stdout =~ s/\r\n/\n/g if $Config{osname} eq 'msys'; - $stderr =~ s/\r\n/\n/g if $Config{osname} eq 'msys'; - if (wantarray) { return ($ret, $stdout, $stderr, $timeout); diff --git a/src/test/perl/PostgreSQL/Test/Utils.pm b/src/test/perl/PostgreSQL/Test/Utils.pm index fc8ca74194..2c0c72f57a 100644 --- a/src/test/perl/PostgreSQL/Test/Utils.pm +++ b/src/test/perl/PostgreSQL/Test/Utils.pm @@ -401,7 +401,6 @@ sub run_command my ($cmd) = @_; my ($stdout, $stderr); my $result = IPC::Run::run $cmd, '>', \$stdout, '2>', \$stderr; - foreach ($stderr, $stdout) { s/\r\n/\n/g if $Config{osname} eq 'msys'; } chomp($stdout); chomp($stderr); return ($stdout, $stderr); @@ -486,7 +485,6 @@ sub slurp_file $contents = <$fh>; close $fh; - $contents =~ s/\r\n/\n/g if $Config{osname} eq 'msys'; return $contents; } @@ -844,7 +842,6 @@ sub command_like my $result = IPC::Run::run $cmd, '>', \$stdout, '2>', \$stderr; ok($result, "$test_name: exit code 0"); is($stderr, '', "$test_name: no stderr"); - $stdout =~ s/\r\n/\n/g if $Config{osname} eq 'msys'; like($stdout, $expected_stdout, "$test_name: matches"); return; } @@ -897,7 +894,6 @@ sub command_fails_like print("# Running: " . join(" ", @{$cmd}) . "\n"); my $result = IPC::Run::run $cmd, '>', \$stdout, '2>', \$stderr; ok(!$result, "$test_name: exit code not 0"); - $stderr =~ s/\r\n/\n/g if $Config{osname} eq 'msys'; like($stderr, $expected_stderr, "$test_name: matches"); return; } @@ -942,8 +938,6 @@ sub command_checks_all if $ret & 127; $ret = $ret >> 8; - foreach ($stderr, $stdout) { s/\r\n/\n/g if $Config{osname} eq 'msys'; } - # check status ok($ret == $expected_ret, "$test_name status (got $ret vs expected $expected_ret)"); diff --git a/src/test/recovery/t/021_row_visibility.pl b/src/test/recovery/t/021_row_visibility.pl index e2743518de..75cd487451 100644 --- a/src/test/recovery/t/021_row_visibility.pl +++ b/src/test/recovery/t/021_row_visibility.pl @@ -182,9 +182,6 @@ sub send_query_and_wait $$psql{run}->pump_nb(); while (1) { - # See PostgreSQL::Test::Cluster.pm's psql() - $$psql{stdout} =~ s/\r\n/\n/g if $Config{osname} eq 'msys'; - last if $$psql{stdout} =~ /$untl/; if ($psql_timeout->is_expired) diff --git a/src/test/recovery/t/cp_history_files b/src/test/recovery/t/cp_history_files index 66f1b598fe..cfeea41e5b 100644 --- a/src/test/recovery/t/cp_history_files +++ b/src/test/recovery/t/cp_history_files @@ -7,11 +7,4 @@ use warnings; die "wrong number of arguments" if @ARGV != 2; my ($source, $target) = @ARGV; exit if $source !~ /history/; -if ($^O eq 'msys') -{ - # make a windows path look like an msys path if necessary - $source =~ s!^([A-Za-z]):!'/' . lc($1)!e; - $source =~ s!\\!/!g; -} - copy($source, $target) or die "couldn't copy $source to $target: $!"; From 83a7637e2c5be27a0788b920501dde284b3fca33 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Sun, 20 Feb 2022 15:02:41 -0500 Subject: [PATCH 024/108] Reset conn->errorReported when PQrequestCancel sets errorMessage. Oversight in commit 618c16707. 
This is mainly neatnik-ism, since if PQrequestCancel is used per its API contract, we should perform pqClearConnErrorState before reaching any place that would consult errorReported. But still, it seems like a bad idea to potentially leave errorReported pointing past errorMessage.len. --- src/interfaces/libpq/fe-connect.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c index 2a3d68b4d1..1c5a2b43e9 100644 --- a/src/interfaces/libpq/fe-connect.c +++ b/src/interfaces/libpq/fe-connect.c @@ -4687,6 +4687,7 @@ PQrequestCancel(PGconn *conn) "PQrequestCancel() -- connection is not open\n", conn->errorMessage.maxlen); conn->errorMessage.len = strlen(conn->errorMessage.data); + conn->errorReported = 0; return false; } @@ -4706,7 +4707,10 @@ PQrequestCancel(PGconn *conn) } if (!r) + { conn->errorMessage.len = strlen(conn->errorMessage.data); + conn->errorReported = 0; + } return r; } From fbabdf8f9a55894f7cd8f0fa86c9a4ef55576296 Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Sun, 20 Feb 2022 13:51:36 -0800 Subject: [PATCH 025/108] Fix meaning-changing typo introduced in fa0e03c15a9f. --- src/backend/utils/init/postinit.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c index e2208151e4..8a332a72b1 100644 --- a/src/backend/utils/init/postinit.c +++ b/src/backend/utils/init/postinit.c @@ -710,7 +710,7 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username, } /* - * If this is either a bootstrap process nor a standalone backend, start + * If this is either a bootstrap process or a standalone backend, start * up the XLOG machinery, and register to have it closed down at exit. * In other cases, the startup process is responsible for starting up * the XLOG machinery, and the checkpointer for closing it down. From bf4ed12b58205d8527053d53c8f473074e191b8d Mon Sep 17 00:00:00 2001 From: Michael Paquier Date: Mon, 21 Feb 2022 09:55:55 +0900 Subject: [PATCH 026/108] doc: Mention environment variable ZSTD in the TAP tests for MSVC 6c417bb has added the build infrastructure to support ZSTD, but forgot to update this section of the docs to mention the variable ZSTD, as per the change done in vcregress.pl. While on it, reword this section of the docs to describe what happens in the default case, as per a suggestion from Robert Haas. Discussion: https://postgr.es/m/YhCL0fKnDv/Zvtuo@paquier.xyz --- doc/src/sgml/install-windows.sgml | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/doc/src/sgml/install-windows.sgml b/doc/src/sgml/install-windows.sgml index 98fa6962f6..43d05bde4e 100644 --- a/doc/src/sgml/install-windows.sgml +++ b/doc/src/sgml/install-windows.sgml @@ -534,9 +534,9 @@ $ENV{PROVE_TESTS}='t/020*.pl t/010*.pl' GZIP_PROGRAM - Path to a gzip command. The default is - gzip, that would be the command found in - PATH. + Path to a gzip command. The default is + gzip, which will search for a command by that + name in the configured PATH. @@ -544,8 +544,8 @@ $ENV{PROVE_TESTS}='t/020*.pl t/010*.pl' LZ4 Path to a lz4 command. The default is - lz4, that would be the command found in - PATH. + lz4, which will search for a command by that + name in the configured PATH. @@ -553,8 +553,17 @@ $ENV{PROVE_TESTS}='t/020*.pl t/010*.pl' TAR Path to a tar command. The default is - tar, that would be the command found in - PATH. + tar, which will search for a command by that + name in the configured PATH. 
+ + + + + ZSTD + + Path to a zstd command. The default is + zstd, which will search for a command by that + name in the configured PATH. From 5c868c92caa864d223006c095d623b8086754c6f Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Mon, 21 Feb 2022 09:42:46 +0100 Subject: [PATCH 027/108] Fix possible null pointer reference Per Coverity. Introduced in 37851a8b83d3d57ca48736093b10aa5f3bc0c177. --- src/backend/utils/init/postinit.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c index 8a332a72b1..a29fa0b3e6 100644 --- a/src/backend/utils/init/postinit.c +++ b/src/backend/utils/init/postinit.c @@ -439,8 +439,7 @@ CheckMyDatabase(const char *name, bool am_superuser, bool override_allow_connect ereport(WARNING, (errmsg("database \"%s\" has no actual collation version, but a version was recorded", name))); - - if (strcmp(actual_versionstr, collversionstr) != 0) + else if (strcmp(actual_versionstr, collversionstr) != 0) ereport(WARNING, (errmsg("database \"%s\" has a collation version mismatch", name), From abe81ee08468e63f94b91e484f47c867bcc706d3 Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Mon, 21 Feb 2022 10:28:43 +0100 Subject: [PATCH 028/108] pgcrypto: Remove unused error code PXE_MCRYPT_INTERNAL was apparently never used even when it was added. --- contrib/pgcrypto/px.c | 1 - contrib/pgcrypto/px.h | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/contrib/pgcrypto/px.c b/contrib/pgcrypto/px.c index 4205e9c3ef..360acbd593 100644 --- a/contrib/pgcrypto/px.c +++ b/contrib/pgcrypto/px.c @@ -55,7 +55,6 @@ static const struct error_desc px_err_list[] = { {PXE_ARGUMENT_ERROR, "Illegal argument to function"}, {PXE_UNKNOWN_SALT_ALGO, "Unknown salt algorithm"}, {PXE_BAD_SALT_ROUNDS, "Incorrect number of rounds"}, - {PXE_MCRYPT_INTERNAL, "mcrypt internal error"}, {PXE_NO_RANDOM, "Failed to generate strong random bits"}, {PXE_DECRYPT_FAILED, "Decryption failed"}, {PXE_ENCRYPT_FAILED, "Encryption failed"}, diff --git a/contrib/pgcrypto/px.h b/contrib/pgcrypto/px.h index 17d6f22498..3ed9f711c8 100644 --- a/contrib/pgcrypto/px.h +++ b/contrib/pgcrypto/px.h @@ -58,7 +58,7 @@ #define PXE_ARGUMENT_ERROR -13 #define PXE_UNKNOWN_SALT_ALGO -14 #define PXE_BAD_SALT_ROUNDS -15 -#define PXE_MCRYPT_INTERNAL -16 +/* -16 is unused */ #define PXE_NO_RANDOM -17 #define PXE_DECRYPT_FAILED -18 #define PXE_ENCRYPT_FAILED -19 From 3f649663a49d5bb815858d90a271bb532c58fd0e Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Mon, 21 Feb 2022 10:55:03 +0100 Subject: [PATCH 029/108] pgcrypto: Remove unused error code PXE_DEV_READ_ERROR hasn't been used since random device support was removed from pgcrypto (fe0a0b5993dfe24e4b3bcf52fa64ff41a444b8f1). 
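Note that both removals leave the numeric values of the surviving codes untouched (hence the "/* -NN is unused */" placeholders): the codes are translated to messages by scanning px_err_list for a matching value, so renumbering would silently change the meaning of existing codes. A sketch of that lookup, modeled on px_strerror() in px.c (details may differ slightly):

    const char *
    px_strerror(int err)
    {
        const struct error_desc *e;

        /* linear scan of the table; the list is terminated by a NULL desc */
        for (e = px_err_list; e->desc; e++)
            if (e->err == err)
                return e->desc;
        return "Bad error code";
    }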
--- contrib/pgcrypto/px.c | 1 - contrib/pgcrypto/px.h | 3 ++- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/pgcrypto/px.c b/contrib/pgcrypto/px.c index 360acbd593..75e2426e9f 100644 --- a/contrib/pgcrypto/px.c +++ b/contrib/pgcrypto/px.c @@ -50,7 +50,6 @@ static const struct error_desc px_err_list[] = { {PXE_KEY_TOO_BIG, "Key was too big"}, {PXE_CIPHER_INIT, "Cipher cannot be initialized ?"}, {PXE_HASH_UNUSABLE_FOR_HMAC, "This hash algorithm is unusable for HMAC"}, - {PXE_DEV_READ_ERROR, "Error reading from random device"}, {PXE_BUG, "pgcrypto bug"}, {PXE_ARGUMENT_ERROR, "Illegal argument to function"}, {PXE_UNKNOWN_SALT_ALGO, "Unknown salt algorithm"}, diff --git a/contrib/pgcrypto/px.h b/contrib/pgcrypto/px.h index 3ed9f711c8..eef49a8b76 100644 --- a/contrib/pgcrypto/px.h +++ b/contrib/pgcrypto/px.h @@ -53,7 +53,8 @@ #define PXE_KEY_TOO_BIG -7 #define PXE_CIPHER_INIT -8 #define PXE_HASH_UNUSABLE_FOR_HMAC -9 -#define PXE_DEV_READ_ERROR -10 +/* -10 is unused */ +/* -11 is unused */ #define PXE_BUG -12 #define PXE_ARGUMENT_ERROR -13 #define PXE_UNKNOWN_SALT_ALGO -14 From 27b02e070fd1b6622b10937d9346b65ffacbc351 Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Mon, 21 Feb 2022 08:34:59 -0800 Subject: [PATCH 030/108] pg_upgrade: Don't print progress status when output is not a tty. Until this change pg_upgrade with output redirected to a file / pipe would end up printing all files in the cluster. This has made check-world output exceedingly verbose. Author: Andres Freund Reviewed-By: Justin Pryzby Reviewed-By: Daniel Gustafsson Discussion: https://postgr.es/m/CA+hUKGKjrV61ZVJ8OSag+3rKRmCZXPc03bDyWMqhXg3rdZ=fOw@mail.gmail.com --- src/bin/pg_upgrade/dump.c | 2 +- src/bin/pg_upgrade/option.c | 2 ++ src/bin/pg_upgrade/pg_upgrade.c | 2 +- src/bin/pg_upgrade/pg_upgrade.h | 2 ++ src/bin/pg_upgrade/relfilenode.c | 6 ++-- src/bin/pg_upgrade/util.c | 61 ++++++++++++++++++++++++++------ 6 files changed, 59 insertions(+), 16 deletions(-) diff --git a/src/bin/pg_upgrade/dump.c b/src/bin/pg_upgrade/dump.c index b69b4f9569..29b9e44f78 100644 --- a/src/bin/pg_upgrade/dump.c +++ b/src/bin/pg_upgrade/dump.c @@ -29,7 +29,7 @@ generate_old_dump(void) GLOBALS_DUMP_FILE); check_ok(); - prep_status("Creating dump of database schemas\n"); + prep_status_progress("Creating dump of database schemas"); /* create per-db dump files */ for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++) diff --git a/src/bin/pg_upgrade/option.c b/src/bin/pg_upgrade/option.c index d2c82cc2bb..e75be2c423 100644 --- a/src/bin/pg_upgrade/option.c +++ b/src/bin/pg_upgrade/option.c @@ -207,6 +207,8 @@ parseCommandLine(int argc, char *argv[]) if (log_opts.verbose) pg_log(PG_REPORT, "Running in verbose mode\n"); + log_opts.isatty = isatty(fileno(stdout)); + /* Turn off read-only mode; add prefix to PGOPTIONS? 
*/ if (getenv("PGOPTIONS")) { diff --git a/src/bin/pg_upgrade/pg_upgrade.c b/src/bin/pg_upgrade/pg_upgrade.c index f66bbd5307..ecb3e1f647 100644 --- a/src/bin/pg_upgrade/pg_upgrade.c +++ b/src/bin/pg_upgrade/pg_upgrade.c @@ -381,7 +381,7 @@ create_new_objects(void) { int dbnum; - prep_status("Restoring database schemas in the new cluster\n"); + prep_status_progress("Restoring database schemas in the new cluster"); /* * We cannot process the template1 database concurrently with others, diff --git a/src/bin/pg_upgrade/pg_upgrade.h b/src/bin/pg_upgrade/pg_upgrade.h index 0aca0a77aa..ca86c11292 100644 --- a/src/bin/pg_upgrade/pg_upgrade.h +++ b/src/bin/pg_upgrade/pg_upgrade.h @@ -274,6 +274,7 @@ typedef struct char *basedir; /* Base output directory */ char *dumpdir; /* Dumps */ char *logdir; /* Log files */ + bool isatty; /* is stdout a tty */ } LogOpts; @@ -427,6 +428,7 @@ void pg_log(eLogType type, const char *fmt,...) pg_attribute_printf(2, 3); void pg_fatal(const char *fmt,...) pg_attribute_printf(1, 2) pg_attribute_noreturn(); void end_progress_output(void); void prep_status(const char *fmt,...) pg_attribute_printf(1, 2); +void prep_status_progress(const char *fmt,...) pg_attribute_printf(1, 2); void check_ok(void); unsigned int str2uint(const char *str); diff --git a/src/bin/pg_upgrade/relfilenode.c b/src/bin/pg_upgrade/relfilenode.c index 2f4deb3416..d23ac884bd 100644 --- a/src/bin/pg_upgrade/relfilenode.c +++ b/src/bin/pg_upgrade/relfilenode.c @@ -32,13 +32,13 @@ transfer_all_new_tablespaces(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, switch (user_opts.transfer_mode) { case TRANSFER_MODE_CLONE: - pg_log(PG_REPORT, "Cloning user relation files\n"); + prep_status_progress("Cloning user relation files"); break; case TRANSFER_MODE_COPY: - pg_log(PG_REPORT, "Copying user relation files\n"); + prep_status_progress("Copying user relation files"); break; case TRANSFER_MODE_LINK: - pg_log(PG_REPORT, "Linking user relation files\n"); + prep_status_progress("Linking user relation files"); break; } diff --git a/src/bin/pg_upgrade/util.c b/src/bin/pg_upgrade/util.c index d98deb7f24..414de06349 100644 --- a/src/bin/pg_upgrade/util.c +++ b/src/bin/pg_upgrade/util.c @@ -38,15 +38,18 @@ report_status(eLogType type, const char *fmt,...) } -/* force blank output for progress display */ void end_progress_output(void) { /* - * In case nothing printed; pass a space so gcc doesn't complain about - * empty format string. + * For output to a tty, erase prior contents of progress line. When either + * tty or verbose, indent so that report_status() output will align + * nicely. */ - prep_status(" "); + if (log_opts.isatty) + pg_log(PG_REPORT, "\r%-*s", MESSAGE_WIDTH, ""); + else if (log_opts.verbose) + pg_log(PG_REPORT, "%-*s", MESSAGE_WIDTH, ""); } @@ -75,14 +78,43 @@ prep_status(const char *fmt,...) vsnprintf(message, sizeof(message), fmt, args); va_end(args); - if (strlen(message) > 0 && message[strlen(message) - 1] == '\n') - pg_log(PG_REPORT, "%s", message); + /* trim strings */ + pg_log(PG_REPORT, "%-*s", MESSAGE_WIDTH, message); +} + +/* + * prep_status_progress + * + * Like prep_status(), but for potentially longer running operations. + * Details about what item is currently being processed can be displayed + * with pg_log(PG_STATUS, ...). A typical sequence would look like this: + * + * prep_status_progress("copying files"); + * for (...) + * pg_log(PG_STATUS, "%s", filename); + * end_progress_output(); + * report_status(PG_REPORT, "ok"); + */ +void +prep_status_progress(const char *fmt,...) 
+{
+	va_list		args;
+	char		message[MAX_STRING];
+
+	va_start(args, fmt);
+	vsnprintf(message, sizeof(message), fmt, args);
+	va_end(args);
+
+	/*
+	 * If outputting to a tty or in verbose, append newline. pg_log_v() will
+	 * put the individual progress items onto the next line.
+	 */
+	if (log_opts.isatty || log_opts.verbose)
+		pg_log(PG_REPORT, "%-*s\n", MESSAGE_WIDTH, message);
 	else
-		/* trim strings that don't end in a newline */
 		pg_log(PG_REPORT, "%-*s", MESSAGE_WIDTH, message);
 }
 
-
 static void
 pg_log_v(eLogType type, const char *fmt, va_list ap)
 {
@@ -111,8 +143,15 @@ pg_log_v(eLogType type, const char *fmt, va_list ap)
 			break;
 
 		case PG_STATUS:
-			/* for output to a display, do leading truncation and append \r */
-			if (isatty(fileno(stdout)))
+			/*
+			 * For output to a display, do leading truncation. Append \r so
+			 * that the next message is output at the start of the line.
+			 *
+			 * If going to non-interactive output, only display progress if
+			 * verbose is enabled. Otherwise the output gets unreasonably
+			 * large by default.
+			 */
+			if (log_opts.isatty)
 				/* -2 because we use a 2-space indent */
 				printf("  %s%-*.*s\r",
 					   /* prefix with "..." if we do leading truncation */
@@ -121,7 +160,7 @@ pg_log_v(eLogType type, const char *fmt, va_list ap)
 					   /* optional leading truncation */
 					   strlen(message) <= MESSAGE_WIDTH - 2 ? message :
 					   message + strlen(message) - MESSAGE_WIDTH + 3 + 2);
-			else
+			else if (log_opts.verbose)
 				printf("  %s\n", message);
 			break;
 
From 7c38ef2a5d6cf6d8dc3834399d7a1c364d64ce64 Mon Sep 17 00:00:00 2001
From: Andres Freund
Date: Mon, 21 Feb 2022 08:57:34 -0800
Subject: [PATCH 031/108] Fix temporary object cleanup failing due to toast
 access without snapshot.

When cleaning up temporary objects during process exit the cleanup
could fail with:
FATAL: cannot fetch toast data without an active snapshot

The bug is caused by RemoveTempRelationsCallback() not setting up a
snapshot. If an object with toasted catalog data needs to be cleaned
up, init_toast_snapshot() could fail with the above error.

Most of the time, however, the problem is masked due to cached catalog
snapshots being returned by GetOldestSnapshot(). But dropping an object
can cause catalog invalidations to be emitted. If no further catalog
accesses are necessary between the invalidation processing and the next
toast datum deletion, the bug becomes visible.

It's easy to miss this bug because it typically happens after clients
disconnect and the FATAL error just ends up in the log.

Luckily temporary table cleanup at the next use of the same temporary
schema or during DISCARD ALL does not have the same problem.

Fix the bug by pushing a snapshot in RemoveTempRelationsCallback(). Also
add isolation tests for temporary object cleanup, including objects with
toasted catalog data.

A future HEAD-only commit will add an assertion trying to make this more
visible.
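The shape of the fix, condensed from the namespace.c hunk below, is the usual pattern for catalog manipulation that may have to detoast data outside a normal query cycle:

    /* ensure a usable transaction, then provide an active snapshot */
    AbortOutOfAnyTransaction();
    StartTransactionCommand();
    PushActiveSnapshot(GetTransactionSnapshot());

    RemoveTempRelations(myTempNamespace);

    PopActiveSnapshot();
    CommitTransactionCommand();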
Reported-By: Miles Delahunty Author: Andres Freund Discussion: https://postgr.es/m/CAOFAq3BU5Mf2TTvu8D9n_ZOoFAeQswuzk7yziAb7xuw_qyw5gw@mail.gmail.com Backpatch: 10- --- src/backend/catalog/namespace.c | 3 + .../expected/temp-schema-cleanup.out | 115 ++++++++++++++++++ src/test/isolation/isolation_schedule | 1 + .../isolation/specs/temp-schema-cleanup.spec | 85 +++++++++++++ 4 files changed, 204 insertions(+) create mode 100644 src/test/isolation/expected/temp-schema-cleanup.out create mode 100644 src/test/isolation/specs/temp-schema-cleanup.spec diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c index 5dbac9c437..fafb9349cc 100644 --- a/src/backend/catalog/namespace.c +++ b/src/backend/catalog/namespace.c @@ -55,6 +55,7 @@ #include "utils/inval.h" #include "utils/lsyscache.h" #include "utils/memutils.h" +#include "utils/snapmgr.h" #include "utils/syscache.h" #include "utils/varlena.h" @@ -4292,9 +4293,11 @@ RemoveTempRelationsCallback(int code, Datum arg) /* Need to ensure we have a usable transaction. */ AbortOutOfAnyTransaction(); StartTransactionCommand(); + PushActiveSnapshot(GetTransactionSnapshot()); RemoveTempRelations(myTempNamespace); + PopActiveSnapshot(); CommitTransactionCommand(); } } diff --git a/src/test/isolation/expected/temp-schema-cleanup.out b/src/test/isolation/expected/temp-schema-cleanup.out new file mode 100644 index 0000000000..35b91d9e45 --- /dev/null +++ b/src/test/isolation/expected/temp-schema-cleanup.out @@ -0,0 +1,115 @@ +Parsed test spec with 2 sessions + +starting permutation: s1_create_temp_objects s1_discard_temp s2_check_schema +step s1_create_temp_objects: + + -- create function large enough to be toasted, to ensure we correctly clean those up, a prior bug + -- https://postgr.es/m/CAOFAq3BU5Mf2TTvu8D9n_ZOoFAeQswuzk7yziAb7xuw_qyw5gw%40mail.gmail.com + SELECT exec(format($outer$ + CREATE OR REPLACE FUNCTION pg_temp.long() RETURNS text LANGUAGE sql AS $body$ SELECT %L; $body$$outer$, + (SELECT string_agg(g.i::text||':'||random()::text, '|') FROM generate_series(1, 100) g(i)))); + + -- The above bug requirs function removal to happen after a catalog + -- invalidation. dependency.c sorts objects in descending oid order so + -- that newer objects are deleted before older objects, so create a + -- table after. 
+	CREATE TEMPORARY TABLE invalidate_catalog_cache();
+
+	-- test non-temp function is dropped when depending on temp table
+	CREATE TEMPORARY TABLE just_give_me_a_type(id serial primary key);
+
+	CREATE FUNCTION uses_a_temp_type(just_give_me_a_type) RETURNS int LANGUAGE sql AS $$SELECT 1;$$;
+
+exec
+----
+
+(1 row)
+
+step s1_discard_temp:
+	DISCARD TEMP;
+
+step s2_check_schema:
+	SELECT oid::regclass FROM pg_class WHERE relnamespace = (SELECT oid FROM s1_temp_schema);
+	SELECT oid::regproc FROM pg_proc WHERE pronamespace = (SELECT oid FROM s1_temp_schema);
+	SELECT oid::regproc FROM pg_type WHERE typnamespace = (SELECT oid FROM s1_temp_schema);
+
+oid
+---
+(0 rows)
+
+oid
+---
+(0 rows)
+
+oid
+---
+(0 rows)
+
+
+starting permutation: s1_advisory s2_advisory s1_create_temp_objects s1_exit s2_check_schema
+step s1_advisory:
+	SELECT pg_advisory_lock('pg_namespace'::regclass::int8);
+
+pg_advisory_lock
+----------------
+
+(1 row)
+
+step s2_advisory:
+	SELECT pg_advisory_lock('pg_namespace'::regclass::int8);
+
+step s1_create_temp_objects:
+
+	-- create function large enough to be toasted, to ensure we correctly clean those up, a prior bug
+	-- https://postgr.es/m/CAOFAq3BU5Mf2TTvu8D9n_ZOoFAeQswuzk7yziAb7xuw_qyw5gw%40mail.gmail.com
+	SELECT exec(format($outer$
+	    CREATE OR REPLACE FUNCTION pg_temp.long() RETURNS text LANGUAGE sql AS $body$ SELECT %L; $body$$outer$,
+	    (SELECT string_agg(g.i::text||':'||random()::text, '|') FROM generate_series(1, 100) g(i))));
+
+	-- The above bug requires function removal to happen after a catalog
+	-- invalidation. dependency.c sorts objects in descending oid order so
+	-- that newer objects are deleted before older objects, so create a
+	-- table after.
+	CREATE TEMPORARY TABLE invalidate_catalog_cache();
+
+	-- test non-temp function is dropped when depending on temp table
+	CREATE TEMPORARY TABLE just_give_me_a_type(id serial primary key);
+
+	CREATE FUNCTION uses_a_temp_type(just_give_me_a_type) RETURNS int LANGUAGE sql AS $$SELECT 1;$$;
+
+exec
+----
+
+(1 row)
+
+step s1_exit:
+	SELECT pg_terminate_backend(pg_backend_pid());
+
+FATAL:  terminating connection due to administrator command
+server closed the connection unexpectedly
+	This probably means the server terminated abnormally
+	before or while processing the request.
+
+step s2_advisory: <... completed>
+pg_advisory_lock
+----------------
+
+(1 row)
+
+step s2_check_schema:
+	SELECT oid::regclass FROM pg_class WHERE relnamespace = (SELECT oid FROM s1_temp_schema);
+	SELECT oid::regproc FROM pg_proc WHERE pronamespace = (SELECT oid FROM s1_temp_schema);
+	SELECT oid::regproc FROM pg_type WHERE typnamespace = (SELECT oid FROM s1_temp_schema);
+
+oid
+---
+(0 rows)
+
+oid
+---
+(0 rows)
+
+oid
+---
+(0 rows)
+
diff --git a/src/test/isolation/isolation_schedule b/src/test/isolation/isolation_schedule
index 99c23b16ff..0dae483e82 100644
--- a/src/test/isolation/isolation_schedule
+++ b/src/test/isolation/isolation_schedule
@@ -38,6 +38,7 @@ test: eval-plan-qual-trigger
 test: lock-update-delete
 test: lock-update-traversal
 test: inherit-temp
+test: temp-schema-cleanup
 test: insert-conflict-do-nothing
 test: insert-conflict-do-nothing-2
 test: insert-conflict-do-update
diff --git a/src/test/isolation/specs/temp-schema-cleanup.spec b/src/test/isolation/specs/temp-schema-cleanup.spec
new file mode 100644
index 0000000000..a9417b7e90
--- /dev/null
+++ b/src/test/isolation/specs/temp-schema-cleanup.spec
@@ -0,0 +1,85 @@
+# Test cleanup of objects in temporary schema.
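+#
+# The test uses two sessions: s1 creates temporary objects, including a
+# function large enough to have toasted catalog data, and s2 verifies that
+# s1's temporary schema is empty after DISCARD TEMP or after s1 exits.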
+
+setup {
+    CREATE TABLE s1_temp_schema(oid oid);
+    -- to help create a long function
+    CREATE FUNCTION exec(p_foo text) RETURNS void LANGUAGE plpgsql AS $$BEGIN EXECUTE p_foo; END;$$;
+}
+
+teardown {
+    DROP TABLE s1_temp_schema;
+    DROP FUNCTION exec(text);
+}
+
+session "s1"
+setup {
+    CREATE TEMPORARY TABLE just_to_create_temp_schema();
+    DROP TABLE just_to_create_temp_schema;
+    INSERT INTO s1_temp_schema SELECT pg_my_temp_schema();
+}
+
+step s1_advisory {
+    SELECT pg_advisory_lock('pg_namespace'::regclass::int8);
+}
+
+step s1_create_temp_objects {
+
+    -- create function large enough to be toasted, to ensure we correctly clean those up, a prior bug
+    -- https://postgr.es/m/CAOFAq3BU5Mf2TTvu8D9n_ZOoFAeQswuzk7yziAb7xuw_qyw5gw%40mail.gmail.com
+    SELECT exec(format($outer$
+        CREATE OR REPLACE FUNCTION pg_temp.long() RETURNS text LANGUAGE sql AS $body$ SELECT %L; $body$$outer$,
+        (SELECT string_agg(g.i::text||':'||random()::text, '|') FROM generate_series(1, 100) g(i))));
+
+    -- The above bug requires function removal to happen after a catalog
+    -- invalidation. dependency.c sorts objects in descending oid order so
+    -- that newer objects are deleted before older objects, so create a
+    -- table after.
+    CREATE TEMPORARY TABLE invalidate_catalog_cache();
+
+    -- test non-temp function is dropped when depending on temp table
+    CREATE TEMPORARY TABLE just_give_me_a_type(id serial primary key);
+
+    CREATE FUNCTION uses_a_temp_type(just_give_me_a_type) RETURNS int LANGUAGE sql AS $$SELECT 1;$$;
+}
+
+step s1_discard_temp {
+    DISCARD TEMP;
+}
+
+step s1_exit {
+    SELECT pg_terminate_backend(pg_backend_pid());
+}
+
+
+session "s2"
+
+step s2_advisory {
+    SELECT pg_advisory_lock('pg_namespace'::regclass::int8);
+}
+
+step s2_check_schema {
+    SELECT oid::regclass FROM pg_class WHERE relnamespace = (SELECT oid FROM s1_temp_schema);
+    SELECT oid::regproc FROM pg_proc WHERE pronamespace = (SELECT oid FROM s1_temp_schema);
+    SELECT oid::regproc FROM pg_type WHERE typnamespace = (SELECT oid FROM s1_temp_schema);
+}
+
+
+# Test temporary object cleanup during DISCARD.
+permutation
+    s1_create_temp_objects
+    s1_discard_temp
+    s2_check_schema
+
+# Test temporary object cleanup during process exit.
+#
+# To check (in s2) if temporary objects (in s1) have properly been removed we
+# need to wait for s1 to finish cleaning up. Luckily session level advisory
+# locks are released only after temp table cleanup.
+permutation
+    s1_advisory
+    s2_advisory
+    s1_create_temp_objects
+    s1_exit
+    s2_check_schema
+
+# Can't run further tests here, because s1's connection is dead

From 2776922201f751e3202a713b61d97fe4e44a8440 Mon Sep 17 00:00:00 2001
From: Andres Freund
Date: Sat, 19 Feb 2022 12:42:37 -0800
Subject: [PATCH 032/108] Assert in init_toast_snapshot() that some snapshot
 is registered or active.

Commit fixed the bug that RemoveTempRelationsCallback() did not
push/register a snapshot. That went unnoticed only because often a valid
catalog snapshot exists and is returned by GetOldestSnapshot(). But due to
invalidation processing that is not reliable.

Thus assert in init_toast_snapshot() that there is a registered or active
snapshot, using the new HaveRegisteredOrActiveSnapshot().
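As a sketch (not part of this patch), either pattern satisfies the new
assertion before code that may fetch toast data:

    /* Option 1: make a snapshot active ... */
    PushActiveSnapshot(GetTransactionSnapshot());
    /* ... toast access ... */
    PopActiveSnapshot();

    /* Option 2: ... or keep one explicitly registered */
    Snapshot	snap = RegisterSnapshot(GetTransactionSnapshot());
    /* ... toast access ... */
    UnregisterSnapshot(snap);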
Author: Andres Freund
Discussion: https://postgr.es/m/20220219180002.6tubjq7iw7m52bgd@alap3.anarazel.de
---
 src/backend/access/common/toast_internals.c |  9 +++++++
 src/backend/utils/time/snapmgr.c            | 31 +++++++++++++++++++++
 src/include/utils/snapmgr.h                 |  1 +
 3 files changed, 41 insertions(+)

diff --git a/src/backend/access/common/toast_internals.c b/src/backend/access/common/toast_internals.c
index de37f561ca..7052ac9978 100644
--- a/src/backend/access/common/toast_internals.c
+++ b/src/backend/access/common/toast_internals.c
@@ -660,5 +660,14 @@ init_toast_snapshot(Snapshot toast_snapshot)
 	if (snapshot == NULL)
 		elog(ERROR, "cannot fetch toast data without an active snapshot");
 
+	/*
+	 * Catalog snapshots can be returned by GetOldestSnapshot() even if not
+	 * registered or active. That easily hides bugs around not having a
+	 * snapshot set up - most of the time there is a valid catalog
+	 * snapshot. So additionally insist that the current snapshot is
+	 * registered or active.
+	 */
+	Assert(HaveRegisteredOrActiveSnapshot());
+
 	InitToastSnapshot(*toast_snapshot, snapshot->lsn, snapshot->whenTaken);
 }
diff --git a/src/backend/utils/time/snapmgr.c b/src/backend/utils/time/snapmgr.c
index a0b703a519..a0b81bf154 100644
--- a/src/backend/utils/time/snapmgr.c
+++ b/src/backend/utils/time/snapmgr.c
@@ -1625,6 +1625,37 @@ ThereAreNoPriorRegisteredSnapshots(void)
 	return false;
 }
 
+/*
+ * HaveRegisteredOrActiveSnapshot
+ *		Is there any registered or active snapshot?
+ *
+ * NB: Unless pushed or active, the cached catalog snapshot will not cause
+ * this function to return true. That allows this function to be used in
+ * checks enforcing a longer-lived snapshot.
+ */
+bool
+HaveRegisteredOrActiveSnapshot(void)
+{
+	if (ActiveSnapshot != NULL)
+		return true;
+
+	/* No snapshot is registered at all. */
+	if (pairingheap_is_empty(&RegisteredSnapshots))
+		return false;
+
+	/*
+	 * The catalog snapshot is in RegisteredSnapshots when valid, but can be
+	 * removed at any time due to invalidation processing. At most one entry
+	 * can be the catalog snapshot, so two or more entries mean that at
+	 * least one snapshot was explicitly registered.
+	 */
+	if (!pairingheap_is_singular(&RegisteredSnapshots))
+		return true;
+
+	/* The sole registered snapshot could be the catalog snapshot. */
+	return CatalogSnapshot == NULL;
+}
+
 
 /*
  * Return a timestamp that is exactly on a minute boundary.
diff --git a/src/include/utils/snapmgr.h b/src/include/utils/snapmgr.h
index 293c753034..e04018c034 100644
--- a/src/include/utils/snapmgr.h
+++ b/src/include/utils/snapmgr.h
@@ -135,6 +135,7 @@ extern bool XactHasExportedSnapshots(void);
 extern void DeleteAllExportedSnapshotFiles(void);
 extern void WaitForOlderSnapshots(TransactionId limitXmin, bool progress);
 extern bool ThereAreNoPriorRegisteredSnapshots(void);
+extern bool HaveRegisteredOrActiveSnapshot(void);
 extern bool TransactionIdLimitedForOldSnapshots(TransactionId recentXmin,
 												Relation relation,
 												TransactionId *limit_xid,

From 88103567cb8fa5be46dc9fac3e3b8774951a2be7 Mon Sep 17 00:00:00 2001
From: Tom Lane
Date: Mon, 21 Feb 2022 14:10:15 -0500
Subject: [PATCH 033/108] Disallow setting bogus GUCs within an extension's
 reserved namespace.

Commit 75d22069e tried to throw a warning for setting a custom GUC whose
prefix belongs to a previously-loaded extension, if there is no such GUC
defined by the extension. But that caused unstable behavior with parallel
workers, because workers don't necessarily load extensions and GUCs in the
same order their leader did. To make that work safely, we have to
completely disallow the case.
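Concretely, the disallowed case now behaves like this (sketch with a
hypothetical extension "myext" that reserves its prefix via
MarkGUCPrefixReserved("myext") in its _PG_init()):

    SET myext.knob = on;  -- accepted as a placeholder; extension not loaded yet
    LOAD 'myext';         -- WARNING: invalid configuration parameter name
                          -- "myext.knob", removing it
    SET myext.knob = on;  -- ERROR: "myext" is a reserved prefix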
We now actually remove any such GUCs at the time of initial extension load, and then throw an error not just a warning if you try to add one later. While this might create a compatibility issue for a few people, the improvement in error-detection capability seems worth it; it's hard to believe that there's any good use-case for choosing such GUC names. This also un-reverts 5609cc01c (Rename EmitWarningsOnPlaceholders() to MarkGUCPrefixReserved()), since that function's old name is now even more of a misnomer. Florin Irion and Tom Lane Discussion: https://postgr.es/m/1902182.1640711215@sss.pgh.pa.us --- contrib/auth_delay/auth_delay.c | 2 +- contrib/auto_explain/auto_explain.c | 2 +- contrib/basic_archive/basic_archive.c | 2 +- contrib/pg_prewarm/autoprewarm.c | 2 +- .../pg_stat_statements/pg_stat_statements.c | 2 +- contrib/pg_trgm/trgm_op.c | 2 +- contrib/postgres_fdw/option.c | 2 +- contrib/sepgsql/hooks.c | 2 +- src/backend/utils/misc/guc.c | 79 +++++++++++++++---- src/include/utils/guc.h | 5 +- src/pl/plperl/plperl.c | 2 +- src/pl/plpgsql/src/pl_handler.c | 2 +- src/pl/tcl/pltcl.c | 4 +- .../modules/delay_execution/delay_execution.c | 2 +- .../ssl_passphrase_func.c | 2 +- src/test/modules/worker_spi/worker_spi.c | 2 +- src/test/regress/expected/guc.out | 11 +++ src/test/regress/sql/guc.sql | 7 ++ 18 files changed, 101 insertions(+), 31 deletions(-) diff --git a/contrib/auth_delay/auth_delay.c b/contrib/auth_delay/auth_delay.c index 38f4276db3..6b94d653ea 100644 --- a/contrib/auth_delay/auth_delay.c +++ b/contrib/auth_delay/auth_delay.c @@ -68,7 +68,7 @@ _PG_init(void) NULL, NULL); - EmitWarningsOnPlaceholders("auth_delay"); + MarkGUCPrefixReserved("auth_delay"); /* Install Hooks */ original_client_auth_hook = ClientAuthentication_hook; diff --git a/contrib/auto_explain/auto_explain.c b/contrib/auto_explain/auto_explain.c index 3e09abaeca..d3029f85ef 100644 --- a/contrib/auto_explain/auto_explain.c +++ b/contrib/auto_explain/auto_explain.c @@ -231,7 +231,7 @@ _PG_init(void) NULL, NULL); - EmitWarningsOnPlaceholders("auto_explain"); + MarkGUCPrefixReserved("auto_explain"); /* Install hooks. */ prev_ExecutorStart = ExecutorStart_hook; diff --git a/contrib/basic_archive/basic_archive.c b/contrib/basic_archive/basic_archive.c index 16ddddccbb..e7efbfb9c3 100644 --- a/contrib/basic_archive/basic_archive.c +++ b/contrib/basic_archive/basic_archive.c @@ -69,7 +69,7 @@ _PG_init(void) 0, check_archive_directory, NULL, NULL); - EmitWarningsOnPlaceholders("basic_archive"); + MarkGUCPrefixReserved("basic_archive"); basic_archive_context = AllocSetContextCreate(TopMemoryContext, "basic_archive", diff --git a/contrib/pg_prewarm/autoprewarm.c b/contrib/pg_prewarm/autoprewarm.c index 1d4d74b171..45e012a63a 100644 --- a/contrib/pg_prewarm/autoprewarm.c +++ b/contrib/pg_prewarm/autoprewarm.c @@ -137,7 +137,7 @@ _PG_init(void) NULL, NULL); - EmitWarningsOnPlaceholders("pg_prewarm"); + MarkGUCPrefixReserved("pg_prewarm"); RequestAddinShmemSpace(MAXALIGN(sizeof(AutoPrewarmSharedState))); diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c index 9d7d0812ac..38d92a89cc 100644 --- a/contrib/pg_stat_statements/pg_stat_statements.c +++ b/contrib/pg_stat_statements/pg_stat_statements.c @@ -437,7 +437,7 @@ _PG_init(void) NULL, NULL); - EmitWarningsOnPlaceholders("pg_stat_statements"); + MarkGUCPrefixReserved("pg_stat_statements"); /* * Request additional shared resources. 
(These are no-ops if we're not in diff --git a/contrib/pg_trgm/trgm_op.c b/contrib/pg_trgm/trgm_op.c index 0407c7dd64..e9b7981619 100644 --- a/contrib/pg_trgm/trgm_op.c +++ b/contrib/pg_trgm/trgm_op.c @@ -101,7 +101,7 @@ _PG_init(void) NULL, NULL); - EmitWarningsOnPlaceholders("pg_trgm"); + MarkGUCPrefixReserved("pg_trgm"); } /* diff --git a/contrib/postgres_fdw/option.c b/contrib/postgres_fdw/option.c index af38e956e7..2c6b2894b9 100644 --- a/contrib/postgres_fdw/option.c +++ b/contrib/postgres_fdw/option.c @@ -538,5 +538,5 @@ _PG_init(void) NULL, NULL); - EmitWarningsOnPlaceholders("postgres_fdw"); + MarkGUCPrefixReserved("postgres_fdw"); } diff --git a/contrib/sepgsql/hooks.c b/contrib/sepgsql/hooks.c index d71c802106..97e61b8043 100644 --- a/contrib/sepgsql/hooks.c +++ b/contrib/sepgsql/hooks.c @@ -455,7 +455,7 @@ _PG_init(void) NULL, NULL); - EmitWarningsOnPlaceholders("sepgsql"); + MarkGUCPrefixReserved("sepgsql"); /* Initialize userspace access vector cache */ sepgsql_avc_init(); diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index 01f373815e..eaa4bf2c30 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -150,6 +150,8 @@ extern bool optimize_bounded_sort; static int GUC_check_errcode_value; +static List *reserved_class_prefix = NIL; + /* global variables for check hook support */ char *GUC_check_errmsg_string; char *GUC_check_errdetail_string; @@ -5590,18 +5592,44 @@ find_option(const char *name, bool create_placeholders, bool skip_errors, * doesn't contain a separator, don't assume that it was meant to be a * placeholder. */ - if (strchr(name, GUC_QUALIFIER_SEPARATOR) != NULL) + const char *sep = strchr(name, GUC_QUALIFIER_SEPARATOR); + + if (sep != NULL) { - if (valid_custom_variable_name(name)) - return add_placeholder_variable(name, elevel); - /* A special error message seems desirable here */ - if (!skip_errors) - ereport(elevel, - (errcode(ERRCODE_INVALID_NAME), - errmsg("invalid configuration parameter name \"%s\"", - name), - errdetail("Custom parameter names must be two or more simple identifiers separated by dots."))); - return NULL; + size_t classLen = sep - name; + ListCell *lc; + + /* The name must be syntactically acceptable ... */ + if (!valid_custom_variable_name(name)) + { + if (!skip_errors) + ereport(elevel, + (errcode(ERRCODE_INVALID_NAME), + errmsg("invalid configuration parameter name \"%s\"", + name), + errdetail("Custom parameter names must be two or more simple identifiers separated by dots."))); + return NULL; + } + /* ... and it must not match any previously-reserved prefix */ + foreach(lc, reserved_class_prefix) + { + const char *rcprefix = lfirst(lc); + + if (strlen(rcprefix) == classLen && + strncmp(name, rcprefix, classLen) == 0) + { + if (!skip_errors) + ereport(elevel, + (errcode(ERRCODE_INVALID_NAME), + errmsg("invalid configuration parameter name \"%s\"", + name), + errdetail("\"%s\" is a reserved prefix.", + rcprefix))); + return NULL; + } + } + /* OK, create it */ + return add_placeholder_variable(name, elevel); } } @@ -9355,15 +9383,26 @@ DefineCustomEnumVariable(const char *name, } /* + * Mark the given GUC prefix as "reserved". + * + * This deletes any existing placeholders matching the prefix, + * and then prevents new ones from being created. * Extensions should call this after they've defined all of their custom * GUCs, to help catch misspelled config-file entries. 
 */
 void
-EmitWarningsOnPlaceholders(const char *className)
+MarkGUCPrefixReserved(const char *className)
 {
 	int			classLen = strlen(className);
 	int			i;
+	MemoryContext oldcontext;
 
+	/*
+	 * Check for existing placeholders. We must actually remove invalid
+	 * placeholders, else future parallel worker startups will fail. (We
+	 * don't bother trying to free associated memory, since this shouldn't
+	 * happen often.)
+	 */
 	for (i = 0; i < num_guc_variables; i++)
 	{
 		struct config_generic *var = guc_variables[i];
@@ -9373,11 +9412,24 @@ EmitWarningsOnPlaceholders(const char *className)
 			var->name[classLen] == GUC_QUALIFIER_SEPARATOR)
 		{
 			ereport(WARNING,
-					(errcode(ERRCODE_UNDEFINED_OBJECT),
-					 errmsg("unrecognized configuration parameter \"%s\"",
-							var->name)));
+					(errcode(ERRCODE_INVALID_NAME),
+					 errmsg("invalid configuration parameter name \"%s\", removing it",
+							var->name),
+					 errdetail("\"%s\" is now a reserved prefix.",
+							   className)));
+			/* Remove the placeholder from the array. */
+			num_guc_variables--;
+			memmove(&guc_variables[i], &guc_variables[i + 1],
+					(num_guc_variables - i) * sizeof(struct config_generic *));
+			/* Reexamine this slot, which now holds the next entry. */
+			i--;
 		}
 	}
+
+	/* And remember the name so we can prevent future mistakes. */
+	oldcontext = MemoryContextSwitchTo(TopMemoryContext);
+	reserved_class_prefix = lappend(reserved_class_prefix, pstrdup(className));
+	MemoryContextSwitchTo(oldcontext);
 }
 
diff --git a/src/include/utils/guc.h b/src/include/utils/guc.h
index f1bfe79feb..ea774968f0 100644
--- a/src/include/utils/guc.h
+++ b/src/include/utils/guc.h
@@ -354,7 +354,10 @@ extern void DefineCustomEnumVariable(const char *name,
 									 GucEnumAssignHook assign_hook,
 									 GucShowHook show_hook);
 
-extern void EmitWarningsOnPlaceholders(const char *className);
+extern void MarkGUCPrefixReserved(const char *className);
+
+/* old name for MarkGUCPrefixReserved, for backwards compatibility: */
+#define EmitWarningsOnPlaceholders(className) MarkGUCPrefixReserved(className)
 
 extern const char *GetConfigOption(const char *name, bool missing_ok,
 								   bool restrict_privileged);
diff --git a/src/pl/plperl/plperl.c b/src/pl/plperl/plperl.c
index 3f785b1e8d..b5879c2947 100644
--- a/src/pl/plperl/plperl.c
+++ b/src/pl/plperl/plperl.c
@@ -455,7 +455,7 @@ _PG_init(void)
 							   PGC_SUSET, 0,
 							   NULL, NULL, NULL);
 
-	EmitWarningsOnPlaceholders("plperl");
+	MarkGUCPrefixReserved("plperl");
 
 	/*
 	 * Create hash tables.
diff --git a/src/pl/plpgsql/src/pl_handler.c b/src/pl/plpgsql/src/pl_handler.c index b4b8509280..190d286f1c 100644 --- a/src/pl/plpgsql/src/pl_handler.c +++ b/src/pl/plpgsql/src/pl_handler.c @@ -197,7 +197,7 @@ _PG_init(void) plpgsql_extra_errors_assign_hook, NULL); - EmitWarningsOnPlaceholders("plpgsql"); + MarkGUCPrefixReserved("plpgsql"); plpgsql_HashTableInit(); RegisterXactCallback(plpgsql_xact_cb, NULL); diff --git a/src/pl/tcl/pltcl.c b/src/pl/tcl/pltcl.c index 7c045f4560..ab759833db 100644 --- a/src/pl/tcl/pltcl.c +++ b/src/pl/tcl/pltcl.c @@ -474,8 +474,8 @@ _PG_init(void) PGC_SUSET, 0, NULL, NULL, NULL); - EmitWarningsOnPlaceholders("pltcl"); - EmitWarningsOnPlaceholders("pltclu"); + MarkGUCPrefixReserved("pltcl"); + MarkGUCPrefixReserved("pltclu"); pltcl_pm_init_done = true; } diff --git a/src/test/modules/delay_execution/delay_execution.c b/src/test/modules/delay_execution/delay_execution.c index ad50383bf8..cf34e8c2d7 100644 --- a/src/test/modules/delay_execution/delay_execution.c +++ b/src/test/modules/delay_execution/delay_execution.c @@ -91,7 +91,7 @@ _PG_init(void) NULL, NULL); - EmitWarningsOnPlaceholders("delay_execution"); + MarkGUCPrefixReserved("delay_execution"); /* Install our hook */ prev_planner_hook = planner_hook; diff --git a/src/test/modules/ssl_passphrase_callback/ssl_passphrase_func.c b/src/test/modules/ssl_passphrase_callback/ssl_passphrase_func.c index 3ba33e501c..7c469fd57e 100644 --- a/src/test/modules/ssl_passphrase_callback/ssl_passphrase_func.c +++ b/src/test/modules/ssl_passphrase_callback/ssl_passphrase_func.c @@ -49,7 +49,7 @@ _PG_init(void) NULL, NULL); - EmitWarningsOnPlaceholders("ssl_passphrase"); + MarkGUCPrefixReserved("ssl_passphrase"); if (ssl_passphrase) openssl_tls_init_hook = set_rot13; diff --git a/src/test/modules/worker_spi/worker_spi.c b/src/test/modules/worker_spi/worker_spi.c index 05ced63780..48829df29c 100644 --- a/src/test/modules/worker_spi/worker_spi.c +++ b/src/test/modules/worker_spi/worker_spi.c @@ -322,7 +322,7 @@ _PG_init(void) 0, NULL, NULL, NULL); - EmitWarningsOnPlaceholders("worker_spi"); + MarkGUCPrefixReserved("worker_spi"); /* set up common data for all our workers */ memset(&worker, 0, sizeof(worker)); diff --git a/src/test/regress/expected/guc.out b/src/test/regress/expected/guc.out index 75b6bfbf11..3de6404ba5 100644 --- a/src/test/regress/expected/guc.out +++ b/src/test/regress/expected/guc.out @@ -548,6 +548,17 @@ ERROR: invalid configuration parameter name "special.weird name" DETAIL: Custom parameter names must be two or more simple identifiers separated by dots. SHOW special."weird name"; ERROR: unrecognized configuration parameter "special.weird name" +-- Check what happens when you try to set a "custom" GUC within the +-- namespace of an extension. +SET plpgsql.extra_foo_warnings = true; -- allowed if plpgsql is not loaded yet +LOAD 'plpgsql'; -- this will throw a warning and delete the variable +WARNING: invalid configuration parameter name "plpgsql.extra_foo_warnings", removing it +DETAIL: "plpgsql" is now a reserved prefix. +SET plpgsql.extra_foo_warnings = true; -- now, it's an error +ERROR: invalid configuration parameter name "plpgsql.extra_foo_warnings" +DETAIL: "plpgsql" is a reserved prefix. 
+SHOW plpgsql.extra_foo_warnings;
+ERROR:  unrecognized configuration parameter "plpgsql.extra_foo_warnings"
 --
 -- Test DISCARD TEMP
 --
diff --git a/src/test/regress/sql/guc.sql b/src/test/regress/sql/guc.sql
index 3e2819449c..d5db101e48 100644
--- a/src/test/regress/sql/guc.sql
+++ b/src/test/regress/sql/guc.sql
@@ -163,6 +163,13 @@ SHOW custom."bad-guc";
 SET special."weird name" = 'foo';  -- could be allowed, but we choose not to
 SHOW special."weird name";
 
+-- Check what happens when you try to set a "custom" GUC within the
+-- namespace of an extension.
+SET plpgsql.extra_foo_warnings = true;  -- allowed if plpgsql is not loaded yet
+LOAD 'plpgsql';  -- this will throw a warning and delete the variable
+SET plpgsql.extra_foo_warnings = true;  -- now, it's an error
+SHOW plpgsql.extra_foo_warnings;
+
 --
 -- Test DISCARD TEMP
 --

From ebf6c5249b7db525e59563fb149642665c88f747 Mon Sep 17 00:00:00 2001
From: Michael Paquier
Date: Tue, 22 Feb 2022 10:22:15 +0900
Subject: [PATCH 034/108] Add compute_query_id = regress

"regress" is a new mode added to compute_query_id aimed at facilitating
regression testing when a module computing query IDs is loaded into the
backend, like pg_stat_statements. It works the same way as "auto", meaning
that query IDs are computed if a module enables it, except that query IDs
are hidden in EXPLAIN outputs to ensure regression output stability.

Like any GUCs of the kind (force_parallel_mode, etc.), this new
configuration can be added to an instance's postgresql.conf, or just passed
down with PGOPTIONS at command level.

compute_query_id uses an enum for its set of option values, meaning that
this addition ensures ABI compatibility.

Using this new configuration mode allows installcheck-world to pass when
running the tests on an instance with pg_stat_statements enabled,
stabilizing the test output while checking the paths doing query ID
computations.

Reported-by: Anton Melnikov
Reviewed-by: Julien Rouhaud
Discussion: https://postgr.es/m/1634283396.372373993@f75.i.mail.ru
Discussion: https://postgr.es/m/YgHlxgc/OimuPYhH@paquier.xyz
Backpatch-through: 14
---
 doc/src/sgml/config.sgml        | 7 +++++--
 src/backend/commands/explain.c  | 8 +++++++-
 src/backend/utils/misc/guc.c    | 1 +
 src/include/utils/queryjumble.h | 3 ++-
 4 files changed, 15 insertions(+), 4 deletions(-)

diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index d99bf38e67..7ed8c82a9d 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -7934,9 +7934,12 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
         method is not acceptable. In this case, in-core computation
         must always be disabled.
         Valid values are off (always disabled),
-        on (always enabled) and auto,
+        on (always enabled), auto,
         which lets modules such as pg_stat_statements
-        automatically enable it.
+        automatically enable it, and regress which
+        has the same effect as auto, except that the
+        query identifier is hidden in the EXPLAIN output
+        to facilitate automated regression testing.
         The default is auto.
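For example (illustrative invocation; any PGOPTIONS-aware test driver works
the same way), an installcheck run against an instance that has
pg_stat_statements loaded can be stabilized with:

    PGOPTIONS='-c compute_query_id=regress' make installcheck-world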
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c index b970997c34..de81379da3 100644 --- a/src/backend/commands/explain.c +++ b/src/backend/commands/explain.c @@ -604,7 +604,13 @@ ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es, /* Create textual dump of plan tree */ ExplainPrintPlan(es, queryDesc); - if (es->verbose && plannedstmt->queryId != UINT64CONST(0)) + /* + * COMPUTE_QUERY_ID_REGRESS means COMPUTE_QUERY_ID_AUTO, but we don't show + * the queryid in any of the EXPLAIN plans to keep stable the results + * generated by regression test suites. + */ + if (es->verbose && plannedstmt->queryId != UINT64CONST(0) && + compute_query_id != COMPUTE_QUERY_ID_REGRESS) { /* * Output the queryid as an int64 rather than a uint64 so we match diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index eaa4bf2c30..e4afd07bfe 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -414,6 +414,7 @@ static const struct config_enum_entry backslash_quote_options[] = { */ static const struct config_enum_entry compute_query_id_options[] = { {"auto", COMPUTE_QUERY_ID_AUTO, false}, + {"regress", COMPUTE_QUERY_ID_REGRESS, false}, {"on", COMPUTE_QUERY_ID_ON, false}, {"off", COMPUTE_QUERY_ID_OFF, false}, {"true", COMPUTE_QUERY_ID_ON, true}, diff --git a/src/include/utils/queryjumble.h b/src/include/utils/queryjumble.h index a4c277269e..c670662db2 100644 --- a/src/include/utils/queryjumble.h +++ b/src/include/utils/queryjumble.h @@ -57,7 +57,8 @@ enum ComputeQueryIdType { COMPUTE_QUERY_ID_OFF, COMPUTE_QUERY_ID_ON, - COMPUTE_QUERY_ID_AUTO + COMPUTE_QUERY_ID_AUTO, + COMPUTE_QUERY_ID_REGRESS }; /* GUC parameters */ From 52e4f0cd472d39d07732b99559989ea3b615be78 Mon Sep 17 00:00:00 2001 From: Amit Kapila Date: Tue, 22 Feb 2022 07:54:12 +0530 Subject: [PATCH 035/108] Allow specifying row filters for logical replication of tables. This feature adds row filtering for publication tables. When a publication is defined or modified, an optional WHERE clause can be specified. Rows that don't satisfy this WHERE clause will be filtered out. This allows a set of tables to be partially replicated. The row filter is per table. A new row filter can be added simply by specifying a WHERE clause after the table name. The WHERE clause must be enclosed by parentheses. The row filter WHERE clause for a table added to a publication that publishes UPDATE and/or DELETE operations must contain only columns that are covered by REPLICA IDENTITY. The row filter WHERE clause for a table added to a publication that publishes INSERT can use any column. If the row filter evaluates to NULL, it is regarded as "false". The WHERE clause only allows simple expressions that don't have user-defined functions, user-defined operators, user-defined types, user-defined collations, non-immutable built-in functions, or references to system columns. These restrictions could be addressed in the future. If you choose to do the initial table synchronization, only data that satisfies the row filters is copied to the subscriber. If the subscription has several publications in which a table has been published with different WHERE clauses, rows that satisfy ANY of the expressions will be copied. If a subscriber is a pre-15 version, the initial table synchronization won't use row filters even if they are defined in the publisher. The row filters are applied before publishing the changes. 
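For example (hypothetical tables), a row filter is attached per table, in
parentheses after the table name:

    CREATE PUBLICATION active_customers
        FOR TABLE customers WHERE (active IS TRUE);
    ALTER PUBLICATION active_customers
        ADD TABLE orders WHERE (amount > 0);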
If the subscription has several publications in which the same table has been
published with different filters (for the same publish operation), those
expressions get OR'ed together so that rows satisfying any of the expressions
will be replicated. This means all the other filters become redundant if (a)
one of the publications has no filter at all, (b) one of the publications was
created using FOR ALL TABLES, (c) one of the publications was created using
FOR ALL TABLES IN SCHEMA and the table belongs to that same schema.

If your publication contains a partitioned table, the publication parameter
publish_via_partition_root determines whether it uses the partition's row
filter (if the parameter is false, the default) or the root partitioned
table's row filter.

The psql commands \dRp+ and \d will display any row filters.

Author: Hou Zhijie, Euler Taveira, Peter Smith, Ajin Cherian
Reviewed-by: Greg Nancarrow, Haiying Tang, Amit Kapila, Tomas Vondra,
Dilip Kumar, Vignesh C, Alvaro Herrera, Andres Freund, Wei Wang
Discussion: https://www.postgresql.org/message-id/flat/CAHE3wggb715X%2BmK_DitLXF25B%3DjE6xyNCH4YOwM860JR7HarGQ%40mail.gmail.com
---
 doc/src/sgml/catalogs.sgml                    |   9 +
 doc/src/sgml/ref/alter_publication.sgml       |  12 +-
 doc/src/sgml/ref/alter_subscription.sgml      |   7 +-
 doc/src/sgml/ref/create_publication.sgml      |  38 +-
 doc/src/sgml/ref/create_subscription.sgml     |  27 +-
 src/backend/catalog/pg_publication.c          |  59 +-
 src/backend/commands/publicationcmds.c        | 583 +++++++++++++-
 src/backend/executor/execReplication.c        |  39 +-
 src/backend/nodes/copyfuncs.c                 |   1 +
 src/backend/nodes/equalfuncs.c                |   1 +
 src/backend/parser/gram.y                     |  38 +-
 src/backend/replication/logical/proto.c       |  36 +-
 src/backend/replication/logical/tablesync.c   | 142 +++-
 src/backend/replication/pgoutput/pgoutput.c   | 833 +++++++++++++++++---
 src/backend/utils/cache/relcache.c            |  98 ++-
 src/bin/pg_dump/pg_dump.c                     |  30 +-
 src/bin/pg_dump/pg_dump.h                     |   1 +
 src/bin/psql/describe.c                       |  26 +-
 src/bin/psql/tab-complete.c                   |  29 +-
 src/include/catalog/catversion.h              |   2 +-
 src/include/catalog/pg_publication.h          |  18 +-
 src/include/catalog/pg_publication_rel.h      |   6 +
 src/include/commands/publicationcmds.h        |   2 +
 src/include/nodes/parsenodes.h                |   1 +
 src/include/replication/logicalproto.h        |  11 +-
 src/include/replication/pgoutput.h            |   1 +
 src/include/replication/reorderbuffer.h       |   6 +-
 src/include/utils/rel.h                       |   2 +-
 src/include/utils/relcache.h                  |   5 +-
 src/test/regress/expected/publication.out     | 352 +++++++++
 src/test/regress/sql/publication.sql          | 236 ++++++
 src/test/subscription/t/028_row_filter.pl     | 695 ++++++++++++++++
 src/tools/pgindent/typedefs.list              |   3 +
 33 files changed, 3113 insertions(+), 236 deletions(-)
 create mode 100644 src/test/subscription/t/028_row_filter.pl

diff --git a/doc/src/sgml/catalogs.sgml b/doc/src/sgml/catalogs.sgml
index 5a1627a394..83987a9904 100644
--- a/doc/src/sgml/catalogs.sgml
+++ b/doc/src/sgml/catalogs.sgml
@@ -6325,6 +6325,15 @@ SCRAM-SHA-256$<iteration count>:&l
       Reference to relation
 
+
+      prqual pg_node_tree
+
+      Expression tree (in nodeToString()
+      representation) for the relation's publication qualifying condition.
+      Null if there is no publication qualifying condition.
+
diff --git a/doc/src/sgml/ref/alter_publication.sgml b/doc/src/sgml/ref/alter_publication.sgml
index 7c7c27bf7c..32b75f6c78 100644
--- a/doc/src/sgml/ref/alter_publication.sgml
+++ b/doc/src/sgml/ref/alter_publication.sgml
@@ -30,7 +30,7 @@ ALTER PUBLICATION name RENAME TO
 
 where publication_object is one of:
 
-    TABLE [ ONLY ] table_name [ * ] [, ...
] + TABLE [ ONLY ] table_name [ * ] [ WHERE ( expression ) ] [, ... ] ALL TABLES IN SCHEMA { schema_name | CURRENT_SCHEMA } [, ... ] @@ -52,7 +52,9 @@ ALTER PUBLICATION name RENAME TO ALTER SUBSCRIPTION ... REFRESH PUBLICATION action on the - subscribing side in order to become effective. + subscribing side in order to become effective. Note also that the combination + of DROP with a WHERE clause is not + allowed. @@ -110,6 +112,12 @@ ALTER PUBLICATION name RENAME TO * can be specified after the table name to explicitly indicate that descendant tables are included. + If the optional WHERE clause is specified, rows for + which the expression + evaluates to false or null will not be published. Note that parentheses + are required around the expression. The + expression is evaluated with + the role used for the replication connection. diff --git a/doc/src/sgml/ref/alter_subscription.sgml b/doc/src/sgml/ref/alter_subscription.sgml index 0b027cc346..0d6f064f58 100644 --- a/doc/src/sgml/ref/alter_subscription.sgml +++ b/doc/src/sgml/ref/alter_subscription.sgml @@ -163,8 +163,11 @@ ALTER SUBSCRIPTION name RENAME TO < Specifies whether to copy pre-existing data in the publications that are being subscribed to when the replication starts. - The default is true. (Previously-subscribed - tables are not copied.) + The default is true. + + + Previously subscribed tables are not copied, even if a table's row + filter WHERE clause has since been modified. diff --git a/doc/src/sgml/ref/create_publication.sgml b/doc/src/sgml/ref/create_publication.sgml index 385975bfad..4979b9b646 100644 --- a/doc/src/sgml/ref/create_publication.sgml +++ b/doc/src/sgml/ref/create_publication.sgml @@ -28,7 +28,7 @@ CREATE PUBLICATION name where publication_object is one of: - TABLE [ ONLY ] table_name [ * ] [, ... ] + TABLE [ ONLY ] table_name [ * ] [ WHERE ( expression ) ] [, ... ] ALL TABLES IN SCHEMA { schema_name | CURRENT_SCHEMA } [, ... ] @@ -78,6 +78,14 @@ CREATE PUBLICATION name publication, so they are never explicitly added to the publication. + + If the optional WHERE clause is specified, rows for + which the expression + evaluates to false or null will not be published. Note that parentheses + are required around the expression. It has no effect on + TRUNCATE commands. + + Only persistent base tables and partitioned tables can be part of a publication. Temporary tables, unlogged tables, foreign tables, @@ -225,6 +233,22 @@ CREATE PUBLICATION name disallowed on those tables. + + A WHERE (i.e. row filter) expression must contain only + columns that are covered by the REPLICA IDENTITY, in + order for UPDATE and DELETE operations + to be published. For publication of INSERT operations, + any column may be used in the WHERE expression. The + WHERE clause allows simple expressions that don't have + user-defined functions, user-defined operators, user-defined types, + user-defined collations, non-immutable built-in functions, or references to + system columns. + If your publication contains a partitioned table, the publication parameter + publish_via_partition_root determines if it uses the + partition's row filter (if the parameter is false, the default) or the root + partitioned table's row filter. + + For an INSERT ... ON CONFLICT command, the publication will publish the operation that actually results from the command. So depending @@ -247,6 +271,11 @@ CREATE PUBLICATION name DDL operations are not published. + + + The WHERE clause expression is executed with the role used + for the replication connection. 
+ @@ -259,6 +288,13 @@ CREATE PUBLICATION mypublication FOR TABLE users, departments; + + Create a publication that publishes all changes from active departments: + +CREATE PUBLICATION active_departments FOR TABLE departments WHERE (active IS TRUE); + + + Create a publication that publishes all changes in all tables: diff --git a/doc/src/sgml/ref/create_subscription.sgml b/doc/src/sgml/ref/create_subscription.sgml index 990a41f1a1..e80a2617a3 100644 --- a/doc/src/sgml/ref/create_subscription.sgml +++ b/doc/src/sgml/ref/create_subscription.sgml @@ -208,6 +208,11 @@ CREATE SUBSCRIPTION subscription_nametrue. + + If the publications contain WHERE clauses, it + will affect what data is copied. Refer to the + for details. + @@ -293,7 +298,7 @@ CREATE SUBSCRIPTION subscription_name - + Notes @@ -319,6 +324,26 @@ CREATE SUBSCRIPTION subscription_namecreate_slot = false. This is an implementation restriction that might be lifted in a future release. + + + If any table in the publication has a WHERE clause, rows + for which the expression + evaluates to false or null will not be published. If the subscription has + several publications in which the same table has been published with + different WHERE clauses, a row will be published if any + of the expressions (referring to that publish operation) are satisfied. In + the case of different WHERE clauses, if one of the + publications has no WHERE clause (referring to that + publish operation) or the publication is declared as + FOR ALL TABLES or + FOR ALL TABLES IN SCHEMA, rows are always published + regardless of the definition of the other expressions. + If the subscriber is a PostgreSQL version before + 15 then any row filtering is ignored during the initial data synchronization + phase. For this case, the user might want to consider deleting any initially + copied data that would be incompatible with subsequent filtering. + + diff --git a/src/backend/catalog/pg_publication.c b/src/backend/catalog/pg_publication.c index e14ca2f563..25998fbb39 100644 --- a/src/backend/catalog/pg_publication.c +++ b/src/backend/catalog/pg_publication.c @@ -275,18 +275,57 @@ GetPubPartitionOptionRelations(List *result, PublicationPartOpt pub_partopt, return result; } +/* + * Returns the relid of the topmost ancestor that is published via this + * publication if any, otherwise returns InvalidOid. + * + * Note that the list of ancestors should be ordered such that the topmost + * ancestor is at the end of the list. + */ +Oid +GetTopMostAncestorInPublication(Oid puboid, List *ancestors) +{ + ListCell *lc; + Oid topmost_relid = InvalidOid; + + /* + * Find the "topmost" ancestor that is in this publication. + */ + foreach(lc, ancestors) + { + Oid ancestor = lfirst_oid(lc); + List *apubids = GetRelationPublications(ancestor); + List *aschemaPubids = NIL; + + if (list_member_oid(apubids, puboid)) + topmost_relid = ancestor; + else + { + aschemaPubids = GetSchemaPublications(get_rel_namespace(ancestor)); + if (list_member_oid(aschemaPubids, puboid)) + topmost_relid = ancestor; + } + + list_free(apubids); + list_free(aschemaPubids); + } + + return topmost_relid; +} + /* * Insert new publication / relation mapping. 
*/ ObjectAddress -publication_add_relation(Oid pubid, PublicationRelInfo *targetrel, +publication_add_relation(Oid pubid, PublicationRelInfo *pri, bool if_not_exists) { Relation rel; HeapTuple tup; Datum values[Natts_pg_publication_rel]; bool nulls[Natts_pg_publication_rel]; - Oid relid = RelationGetRelid(targetrel->relation); + Relation targetrel = pri->relation; + Oid relid = RelationGetRelid(targetrel); Oid pubreloid; Publication *pub = GetPublication(pubid); ObjectAddress myself, @@ -311,10 +350,10 @@ publication_add_relation(Oid pubid, PublicationRelInfo *targetrel, ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), errmsg("relation \"%s\" is already member of publication \"%s\"", - RelationGetRelationName(targetrel->relation), pub->name))); + RelationGetRelationName(targetrel), pub->name))); } - check_publication_add_relation(targetrel->relation); + check_publication_add_relation(targetrel); /* Form a tuple. */ memset(values, 0, sizeof(values)); @@ -328,6 +367,12 @@ publication_add_relation(Oid pubid, PublicationRelInfo *targetrel, values[Anum_pg_publication_rel_prrelid - 1] = ObjectIdGetDatum(relid); + /* Add qualifications, if available */ + if (pri->whereClause != NULL) + values[Anum_pg_publication_rel_prqual - 1] = CStringGetTextDatum(nodeToString(pri->whereClause)); + else + nulls[Anum_pg_publication_rel_prqual - 1] = true; + tup = heap_form_tuple(RelationGetDescr(rel), values, nulls); /* Insert tuple into catalog. */ @@ -345,6 +390,12 @@ publication_add_relation(Oid pubid, PublicationRelInfo *targetrel, ObjectAddressSet(referenced, RelationRelationId, relid); recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO); + /* Add dependency on the objects mentioned in the qualifications */ + if (pri->whereClause) + recordDependencyOnSingleRelExpr(&myself, pri->whereClause, relid, + DEPENDENCY_NORMAL, DEPENDENCY_NORMAL, + false); + /* Close the table. */ table_close(rel, RowExclusiveLock); diff --git a/src/backend/commands/publicationcmds.c b/src/backend/commands/publicationcmds.c index 0e4bb97fb7..16b8661a1b 100644 --- a/src/backend/commands/publicationcmds.c +++ b/src/backend/commands/publicationcmds.c @@ -26,6 +26,7 @@ #include "catalog/partition.h" #include "catalog/pg_inherits.h" #include "catalog/pg_namespace.h" +#include "catalog/pg_proc.h" #include "catalog/pg_publication.h" #include "catalog/pg_publication_namespace.h" #include "catalog/pg_publication_rel.h" @@ -36,6 +37,10 @@ #include "commands/publicationcmds.h" #include "funcapi.h" #include "miscadmin.h" +#include "nodes/nodeFuncs.h" +#include "parser/parse_clause.h" +#include "parser/parse_collate.h" +#include "parser/parse_relation.h" #include "storage/lmgr.h" #include "utils/acl.h" #include "utils/array.h" @@ -48,6 +53,19 @@ #include "utils/syscache.h" #include "utils/varlena.h" +/* + * Information used to validate the columns in the row filter expression. See + * contain_invalid_rfcolumn_walker for details. 
+ */ +typedef struct rf_context +{ + Bitmapset *bms_replident; /* bitset of replica identity columns */ + bool pubviaroot; /* true if we are validating the parent + * relation's row filter */ + Oid relid; /* relid of the relation */ + Oid parentid; /* relid of the parent relation */ +} rf_context; + static List *OpenRelIdList(List *relids); static List *OpenTableList(List *tables); static void CloseTableList(List *rels); @@ -234,6 +252,362 @@ CheckObjSchemaNotAlreadyInPublication(List *rels, List *schemaidlist, } } +/* + * Returns true if any of the columns used in the row filter WHERE expression is + * not part of REPLICA IDENTITY, false otherwise. + */ +static bool +contain_invalid_rfcolumn_walker(Node *node, rf_context *context) +{ + if (node == NULL) + return false; + + if (IsA(node, Var)) + { + Var *var = (Var *) node; + AttrNumber attnum = var->varattno; + + /* + * If pubviaroot is true, we are validating the row filter of the + * parent table, but the bitmap contains the replica identity + * information of the child table. So, get the column number of the + * child table as parent and child column order could be different. + */ + if (context->pubviaroot) + { + char *colname = get_attname(context->parentid, attnum, false); + + attnum = get_attnum(context->relid, colname); + } + + if (!bms_is_member(attnum - FirstLowInvalidHeapAttributeNumber, + context->bms_replident)) + return true; + } + + return expression_tree_walker(node, contain_invalid_rfcolumn_walker, + (void *) context); +} + +/* + * Check if all columns referenced in the filter expression are part of the + * REPLICA IDENTITY index or not. + * + * Returns true if any invalid column is found. + */ +bool +contain_invalid_rfcolumn(Oid pubid, Relation relation, List *ancestors, + bool pubviaroot) +{ + HeapTuple rftuple; + Oid relid = RelationGetRelid(relation); + Oid publish_as_relid = RelationGetRelid(relation); + bool result = false; + Datum rfdatum; + bool rfisnull; + + /* + * FULL means all columns are in the REPLICA IDENTITY, so all columns are + * allowed in the row filter and we can skip the validation. + */ + if (relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL) + return false; + + /* + * For a partition, if pubviaroot is true, find the topmost ancestor that + * is published via this publication as we need to use its row filter + * expression to filter the partition's changes. + * + * Note that even though the row filter used is for an ancestor, the + * REPLICA IDENTITY used will be for the actual child table. 
+ */ + if (pubviaroot && relation->rd_rel->relispartition) + { + publish_as_relid = GetTopMostAncestorInPublication(pubid, ancestors); + + if (!OidIsValid(publish_as_relid)) + publish_as_relid = relid; + } + + rftuple = SearchSysCache2(PUBLICATIONRELMAP, + ObjectIdGetDatum(publish_as_relid), + ObjectIdGetDatum(pubid)); + + if (!HeapTupleIsValid(rftuple)) + return false; + + rfdatum = SysCacheGetAttr(PUBLICATIONRELMAP, rftuple, + Anum_pg_publication_rel_prqual, + &rfisnull); + + if (!rfisnull) + { + rf_context context = {0}; + Node *rfnode; + Bitmapset *bms = NULL; + + context.pubviaroot = pubviaroot; + context.parentid = publish_as_relid; + context.relid = relid; + + /* Remember columns that are part of the REPLICA IDENTITY */ + bms = RelationGetIndexAttrBitmap(relation, + INDEX_ATTR_BITMAP_IDENTITY_KEY); + + context.bms_replident = bms; + rfnode = stringToNode(TextDatumGetCString(rfdatum)); + result = contain_invalid_rfcolumn_walker(rfnode, &context); + + bms_free(bms); + pfree(rfnode); + } + + ReleaseSysCache(rftuple); + + return result; +} + +/* check_functions_in_node callback */ +static bool +contain_mutable_or_user_functions_checker(Oid func_id, void *context) +{ + return (func_volatile(func_id) != PROVOLATILE_IMMUTABLE || + func_id >= FirstNormalObjectId); +} + +/* + * Check if the node contains any unallowed object. See + * check_simple_rowfilter_expr_walker. + * + * Returns the error detail message in errdetail_msg for unallowed expressions. + */ +static void +expr_allowed_in_node(Node *node, ParseState *pstate, char **errdetail_msg) +{ + if (IsA(node, List)) + { + /* + * OK, we don't need to perform other expr checks for List nodes + * because those are undefined for List. + */ + return; + } + + if (exprType(node) >= FirstNormalObjectId) + *errdetail_msg = _("User-defined types are not allowed."); + else if (check_functions_in_node(node, contain_mutable_or_user_functions_checker, + (void *) pstate)) + *errdetail_msg = _("User-defined or built-in mutable functions are not allowed."); + else if (exprCollation(node) >= FirstNormalObjectId || + exprInputCollation(node) >= FirstNormalObjectId) + *errdetail_msg = _("User-defined collations are not allowed."); +} + +/* + * The row filter walker checks if the row filter expression is a "simple + * expression". + * + * It allows only simple or compound expressions such as: + * - (Var Op Const) + * - (Var Op Var) + * - (Var Op Const) AND/OR (Var Op Const) + * - etc + * (where Var is a column of the table this filter belongs to) + * + * The simple expression has the following restrictions: + * - User-defined operators are not allowed; + * - User-defined functions are not allowed; + * - User-defined types are not allowed; + * - User-defined collations are not allowed; + * - Non-immutable built-in functions are not allowed; + * - System columns are not allowed. + * + * NOTES + * + * We don't allow user-defined functions/operators/types/collations because + * (a) if a user drops a user-defined object used in a row filter expression or + * if there is any other error while using it, the logical decoding + * infrastructure won't be able to recover from such an error even if the + * object is recreated again because a historic snapshot is used to evaluate + * the row filter; + * (b) a user-defined function can be used to access tables that could have + * unpleasant results because a historic snapshot is used. That's why only + * immutable built-in functions are allowed in row filter expressions. 
+ * + * We don't allow system columns because currently, we don't have that + * information in the tuple passed to downstream. Also, as we don't replicate + * those to subscribers, there doesn't seem to be a need for a filter on those + * columns. + * + * We can allow other node types after more analysis and testing. + */ +static bool +check_simple_rowfilter_expr_walker(Node *node, ParseState *pstate) +{ + char *errdetail_msg = NULL; + + if (node == NULL) + return false; + + switch (nodeTag(node)) + { + case T_Var: + /* System columns are not allowed. */ + if (((Var *) node)->varattno < InvalidAttrNumber) + errdetail_msg = _("System columns are not allowed."); + break; + case T_OpExpr: + case T_DistinctExpr: + case T_NullIfExpr: + /* OK, except user-defined operators are not allowed. */ + if (((OpExpr *) node)->opno >= FirstNormalObjectId) + errdetail_msg = _("User-defined operators are not allowed."); + break; + case T_ScalarArrayOpExpr: + /* OK, except user-defined operators are not allowed. */ + if (((ScalarArrayOpExpr *) node)->opno >= FirstNormalObjectId) + errdetail_msg = _("User-defined operators are not allowed."); + + /* + * We don't need to check the hashfuncid and negfuncid of + * ScalarArrayOpExpr as those functions are only built for a + * subquery. + */ + break; + case T_RowCompareExpr: + { + ListCell *opid; + + /* OK, except user-defined operators are not allowed. */ + foreach(opid, ((RowCompareExpr *) node)->opnos) + { + if (lfirst_oid(opid) >= FirstNormalObjectId) + { + errdetail_msg = _("User-defined operators are not allowed."); + break; + } + } + } + break; + case T_Const: + case T_FuncExpr: + case T_BoolExpr: + case T_RelabelType: + case T_CollateExpr: + case T_CaseExpr: + case T_CaseTestExpr: + case T_ArrayExpr: + case T_RowExpr: + case T_CoalesceExpr: + case T_MinMaxExpr: + case T_XmlExpr: + case T_NullTest: + case T_BooleanTest: + case T_List: + /* OK, supported */ + break; + default: + errdetail_msg = _("Expressions only allow columns, constants, built-in operators, built-in data types, built-in collations and immutable built-in functions."); + break; + } + + /* + * For all the supported nodes, check the types, functions, and collations + * used in the nodes. + */ + if (!errdetail_msg) + expr_allowed_in_node(node, pstate, &errdetail_msg); + + if (errdetail_msg) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("invalid publication WHERE expression"), + errdetail("%s", errdetail_msg), + parser_errposition(pstate, exprLocation(node)))); + + return expression_tree_walker(node, check_simple_rowfilter_expr_walker, + (void *) pstate); +} + +/* + * Check if the row filter expression is a "simple expression". + * + * See check_simple_rowfilter_expr_walker for details. + */ +static bool +check_simple_rowfilter_expr(Node *node, ParseState *pstate) +{ + return check_simple_rowfilter_expr_walker(node, pstate); +} + +/* + * Transform the publication WHERE expression for all the relations in the list, + * ensuring it is coerced to boolean and necessary collation information is + * added if required, and add a new nsitem/RTE for the associated relation to + * the ParseState's namespace list. + * + * Also check the publication row filter expression and throw an error if + * anything not permitted or unexpected is encountered. 
+ */ +static void +TransformPubWhereClauses(List *tables, const char *queryString, + bool pubviaroot) +{ + ListCell *lc; + + foreach(lc, tables) + { + ParseNamespaceItem *nsitem; + Node *whereclause = NULL; + ParseState *pstate; + PublicationRelInfo *pri = (PublicationRelInfo *) lfirst(lc); + + if (pri->whereClause == NULL) + continue; + + /* + * If the publication doesn't publish changes via the root partitioned + * table, the partition's row filter will be used. So disallow using + * WHERE clause on partitioned table in this case. + */ + if (!pubviaroot && + pri->relation->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot use publication WHERE clause for relation \"%s\"", + RelationGetRelationName(pri->relation)), + errdetail("WHERE clause cannot be used for a partitioned table when %s is false.", + "publish_via_partition_root"))); + + pstate = make_parsestate(NULL); + pstate->p_sourcetext = queryString; + + nsitem = addRangeTableEntryForRelation(pstate, pri->relation, + AccessShareLock, NULL, + false, false); + + addNSItemToQuery(pstate, nsitem, false, true, true); + + whereclause = transformWhereClause(pstate, + copyObject(pri->whereClause), + EXPR_KIND_WHERE, + "PUBLICATION WHERE"); + + /* Fix up collation information */ + assign_expr_collations(pstate, whereclause); + + /* + * We allow only simple expressions in row filters. See + * check_simple_rowfilter_expr_walker. + */ + check_simple_rowfilter_expr(whereclause, pstate); + + free_parsestate(pstate); + + pri->whereClause = whereclause; + } +} + /* * Create new publication. */ @@ -346,6 +720,10 @@ CreatePublication(ParseState *pstate, CreatePublicationStmt *stmt) rels = OpenTableList(relations); CheckObjSchemaNotAlreadyInPublication(rels, schemaidlist, PUBLICATIONOBJ_TABLE); + + TransformPubWhereClauses(rels, pstate->p_sourcetext, + publish_via_partition_root); + PublicationAddTables(puboid, rels, true, NULL); CloseTableList(rels); } @@ -392,6 +770,8 @@ AlterPublicationOptions(ParseState *pstate, AlterPublicationStmt *stmt, bool publish_via_partition_root; ObjectAddress obj; Form_pg_publication pubform; + List *root_relids = NIL; + ListCell *lc; parse_publication_options(pstate, stmt->options, @@ -399,6 +779,65 @@ AlterPublicationOptions(ParseState *pstate, AlterPublicationStmt *stmt, &publish_via_partition_root_given, &publish_via_partition_root); + pubform = (Form_pg_publication) GETSTRUCT(tup); + + /* + * If the publication doesn't publish changes via the root partitioned + * table, the partition's row filter will be used. So disallow using WHERE + * clause on partitioned table in this case. + */ + if (!pubform->puballtables && publish_via_partition_root_given && + !publish_via_partition_root) + { + /* + * Lock the publication so nobody else can do anything with it. This + * prevents concurrent alter to add partitioned table(s) with WHERE + * clause(s) which we don't allow when not publishing via root. 
+ */ + LockDatabaseObject(PublicationRelationId, pubform->oid, 0, + AccessShareLock); + + root_relids = GetPublicationRelations(pubform->oid, + PUBLICATION_PART_ROOT); + + foreach(lc, root_relids) + { + HeapTuple rftuple; + Oid relid = lfirst_oid(lc); + + rftuple = SearchSysCache2(PUBLICATIONRELMAP, + ObjectIdGetDatum(relid), + ObjectIdGetDatum(pubform->oid)); + + if (HeapTupleIsValid(rftuple) && + !heap_attisnull(rftuple, Anum_pg_publication_rel_prqual, NULL)) + { + HeapTuple tuple; + + tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); + if (HeapTupleIsValid(tuple)) + { + Form_pg_class relform = (Form_pg_class) GETSTRUCT(tuple); + + if (relform->relkind == RELKIND_PARTITIONED_TABLE) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot set %s for publication \"%s\"", + "publish_via_partition_root = false", + stmt->pubname), + errdetail("The publication contains a WHERE clause for a partitioned table \"%s\" " + "which is not allowed when %s is false.", + NameStr(relform->relname), + "publish_via_partition_root"))); + + ReleaseSysCache(tuple); + } + + ReleaseSysCache(rftuple); + } + } + } + /* Everything ok, form a new tuple. */ memset(values, 0, sizeof(values)); memset(nulls, false, sizeof(nulls)); @@ -450,8 +889,21 @@ AlterPublicationOptions(ParseState *pstate, AlterPublicationStmt *stmt, * invalidate all partitions contained in the respective partition * trees, not just those explicitly mentioned in the publication. */ - relids = GetPublicationRelations(pubform->oid, - PUBLICATION_PART_ALL); + if (root_relids == NIL) + relids = GetPublicationRelations(pubform->oid, + PUBLICATION_PART_ALL); + else + { + /* + * We already got tables explicitly mentioned in the publication. + * Now get all partitions for the partitioned table in the list. + */ + foreach(lc, root_relids) + relids = GetPubPartitionOptionRelations(relids, + PUBLICATION_PART_ALL, + lfirst_oid(lc)); + } + schemarelids = GetAllSchemaPublicationRelations(pubform->oid, PUBLICATION_PART_ALL); relids = list_concat_unique_oid(relids, schemarelids); @@ -492,7 +944,8 @@ InvalidatePublicationRels(List *relids) */ static void AlterPublicationTables(AlterPublicationStmt *stmt, HeapTuple tup, - List *tables, List *schemaidlist) + List *tables, List *schemaidlist, + const char *queryString) { List *rels = NIL; Form_pg_publication pubform = (Form_pg_publication) GETSTRUCT(tup); @@ -519,6 +972,9 @@ AlterPublicationTables(AlterPublicationStmt *stmt, HeapTuple tup, schemas = list_concat_copy(schemaidlist, GetPublicationSchemas(pubid)); CheckObjSchemaNotAlreadyInPublication(rels, schemas, PUBLICATIONOBJ_TABLE); + + TransformPubWhereClauses(rels, queryString, pubform->pubviaroot); + PublicationAddTables(pubid, rels, false, stmt); } else if (stmt->action == AP_DropObjects) @@ -533,37 +989,76 @@ AlterPublicationTables(AlterPublicationStmt *stmt, HeapTuple tup, CheckObjSchemaNotAlreadyInPublication(rels, schemaidlist, PUBLICATIONOBJ_TABLE); - /* Calculate which relations to drop. */ + TransformPubWhereClauses(rels, queryString, pubform->pubviaroot); + + /* + * To recreate the relation list for the publication, look for + * existing relations that do not need to be dropped. 
+ */ foreach(oldlc, oldrelids) { Oid oldrelid = lfirst_oid(oldlc); ListCell *newlc; + PublicationRelInfo *oldrel; bool found = false; + HeapTuple rftuple; + bool rfisnull = true; + Node *oldrelwhereclause = NULL; + + /* look up the cache for the old relmap */ + rftuple = SearchSysCache2(PUBLICATIONRELMAP, + ObjectIdGetDatum(oldrelid), + ObjectIdGetDatum(pubid)); + + if (HeapTupleIsValid(rftuple)) + { + Datum whereClauseDatum; + + whereClauseDatum = SysCacheGetAttr(PUBLICATIONRELMAP, rftuple, + Anum_pg_publication_rel_prqual, + &rfisnull); + if (!rfisnull) + oldrelwhereclause = stringToNode(TextDatumGetCString(whereClauseDatum)); + + ReleaseSysCache(rftuple); + } foreach(newlc, rels) { PublicationRelInfo *newpubrel; newpubrel = (PublicationRelInfo *) lfirst(newlc); + + /* + * Check if any of the new set of relations matches with the + * existing relations in the publication. Additionally, if the + * relation has an associated WHERE clause, check the WHERE + * expressions also match. Drop the rest. + */ if (RelationGetRelid(newpubrel->relation) == oldrelid) { - found = true; - break; + if (equal(oldrelwhereclause, newpubrel->whereClause)) + { + found = true; + break; + } } } - /* Not yet in the list, open it and add to the list */ - if (!found) - { - Relation oldrel; - PublicationRelInfo *pubrel; - /* Wrap relation into PublicationRelInfo */ - oldrel = table_open(oldrelid, ShareUpdateExclusiveLock); + if (oldrelwhereclause) + pfree(oldrelwhereclause); - pubrel = palloc(sizeof(PublicationRelInfo)); - pubrel->relation = oldrel; - - delrels = lappend(delrels, pubrel); + /* + * Add the non-matched relations to a list so that they can be + * dropped. + */ + if (!found) + { + oldrel = palloc(sizeof(PublicationRelInfo)); + oldrel->whereClause = NULL; + oldrel->relation = table_open(oldrelid, + ShareUpdateExclusiveLock); + delrels = lappend(delrels, oldrel); } } @@ -720,12 +1215,15 @@ AlterPublication(ParseState *pstate, AlterPublicationStmt *stmt) { List *relations = NIL; List *schemaidlist = NIL; + Oid pubid = pubform->oid; ObjectsInPublicationToOids(stmt->pubobjects, pstate, &relations, &schemaidlist); CheckAlterPublication(stmt, tup, relations, schemaidlist); + heap_freetuple(tup); + /* * Lock the publication so nobody else can do anything with it. This * prevents concurrent alter to add table(s) that were already going @@ -734,22 +1232,24 @@ AlterPublication(ParseState *pstate, AlterPublicationStmt *stmt) * addition of schema(s) for which there is any corresponding table * being added by this command. */ - LockDatabaseObject(PublicationRelationId, pubform->oid, 0, + LockDatabaseObject(PublicationRelationId, pubid, 0, AccessExclusiveLock); /* * It is possible that by the time we acquire the lock on publication, * concurrent DDL has removed it. We can test this by checking the - * existence of publication. + * existence of publication. We get the tuple again to avoid the risk + * of any publication option getting changed. 
*/ - if (!SearchSysCacheExists1(PUBLICATIONOID, - ObjectIdGetDatum(pubform->oid))) + tup = SearchSysCacheCopy1(PUBLICATIONOID, ObjectIdGetDatum(pubid)); + if (!HeapTupleIsValid(tup)) ereport(ERROR, errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("publication \"%s\" does not exist", stmt->pubname)); - AlterPublicationTables(stmt, tup, relations, schemaidlist); + AlterPublicationTables(stmt, tup, relations, schemaidlist, + pstate->p_sourcetext); AlterPublicationSchemas(stmt, tup, schemaidlist); } @@ -901,6 +1401,7 @@ OpenTableList(List *tables) List *relids = NIL; List *rels = NIL; ListCell *lc; + List *relids_with_rf = NIL; /* * Open, share-lock, and check all the explicitly-specified relations @@ -928,15 +1429,26 @@ OpenTableList(List *tables) */ if (list_member_oid(relids, myrelid)) { + /* Disallow duplicate tables if there are any with row filters. */ + if (t->whereClause || list_member_oid(relids_with_rf, myrelid)) + ereport(ERROR, + (errcode(ERRCODE_DUPLICATE_OBJECT), + errmsg("conflicting or redundant WHERE clauses for table \"%s\"", + RelationGetRelationName(rel)))); + table_close(rel, ShareUpdateExclusiveLock); continue; } pub_rel = palloc(sizeof(PublicationRelInfo)); pub_rel->relation = rel; + pub_rel->whereClause = t->whereClause; rels = lappend(rels, pub_rel); relids = lappend_oid(relids, myrelid); + if (t->whereClause) + relids_with_rf = lappend_oid(relids_with_rf, myrelid); + /* * Add children of this rel, if requested, so that they too are added * to the publication. A partitioned table can't have any inheritance @@ -963,19 +1475,39 @@ OpenTableList(List *tables) * tables. */ if (list_member_oid(relids, childrelid)) + { + /* + * We don't allow to specify row filter for both parent + * and child table at the same time as it is not very + * clear which one should be given preference. 
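+					 *
+					 * For example (illustrative names), with "child" a
+					 * partition of "parent", both orderings are rejected
+					 * as conflicting or redundant WHERE clauses:
+					 *
+					 *   CREATE PUBLICATION pub FOR TABLE parent WHERE (a > 0),
+					 *       child WHERE (a < 10);
+					 *   CREATE PUBLICATION pub FOR TABLE child, parent WHERE (a > 0);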
+ */ + if (childrelid != myrelid && + (t->whereClause || list_member_oid(relids_with_rf, childrelid))) + ereport(ERROR, + (errcode(ERRCODE_DUPLICATE_OBJECT), + errmsg("conflicting or redundant WHERE clauses for table \"%s\"", + RelationGetRelationName(rel)))); + continue; + } /* find_all_inheritors already got lock */ rel = table_open(childrelid, NoLock); pub_rel = palloc(sizeof(PublicationRelInfo)); pub_rel->relation = rel; + /* child inherits WHERE clause from parent */ + pub_rel->whereClause = t->whereClause; rels = lappend(rels, pub_rel); relids = lappend_oid(relids, childrelid); + + if (t->whereClause) + relids_with_rf = lappend_oid(relids_with_rf, childrelid); } } } list_free(relids); + list_free(relids_with_rf); return rels; } @@ -995,6 +1527,8 @@ CloseTableList(List *rels) pub_rel = (PublicationRelInfo *) lfirst(lc); table_close(pub_rel->relation, NoLock); } + + list_free_deep(rels); } /* @@ -1090,6 +1624,11 @@ PublicationDropTables(Oid pubid, List *rels, bool missing_ok) RelationGetRelationName(rel)))); } + if (pubrel->whereClause) + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("cannot use a WHERE clause when removing a table from a publication"))); + ObjectAddressSet(obj, PublicationRelRelationId, prid); performDeletion(&obj, DROP_CASCADE, 0); } diff --git a/src/backend/executor/execReplication.c b/src/backend/executor/execReplication.c index 313c87398b..de106d767d 100644 --- a/src/backend/executor/execReplication.c +++ b/src/backend/executor/execReplication.c @@ -567,15 +567,43 @@ ExecSimpleRelationDelete(ResultRelInfo *resultRelInfo, void CheckCmdReplicaIdentity(Relation rel, CmdType cmd) { - PublicationActions *pubactions; + PublicationDesc pubdesc; /* We only need to do checks for UPDATE and DELETE. */ if (cmd != CMD_UPDATE && cmd != CMD_DELETE) return; + if (rel->rd_rel->relreplident == REPLICA_IDENTITY_FULL) + return; + + /* + * It is only safe to execute UPDATE/DELETE when all columns, referenced + * in the row filters from publications which the relation is in, are + * valid - i.e. when all referenced columns are part of REPLICA IDENTITY + * or the table does not publish UPDATEs or DELETEs. + * + * XXX We could optimize it by first checking whether any of the + * publications have a row filter for this relation. If not and relation + * has replica identity then we can avoid building the descriptor but as + * this happens only one time it doesn't seem worth the additional + * complexity. + */ + RelationBuildPublicationDesc(rel, &pubdesc); + if (cmd == CMD_UPDATE && !pubdesc.rf_valid_for_update) + ereport(ERROR, + (errcode(ERRCODE_INVALID_COLUMN_REFERENCE), + errmsg("cannot update table \"%s\"", + RelationGetRelationName(rel)), + errdetail("Column used in the publication WHERE expression is not part of the replica identity."))); + else if (cmd == CMD_DELETE && !pubdesc.rf_valid_for_delete) + ereport(ERROR, + (errcode(ERRCODE_INVALID_COLUMN_REFERENCE), + errmsg("cannot delete from table \"%s\"", + RelationGetRelationName(rel)), + errdetail("Column used in the publication WHERE expression is not part of the replica identity."))); + /* If relation has replica identity we are always good. */ - if (rel->rd_rel->relreplident == REPLICA_IDENTITY_FULL || - OidIsValid(RelationGetReplicaIndex(rel))) + if (OidIsValid(RelationGetReplicaIndex(rel))) return; /* @@ -583,14 +611,13 @@ CheckCmdReplicaIdentity(Relation rel, CmdType cmd) * * Check if the table publishes UPDATES or DELETES. 
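+	 *
+	 * For example (illustrative), for a table with no replica identity:
+	 *
+	 *   CREATE TABLE t (a int);
+	 *   CREATE PUBLICATION pub FOR TABLE t;
+	 *   DELETE FROM t;
+	 *   -- ERROR: cannot delete from table "t" because it does not
+	 *   -- have a replica identity and publishes deletes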
*/ - pubactions = GetRelationPublicationActions(rel); - if (cmd == CMD_UPDATE && pubactions->pubupdate) + if (cmd == CMD_UPDATE && pubdesc.pubactions.pubupdate) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("cannot update table \"%s\" because it does not have a replica identity and publishes updates", RelationGetRelationName(rel)), errhint("To enable updating the table, set REPLICA IDENTITY using ALTER TABLE."))); - else if (cmd == CMD_DELETE && pubactions->pubdelete) + else if (cmd == CMD_DELETE && pubdesc.pubactions.pubdelete) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("cannot delete from table \"%s\" because it does not have a replica identity and publishes deletes", diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c index bc0d90b4b1..d4f8455a2b 100644 --- a/src/backend/nodes/copyfuncs.c +++ b/src/backend/nodes/copyfuncs.c @@ -4849,6 +4849,7 @@ _copyPublicationTable(const PublicationTable *from) PublicationTable *newnode = makeNode(PublicationTable); COPY_NODE_FIELD(relation); + COPY_NODE_FIELD(whereClause); return newnode; } diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c index 2e7122ad2f..f1002afe7a 100644 --- a/src/backend/nodes/equalfuncs.c +++ b/src/backend/nodes/equalfuncs.c @@ -2321,6 +2321,7 @@ static bool _equalPublicationTable(const PublicationTable *a, const PublicationTable *b) { COMPARE_NODE_FIELD(relation); + COMPARE_NODE_FIELD(whereClause); return true; } diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y index 92f93cfc72..a03b33b53b 100644 --- a/src/backend/parser/gram.y +++ b/src/backend/parser/gram.y @@ -9751,12 +9751,13 @@ CreatePublicationStmt: * relation_expr here. */ PublicationObjSpec: - TABLE relation_expr + TABLE relation_expr OptWhereClause { $$ = makeNode(PublicationObjSpec); $$->pubobjtype = PUBLICATIONOBJ_TABLE; $$->pubtable = makeNode(PublicationTable); $$->pubtable->relation = $2; + $$->pubtable->whereClause = $3; } | ALL TABLES IN_P SCHEMA ColId { @@ -9771,28 +9772,45 @@ PublicationObjSpec: $$->pubobjtype = PUBLICATIONOBJ_TABLES_IN_CUR_SCHEMA; $$->location = @5; } - | ColId + | ColId OptWhereClause { $$ = makeNode(PublicationObjSpec); $$->pubobjtype = PUBLICATIONOBJ_CONTINUATION; - $$->name = $1; + if ($2) + { + /* + * The OptWhereClause must be stored here but it is + * valid only for tables. For non-table objects, an + * error will be thrown later via + * preprocess_pubobj_list(). 
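+					 *
+					 * For example (illustrative), in
+					 *
+					 *   CREATE PUBLICATION pub FOR TABLE t1, t2 WHERE (a > 5);
+					 *
+					 * "t2 WHERE (a > 5)" arrives here as a bare ColId
+					 * continuation and is only resolved to a table
+					 * afterwards.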
+ */ + $$->pubtable = makeNode(PublicationTable); + $$->pubtable->relation = makeRangeVar(NULL, $1, @1); + $$->pubtable->whereClause = $2; + } + else + { + $$->name = $1; + } $$->location = @1; } - | ColId indirection + | ColId indirection OptWhereClause { $$ = makeNode(PublicationObjSpec); $$->pubobjtype = PUBLICATIONOBJ_CONTINUATION; $$->pubtable = makeNode(PublicationTable); $$->pubtable->relation = makeRangeVarFromQualifiedName($1, $2, @1, yyscanner); + $$->pubtable->whereClause = $3; $$->location = @1; } /* grammar like tablename * , ONLY tablename, ONLY ( tablename ) */ - | extended_relation_expr + | extended_relation_expr OptWhereClause { $$ = makeNode(PublicationObjSpec); $$->pubobjtype = PUBLICATIONOBJ_CONTINUATION; $$->pubtable = makeNode(PublicationTable); $$->pubtable->relation = $1; + $$->pubtable->whereClause = $2; } | CURRENT_SCHEMA { @@ -17448,7 +17466,8 @@ preprocess_pubobj_list(List *pubobjspec_list, core_yyscan_t yyscanner) errcode(ERRCODE_SYNTAX_ERROR), errmsg("invalid table name at or near"), parser_errposition(pubobj->location)); - else if (pubobj->name) + + if (pubobj->name) { /* convert it to PublicationTable */ PublicationTable *pubtable = makeNode(PublicationTable); @@ -17462,6 +17481,13 @@ preprocess_pubobj_list(List *pubobjspec_list, core_yyscan_t yyscanner) else if (pubobj->pubobjtype == PUBLICATIONOBJ_TABLES_IN_SCHEMA || pubobj->pubobjtype == PUBLICATIONOBJ_TABLES_IN_CUR_SCHEMA) { + /* WHERE clause is not allowed on a schema object */ + if (pubobj->pubtable && pubobj->pubtable->whereClause) + ereport(ERROR, + errcode(ERRCODE_SYNTAX_ERROR), + errmsg("WHERE clause not allowed for schema"), + parser_errposition(pubobj->location)); + /* * We can distinguish between the different type of schema * objects based on whether name and pubtable is set. 
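
To illustrate the schema-object check added to preprocess_pubobj_list()
above (names are illustrative only), a WHERE clause attached to a schema
object is rejected at parse time:

    CREATE PUBLICATION pub FOR ALL TABLES IN SCHEMA sales WHERE (a > 5);
    -- ERROR:  WHERE clause not allowed for schema
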
diff --git a/src/backend/replication/logical/proto.c b/src/backend/replication/logical/proto.c index 953942692c..c9b0eeefd7 100644 --- a/src/backend/replication/logical/proto.c +++ b/src/backend/replication/logical/proto.c @@ -31,8 +31,8 @@ static void logicalrep_write_attrs(StringInfo out, Relation rel); static void logicalrep_write_tuple(StringInfo out, Relation rel, - HeapTuple tuple, bool binary); - + TupleTableSlot *slot, + bool binary); static void logicalrep_read_attrs(StringInfo in, LogicalRepRelation *rel); static void logicalrep_read_tuple(StringInfo in, LogicalRepTupleData *tuple); @@ -398,7 +398,7 @@ logicalrep_read_origin(StringInfo in, XLogRecPtr *origin_lsn) */ void logicalrep_write_insert(StringInfo out, TransactionId xid, Relation rel, - HeapTuple newtuple, bool binary) + TupleTableSlot *newslot, bool binary) { pq_sendbyte(out, LOGICAL_REP_MSG_INSERT); @@ -410,7 +410,7 @@ logicalrep_write_insert(StringInfo out, TransactionId xid, Relation rel, pq_sendint32(out, RelationGetRelid(rel)); pq_sendbyte(out, 'N'); /* new tuple follows */ - logicalrep_write_tuple(out, rel, newtuple, binary); + logicalrep_write_tuple(out, rel, newslot, binary); } /* @@ -442,7 +442,8 @@ logicalrep_read_insert(StringInfo in, LogicalRepTupleData *newtup) */ void logicalrep_write_update(StringInfo out, TransactionId xid, Relation rel, - HeapTuple oldtuple, HeapTuple newtuple, bool binary) + TupleTableSlot *oldslot, TupleTableSlot *newslot, + bool binary) { pq_sendbyte(out, LOGICAL_REP_MSG_UPDATE); @@ -457,17 +458,17 @@ logicalrep_write_update(StringInfo out, TransactionId xid, Relation rel, /* use Oid as relation identifier */ pq_sendint32(out, RelationGetRelid(rel)); - if (oldtuple != NULL) + if (oldslot != NULL) { if (rel->rd_rel->relreplident == REPLICA_IDENTITY_FULL) pq_sendbyte(out, 'O'); /* old tuple follows */ else pq_sendbyte(out, 'K'); /* old key follows */ - logicalrep_write_tuple(out, rel, oldtuple, binary); + logicalrep_write_tuple(out, rel, oldslot, binary); } pq_sendbyte(out, 'N'); /* new tuple follows */ - logicalrep_write_tuple(out, rel, newtuple, binary); + logicalrep_write_tuple(out, rel, newslot, binary); } /* @@ -516,7 +517,7 @@ logicalrep_read_update(StringInfo in, bool *has_oldtuple, */ void logicalrep_write_delete(StringInfo out, TransactionId xid, Relation rel, - HeapTuple oldtuple, bool binary) + TupleTableSlot *oldslot, bool binary) { Assert(rel->rd_rel->relreplident == REPLICA_IDENTITY_DEFAULT || rel->rd_rel->relreplident == REPLICA_IDENTITY_FULL || @@ -536,7 +537,7 @@ logicalrep_write_delete(StringInfo out, TransactionId xid, Relation rel, else pq_sendbyte(out, 'K'); /* old key follows */ - logicalrep_write_tuple(out, rel, oldtuple, binary); + logicalrep_write_tuple(out, rel, oldslot, binary); } /* @@ -749,11 +750,12 @@ logicalrep_read_typ(StringInfo in, LogicalRepTyp *ltyp) * Write a tuple to the outputstream, in the most efficient format possible. 
*/ static void -logicalrep_write_tuple(StringInfo out, Relation rel, HeapTuple tuple, bool binary) +logicalrep_write_tuple(StringInfo out, Relation rel, TupleTableSlot *slot, + bool binary) { TupleDesc desc; - Datum values[MaxTupleAttributeNumber]; - bool isnull[MaxTupleAttributeNumber]; + Datum *values; + bool *isnull; int i; uint16 nliveatts = 0; @@ -767,11 +769,9 @@ logicalrep_write_tuple(StringInfo out, Relation rel, HeapTuple tuple, bool binar } pq_sendint16(out, nliveatts); - /* try to allocate enough memory from the get-go */ - enlargeStringInfo(out, tuple->t_len + - nliveatts * (1 + 4)); - - heap_deform_tuple(tuple, desc, values, isnull); + slot_getallattrs(slot); + values = slot->tts_values; + isnull = slot->tts_isnull; /* Write the values */ for (i = 0; i < desc->natts; i++) diff --git a/src/backend/replication/logical/tablesync.c b/src/backend/replication/logical/tablesync.c index e596b69d46..1659964571 100644 --- a/src/backend/replication/logical/tablesync.c +++ b/src/backend/replication/logical/tablesync.c @@ -690,19 +690,23 @@ copy_read_data(void *outbuf, int minread, int maxread) /* * Get information about remote relation in similar fashion the RELATION - * message provides during replication. + * message provides during replication. This function also returns the relation + * qualifications to be used in the COPY command. */ static void fetch_remote_table_info(char *nspname, char *relname, - LogicalRepRelation *lrel) + LogicalRepRelation *lrel, List **qual) { WalRcvExecResult *res; StringInfoData cmd; TupleTableSlot *slot; Oid tableRow[] = {OIDOID, CHAROID, CHAROID}; Oid attrRow[] = {TEXTOID, OIDOID, BOOLOID}; + Oid qualRow[] = {TEXTOID}; bool isnull; int natt; + ListCell *lc; + bool first; lrel->nspname = nspname; lrel->relname = relname; @@ -798,6 +802,98 @@ fetch_remote_table_info(char *nspname, char *relname, lrel->natts = natt; walrcv_clear_result(res); + + /* + * Get relation's row filter expressions. DISTINCT avoids the same + * expression of a table in multiple publications from being included + * multiple times in the final expression. + * + * We need to copy the row even if it matches just one of the + * publications, so we later combine all the quals with OR. + * + * For initial synchronization, row filtering can be ignored in following + * cases: + * + * 1) one of the subscribed publications for the table hasn't specified + * any row filter + * + * 2) one of the subscribed publications has puballtables set to true + * + * 3) one of the subscribed publications is declared as ALL TABLES IN + * SCHEMA that includes this relation + */ + if (walrcv_server_version(LogRepWorkerWalRcvConn) >= 150000) + { + StringInfoData pub_names; + + /* Build the pubname list. */ + initStringInfo(&pub_names); + first = true; + foreach(lc, MySubscription->publications) + { + char *pubname = strVal(lfirst(lc)); + + if (first) + first = false; + else + appendStringInfoString(&pub_names, ", "); + + appendStringInfoString(&pub_names, quote_literal_cstr(pubname)); + } + + /* Check for row filters. 
*/ + resetStringInfo(&cmd); + appendStringInfo(&cmd, + "SELECT DISTINCT pg_get_expr(pr.prqual, pr.prrelid)" + " FROM pg_publication p" + " LEFT OUTER JOIN pg_publication_rel pr" + " ON (p.oid = pr.prpubid AND pr.prrelid = %u)," + " LATERAL pg_get_publication_tables(p.pubname) gpt" + " WHERE gpt.relid = %u" + " AND p.pubname IN ( %s )", + lrel->remoteid, + lrel->remoteid, + pub_names.data); + + res = walrcv_exec(LogRepWorkerWalRcvConn, cmd.data, 1, qualRow); + + if (res->status != WALRCV_OK_TUPLES) + ereport(ERROR, + (errmsg("could not fetch table WHERE clause info for table \"%s.%s\" from publisher: %s", + nspname, relname, res->err))); + + /* + * Multiple row filter expressions for the same table will be combined + * by COPY using OR. If any of the filter expressions for this table + * are null, it means the whole table will be copied. In this case it + * is not necessary to construct a unified row filter expression at + * all. + */ + slot = MakeSingleTupleTableSlot(res->tupledesc, &TTSOpsMinimalTuple); + while (tuplestore_gettupleslot(res->tuplestore, true, false, slot)) + { + Datum rf = slot_getattr(slot, 1, &isnull); + + if (!isnull) + *qual = lappend(*qual, makeString(TextDatumGetCString(rf))); + else + { + /* Ignore filters and cleanup as necessary. */ + if (*qual) + { + list_free_deep(*qual); + *qual = NIL; + } + break; + } + + ExecClearTuple(slot); + } + ExecDropSingleTupleTableSlot(slot); + + walrcv_clear_result(res); + } + pfree(cmd.data); } @@ -811,6 +907,7 @@ copy_table(Relation rel) { LogicalRepRelMapEntry *relmapentry; LogicalRepRelation lrel; + List *qual = NIL; WalRcvExecResult *res; StringInfoData cmd; CopyFromState cstate; @@ -819,7 +916,7 @@ copy_table(Relation rel) /* Get the publisher relation info. */ fetch_remote_table_info(get_namespace_name(RelationGetNamespace(rel)), - RelationGetRelationName(rel), &lrel); + RelationGetRelationName(rel), &lrel, &qual); /* Put the relation into relmap. */ logicalrep_relmap_update(&lrel); @@ -830,14 +927,18 @@ copy_table(Relation rel) /* Start copy on the publisher. */ initStringInfo(&cmd); - if (lrel.relkind == RELKIND_RELATION) + + /* Regular table with no row filter */ + if (lrel.relkind == RELKIND_RELATION && qual == NIL) appendStringInfo(&cmd, "COPY %s TO STDOUT", quote_qualified_identifier(lrel.nspname, lrel.relname)); else { /* - * For non-tables, we need to do COPY (SELECT ...), but we can't just - * do SELECT * because we need to not copy generated columns. + * For non-tables and tables with row filters, we need to do COPY + * (SELECT ...), but we can't just do SELECT * because we need to not + * copy generated columns. For tables with any row filters, build a + * SELECT query with OR'ed row filters for COPY. */ appendStringInfoString(&cmd, "COPY (SELECT "); for (int i = 0; i < lrel.natts; i++) @@ -846,8 +947,33 @@ copy_table(Relation rel) if (i < lrel.natts - 1) appendStringInfoString(&cmd, ", "); } - appendStringInfo(&cmd, " FROM %s) TO STDOUT", - quote_qualified_identifier(lrel.nspname, lrel.relname)); + + appendStringInfoString(&cmd, " FROM "); + + /* + * For regular tables, make sure we don't copy data from a child that + * inherits the named table as those will be copied separately. 
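+	 *
+	 * For example (illustrative), with two publication filters OR'ed
+	 * together the constructed command looks like:
+	 *
+	 *   COPY (SELECT a, b FROM ONLY public.t
+	 *         WHERE (a > 10) OR (b < 5)) TO STDOUT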
+ */ + if (lrel.relkind == RELKIND_RELATION) + appendStringInfoString(&cmd, "ONLY "); + + appendStringInfoString(&cmd, quote_qualified_identifier(lrel.nspname, lrel.relname)); + /* list of OR'ed filters */ + if (qual != NIL) + { + ListCell *lc; + char *q = strVal(linitial(qual)); + + appendStringInfo(&cmd, " WHERE %s", q); + for_each_from(lc, qual, 1) + { + q = strVal(lfirst(lc)); + appendStringInfo(&cmd, " OR %s", q); + } + list_free_deep(qual); + } + + appendStringInfoString(&cmd, ") TO STDOUT"); } res = walrcv_exec(LogRepWorkerWalRcvConn, cmd.data, 0, NULL); pfree(cmd.data); diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c index 4162bb8de7..ea57a0477f 100644 --- a/src/backend/replication/pgoutput/pgoutput.c +++ b/src/backend/replication/pgoutput/pgoutput.c @@ -15,12 +15,17 @@ #include "access/tupconvert.h" #include "catalog/partition.h" #include "catalog/pg_publication.h" +#include "catalog/pg_publication_rel.h" #include "commands/defrem.h" +#include "executor/executor.h" #include "fmgr.h" +#include "nodes/makefuncs.h" +#include "optimizer/optimizer.h" #include "replication/logical.h" #include "replication/logicalproto.h" #include "replication/origin.h" #include "replication/pgoutput.h" +#include "utils/builtins.h" #include "utils/inval.h" #include "utils/lsyscache.h" #include "utils/memutils.h" @@ -85,6 +90,19 @@ static void send_repl_origin(LogicalDecodingContext *ctx, RepOriginId origin_id, XLogRecPtr origin_lsn, bool send_origin); +/* + * Only 3 publication actions are used for row filtering ("insert", "update", + * "delete"). See RelationSyncEntry.exprstate[]. + */ +enum RowFilterPubAction +{ + PUBACTION_INSERT, + PUBACTION_UPDATE, + PUBACTION_DELETE +}; + +#define NUM_ROWFILTER_PUBACTIONS (PUBACTION_DELETE+1) + /* * Entry in the map used to remember which relation schemas we sent. * @@ -116,6 +134,21 @@ typedef struct RelationSyncEntry /* are we publishing this rel? */ PublicationActions pubactions; + /* + * ExprState array for row filter. Different publication actions don't + * allow multiple expressions to always be combined into one, because + * updates or deletes restrict the column in expression to be part of the + * replica identity index whereas inserts do not have this restriction, so + * there is one ExprState per publication action. + */ + ExprState *exprstate[NUM_ROWFILTER_PUBACTIONS]; + EState *estate; /* executor state used for row filter */ + MemoryContext cache_expr_cxt; /* private context for exprstate and + * estate, if any */ + + TupleTableSlot *new_slot; /* slot for storing new tuple */ + TupleTableSlot *old_slot; /* slot for storing old tuple */ + /* * OID of the relation to publish changes as. For a partition, this may * be set to one of its ancestors whose schema will be used when @@ -130,7 +163,7 @@ typedef struct RelationSyncEntry * same as 'relid' or if unnecessary due to partition and the ancestor * having identical TupleDesc. */ - TupleConversionMap *map; + AttrMap *attrmap; } RelationSyncEntry; /* Map used to remember which relation schemas we sent. 
*/ @@ -138,7 +171,8 @@ static HTAB *RelationSyncCache = NULL; static void init_rel_sync_cache(MemoryContext decoding_context); static void cleanup_rel_sync_cache(TransactionId xid, bool is_commit); -static RelationSyncEntry *get_rel_sync_entry(PGOutputData *data, Oid relid); +static RelationSyncEntry *get_rel_sync_entry(PGOutputData *data, + Relation relation); static void rel_sync_cache_relation_cb(Datum arg, Oid relid); static void rel_sync_cache_publication_cb(Datum arg, int cacheid, uint32 hashvalue); @@ -146,6 +180,20 @@ static void set_schema_sent_in_streamed_txn(RelationSyncEntry *entry, TransactionId xid); static bool get_schema_sent_in_streamed_txn(RelationSyncEntry *entry, TransactionId xid); +static void init_tuple_slot(PGOutputData *data, Relation relation, + RelationSyncEntry *entry); + +/* row filter routines */ +static EState *create_estate_for_relation(Relation rel); +static void pgoutput_row_filter_init(PGOutputData *data, + List *publications, + RelationSyncEntry *entry); +static bool pgoutput_row_filter_exec_expr(ExprState *state, + ExprContext *econtext); +static bool pgoutput_row_filter(Relation relation, TupleTableSlot *old_slot, + TupleTableSlot **new_slot_ptr, + RelationSyncEntry *entry, + ReorderBufferChangeType *action); /* * Specify output plugin callbacks @@ -303,6 +351,10 @@ pgoutput_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt, "logical replication output context", ALLOCSET_DEFAULT_SIZES); + data->cachectx = AllocSetContextCreate(ctx->context, + "logical replication cache context", + ALLOCSET_DEFAULT_SIZES); + ctx->output_plugin_private = data; /* This plugin uses binary protocol. */ @@ -543,37 +595,14 @@ maybe_send_schema(LogicalDecodingContext *ctx, return; /* - * Nope, so send the schema. If the changes will be published using an - * ancestor's schema, not the relation's own, send that ancestor's schema - * before sending relation's own (XXX - maybe sending only the former - * suffices?). This is also a good place to set the map that will be used - * to convert the relation's tuples into the ancestor's format, if needed. + * Send the schema. If the changes will be published using an ancestor's + * schema, not the relation's own, send that ancestor's schema before + * sending relation's own (XXX - maybe sending only the former suffices?). */ if (relentry->publish_as_relid != RelationGetRelid(relation)) { Relation ancestor = RelationIdGetRelation(relentry->publish_as_relid); - TupleDesc indesc = RelationGetDescr(relation); - TupleDesc outdesc = RelationGetDescr(ancestor); - MemoryContext oldctx; - - /* Map must live as long as the session does. */ - oldctx = MemoryContextSwitchTo(CacheMemoryContext); - /* - * Make copies of the TupleDescs that will live as long as the map - * does before putting into the map. - */ - indesc = CreateTupleDescCopy(indesc); - outdesc = CreateTupleDescCopy(outdesc); - relentry->map = convert_tuples_by_name(indesc, outdesc); - if (relentry->map == NULL) - { - /* Map not necessary, so free the TupleDescs too. */ - FreeTupleDesc(indesc); - FreeTupleDesc(outdesc); - } - - MemoryContextSwitchTo(oldctx); send_relation_and_attrs(ancestor, xid, ctx); RelationClose(ancestor); } @@ -624,6 +653,484 @@ send_relation_and_attrs(Relation relation, TransactionId xid, OutputPluginWrite(ctx, false); } +/* + * Executor state preparation for evaluation of row filter expressions for the + * specified relation. 
+ */ +static EState * +create_estate_for_relation(Relation rel) +{ + EState *estate; + RangeTblEntry *rte; + + estate = CreateExecutorState(); + + rte = makeNode(RangeTblEntry); + rte->rtekind = RTE_RELATION; + rte->relid = RelationGetRelid(rel); + rte->relkind = rel->rd_rel->relkind; + rte->rellockmode = AccessShareLock; + ExecInitRangeTable(estate, list_make1(rte)); + + estate->es_output_cid = GetCurrentCommandId(false); + + return estate; +} + +/* + * Evaluates row filter. + * + * If the row filter evaluates to NULL, it is taken as false i.e. the change + * isn't replicated. + */ +static bool +pgoutput_row_filter_exec_expr(ExprState *state, ExprContext *econtext) +{ + Datum ret; + bool isnull; + + Assert(state != NULL); + + ret = ExecEvalExprSwitchContext(state, econtext, &isnull); + + elog(DEBUG3, "row filter evaluates to %s (isnull: %s)", + isnull ? "false" : DatumGetBool(ret) ? "true" : "false", + isnull ? "true" : "false"); + + if (isnull) + return false; + + return DatumGetBool(ret); +} + +/* + * Initialize the row filter. + */ +static void +pgoutput_row_filter_init(PGOutputData *data, List *publications, + RelationSyncEntry *entry) +{ + ListCell *lc; + List *rfnodes[] = {NIL, NIL, NIL}; /* One per pubaction */ + bool no_filter[] = {false, false, false}; /* One per pubaction */ + MemoryContext oldctx; + int idx; + bool has_filter = true; + + /* + * Find if there are any row filters for this relation. If there are, then + * prepare the necessary ExprState and cache it in entry->exprstate. To + * build an expression state, we need to ensure the following: + * + * All the given publication-table mappings must be checked. + * + * Multiple publications might have multiple row filters for this + * relation. Since row filter usage depends on the DML operation, there + * are multiple lists (one for each operation) to which row filters will + * be appended. + * + * FOR ALL TABLES implies "don't use row filter expression" so it takes + * precedence. + */ + foreach(lc, publications) + { + Publication *pub = lfirst(lc); + HeapTuple rftuple = NULL; + Datum rfdatum = 0; + bool pub_no_filter = false; + + if (pub->alltables) + { + /* + * If the publication is FOR ALL TABLES then it is treated the + * same as if this table has no row filters (even if for other + * publications it does). + */ + pub_no_filter = true; + } + else + { + /* + * Check for the presence of a row filter in this publication. + */ + rftuple = SearchSysCache2(PUBLICATIONRELMAP, + ObjectIdGetDatum(entry->publish_as_relid), + ObjectIdGetDatum(pub->oid)); + + if (HeapTupleIsValid(rftuple)) + { + /* Null indicates no filter. */ + rfdatum = SysCacheGetAttr(PUBLICATIONRELMAP, rftuple, + Anum_pg_publication_rel_prqual, + &pub_no_filter); + } + else + { + pub_no_filter = true; + } + } + + if (pub_no_filter) + { + if (rftuple) + ReleaseSysCache(rftuple); + + no_filter[PUBACTION_INSERT] |= pub->pubactions.pubinsert; + no_filter[PUBACTION_UPDATE] |= pub->pubactions.pubupdate; + no_filter[PUBACTION_DELETE] |= pub->pubactions.pubdelete; + + /* + * Quick exit if all the DML actions are publicized via this + * publication. + */ + if (no_filter[PUBACTION_INSERT] && + no_filter[PUBACTION_UPDATE] && + no_filter[PUBACTION_DELETE]) + { + has_filter = false; + break; + } + + /* No additional work for this publication. Next one. */ + continue; + } + + /* Form the per pubaction row filter lists. 
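+		 *
+		 * For example (illustrative):
+		 *
+		 *   CREATE PUBLICATION p1 FOR TABLE t WHERE (a > 0)
+		 *       WITH (publish = 'insert');
+		 *   CREATE PUBLICATION p2 FOR TABLE t WHERE (b > 0)
+		 *       WITH (publish = 'update');
+		 *
+		 * appends (a > 0) to rfnodes[PUBACTION_INSERT] and (b > 0) to
+		 * rfnodes[PUBACTION_UPDATE].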
*/ + if (pub->pubactions.pubinsert && !no_filter[PUBACTION_INSERT]) + rfnodes[PUBACTION_INSERT] = lappend(rfnodes[PUBACTION_INSERT], + TextDatumGetCString(rfdatum)); + if (pub->pubactions.pubupdate && !no_filter[PUBACTION_UPDATE]) + rfnodes[PUBACTION_UPDATE] = lappend(rfnodes[PUBACTION_UPDATE], + TextDatumGetCString(rfdatum)); + if (pub->pubactions.pubdelete && !no_filter[PUBACTION_DELETE]) + rfnodes[PUBACTION_DELETE] = lappend(rfnodes[PUBACTION_DELETE], + TextDatumGetCString(rfdatum)); + + ReleaseSysCache(rftuple); + } /* loop all subscribed publications */ + + /* Clean the row filter */ + for (idx = 0; idx < NUM_ROWFILTER_PUBACTIONS; idx++) + { + if (no_filter[idx]) + { + list_free_deep(rfnodes[idx]); + rfnodes[idx] = NIL; + } + } + + if (has_filter) + { + Relation relation = RelationIdGetRelation(entry->publish_as_relid); + + Assert(entry->cache_expr_cxt == NULL); + + /* Create the memory context for row filters */ + entry->cache_expr_cxt = AllocSetContextCreate(data->cachectx, + "Row filter expressions", + ALLOCSET_DEFAULT_SIZES); + + MemoryContextCopyAndSetIdentifier(entry->cache_expr_cxt, + RelationGetRelationName(relation)); + + /* + * Now all the filters for all pubactions are known. Combine them when + * their pubactions are the same. + */ + oldctx = MemoryContextSwitchTo(entry->cache_expr_cxt); + entry->estate = create_estate_for_relation(relation); + for (idx = 0; idx < NUM_ROWFILTER_PUBACTIONS; idx++) + { + List *filters = NIL; + Expr *rfnode; + + if (rfnodes[idx] == NIL) + continue; + + foreach(lc, rfnodes[idx]) + filters = lappend(filters, stringToNode((char *) lfirst(lc))); + + /* combine the row filter and cache the ExprState */ + rfnode = make_orclause(filters); + entry->exprstate[idx] = ExecPrepareExpr(rfnode, entry->estate); + } /* for each pubaction */ + MemoryContextSwitchTo(oldctx); + + RelationClose(relation); + } +} + +/* + * Initialize the slot for storing new and old tuples, and build the map that + * will be used to convert the relation's tuples into the ancestor's format. + */ +static void +init_tuple_slot(PGOutputData *data, Relation relation, + RelationSyncEntry *entry) +{ + MemoryContext oldctx; + TupleDesc oldtupdesc; + TupleDesc newtupdesc; + + oldctx = MemoryContextSwitchTo(data->cachectx); + + /* + * Create tuple table slots. Create a copy of the TupleDesc as it needs to + * live as long as the cache remains. + */ + oldtupdesc = CreateTupleDescCopy(RelationGetDescr(relation)); + newtupdesc = CreateTupleDescCopy(RelationGetDescr(relation)); + + entry->old_slot = MakeSingleTupleTableSlot(oldtupdesc, &TTSOpsHeapTuple); + entry->new_slot = MakeSingleTupleTableSlot(newtupdesc, &TTSOpsHeapTuple); + + MemoryContextSwitchTo(oldctx); + + /* + * Cache the map that will be used to convert the relation's tuples into + * the ancestor's format, if needed. + */ + if (entry->publish_as_relid != RelationGetRelid(relation)) + { + Relation ancestor = RelationIdGetRelation(entry->publish_as_relid); + TupleDesc indesc = RelationGetDescr(relation); + TupleDesc outdesc = RelationGetDescr(ancestor); + + /* Map must live as long as the session does. */ + oldctx = MemoryContextSwitchTo(CacheMemoryContext); + + entry->attrmap = build_attrmap_by_name_if_req(indesc, outdesc); + + MemoryContextSwitchTo(oldctx); + RelationClose(ancestor); + } +} + +/* + * Change is checked against the row filter if any. + * + * Returns true if the change is to be replicated, else false. + * + * For inserts, evaluate the row filter for new tuple. + * For deletes, evaluate the row filter for old tuple. 
+ * For updates, evaluate the row filter for old and new tuple. + * + * For updates, if both evaluations are true, we allow sending the UPDATE and + * if both the evaluations are false, it doesn't replicate the UPDATE. Now, if + * only one of the tuples matches the row filter expression, we transform + * UPDATE to DELETE or INSERT to avoid any data inconsistency based on the + * following rules: + * + * Case 1: old-row (no match) new-row (no match) -> (drop change) + * Case 2: old-row (no match) new row (match) -> INSERT + * Case 3: old-row (match) new-row (no match) -> DELETE + * Case 4: old-row (match) new row (match) -> UPDATE + * + * The new action is updated in the action parameter. + * + * The new slot could be updated when transforming the UPDATE into INSERT, + * because the original new tuple might not have column values from the replica + * identity. + * + * Examples: + * Let's say the old tuple satisfies the row filter but the new tuple doesn't. + * Since the old tuple satisfies, the initial table synchronization copied this + * row (or another method was used to guarantee that there is data + * consistency). However, after the UPDATE the new tuple doesn't satisfy the + * row filter, so from a data consistency perspective, that row should be + * removed on the subscriber. The UPDATE should be transformed into a DELETE + * statement and be sent to the subscriber. Keeping this row on the subscriber + * is undesirable because it doesn't reflect what was defined in the row filter + * expression on the publisher. This row on the subscriber would likely not be + * modified by replication again. If someone inserted a new row with the same + * old identifier, replication could stop due to a constraint violation. + * + * Let's say the old tuple doesn't match the row filter but the new tuple does. + * Since the old tuple doesn't satisfy, the initial table synchronization + * probably didn't copy this row. However, after the UPDATE the new tuple does + * satisfy the row filter, so from a data consistency perspective, that row + * should be inserted on the subscriber. Otherwise, subsequent UPDATE or DELETE + * statements have no effect (it matches no row -- see + * apply_handle_update_internal()). So, the UPDATE should be transformed into a + * INSERT statement and be sent to the subscriber. However, this might surprise + * someone who expects the data set to satisfy the row filter expression on the + * provider. + */ +static bool +pgoutput_row_filter(Relation relation, TupleTableSlot *old_slot, + TupleTableSlot **new_slot_ptr, RelationSyncEntry *entry, + ReorderBufferChangeType *action) +{ + TupleDesc desc; + int i; + bool old_matched, + new_matched, + result; + TupleTableSlot *tmp_new_slot; + TupleTableSlot *new_slot = *new_slot_ptr; + ExprContext *ecxt; + ExprState *filter_exprstate; + + /* + * We need this map to avoid relying on ReorderBufferChangeType enums + * having specific values. 
+ */ + static const int map_changetype_pubaction[] = { + [REORDER_BUFFER_CHANGE_INSERT] = PUBACTION_INSERT, + [REORDER_BUFFER_CHANGE_UPDATE] = PUBACTION_UPDATE, + [REORDER_BUFFER_CHANGE_DELETE] = PUBACTION_DELETE + }; + + Assert(*action == REORDER_BUFFER_CHANGE_INSERT || + *action == REORDER_BUFFER_CHANGE_UPDATE || + *action == REORDER_BUFFER_CHANGE_DELETE); + + Assert(new_slot || old_slot); + + /* Get the corresponding row filter */ + filter_exprstate = entry->exprstate[map_changetype_pubaction[*action]]; + + /* Bail out if there is no row filter */ + if (!filter_exprstate) + return true; + + elog(DEBUG3, "table \"%s.%s\" has row filter", + get_namespace_name(RelationGetNamespace(relation)), + RelationGetRelationName(relation)); + + ResetPerTupleExprContext(entry->estate); + + ecxt = GetPerTupleExprContext(entry->estate); + + /* + * For the following occasions where there is only one tuple, we can + * evaluate the row filter for that tuple and return. + * + * For inserts, we only have the new tuple. + * + * For updates, we can have only a new tuple when none of the replica + * identity columns changed but we still need to evaluate the row filter + * for new tuple as the existing values of those columns might not match + * the filter. Also, users can use constant expressions in the row filter, + * so we anyway need to evaluate it for the new tuple. + * + * For deletes, we only have the old tuple. + */ + if (!new_slot || !old_slot) + { + ecxt->ecxt_scantuple = new_slot ? new_slot : old_slot; + result = pgoutput_row_filter_exec_expr(filter_exprstate, ecxt); + + return result; + } + + /* + * Both the old and new tuples must be valid only for updates and need to + * be checked against the row filter. + */ + Assert(map_changetype_pubaction[*action] == PUBACTION_UPDATE); + + slot_getallattrs(new_slot); + slot_getallattrs(old_slot); + + tmp_new_slot = NULL; + desc = RelationGetDescr(relation); + + /* + * The new tuple might not have all the replica identity columns, in which + * case it needs to be copied over from the old tuple. + */ + for (i = 0; i < desc->natts; i++) + { + Form_pg_attribute att = TupleDescAttr(desc, i); + + /* + * if the column in the new tuple or old tuple is null, nothing to do + */ + if (new_slot->tts_isnull[i] || old_slot->tts_isnull[i]) + continue; + + /* + * Unchanged toasted replica identity columns are only logged in the + * old tuple. Copy this over to the new tuple. The changed (or WAL + * Logged) toast values are always assembled in memory and set as + * VARTAG_INDIRECT. See ReorderBufferToastReplace. + */ + if (att->attlen == -1 && + VARATT_IS_EXTERNAL_ONDISK(new_slot->tts_values[i]) && + !VARATT_IS_EXTERNAL_ONDISK(old_slot->tts_values[i])) + { + if (!tmp_new_slot) + { + tmp_new_slot = MakeSingleTupleTableSlot(desc, &TTSOpsVirtual); + ExecClearTuple(tmp_new_slot); + + memcpy(tmp_new_slot->tts_values, new_slot->tts_values, + desc->natts * sizeof(Datum)); + memcpy(tmp_new_slot->tts_isnull, new_slot->tts_isnull, + desc->natts * sizeof(bool)); + } + + tmp_new_slot->tts_values[i] = old_slot->tts_values[i]; + tmp_new_slot->tts_isnull[i] = old_slot->tts_isnull[i]; + } + } + + ecxt->ecxt_scantuple = old_slot; + old_matched = pgoutput_row_filter_exec_expr(filter_exprstate, ecxt); + + if (tmp_new_slot) + { + ExecStoreVirtualTuple(tmp_new_slot); + ecxt->ecxt_scantuple = tmp_new_slot; + } + else + ecxt->ecxt_scantuple = new_slot; + + new_matched = pgoutput_row_filter_exec_expr(filter_exprstate, ecxt); + + /* + * Case 1: if both tuples don't match the row filter, bailout. 
Send + * nothing. + */ + if (!old_matched && !new_matched) + return false; + + /* + * Case 2: if the old tuple doesn't satisfy the row filter but the new + * tuple does, transform the UPDATE into INSERT. + * + * Use the newly transformed tuple that must contain the column values for + * all the replica identity columns. This is required to ensure that the + * while inserting the tuple in the downstream node, we have all the + * required column values. + */ + if (!old_matched && new_matched) + { + *action = REORDER_BUFFER_CHANGE_INSERT; + + if (tmp_new_slot) + *new_slot_ptr = tmp_new_slot; + } + + /* + * Case 3: if the old tuple satisfies the row filter but the new tuple + * doesn't, transform the UPDATE into DELETE. + * + * This transformation does not require another tuple. The Old tuple will + * be used for DELETE. + */ + else if (old_matched && !new_matched) + *action = REORDER_BUFFER_CHANGE_DELETE; + + /* + * Case 4: if both tuples match the row filter, transformation isn't + * required. (*action is default UPDATE). + */ + + return true; +} + /* * Sends the decoded DML over wire. * @@ -638,6 +1145,10 @@ pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, RelationSyncEntry *relentry; TransactionId xid = InvalidTransactionId; Relation ancestor = NULL; + Relation targetrel = relation; + ReorderBufferChangeType action = change->action; + TupleTableSlot *old_slot = NULL; + TupleTableSlot *new_slot = NULL; if (!is_publishable_relation(relation)) return; @@ -651,10 +1162,10 @@ pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, if (in_streaming) xid = change->txn->xid; - relentry = get_rel_sync_entry(data, RelationGetRelid(relation)); + relentry = get_rel_sync_entry(data, relation); /* First check the table filter */ - switch (change->action) + switch (action) { case REORDER_BUFFER_CHANGE_INSERT: if (!relentry->pubactions.pubinsert) @@ -675,80 +1186,149 @@ pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, /* Avoid leaking memory by using and resetting our own context */ old = MemoryContextSwitchTo(data->context); - maybe_send_schema(ctx, change, relation, relentry); - /* Send the data */ - switch (change->action) + switch (action) { case REORDER_BUFFER_CHANGE_INSERT: - { - HeapTuple tuple = &change->data.tp.newtuple->tuple; + new_slot = relentry->new_slot; + ExecStoreHeapTuple(&change->data.tp.newtuple->tuple, + new_slot, false); - /* Switch relation if publishing via root. */ - if (relentry->publish_as_relid != RelationGetRelid(relation)) + /* Switch relation if publishing via root. */ + if (relentry->publish_as_relid != RelationGetRelid(relation)) + { + Assert(relation->rd_rel->relispartition); + ancestor = RelationIdGetRelation(relentry->publish_as_relid); + targetrel = ancestor; + /* Convert tuple if needed. */ + if (relentry->attrmap) { - Assert(relation->rd_rel->relispartition); - ancestor = RelationIdGetRelation(relentry->publish_as_relid); - relation = ancestor; - /* Convert tuple if needed. 
*/ - if (relentry->map) - tuple = execute_attr_map_tuple(tuple, relentry->map); + TupleDesc tupdesc = RelationGetDescr(targetrel); + + new_slot = execute_attr_map_slot(relentry->attrmap, + new_slot, + MakeTupleTableSlot(tupdesc, &TTSOpsVirtual)); } + } - OutputPluginPrepareWrite(ctx, true); - logicalrep_write_insert(ctx->out, xid, relation, tuple, - data->binary); - OutputPluginWrite(ctx, true); + /* Check row filter */ + if (!pgoutput_row_filter(targetrel, NULL, &new_slot, relentry, + &action)) break; - } + + /* + * Schema should be sent using the original relation because it + * also sends the ancestor's relation. + */ + maybe_send_schema(ctx, change, relation, relentry); + + OutputPluginPrepareWrite(ctx, true); + logicalrep_write_insert(ctx->out, xid, targetrel, new_slot, + data->binary); + OutputPluginWrite(ctx, true); + break; case REORDER_BUFFER_CHANGE_UPDATE: + if (change->data.tp.oldtuple) { - HeapTuple oldtuple = change->data.tp.oldtuple ? - &change->data.tp.oldtuple->tuple : NULL; - HeapTuple newtuple = &change->data.tp.newtuple->tuple; + old_slot = relentry->old_slot; + ExecStoreHeapTuple(&change->data.tp.oldtuple->tuple, + old_slot, false); + } - /* Switch relation if publishing via root. */ - if (relentry->publish_as_relid != RelationGetRelid(relation)) + new_slot = relentry->new_slot; + ExecStoreHeapTuple(&change->data.tp.newtuple->tuple, + new_slot, false); + + /* Switch relation if publishing via root. */ + if (relentry->publish_as_relid != RelationGetRelid(relation)) + { + Assert(relation->rd_rel->relispartition); + ancestor = RelationIdGetRelation(relentry->publish_as_relid); + targetrel = ancestor; + /* Convert tuples if needed. */ + if (relentry->attrmap) { - Assert(relation->rd_rel->relispartition); - ancestor = RelationIdGetRelation(relentry->publish_as_relid); - relation = ancestor; - /* Convert tuples if needed. */ - if (relentry->map) - { - if (oldtuple) - oldtuple = execute_attr_map_tuple(oldtuple, - relentry->map); - newtuple = execute_attr_map_tuple(newtuple, - relentry->map); - } + TupleDesc tupdesc = RelationGetDescr(targetrel); + + if (old_slot) + old_slot = execute_attr_map_slot(relentry->attrmap, + old_slot, + MakeTupleTableSlot(tupdesc, &TTSOpsVirtual)); + + new_slot = execute_attr_map_slot(relentry->attrmap, + new_slot, + MakeTupleTableSlot(tupdesc, &TTSOpsVirtual)); } + } - OutputPluginPrepareWrite(ctx, true); - logicalrep_write_update(ctx->out, xid, relation, oldtuple, - newtuple, data->binary); - OutputPluginWrite(ctx, true); + /* Check row filter */ + if (!pgoutput_row_filter(targetrel, old_slot, &new_slot, + relentry, &action)) break; + + maybe_send_schema(ctx, change, relation, relentry); + + OutputPluginPrepareWrite(ctx, true); + + /* + * Updates could be transformed to inserts or deletes based on the + * results of the row filter for old and new tuple. 
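+				 *
+				 * For example (illustrative), with a row filter of
+				 * WHERE (a > 0):
+				 *
+				 *   UPDATE t SET a = -1 WHERE a = 5;
+				 *
+				 * only the old tuple matches, so the change is sent to
+				 * the subscriber as a DELETE of the old row.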
+ */ + switch (action) + { + case REORDER_BUFFER_CHANGE_INSERT: + logicalrep_write_insert(ctx->out, xid, targetrel, + new_slot, data->binary); + break; + case REORDER_BUFFER_CHANGE_UPDATE: + logicalrep_write_update(ctx->out, xid, targetrel, + old_slot, new_slot, data->binary); + break; + case REORDER_BUFFER_CHANGE_DELETE: + logicalrep_write_delete(ctx->out, xid, targetrel, + old_slot, data->binary); + break; + default: + Assert(false); } + + OutputPluginWrite(ctx, true); + break; case REORDER_BUFFER_CHANGE_DELETE: if (change->data.tp.oldtuple) { - HeapTuple oldtuple = &change->data.tp.oldtuple->tuple; + old_slot = relentry->old_slot; + + ExecStoreHeapTuple(&change->data.tp.oldtuple->tuple, + old_slot, false); /* Switch relation if publishing via root. */ if (relentry->publish_as_relid != RelationGetRelid(relation)) { Assert(relation->rd_rel->relispartition); ancestor = RelationIdGetRelation(relentry->publish_as_relid); - relation = ancestor; + targetrel = ancestor; /* Convert tuple if needed. */ - if (relentry->map) - oldtuple = execute_attr_map_tuple(oldtuple, relentry->map); + if (relentry->attrmap) + { + TupleDesc tupdesc = RelationGetDescr(targetrel); + + old_slot = execute_attr_map_slot(relentry->attrmap, + old_slot, + MakeTupleTableSlot(tupdesc, &TTSOpsVirtual)); + } } + /* Check row filter */ + if (!pgoutput_row_filter(targetrel, old_slot, &new_slot, + relentry, &action)) + break; + + maybe_send_schema(ctx, change, relation, relentry); + OutputPluginPrepareWrite(ctx, true); - logicalrep_write_delete(ctx->out, xid, relation, oldtuple, - data->binary); + logicalrep_write_delete(ctx->out, xid, targetrel, + old_slot, data->binary); OutputPluginWrite(ctx, true); } else @@ -798,7 +1378,7 @@ pgoutput_truncate(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, if (!is_publishable_relation(relation)) continue; - relentry = get_rel_sync_entry(data, relid); + relentry = get_rel_sync_entry(data, relation); if (!relentry->pubactions.pubtruncate) continue; @@ -873,8 +1453,9 @@ pgoutput_origin_filter(LogicalDecodingContext *ctx, /* * Shutdown the output plugin. * - * Note, we don't need to clean the data->context as it's child context - * of the ctx->context so it will be cleaned up by logical decoding machinery. + * Note, we don't need to clean the data->context and data->cachectx as + * they are child context of the ctx->context so it will be cleaned up by + * logical decoding machinery. */ static void pgoutput_shutdown(LogicalDecodingContext *ctx) @@ -1122,11 +1703,12 @@ set_schema_sent_in_streamed_txn(RelationSyncEntry *entry, TransactionId xid) * when publishing. 
*/ static RelationSyncEntry * -get_rel_sync_entry(PGOutputData *data, Oid relid) +get_rel_sync_entry(PGOutputData *data, Relation relation) { RelationSyncEntry *entry; bool found; MemoryContext oldctx; + Oid relid = RelationGetRelid(relation); Assert(RelationSyncCache != NULL); @@ -1144,9 +1726,12 @@ get_rel_sync_entry(PGOutputData *data, Oid relid) entry->streamed_txns = NIL; entry->pubactions.pubinsert = entry->pubactions.pubupdate = entry->pubactions.pubdelete = entry->pubactions.pubtruncate = false; + entry->new_slot = NULL; + entry->old_slot = NULL; + memset(entry->exprstate, 0, sizeof(entry->exprstate)); + entry->cache_expr_cxt = NULL; entry->publish_as_relid = InvalidOid; - entry->map = NULL; /* will be set by maybe_send_schema() if - * needed */ + entry->attrmap = NULL; } /* Validate the entry */ @@ -1165,6 +1750,7 @@ get_rel_sync_entry(PGOutputData *data, Oid relid) Oid publish_as_relid = relid; bool am_partition = get_rel_relispartition(relid); char relkind = get_rel_relkind(relid); + List *rel_publications = NIL; /* Reload publications if needed before use. */ if (!publications_valid) @@ -1193,17 +1779,31 @@ get_rel_sync_entry(PGOutputData *data, Oid relid) entry->pubactions.pubupdate = false; entry->pubactions.pubdelete = false; entry->pubactions.pubtruncate = false; - if (entry->map) - { - /* - * Must free the TupleDescs contained in the map explicitly, - * because free_conversion_map() doesn't. - */ - FreeTupleDesc(entry->map->indesc); - FreeTupleDesc(entry->map->outdesc); - free_conversion_map(entry->map); - } - entry->map = NULL; + + /* + * Tuple slots cleanups. (Will be rebuilt later if needed). + */ + if (entry->old_slot) + ExecDropSingleTupleTableSlot(entry->old_slot); + if (entry->new_slot) + ExecDropSingleTupleTableSlot(entry->new_slot); + + entry->old_slot = NULL; + entry->new_slot = NULL; + + if (entry->attrmap) + free_attrmap(entry->attrmap); + entry->attrmap = NULL; + + /* + * Row filter cache cleanups. + */ + if (entry->cache_expr_cxt) + MemoryContextDelete(entry->cache_expr_cxt); + + entry->cache_expr_cxt = NULL; + entry->estate = NULL; + memset(entry->exprstate, 0, sizeof(entry->exprstate)); /* * Build publication cache. We can't use one provided by relcache as @@ -1234,28 +1834,17 @@ get_rel_sync_entry(PGOutputData *data, Oid relid) */ if (am_partition) { + Oid ancestor; List *ancestors = get_partition_ancestors(relid); - ListCell *lc2; - /* - * Find the "topmost" ancestor that is in this - * publication. 
- */ - foreach(lc2, ancestors) + ancestor = GetTopMostAncestorInPublication(pub->oid, + ancestors); + + if (ancestor != InvalidOid) { - Oid ancestor = lfirst_oid(lc2); - List *apubids = GetRelationPublications(ancestor); - List *aschemaPubids = GetSchemaPublications(get_rel_namespace(ancestor)); - - if (list_member_oid(apubids, pub->oid) || - list_member_oid(aschemaPubids, pub->oid)) - { - ancestor_published = true; - if (pub->pubviaroot) - publish_as_relid = ancestor; - } - list_free(apubids); - list_free(aschemaPubids); + ancestor_published = true; + if (pub->pubviaroot) + publish_as_relid = ancestor; } } @@ -1277,17 +1866,31 @@ get_rel_sync_entry(PGOutputData *data, Oid relid) entry->pubactions.pubupdate |= pub->pubactions.pubupdate; entry->pubactions.pubdelete |= pub->pubactions.pubdelete; entry->pubactions.pubtruncate |= pub->pubactions.pubtruncate; + + rel_publications = lappend(rel_publications, pub); } + } - if (entry->pubactions.pubinsert && entry->pubactions.pubupdate && - entry->pubactions.pubdelete && entry->pubactions.pubtruncate) - break; + entry->publish_as_relid = publish_as_relid; + + /* + * Initialize the tuple slot, map, and row filter. These are only used + * when publishing inserts, updates, or deletes. + */ + if (entry->pubactions.pubinsert || entry->pubactions.pubupdate || + entry->pubactions.pubdelete) + { + /* Initialize the tuple slot and map */ + init_tuple_slot(data, relation, entry); + + /* Initialize the row filter */ + pgoutput_row_filter_init(data, rel_publications, entry); } list_free(pubids); list_free(schemaPubids); + list_free(rel_publications); - entry->publish_as_relid = publish_as_relid; entry->replicate_valid = true; } diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c index 2707fed12f..fccffce572 100644 --- a/src/backend/utils/cache/relcache.c +++ b/src/backend/utils/cache/relcache.c @@ -66,6 +66,7 @@ #include "catalog/schemapg.h" #include "catalog/storage.h" #include "commands/policy.h" +#include "commands/publicationcmds.h" #include "commands/trigger.h" #include "miscadmin.h" #include "nodes/makefuncs.h" @@ -2419,8 +2420,8 @@ RelationDestroyRelation(Relation relation, bool remember_tupdesc) bms_free(relation->rd_pkattr); bms_free(relation->rd_idattr); bms_free(relation->rd_hotblockingattr); - if (relation->rd_pubactions) - pfree(relation->rd_pubactions); + if (relation->rd_pubdesc) + pfree(relation->rd_pubdesc); if (relation->rd_options) pfree(relation->rd_options); if (relation->rd_indextuple) @@ -5523,38 +5524,57 @@ RelationGetExclusionInfo(Relation indexRelation, } /* - * Get publication actions for the given relation. + * Get the publication information for the given relation. + * + * Traverse all the publications which the relation is in to get the + * publication actions and validate the row filter expressions for such + * publications if any. We consider the row filter expression as invalid if it + * references any column which is not part of REPLICA IDENTITY. + * + * To avoid fetching the publication information repeatedly, we cache the + * publication actions and row filter validation information. 
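+ *
+ * For example (illustrative), a filter referencing a column that is not
+ * part of the replica identity invalidates UPDATE and DELETE:
+ *
+ *   CREATE TABLE t (a int PRIMARY KEY, b int);
+ *   CREATE PUBLICATION pub FOR TABLE t WHERE (b > 0);
+ *   UPDATE t SET b = 1;
+ *   -- ERROR: cannot update table "t" (the WHERE expression references
+ *   -- "b", which is not part of the replica identity)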
*/ -struct PublicationActions * -GetRelationPublicationActions(Relation relation) +void +RelationBuildPublicationDesc(Relation relation, PublicationDesc *pubdesc) { List *puboids; ListCell *lc; MemoryContext oldcxt; Oid schemaid; - PublicationActions *pubactions = palloc0(sizeof(PublicationActions)); + List *ancestors = NIL; + Oid relid = RelationGetRelid(relation); /* * If not publishable, it publishes no actions. (pgoutput_change() will * ignore it.) */ if (!is_publishable_relation(relation)) - return pubactions; + { + memset(pubdesc, 0, sizeof(PublicationDesc)); + pubdesc->rf_valid_for_update = true; + pubdesc->rf_valid_for_delete = true; + return; + } + + if (relation->rd_pubdesc) + { + memcpy(pubdesc, relation->rd_pubdesc, sizeof(PublicationDesc)); + return; + } - if (relation->rd_pubactions) - return memcpy(pubactions, relation->rd_pubactions, - sizeof(PublicationActions)); + memset(pubdesc, 0, sizeof(PublicationDesc)); + pubdesc->rf_valid_for_update = true; + pubdesc->rf_valid_for_delete = true; /* Fetch the publication membership info. */ - puboids = GetRelationPublications(RelationGetRelid(relation)); + puboids = GetRelationPublications(relid); schemaid = RelationGetNamespace(relation); puboids = list_concat_unique_oid(puboids, GetSchemaPublications(schemaid)); if (relation->rd_rel->relispartition) { /* Add publications that the ancestors are in too. */ - List *ancestors = get_partition_ancestors(RelationGetRelid(relation)); - ListCell *lc; + ancestors = get_partition_ancestors(relid); foreach(lc, ancestors) { @@ -5582,35 +5602,53 @@ GetRelationPublicationActions(Relation relation) pubform = (Form_pg_publication) GETSTRUCT(tup); - pubactions->pubinsert |= pubform->pubinsert; - pubactions->pubupdate |= pubform->pubupdate; - pubactions->pubdelete |= pubform->pubdelete; - pubactions->pubtruncate |= pubform->pubtruncate; + pubdesc->pubactions.pubinsert |= pubform->pubinsert; + pubdesc->pubactions.pubupdate |= pubform->pubupdate; + pubdesc->pubactions.pubdelete |= pubform->pubdelete; + pubdesc->pubactions.pubtruncate |= pubform->pubtruncate; + + /* + * Check if all columns referenced in the filter expression are part of + * the REPLICA IDENTITY index or not. + * + * If the publication is FOR ALL TABLES then it means the table has no + * row filters and we can skip the validation. + */ + if (!pubform->puballtables && + (pubform->pubupdate || pubform->pubdelete) && + contain_invalid_rfcolumn(pubid, relation, ancestors, + pubform->pubviaroot)) + { + if (pubform->pubupdate) + pubdesc->rf_valid_for_update = false; + if (pubform->pubdelete) + pubdesc->rf_valid_for_delete = false; + } ReleaseSysCache(tup); /* - * If we know everything is replicated, there is no point to check for - * other publications. + * If we know everything is replicated and the row filter is invalid + * for update and delete, there is no point to check for other + * publications. */ - if (pubactions->pubinsert && pubactions->pubupdate && - pubactions->pubdelete && pubactions->pubtruncate) + if (pubdesc->pubactions.pubinsert && pubdesc->pubactions.pubupdate && + pubdesc->pubactions.pubdelete && pubdesc->pubactions.pubtruncate && + !pubdesc->rf_valid_for_update && !pubdesc->rf_valid_for_delete) break; } - if (relation->rd_pubactions) + if (relation->rd_pubdesc) { - pfree(relation->rd_pubactions); - relation->rd_pubactions = NULL; + pfree(relation->rd_pubdesc); + relation->rd_pubdesc = NULL; } - /* Now save copy of the actions in the relcache entry. */ + /* Now save copy of the descriptor in the relcache entry. 
*/ oldcxt = MemoryContextSwitchTo(CacheMemoryContext); - relation->rd_pubactions = palloc(sizeof(PublicationActions)); - memcpy(relation->rd_pubactions, pubactions, sizeof(PublicationActions)); + relation->rd_pubdesc = palloc(sizeof(PublicationDesc)); + memcpy(relation->rd_pubdesc, pubdesc, sizeof(PublicationDesc)); MemoryContextSwitchTo(oldcxt); - - return pubactions; } /* @@ -6163,7 +6201,7 @@ load_relcache_init_file(bool shared) rel->rd_pkattr = NULL; rel->rd_idattr = NULL; rel->rd_hotblockingattr = NULL; - rel->rd_pubactions = NULL; + rel->rd_pubdesc = NULL; rel->rd_statvalid = false; rel->rd_statlist = NIL; rel->rd_fkeyvalid = false; diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c index 4485ea83b1..e69dcf8a48 100644 --- a/src/bin/pg_dump/pg_dump.c +++ b/src/bin/pg_dump/pg_dump.c @@ -4074,6 +4074,7 @@ getPublicationTables(Archive *fout, TableInfo tblinfo[], int numTables) int i_oid; int i_prpubid; int i_prrelid; + int i_prrelqual; int i, j, ntups; @@ -4084,9 +4085,16 @@ getPublicationTables(Archive *fout, TableInfo tblinfo[], int numTables) query = createPQExpBuffer(); /* Collect all publication membership info. */ - appendPQExpBufferStr(query, - "SELECT tableoid, oid, prpubid, prrelid " - "FROM pg_catalog.pg_publication_rel"); + if (fout->remoteVersion >= 150000) + appendPQExpBufferStr(query, + "SELECT tableoid, oid, prpubid, prrelid, " + "pg_catalog.pg_get_expr(prqual, prrelid) AS prrelqual " + "FROM pg_catalog.pg_publication_rel"); + else + appendPQExpBufferStr(query, + "SELECT tableoid, oid, prpubid, prrelid, " + "NULL AS prrelqual " + "FROM pg_catalog.pg_publication_rel"); res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK); ntups = PQntuples(res); @@ -4095,6 +4103,7 @@ getPublicationTables(Archive *fout, TableInfo tblinfo[], int numTables) i_oid = PQfnumber(res, "oid"); i_prpubid = PQfnumber(res, "prpubid"); i_prrelid = PQfnumber(res, "prrelid"); + i_prrelqual = PQfnumber(res, "prrelqual"); /* this allocation may be more than we need */ pubrinfo = pg_malloc(ntups * sizeof(PublicationRelInfo)); @@ -4135,6 +4144,10 @@ getPublicationTables(Archive *fout, TableInfo tblinfo[], int numTables) pubrinfo[j].dobj.name = tbinfo->dobj.name; pubrinfo[j].publication = pubinfo; pubrinfo[j].pubtable = tbinfo; + if (PQgetisnull(res, i, i_prrelqual)) + pubrinfo[j].pubrelqual = NULL; + else + pubrinfo[j].pubrelqual = pg_strdup(PQgetvalue(res, i, i_prrelqual)); /* Decide whether we want to dump it */ selectDumpablePublicationObject(&(pubrinfo[j].dobj), fout); @@ -4212,8 +4225,17 @@ dumpPublicationTable(Archive *fout, const PublicationRelInfo *pubrinfo) appendPQExpBuffer(query, "ALTER PUBLICATION %s ADD TABLE ONLY", fmtId(pubinfo->dobj.name)); - appendPQExpBuffer(query, " %s;\n", + appendPQExpBuffer(query, " %s", fmtQualifiedDumpable(tbinfo)); + if (pubrinfo->pubrelqual) + { + /* + * It's necessary to add parentheses around the expression because + * pg_get_expr won't supply the parentheses for things like WHERE TRUE. 
+ */ + appendPQExpBuffer(query, " WHERE (%s)", pubrinfo->pubrelqual); + } + appendPQExpBufferStr(query, ";\n"); /* * There is no point in creating a drop query as the drop is done by table diff --git a/src/bin/pg_dump/pg_dump.h b/src/bin/pg_dump/pg_dump.h index 9965ac2518..997a3b6071 100644 --- a/src/bin/pg_dump/pg_dump.h +++ b/src/bin/pg_dump/pg_dump.h @@ -631,6 +631,7 @@ typedef struct _PublicationRelInfo DumpableObject dobj; PublicationInfo *publication; TableInfo *pubtable; + char *pubrelqual; } PublicationRelInfo; /* diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c index 654ef2d7c3..e3382933d9 100644 --- a/src/bin/psql/describe.c +++ b/src/bin/psql/describe.c @@ -2879,17 +2879,21 @@ describeOneTableDetails(const char *schemaname, { printfPQExpBuffer(&buf, "SELECT pubname\n" + " , NULL\n" "FROM pg_catalog.pg_publication p\n" " JOIN pg_catalog.pg_publication_namespace pn ON p.oid = pn.pnpubid\n" " JOIN pg_catalog.pg_class pc ON pc.relnamespace = pn.pnnspid\n" "WHERE pc.oid ='%s' and pg_catalog.pg_relation_is_publishable('%s')\n" "UNION\n" "SELECT pubname\n" + " , pg_get_expr(pr.prqual, c.oid)\n" "FROM pg_catalog.pg_publication p\n" " JOIN pg_catalog.pg_publication_rel pr ON p.oid = pr.prpubid\n" + " JOIN pg_catalog.pg_class c ON c.oid = pr.prrelid\n" "WHERE pr.prrelid = '%s'\n" "UNION\n" "SELECT pubname\n" + " , NULL\n" "FROM pg_catalog.pg_publication p\n" "WHERE p.puballtables AND pg_catalog.pg_relation_is_publishable('%s')\n" "ORDER BY 1;", @@ -2899,11 +2903,13 @@ describeOneTableDetails(const char *schemaname, { printfPQExpBuffer(&buf, "SELECT pubname\n" + " , NULL\n" "FROM pg_catalog.pg_publication p\n" "JOIN pg_catalog.pg_publication_rel pr ON p.oid = pr.prpubid\n" "WHERE pr.prrelid = '%s'\n" "UNION ALL\n" "SELECT pubname\n" + " , NULL\n" "FROM pg_catalog.pg_publication p\n" "WHERE p.puballtables AND pg_catalog.pg_relation_is_publishable('%s')\n" "ORDER BY 1;", @@ -2925,6 +2931,11 @@ describeOneTableDetails(const char *schemaname, printfPQExpBuffer(&buf, " \"%s\"", PQgetvalue(result, i, 0)); + /* row filter (if any) */ + if (!PQgetisnull(result, i, 1)) + appendPQExpBuffer(&buf, " WHERE %s", + PQgetvalue(result, i, 1)); + printTableAddFooter(&cont, buf.data); } PQclear(result); @@ -5874,8 +5885,12 @@ addFooterToPublicationDesc(PQExpBuffer buf, char *footermsg, for (i = 0; i < count; i++) { if (!singlecol) + { printfPQExpBuffer(buf, " \"%s.%s\"", PQgetvalue(res, i, 0), PQgetvalue(res, i, 1)); + if (!PQgetisnull(res, i, 2)) + appendPQExpBuffer(buf, " WHERE %s", PQgetvalue(res, i, 2)); + } else printfPQExpBuffer(buf, " \"%s\"", PQgetvalue(res, i, 0)); @@ -6004,8 +6019,15 @@ describePublications(const char *pattern) { /* Get the tables for the specified publication */ printfPQExpBuffer(&buf, - "SELECT n.nspname, c.relname\n" - "FROM pg_catalog.pg_class c,\n" + "SELECT n.nspname, c.relname"); + if (pset.sversion >= 150000) + appendPQExpBufferStr(&buf, + ", pg_get_expr(pr.prqual, c.oid)"); + else + appendPQExpBufferStr(&buf, + ", NULL"); + appendPQExpBuffer(&buf, + "\nFROM pg_catalog.pg_class c,\n" " pg_catalog.pg_namespace n,\n" " pg_catalog.pg_publication_rel pr\n" "WHERE c.relnamespace = n.oid\n" diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c index 010edb685f..6957567264 100644 --- a/src/bin/psql/tab-complete.c +++ b/src/bin/psql/tab-complete.c @@ -1787,6 +1787,20 @@ psql_completion(const char *text, int start, int end) (HeadMatches("ALTER", "PUBLICATION", MatchAny, "ADD|SET", "TABLE") && ends_with(prev_wd, ','))) 
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables);
+	/*
+	 * "ALTER PUBLICATION <name> SET TABLE <name> WHERE (" - complete with
+	 * table attributes
+	 *
+	 * "ALTER PUBLICATION <name> ADD TABLE <name> WHERE (" - complete with
+	 * table attributes
+	 */
+	else if (HeadMatches("ALTER", "PUBLICATION", MatchAny) && TailMatches("WHERE"))
+		COMPLETE_WITH("(");
+	else if (HeadMatches("ALTER", "PUBLICATION", MatchAny) && TailMatches("WHERE", "("))
+		COMPLETE_WITH_ATTR(prev3_wd);
+	else if (HeadMatches("ALTER", "PUBLICATION", MatchAny, "ADD|SET", "TABLE") &&
+			 !TailMatches("WHERE", "(*)"))
+		COMPLETE_WITH(",", "WHERE (");
 	else if (HeadMatches("ALTER", "PUBLICATION", MatchAny, "ADD|SET", "TABLE"))
 		COMPLETE_WITH(",");
 	/* ALTER PUBLICATION <name> DROP */
@@ -2919,12 +2933,23 @@ psql_completion(const char *text, int start, int end)
 		COMPLETE_WITH("TABLES", "TABLES IN SCHEMA");
 	else if (Matches("CREATE", "PUBLICATION", MatchAny, "FOR", "ALL", "TABLES"))
 		COMPLETE_WITH("IN SCHEMA", "WITH (");
-	else if (Matches("CREATE", "PUBLICATION", MatchAny, "FOR", "TABLE", MatchAny))
-		COMPLETE_WITH("WITH (");
+	else if (Matches("CREATE", "PUBLICATION", MatchAny, "FOR", "TABLE", MatchAny) && !ends_with(prev_wd, ','))
+		COMPLETE_WITH("WHERE (", "WITH (");
 	/* Complete "CREATE PUBLICATION <name> FOR TABLE" with "<table>, ..." */
 	else if (Matches("CREATE", "PUBLICATION", MatchAny, "FOR", "TABLE"))
 		COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables);
+
+	/*
+	 * "CREATE PUBLICATION <name> FOR TABLE <name> WHERE (" - complete with
+	 * table attributes
+	 */
+	else if (HeadMatches("CREATE", "PUBLICATION", MatchAny) && TailMatches("WHERE"))
+		COMPLETE_WITH("(");
+	else if (HeadMatches("CREATE", "PUBLICATION", MatchAny) && TailMatches("WHERE", "("))
+		COMPLETE_WITH_ATTR(prev3_wd);
+	else if (HeadMatches("CREATE", "PUBLICATION", MatchAny) && TailMatches("WHERE", "(*)"))
+		COMPLETE_WITH(" WITH (");
+
	/*
	 * Complete "CREATE PUBLICATION <name> FOR ALL TABLES IN SCHEMA <schema>,
	 * ..."
diff --git a/src/include/catalog/catversion.h b/src/include/catalog/catversion.h
index b940a0cf0c..1addb568ef 100644
--- a/src/include/catalog/catversion.h
+++ b/src/include/catalog/catversion.h
@@ -53,6 +53,6 @@
  */
 /*							yyyymmddN */
-#define CATALOG_VERSION_NO	202202141
+#define CATALOG_VERSION_NO	202202221
 #endif
diff --git a/src/include/catalog/pg_publication.h b/src/include/catalog/pg_publication.h
index 841b9b6c25..ba72e62e61 100644
--- a/src/include/catalog/pg_publication.h
+++ b/src/include/catalog/pg_publication.h
@@ -74,6 +74,19 @@ typedef struct PublicationActions
 	bool		pubtruncate;
 } PublicationActions;
 
+typedef struct PublicationDesc
+{
+	PublicationActions pubactions;
+
+	/*
+	 * true if the columns referenced in row filters which are used for UPDATE
+	 * or DELETE are part of the replica identity or the publication actions
+	 * do not include UPDATE or DELETE.
+ */ + bool rf_valid_for_update; + bool rf_valid_for_delete; +} PublicationDesc; + typedef struct Publication { Oid oid; @@ -86,6 +99,7 @@ typedef struct Publication typedef struct PublicationRelInfo { Relation relation; + Node *whereClause; } PublicationRelInfo; extern Publication *GetPublication(Oid pubid); @@ -120,10 +134,11 @@ extern List *GetAllSchemaPublicationRelations(Oid puboid, extern List *GetPubPartitionOptionRelations(List *result, PublicationPartOpt pub_partopt, Oid relid); +extern Oid GetTopMostAncestorInPublication(Oid puboid, List *ancestors); extern bool is_publishable_relation(Relation rel); extern bool is_schema_publication(Oid pubid); -extern ObjectAddress publication_add_relation(Oid pubid, PublicationRelInfo *targetrel, +extern ObjectAddress publication_add_relation(Oid pubid, PublicationRelInfo *pri, bool if_not_exists); extern ObjectAddress publication_add_schema(Oid pubid, Oid schemaid, bool if_not_exists); @@ -131,5 +146,4 @@ extern ObjectAddress publication_add_schema(Oid pubid, Oid schemaid, extern Oid get_publication_oid(const char *pubname, bool missing_ok); extern char *get_publication_name(Oid pubid, bool missing_ok); - #endif /* PG_PUBLICATION_H */ diff --git a/src/include/catalog/pg_publication_rel.h b/src/include/catalog/pg_publication_rel.h index 117a1d67e5..0dd0f425db 100644 --- a/src/include/catalog/pg_publication_rel.h +++ b/src/include/catalog/pg_publication_rel.h @@ -31,6 +31,10 @@ CATALOG(pg_publication_rel,6106,PublicationRelRelationId) Oid oid; /* oid */ Oid prpubid BKI_LOOKUP(pg_publication); /* Oid of the publication */ Oid prrelid BKI_LOOKUP(pg_class); /* Oid of the relation */ + +#ifdef CATALOG_VARLEN /* variable-length fields start here */ + pg_node_tree prqual; /* qualifications */ +#endif } FormData_pg_publication_rel; /* ---------------- @@ -40,6 +44,8 @@ CATALOG(pg_publication_rel,6106,PublicationRelRelationId) */ typedef FormData_pg_publication_rel *Form_pg_publication_rel; +DECLARE_TOAST(pg_publication_rel, 8287, 8288); + DECLARE_UNIQUE_INDEX_PKEY(pg_publication_rel_oid_index, 6112, PublicationRelObjectIndexId, on pg_publication_rel using btree(oid oid_ops)); DECLARE_UNIQUE_INDEX(pg_publication_rel_prrelid_prpubid_index, 6113, PublicationRelPrrelidPrpubidIndexId, on pg_publication_rel using btree(prrelid oid_ops, prpubid oid_ops)); DECLARE_INDEX(pg_publication_rel_prpubid_index, 6116, PublicationRelPrpubidIndexId, on pg_publication_rel using btree(prpubid oid_ops)); diff --git a/src/include/commands/publicationcmds.h b/src/include/commands/publicationcmds.h index cec7525826..7813cbcb6b 100644 --- a/src/include/commands/publicationcmds.h +++ b/src/include/commands/publicationcmds.h @@ -31,5 +31,7 @@ extern void RemovePublicationSchemaById(Oid psoid); extern ObjectAddress AlterPublicationOwner(const char *name, Oid newOwnerId); extern void AlterPublicationOwner_oid(Oid pubid, Oid newOwnerId); extern void InvalidatePublicationRels(List *relids); +extern bool contain_invalid_rfcolumn(Oid pubid, Relation relation, + List *ancestors, bool pubviaroot); #endif /* PUBLICATIONCMDS_H */ diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h index 34218b718c..1617702d9d 100644 --- a/src/include/nodes/parsenodes.h +++ b/src/include/nodes/parsenodes.h @@ -3651,6 +3651,7 @@ typedef struct PublicationTable { NodeTag type; RangeVar *relation; /* relation to be published */ + Node *whereClause; /* qualifications */ } PublicationTable; /* diff --git a/src/include/replication/logicalproto.h b/src/include/replication/logicalproto.h 
index 22fffaca62..4d2c881644 100644 --- a/src/include/replication/logicalproto.h +++ b/src/include/replication/logicalproto.h @@ -14,6 +14,7 @@ #define LOGICAL_PROTO_H #include "access/xact.h" +#include "executor/tuptable.h" #include "replication/reorderbuffer.h" #include "utils/rel.h" @@ -206,17 +207,19 @@ extern void logicalrep_write_origin(StringInfo out, const char *origin, XLogRecPtr origin_lsn); extern char *logicalrep_read_origin(StringInfo in, XLogRecPtr *origin_lsn); extern void logicalrep_write_insert(StringInfo out, TransactionId xid, - Relation rel, HeapTuple newtuple, + Relation rel, + TupleTableSlot *newslot, bool binary); extern LogicalRepRelId logicalrep_read_insert(StringInfo in, LogicalRepTupleData *newtup); extern void logicalrep_write_update(StringInfo out, TransactionId xid, - Relation rel, HeapTuple oldtuple, - HeapTuple newtuple, bool binary); + Relation rel, + TupleTableSlot *oldslot, + TupleTableSlot *newslot, bool binary); extern LogicalRepRelId logicalrep_read_update(StringInfo in, bool *has_oldtuple, LogicalRepTupleData *oldtup, LogicalRepTupleData *newtup); extern void logicalrep_write_delete(StringInfo out, TransactionId xid, - Relation rel, HeapTuple oldtuple, + Relation rel, TupleTableSlot *oldtuple, bool binary); extern LogicalRepRelId logicalrep_read_delete(StringInfo in, LogicalRepTupleData *oldtup); diff --git a/src/include/replication/pgoutput.h b/src/include/replication/pgoutput.h index 78aa9151ef..eafedd610a 100644 --- a/src/include/replication/pgoutput.h +++ b/src/include/replication/pgoutput.h @@ -19,6 +19,7 @@ typedef struct PGOutputData { MemoryContext context; /* private memory context for transient * allocations */ + MemoryContext cachectx; /* private memory context for cache data */ /* client-supplied info: */ uint32 protocol_version; diff --git a/src/include/replication/reorderbuffer.h b/src/include/replication/reorderbuffer.h index 859424bbd9..0bcc150b33 100644 --- a/src/include/replication/reorderbuffer.h +++ b/src/include/replication/reorderbuffer.h @@ -51,7 +51,7 @@ typedef struct ReorderBufferTupleBuf * respectively. They're used by INSERT .. ON CONFLICT .. UPDATE. Users of * logical decoding don't have to care about these. */ -enum ReorderBufferChangeType +typedef enum ReorderBufferChangeType { REORDER_BUFFER_CHANGE_INSERT, REORDER_BUFFER_CHANGE_UPDATE, @@ -66,7 +66,7 @@ enum ReorderBufferChangeType REORDER_BUFFER_CHANGE_INTERNAL_SPEC_ABORT, REORDER_BUFFER_CHANGE_TRUNCATE, REORDER_BUFFER_CHANGE_SEQUENCE -}; +} ReorderBufferChangeType; /* forward declaration */ struct ReorderBufferTXN; @@ -83,7 +83,7 @@ typedef struct ReorderBufferChange XLogRecPtr lsn; /* The type of change. */ - enum ReorderBufferChangeType action; + ReorderBufferChangeType action; /* Transaction this change belongs to. */ struct ReorderBufferTXN *txn; diff --git a/src/include/utils/rel.h b/src/include/utils/rel.h index 6da1b220cd..3b4ab65ae2 100644 --- a/src/include/utils/rel.h +++ b/src/include/utils/rel.h @@ -161,7 +161,7 @@ typedef struct RelationData Bitmapset *rd_idattr; /* included in replica identity index */ Bitmapset *rd_hotblockingattr; /* cols blocking HOT update */ - PublicationActions *rd_pubactions; /* publication actions */ + PublicationDesc *rd_pubdesc; /* publication descriptor, or NULL */ /* * rd_options is set whenever rd_rel is loaded into the relcache entry. 
diff --git a/src/include/utils/relcache.h b/src/include/utils/relcache.h index 84d6afef19..2281a7dc53 100644 --- a/src/include/utils/relcache.h +++ b/src/include/utils/relcache.h @@ -74,8 +74,9 @@ extern void RelationGetExclusionInfo(Relation indexRelation, extern void RelationInitIndexAccessInfo(Relation relation); /* caller must include pg_publication.h */ -struct PublicationActions; -extern struct PublicationActions *GetRelationPublicationActions(Relation relation); +struct PublicationDesc; +extern void RelationBuildPublicationDesc(Relation relation, + struct PublicationDesc *pubdesc); extern void RelationInitTableAccessMethod(Relation relation); diff --git a/src/test/regress/expected/publication.out b/src/test/regress/expected/publication.out index b97f98cda7..3c382e520e 100644 --- a/src/test/regress/expected/publication.out +++ b/src/test/regress/expected/publication.out @@ -239,6 +239,358 @@ ALTER PUBLICATION testpub_forparted DROP TABLE testpub_parted; UPDATE testpub_parted2 SET a = 2; DROP TABLE testpub_parted1, testpub_parted2; DROP PUBLICATION testpub_forparted, testpub_forparted1; +-- Tests for row filters +CREATE TABLE testpub_rf_tbl1 (a integer, b text); +CREATE TABLE testpub_rf_tbl2 (c text, d integer); +CREATE TABLE testpub_rf_tbl3 (e integer); +CREATE TABLE testpub_rf_tbl4 (g text); +CREATE TABLE testpub_rf_tbl5 (a xml); +CREATE SCHEMA testpub_rf_schema1; +CREATE TABLE testpub_rf_schema1.testpub_rf_tbl5 (h integer); +CREATE SCHEMA testpub_rf_schema2; +CREATE TABLE testpub_rf_schema2.testpub_rf_tbl6 (i integer); +SET client_min_messages = 'ERROR'; +-- Firstly, test using the option publish='insert' because the row filter +-- validation of referenced columns is less strict than for delete/update. +CREATE PUBLICATION testpub5 FOR TABLE testpub_rf_tbl1, testpub_rf_tbl2 WHERE (c <> 'test' AND d < 5) WITH (publish = 'insert'); +RESET client_min_messages; +\dRp+ testpub5 + Publication testpub5 + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | f | f | f | f +Tables: + "public.testpub_rf_tbl1" + "public.testpub_rf_tbl2" WHERE ((c <> 'test'::text) AND (d < 5)) + +ALTER PUBLICATION testpub5 ADD TABLE testpub_rf_tbl3 WHERE (e > 1000 AND e < 2000); +\dRp+ testpub5 + Publication testpub5 + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | f | f | f | f +Tables: + "public.testpub_rf_tbl1" + "public.testpub_rf_tbl2" WHERE ((c <> 'test'::text) AND (d < 5)) + "public.testpub_rf_tbl3" WHERE ((e > 1000) AND (e < 2000)) + +ALTER PUBLICATION testpub5 DROP TABLE testpub_rf_tbl2; +\dRp+ testpub5 + Publication testpub5 + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | f | f | f | f +Tables: + "public.testpub_rf_tbl1" + "public.testpub_rf_tbl3" WHERE ((e > 1000) AND (e < 2000)) + +-- remove testpub_rf_tbl1 and add testpub_rf_tbl3 again (another WHERE expression) +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl3 WHERE (e > 300 AND e < 500); +\dRp+ testpub5 + Publication testpub5 + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root 
+--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | f | f | f | f +Tables: + "public.testpub_rf_tbl3" WHERE ((e > 300) AND (e < 500)) + +-- test \d (now it displays filter information) +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub_rf_yes FOR TABLE testpub_rf_tbl1 WHERE (a > 1) WITH (publish = 'insert'); +CREATE PUBLICATION testpub_rf_no FOR TABLE testpub_rf_tbl1; +RESET client_min_messages; +\d testpub_rf_tbl1 + Table "public.testpub_rf_tbl1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | text | | | +Publications: + "testpub_rf_no" + "testpub_rf_yes" WHERE (a > 1) + +DROP PUBLICATION testpub_rf_yes, testpub_rf_no; +-- some more syntax tests to exercise other parser pathways +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub_syntax1 FOR TABLE testpub_rf_tbl1, ONLY testpub_rf_tbl3 WHERE (e < 999) WITH (publish = 'insert'); +RESET client_min_messages; +\dRp+ testpub_syntax1 + Publication testpub_syntax1 + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | f | f | f | f +Tables: + "public.testpub_rf_tbl1" + "public.testpub_rf_tbl3" WHERE (e < 999) + +DROP PUBLICATION testpub_syntax1; +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub_syntax2 FOR TABLE testpub_rf_tbl1, testpub_rf_schema1.testpub_rf_tbl5 WHERE (h < 999) WITH (publish = 'insert'); +RESET client_min_messages; +\dRp+ testpub_syntax2 + Publication testpub_syntax2 + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | f | f | f | f +Tables: + "public.testpub_rf_tbl1" + "testpub_rf_schema1.testpub_rf_tbl5" WHERE (h < 999) + +DROP PUBLICATION testpub_syntax2; +-- fail - schemas don't allow WHERE clause +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub_syntax3 FOR ALL TABLES IN SCHEMA testpub_rf_schema1 WHERE (a = 123); +ERROR: syntax error at or near "WHERE" +LINE 1: ...ntax3 FOR ALL TABLES IN SCHEMA testpub_rf_schema1 WHERE (a =... + ^ +CREATE PUBLICATION testpub_syntax3 FOR ALL TABLES IN SCHEMA testpub_rf_schema1, testpub_rf_schema1 WHERE (a = 123); +ERROR: WHERE clause not allowed for schema +LINE 1: ...tax3 FOR ALL TABLES IN SCHEMA testpub_rf_schema1, testpub_rf... 
+ ^ +RESET client_min_messages; +-- fail - duplicate tables are not allowed if that table has any WHERE clause +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub_dups FOR TABLE testpub_rf_tbl1 WHERE (a = 1), testpub_rf_tbl1 WITH (publish = 'insert'); +ERROR: conflicting or redundant WHERE clauses for table "testpub_rf_tbl1" +CREATE PUBLICATION testpub_dups FOR TABLE testpub_rf_tbl1, testpub_rf_tbl1 WHERE (a = 2) WITH (publish = 'insert'); +ERROR: conflicting or redundant WHERE clauses for table "testpub_rf_tbl1" +RESET client_min_messages; +-- fail - publication WHERE clause must be boolean +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl3 WHERE (1234); +ERROR: argument of PUBLICATION WHERE must be type boolean, not type integer +LINE 1: ...PUBLICATION testpub5 SET TABLE testpub_rf_tbl3 WHERE (1234); + ^ +-- fail - aggregate functions not allowed in WHERE clause +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl3 WHERE (e < AVG(e)); +ERROR: aggregate functions are not allowed in WHERE +LINE 1: ...ATION testpub5 SET TABLE testpub_rf_tbl3 WHERE (e < AVG(e)); + ^ +-- fail - user-defined operators are not allowed +CREATE FUNCTION testpub_rf_func1(integer, integer) RETURNS boolean AS $$ SELECT hashint4($1) > $2 $$ LANGUAGE SQL; +CREATE OPERATOR =#> (PROCEDURE = testpub_rf_func1, LEFTARG = integer, RIGHTARG = integer); +CREATE PUBLICATION testpub6 FOR TABLE testpub_rf_tbl3 WHERE (e =#> 27); +ERROR: invalid publication WHERE expression +LINE 1: ...ICATION testpub6 FOR TABLE testpub_rf_tbl3 WHERE (e =#> 27); + ^ +DETAIL: User-defined operators are not allowed. +-- fail - user-defined functions are not allowed +CREATE FUNCTION testpub_rf_func2() RETURNS integer AS $$ BEGIN RETURN 123; END; $$ LANGUAGE plpgsql; +ALTER PUBLICATION testpub5 ADD TABLE testpub_rf_tbl1 WHERE (a >= testpub_rf_func2()); +ERROR: invalid publication WHERE expression +LINE 1: ...ON testpub5 ADD TABLE testpub_rf_tbl1 WHERE (a >= testpub_rf... + ^ +DETAIL: User-defined or built-in mutable functions are not allowed. +-- fail - non-immutable functions are not allowed. random() is volatile. +ALTER PUBLICATION testpub5 ADD TABLE testpub_rf_tbl1 WHERE (a < random()); +ERROR: invalid publication WHERE expression +LINE 1: ...ION testpub5 ADD TABLE testpub_rf_tbl1 WHERE (a < random()); + ^ +DETAIL: User-defined or built-in mutable functions are not allowed. +-- fail - user-defined collations are not allowed +CREATE COLLATION user_collation FROM "C"; +ALTER PUBLICATION testpub5 ADD TABLE testpub_rf_tbl1 WHERE (b < '2' COLLATE user_collation); +ERROR: invalid publication WHERE expression +LINE 1: ...ICATION testpub5 ADD TABLE testpub_rf_tbl1 WHERE (b < '2' CO... + ^ +DETAIL: User-defined collations are not allowed. 
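
Taken together, the failures above pin down what a row filter may contain: it
must be a boolean expression built only from column references, constants,
built-in data types and collations, and immutable built-in operators and
functions. A minimal sketch of a filter that satisfies all of these
restrictions (rf_demo and rf_demo_pub are hypothetical names, not part of
this patch):

    CREATE TABLE rf_demo (id integer PRIMARY KEY, note text);
    CREATE PUBLICATION rf_demo_pub FOR TABLE rf_demo
        WHERE (id > 100 AND length(coalesce(note, '')) < 64)
        WITH (publish = 'insert');  -- insert-only, so the non-identity
                                    -- column "note" may appear in the filter
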
+-- ok - NULLIF is allowed +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (NULLIF(1,2) = a); +-- ok - built-in operators are allowed +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (a IS NULL); +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE ((a > 5) IS FALSE); +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (a IS DISTINCT FROM 5); +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE ((a, a + 1) < (2, 3)); +-- ok - built-in type coercions between two binary compatible datatypes are allowed +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (b::varchar < '2'); +-- ok - immutable built-in functions are allowed +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl4 WHERE (length(g) < 6); +-- fail - user-defined types are not allowed +CREATE TYPE rf_bug_status AS ENUM ('new', 'open', 'closed'); +CREATE TABLE rf_bug (id serial, description text, status rf_bug_status); +CREATE PUBLICATION testpub6 FOR TABLE rf_bug WHERE (status = 'open') WITH (publish = 'insert'); +ERROR: invalid publication WHERE expression +LINE 1: ...EATE PUBLICATION testpub6 FOR TABLE rf_bug WHERE (status = '... + ^ +DETAIL: User-defined types are not allowed. +DROP TABLE rf_bug; +DROP TYPE rf_bug_status; +-- fail - row filter expression is not simple +CREATE PUBLICATION testpub6 FOR TABLE testpub_rf_tbl1 WHERE (a IN (SELECT generate_series(1,5))); +ERROR: invalid publication WHERE expression +LINE 1: ...ICATION testpub6 FOR TABLE testpub_rf_tbl1 WHERE (a IN (SELE... + ^ +DETAIL: Expressions only allow columns, constants, built-in operators, built-in data types, built-in collations and immutable built-in functions. +-- fail - system columns are not allowed +CREATE PUBLICATION testpub6 FOR TABLE testpub_rf_tbl1 WHERE ('(0,1)'::tid = ctid); +ERROR: invalid publication WHERE expression +LINE 1: ...tpub6 FOR TABLE testpub_rf_tbl1 WHERE ('(0,1)'::tid = ctid); + ^ +DETAIL: System columns are not allowed. +-- ok - conditional expressions are allowed +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl5 WHERE (a IS DOCUMENT); +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl5 WHERE (xmlexists('//foo[text() = ''bar'']' PASSING BY VALUE a)); +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (NULLIF(1, 2) = a); +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (CASE a WHEN 5 THEN true ELSE false END); +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (COALESCE(b, 'foo') = 'foo'); +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (GREATEST(a, 10) > 10); +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (a IN (2, 4, 6)); +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (ARRAY[a] <@ ARRAY[2, 4, 6]); +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (ROW(a, 2) IS NULL); +-- fail - WHERE not allowed in DROP +ALTER PUBLICATION testpub5 DROP TABLE testpub_rf_tbl1 WHERE (e < 27); +ERROR: cannot use a WHERE clause when removing a table from a publication +-- fail - cannot ALTER SET table which is a member of a pre-existing schema +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub6 FOR ALL TABLES IN SCHEMA testpub_rf_schema2; +ALTER PUBLICATION testpub6 SET ALL TABLES IN SCHEMA testpub_rf_schema2, TABLE testpub_rf_schema2.testpub_rf_tbl6 WHERE (i < 99); +ERROR: cannot add relation "testpub_rf_schema2.testpub_rf_tbl6" to publication +DETAIL: Table's schema "testpub_rf_schema2" is already part of the publication or part of the specified schema list. 
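
The last error shows that a single publication cannot carry both a whole
schema and a row-filtered entry for a table of that schema. A sketch of the
alternative using two publications (pub_schema and pub_filtered are made-up
names); note that, as the TAP test later in this patch demonstrates, a
subscription that combines a filtered publication with an unfiltered one
covering the same table replicates the table without any filtering:

    CREATE PUBLICATION pub_schema FOR ALL TABLES IN SCHEMA testpub_rf_schema2;
    CREATE PUBLICATION pub_filtered
        FOR TABLE testpub_rf_schema2.testpub_rf_tbl6 WHERE (i < 99)
        WITH (publish = 'insert');
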
+RESET client_min_messages; +DROP TABLE testpub_rf_tbl1; +DROP TABLE testpub_rf_tbl2; +DROP TABLE testpub_rf_tbl3; +DROP TABLE testpub_rf_tbl4; +DROP TABLE testpub_rf_tbl5; +DROP TABLE testpub_rf_schema1.testpub_rf_tbl5; +DROP TABLE testpub_rf_schema2.testpub_rf_tbl6; +DROP SCHEMA testpub_rf_schema1; +DROP SCHEMA testpub_rf_schema2; +DROP PUBLICATION testpub5; +DROP PUBLICATION testpub6; +DROP OPERATOR =#>(integer, integer); +DROP FUNCTION testpub_rf_func1(integer, integer); +DROP FUNCTION testpub_rf_func2(); +DROP COLLATION user_collation; +-- ====================================================== +-- More row filter tests for validating column references +CREATE TABLE rf_tbl_abcd_nopk(a int, b int, c int, d int); +CREATE TABLE rf_tbl_abcd_pk(a int, b int, c int, d int, PRIMARY KEY(a,b)); +CREATE TABLE rf_tbl_abcd_part_pk (a int PRIMARY KEY, b int) PARTITION by RANGE (a); +CREATE TABLE rf_tbl_abcd_part_pk_1 (b int, a int PRIMARY KEY); +ALTER TABLE rf_tbl_abcd_part_pk ATTACH PARTITION rf_tbl_abcd_part_pk_1 FOR VALUES FROM (1) TO (10); +-- Case 1. REPLICA IDENTITY DEFAULT (means use primary key or nothing) +-- 1a. REPLICA IDENTITY is DEFAULT and table has a PK. +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub6 FOR TABLE rf_tbl_abcd_pk WHERE (a > 99); +RESET client_min_messages; +-- ok - "a" is a PK col +UPDATE rf_tbl_abcd_pk SET a = 1; +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (b > 99); +-- ok - "b" is a PK col +UPDATE rf_tbl_abcd_pk SET a = 1; +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (c > 99); +-- fail - "c" is not part of the PK +UPDATE rf_tbl_abcd_pk SET a = 1; +ERROR: cannot update table "rf_tbl_abcd_pk" +DETAIL: Column used in the publication WHERE expression is not part of the replica identity. +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (d > 99); +-- fail - "d" is not part of the PK +UPDATE rf_tbl_abcd_pk SET a = 1; +ERROR: cannot update table "rf_tbl_abcd_pk" +DETAIL: Column used in the publication WHERE expression is not part of the replica identity. +-- 1b. REPLICA IDENTITY is DEFAULT and table has no PK +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk WHERE (a > 99); +-- fail - "a" is not part of REPLICA IDENTITY +UPDATE rf_tbl_abcd_nopk SET a = 1; +ERROR: cannot update table "rf_tbl_abcd_nopk" +DETAIL: Column used in the publication WHERE expression is not part of the replica identity. +-- Case 2. REPLICA IDENTITY FULL +ALTER TABLE rf_tbl_abcd_pk REPLICA IDENTITY FULL; +ALTER TABLE rf_tbl_abcd_nopk REPLICA IDENTITY FULL; +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (c > 99); +-- ok - "c" is in REPLICA IDENTITY now even though not in PK +UPDATE rf_tbl_abcd_pk SET a = 1; +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk WHERE (a > 99); +-- ok - "a" is in REPLICA IDENTITY now +UPDATE rf_tbl_abcd_nopk SET a = 1; +-- Case 3. REPLICA IDENTITY NOTHING +ALTER TABLE rf_tbl_abcd_pk REPLICA IDENTITY NOTHING; +ALTER TABLE rf_tbl_abcd_nopk REPLICA IDENTITY NOTHING; +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (a > 99); +-- fail - "a" is in PK but it is not part of REPLICA IDENTITY NOTHING +UPDATE rf_tbl_abcd_pk SET a = 1; +ERROR: cannot update table "rf_tbl_abcd_pk" +DETAIL: Column used in the publication WHERE expression is not part of the replica identity. 
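
Cases 1-3 all follow one rule: when the publication replicates UPDATE or
DELETE, the row filter may reference only columns covered by the table's
replica identity, so REPLICA IDENTITY FULL admits any column and REPLICA
IDENTITY NOTHING admits none. A compact sketch of the rule (rf_ri_demo and
rf_ri_pub are hypothetical names):

    CREATE TABLE rf_ri_demo (a int PRIMARY KEY, b int);
    CREATE PUBLICATION rf_ri_pub FOR TABLE rf_ri_demo WHERE (b > 0);
    UPDATE rf_ri_demo SET b = 1;    -- fails: "b" is not in the replica identity
    ALTER TABLE rf_ri_demo REPLICA IDENTITY FULL;
    UPDATE rf_ri_demo SET b = 1;    -- ok: every column is now part of the identity
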
+ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (c > 99); +-- fail - "c" is not in PK and not in REPLICA IDENTITY NOTHING +UPDATE rf_tbl_abcd_pk SET a = 1; +ERROR: cannot update table "rf_tbl_abcd_pk" +DETAIL: Column used in the publication WHERE expression is not part of the replica identity. +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk WHERE (a > 99); +-- fail - "a" is not in REPLICA IDENTITY NOTHING +UPDATE rf_tbl_abcd_nopk SET a = 1; +ERROR: cannot update table "rf_tbl_abcd_nopk" +DETAIL: Column used in the publication WHERE expression is not part of the replica identity. +-- Case 4. REPLICA IDENTITY INDEX +ALTER TABLE rf_tbl_abcd_pk ALTER COLUMN c SET NOT NULL; +CREATE UNIQUE INDEX idx_abcd_pk_c ON rf_tbl_abcd_pk(c); +ALTER TABLE rf_tbl_abcd_pk REPLICA IDENTITY USING INDEX idx_abcd_pk_c; +ALTER TABLE rf_tbl_abcd_nopk ALTER COLUMN c SET NOT NULL; +CREATE UNIQUE INDEX idx_abcd_nopk_c ON rf_tbl_abcd_nopk(c); +ALTER TABLE rf_tbl_abcd_nopk REPLICA IDENTITY USING INDEX idx_abcd_nopk_c; +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (a > 99); +-- fail - "a" is in PK but it is not part of REPLICA IDENTITY INDEX +UPDATE rf_tbl_abcd_pk SET a = 1; +ERROR: cannot update table "rf_tbl_abcd_pk" +DETAIL: Column used in the publication WHERE expression is not part of the replica identity. +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (c > 99); +-- ok - "c" is not in PK but it is part of REPLICA IDENTITY INDEX +UPDATE rf_tbl_abcd_pk SET a = 1; +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk WHERE (a > 99); +-- fail - "a" is not in REPLICA IDENTITY INDEX +UPDATE rf_tbl_abcd_nopk SET a = 1; +ERROR: cannot update table "rf_tbl_abcd_nopk" +DETAIL: Column used in the publication WHERE expression is not part of the replica identity. +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk WHERE (c > 99); +-- ok - "c" is part of REPLICA IDENTITY INDEX +UPDATE rf_tbl_abcd_nopk SET a = 1; +-- Tests for partitioned table +-- set PUBLISH_VIA_PARTITION_ROOT to false and test row filter for partitioned +-- table +ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0); +-- fail - cannot use row filter for partitioned table +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk WHERE (a > 99); +ERROR: cannot use publication WHERE clause for relation "rf_tbl_abcd_part_pk" +DETAIL: WHERE clause cannot be used for a partitioned table when publish_via_partition_root is false. +-- ok - can use row filter for partition +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk_1 WHERE (a > 99); +-- ok - "a" is a PK col +UPDATE rf_tbl_abcd_part_pk SET a = 1; +-- set PUBLISH_VIA_PARTITION_ROOT to true and test row filter for partitioned +-- table +ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=1); +-- ok - can use row filter for partitioned table +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk WHERE (a > 99); +-- ok - "a" is a PK col +UPDATE rf_tbl_abcd_part_pk SET a = 1; +-- fail - cannot set PUBLISH_VIA_PARTITION_ROOT to false if any row filter is +-- used for partitioned table +ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0); +ERROR: cannot set publish_via_partition_root = false for publication "testpub6" +DETAIL: The publication contains a WHERE clause for a partitioned table "rf_tbl_abcd_part_pk" which is not allowed when publish_via_partition_root is false. 
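
The partitioned-table cases reduce to which relation's filter is in force. A
sketch under the same rules (part_root, part_1, and the publication names are
hypothetical):

    CREATE TABLE part_root (a int PRIMARY KEY) PARTITION BY RANGE (a);
    CREATE TABLE part_1 PARTITION OF part_root FOR VALUES FROM (1) TO (10);
    -- publish_via_partition_root = true: a filter may be attached to the
    -- partitioned root, and that filter is the one applied
    CREATE PUBLICATION pub_root FOR TABLE part_root WHERE (a > 5)
        WITH (publish_via_partition_root = true);
    -- publish_via_partition_root = false (the default): only leaf
    -- partitions may carry a row filter
    CREATE PUBLICATION pub_leaf FOR TABLE part_1 WHERE (a > 5);
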
+-- Now change the root filter to use a column "b" +-- (which is not in the replica identity) +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk_1 WHERE (b > 99); +-- ok - we don't have row filter for partitioned table. +ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0); +-- fail - "b" is not in REPLICA IDENTITY INDEX +UPDATE rf_tbl_abcd_part_pk SET a = 1; +ERROR: cannot update table "rf_tbl_abcd_part_pk_1" +DETAIL: Column used in the publication WHERE expression is not part of the replica identity. +-- set PUBLISH_VIA_PARTITION_ROOT to true +-- can use row filter for partitioned table +ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=1); +-- ok - can use row filter for partitioned table +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk WHERE (b > 99); +-- fail - "b" is not in REPLICA IDENTITY INDEX +UPDATE rf_tbl_abcd_part_pk SET a = 1; +ERROR: cannot update table "rf_tbl_abcd_part_pk_1" +DETAIL: Column used in the publication WHERE expression is not part of the replica identity. +DROP PUBLICATION testpub6; +DROP TABLE rf_tbl_abcd_pk; +DROP TABLE rf_tbl_abcd_nopk; +DROP TABLE rf_tbl_abcd_part_pk; +-- ====================================================== -- Test cache invalidation FOR ALL TABLES publication SET client_min_messages = 'ERROR'; CREATE TABLE testpub_tbl4(a int); diff --git a/src/test/regress/sql/publication.sql b/src/test/regress/sql/publication.sql index 86c019bddb..3f04d34264 100644 --- a/src/test/regress/sql/publication.sql +++ b/src/test/regress/sql/publication.sql @@ -134,6 +134,242 @@ UPDATE testpub_parted2 SET a = 2; DROP TABLE testpub_parted1, testpub_parted2; DROP PUBLICATION testpub_forparted, testpub_forparted1; +-- Tests for row filters +CREATE TABLE testpub_rf_tbl1 (a integer, b text); +CREATE TABLE testpub_rf_tbl2 (c text, d integer); +CREATE TABLE testpub_rf_tbl3 (e integer); +CREATE TABLE testpub_rf_tbl4 (g text); +CREATE TABLE testpub_rf_tbl5 (a xml); +CREATE SCHEMA testpub_rf_schema1; +CREATE TABLE testpub_rf_schema1.testpub_rf_tbl5 (h integer); +CREATE SCHEMA testpub_rf_schema2; +CREATE TABLE testpub_rf_schema2.testpub_rf_tbl6 (i integer); +SET client_min_messages = 'ERROR'; +-- Firstly, test using the option publish='insert' because the row filter +-- validation of referenced columns is less strict than for delete/update. 
+CREATE PUBLICATION testpub5 FOR TABLE testpub_rf_tbl1, testpub_rf_tbl2 WHERE (c <> 'test' AND d < 5) WITH (publish = 'insert'); +RESET client_min_messages; +\dRp+ testpub5 +ALTER PUBLICATION testpub5 ADD TABLE testpub_rf_tbl3 WHERE (e > 1000 AND e < 2000); +\dRp+ testpub5 +ALTER PUBLICATION testpub5 DROP TABLE testpub_rf_tbl2; +\dRp+ testpub5 +-- remove testpub_rf_tbl1 and add testpub_rf_tbl3 again (another WHERE expression) +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl3 WHERE (e > 300 AND e < 500); +\dRp+ testpub5 +-- test \d (now it displays filter information) +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub_rf_yes FOR TABLE testpub_rf_tbl1 WHERE (a > 1) WITH (publish = 'insert'); +CREATE PUBLICATION testpub_rf_no FOR TABLE testpub_rf_tbl1; +RESET client_min_messages; +\d testpub_rf_tbl1 +DROP PUBLICATION testpub_rf_yes, testpub_rf_no; +-- some more syntax tests to exercise other parser pathways +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub_syntax1 FOR TABLE testpub_rf_tbl1, ONLY testpub_rf_tbl3 WHERE (e < 999) WITH (publish = 'insert'); +RESET client_min_messages; +\dRp+ testpub_syntax1 +DROP PUBLICATION testpub_syntax1; +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub_syntax2 FOR TABLE testpub_rf_tbl1, testpub_rf_schema1.testpub_rf_tbl5 WHERE (h < 999) WITH (publish = 'insert'); +RESET client_min_messages; +\dRp+ testpub_syntax2 +DROP PUBLICATION testpub_syntax2; +-- fail - schemas don't allow WHERE clause +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub_syntax3 FOR ALL TABLES IN SCHEMA testpub_rf_schema1 WHERE (a = 123); +CREATE PUBLICATION testpub_syntax3 FOR ALL TABLES IN SCHEMA testpub_rf_schema1, testpub_rf_schema1 WHERE (a = 123); +RESET client_min_messages; +-- fail - duplicate tables are not allowed if that table has any WHERE clause +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub_dups FOR TABLE testpub_rf_tbl1 WHERE (a = 1), testpub_rf_tbl1 WITH (publish = 'insert'); +CREATE PUBLICATION testpub_dups FOR TABLE testpub_rf_tbl1, testpub_rf_tbl1 WHERE (a = 2) WITH (publish = 'insert'); +RESET client_min_messages; +-- fail - publication WHERE clause must be boolean +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl3 WHERE (1234); +-- fail - aggregate functions not allowed in WHERE clause +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl3 WHERE (e < AVG(e)); +-- fail - user-defined operators are not allowed +CREATE FUNCTION testpub_rf_func1(integer, integer) RETURNS boolean AS $$ SELECT hashint4($1) > $2 $$ LANGUAGE SQL; +CREATE OPERATOR =#> (PROCEDURE = testpub_rf_func1, LEFTARG = integer, RIGHTARG = integer); +CREATE PUBLICATION testpub6 FOR TABLE testpub_rf_tbl3 WHERE (e =#> 27); +-- fail - user-defined functions are not allowed +CREATE FUNCTION testpub_rf_func2() RETURNS integer AS $$ BEGIN RETURN 123; END; $$ LANGUAGE plpgsql; +ALTER PUBLICATION testpub5 ADD TABLE testpub_rf_tbl1 WHERE (a >= testpub_rf_func2()); +-- fail - non-immutable functions are not allowed. random() is volatile. 
+ALTER PUBLICATION testpub5 ADD TABLE testpub_rf_tbl1 WHERE (a < random()); +-- fail - user-defined collations are not allowed +CREATE COLLATION user_collation FROM "C"; +ALTER PUBLICATION testpub5 ADD TABLE testpub_rf_tbl1 WHERE (b < '2' COLLATE user_collation); +-- ok - NULLIF is allowed +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (NULLIF(1,2) = a); +-- ok - built-in operators are allowed +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (a IS NULL); +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE ((a > 5) IS FALSE); +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (a IS DISTINCT FROM 5); +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE ((a, a + 1) < (2, 3)); +-- ok - built-in type coercions between two binary compatible datatypes are allowed +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (b::varchar < '2'); +-- ok - immutable built-in functions are allowed +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl4 WHERE (length(g) < 6); +-- fail - user-defined types are not allowed +CREATE TYPE rf_bug_status AS ENUM ('new', 'open', 'closed'); +CREATE TABLE rf_bug (id serial, description text, status rf_bug_status); +CREATE PUBLICATION testpub6 FOR TABLE rf_bug WHERE (status = 'open') WITH (publish = 'insert'); +DROP TABLE rf_bug; +DROP TYPE rf_bug_status; +-- fail - row filter expression is not simple +CREATE PUBLICATION testpub6 FOR TABLE testpub_rf_tbl1 WHERE (a IN (SELECT generate_series(1,5))); +-- fail - system columns are not allowed +CREATE PUBLICATION testpub6 FOR TABLE testpub_rf_tbl1 WHERE ('(0,1)'::tid = ctid); +-- ok - conditional expressions are allowed +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl5 WHERE (a IS DOCUMENT); +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl5 WHERE (xmlexists('//foo[text() = ''bar'']' PASSING BY VALUE a)); +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (NULLIF(1, 2) = a); +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (CASE a WHEN 5 THEN true ELSE false END); +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (COALESCE(b, 'foo') = 'foo'); +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (GREATEST(a, 10) > 10); +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (a IN (2, 4, 6)); +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (ARRAY[a] <@ ARRAY[2, 4, 6]); +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (ROW(a, 2) IS NULL); +-- fail - WHERE not allowed in DROP +ALTER PUBLICATION testpub5 DROP TABLE testpub_rf_tbl1 WHERE (e < 27); +-- fail - cannot ALTER SET table which is a member of a pre-existing schema +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub6 FOR ALL TABLES IN SCHEMA testpub_rf_schema2; +ALTER PUBLICATION testpub6 SET ALL TABLES IN SCHEMA testpub_rf_schema2, TABLE testpub_rf_schema2.testpub_rf_tbl6 WHERE (i < 99); +RESET client_min_messages; + +DROP TABLE testpub_rf_tbl1; +DROP TABLE testpub_rf_tbl2; +DROP TABLE testpub_rf_tbl3; +DROP TABLE testpub_rf_tbl4; +DROP TABLE testpub_rf_tbl5; +DROP TABLE testpub_rf_schema1.testpub_rf_tbl5; +DROP TABLE testpub_rf_schema2.testpub_rf_tbl6; +DROP SCHEMA testpub_rf_schema1; +DROP SCHEMA testpub_rf_schema2; +DROP PUBLICATION testpub5; +DROP PUBLICATION testpub6; +DROP OPERATOR =#>(integer, integer); +DROP FUNCTION testpub_rf_func1(integer, integer); +DROP FUNCTION testpub_rf_func2(); +DROP COLLATION user_collation; + +-- ====================================================== +-- More row filter tests for validating column 
references +CREATE TABLE rf_tbl_abcd_nopk(a int, b int, c int, d int); +CREATE TABLE rf_tbl_abcd_pk(a int, b int, c int, d int, PRIMARY KEY(a,b)); +CREATE TABLE rf_tbl_abcd_part_pk (a int PRIMARY KEY, b int) PARTITION by RANGE (a); +CREATE TABLE rf_tbl_abcd_part_pk_1 (b int, a int PRIMARY KEY); +ALTER TABLE rf_tbl_abcd_part_pk ATTACH PARTITION rf_tbl_abcd_part_pk_1 FOR VALUES FROM (1) TO (10); + +-- Case 1. REPLICA IDENTITY DEFAULT (means use primary key or nothing) +-- 1a. REPLICA IDENTITY is DEFAULT and table has a PK. +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub6 FOR TABLE rf_tbl_abcd_pk WHERE (a > 99); +RESET client_min_messages; +-- ok - "a" is a PK col +UPDATE rf_tbl_abcd_pk SET a = 1; +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (b > 99); +-- ok - "b" is a PK col +UPDATE rf_tbl_abcd_pk SET a = 1; +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (c > 99); +-- fail - "c" is not part of the PK +UPDATE rf_tbl_abcd_pk SET a = 1; +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (d > 99); +-- fail - "d" is not part of the PK +UPDATE rf_tbl_abcd_pk SET a = 1; +-- 1b. REPLICA IDENTITY is DEFAULT and table has no PK +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk WHERE (a > 99); +-- fail - "a" is not part of REPLICA IDENTITY +UPDATE rf_tbl_abcd_nopk SET a = 1; + +-- Case 2. REPLICA IDENTITY FULL +ALTER TABLE rf_tbl_abcd_pk REPLICA IDENTITY FULL; +ALTER TABLE rf_tbl_abcd_nopk REPLICA IDENTITY FULL; +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (c > 99); +-- ok - "c" is in REPLICA IDENTITY now even though not in PK +UPDATE rf_tbl_abcd_pk SET a = 1; +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk WHERE (a > 99); +-- ok - "a" is in REPLICA IDENTITY now +UPDATE rf_tbl_abcd_nopk SET a = 1; + +-- Case 3. REPLICA IDENTITY NOTHING +ALTER TABLE rf_tbl_abcd_pk REPLICA IDENTITY NOTHING; +ALTER TABLE rf_tbl_abcd_nopk REPLICA IDENTITY NOTHING; +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (a > 99); +-- fail - "a" is in PK but it is not part of REPLICA IDENTITY NOTHING +UPDATE rf_tbl_abcd_pk SET a = 1; +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (c > 99); +-- fail - "c" is not in PK and not in REPLICA IDENTITY NOTHING +UPDATE rf_tbl_abcd_pk SET a = 1; +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk WHERE (a > 99); +-- fail - "a" is not in REPLICA IDENTITY NOTHING +UPDATE rf_tbl_abcd_nopk SET a = 1; + +-- Case 4. 
REPLICA IDENTITY INDEX +ALTER TABLE rf_tbl_abcd_pk ALTER COLUMN c SET NOT NULL; +CREATE UNIQUE INDEX idx_abcd_pk_c ON rf_tbl_abcd_pk(c); +ALTER TABLE rf_tbl_abcd_pk REPLICA IDENTITY USING INDEX idx_abcd_pk_c; +ALTER TABLE rf_tbl_abcd_nopk ALTER COLUMN c SET NOT NULL; +CREATE UNIQUE INDEX idx_abcd_nopk_c ON rf_tbl_abcd_nopk(c); +ALTER TABLE rf_tbl_abcd_nopk REPLICA IDENTITY USING INDEX idx_abcd_nopk_c; +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (a > 99); +-- fail - "a" is in PK but it is not part of REPLICA IDENTITY INDEX +UPDATE rf_tbl_abcd_pk SET a = 1; +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (c > 99); +-- ok - "c" is not in PK but it is part of REPLICA IDENTITY INDEX +UPDATE rf_tbl_abcd_pk SET a = 1; +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk WHERE (a > 99); +-- fail - "a" is not in REPLICA IDENTITY INDEX +UPDATE rf_tbl_abcd_nopk SET a = 1; +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk WHERE (c > 99); +-- ok - "c" is part of REPLICA IDENTITY INDEX +UPDATE rf_tbl_abcd_nopk SET a = 1; + +-- Tests for partitioned table + +-- set PUBLISH_VIA_PARTITION_ROOT to false and test row filter for partitioned +-- table +ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0); +-- fail - cannot use row filter for partitioned table +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk WHERE (a > 99); +-- ok - can use row filter for partition +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk_1 WHERE (a > 99); +-- ok - "a" is a PK col +UPDATE rf_tbl_abcd_part_pk SET a = 1; +-- set PUBLISH_VIA_PARTITION_ROOT to true and test row filter for partitioned +-- table +ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=1); +-- ok - can use row filter for partitioned table +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk WHERE (a > 99); +-- ok - "a" is a PK col +UPDATE rf_tbl_abcd_part_pk SET a = 1; +-- fail - cannot set PUBLISH_VIA_PARTITION_ROOT to false if any row filter is +-- used for partitioned table +ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0); +-- Now change the root filter to use a column "b" +-- (which is not in the replica identity) +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk_1 WHERE (b > 99); +-- ok - we don't have row filter for partitioned table. 
+ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0);
+-- fail - "b" is not in REPLICA IDENTITY INDEX
+UPDATE rf_tbl_abcd_part_pk SET a = 1;
+-- set PUBLISH_VIA_PARTITION_ROOT to true
+-- can use row filter for partitioned table
+ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=1);
+-- ok - can use row filter for partitioned table
+ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk WHERE (b > 99);
+-- fail - "b" is not in REPLICA IDENTITY INDEX
+UPDATE rf_tbl_abcd_part_pk SET a = 1;
+
+DROP PUBLICATION testpub6;
+DROP TABLE rf_tbl_abcd_pk;
+DROP TABLE rf_tbl_abcd_nopk;
+DROP TABLE rf_tbl_abcd_part_pk;
+-- ======================================================
+
 -- Test cache invalidation FOR ALL TABLES publication
 SET client_min_messages = 'ERROR';
 CREATE TABLE testpub_tbl4(a int);
diff --git a/src/test/subscription/t/028_row_filter.pl b/src/test/subscription/t/028_row_filter.pl
new file mode 100644
index 0000000000..88dc865829
--- /dev/null
+++ b/src/test/subscription/t/028_row_filter.pl
@@ -0,0 +1,695 @@
+# Copyright (c) 2021-2022, PostgreSQL Global Development Group
+
+# Test logical replication behavior with row filtering
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+# create publisher node
+my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher->init(allows_streaming => 'logical');
+$node_publisher->start;
+
+# create subscriber node
+my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
+$node_subscriber->init(allows_streaming => 'logical');
+$node_subscriber->start;
+
+my $synced_query =
+  "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');";
+
+my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
+my $appname = 'tap_sub';
+
+# ====================================================================
+# Testcase start: FOR ALL TABLES
+#
+# The FOR ALL TABLES test must come first so that it is not affected by
+# all the other test tables that are later created.
+
+# create tables pub and sub
+$node_publisher->safe_psql('postgres',
+	"CREATE TABLE tab_rf_x (x int primary key)");
+$node_subscriber->safe_psql('postgres',
+	"CREATE TABLE tab_rf_x (x int primary key)");
+
+# insert some initial data
+$node_publisher->safe_psql('postgres',
+	"INSERT INTO tab_rf_x (x) VALUES (0), (5), (10), (15), (20)");
+
+# create pub/sub
+$node_publisher->safe_psql('postgres',
+	"CREATE PUBLICATION tap_pub_x FOR TABLE tab_rf_x WHERE (x > 10)");
+$node_publisher->safe_psql('postgres',
+	"CREATE PUBLICATION tap_pub_forall FOR ALL TABLES");
+$node_subscriber->safe_psql('postgres',
+	"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub_x, tap_pub_forall"
+);
+
+$node_publisher->wait_for_catchup($appname);
+# wait for initial table synchronization to finish
+$node_subscriber->poll_query_until('postgres', $synced_query)
+  or die "Timed out while waiting for subscriber to synchronize data";
+
+# The subscription of the FOR ALL TABLES publication means there should be no
+# filtering on the tablesync COPY, so expect all 5 will be present.
+my $result =
+  $node_subscriber->safe_psql('postgres', "SELECT count(x) FROM tab_rf_x");
+is($result, qq(5),
+	'check initial data copy from table tab_rf_x should not be filtered');
+
+# Similarly, the table filter for tab_rf_x (after the initial phase) has no
+# effect when combined with the ALL TABLES.
+# Expected: 5 initial rows + 2 new rows = 7 rows +$node_publisher->safe_psql('postgres', + "INSERT INTO tab_rf_x (x) VALUES (-99), (99)"); +$node_publisher->wait_for_catchup($appname); +$result = + $node_subscriber->safe_psql('postgres', "SELECT count(x) FROM tab_rf_x"); +is($result, qq(7), 'check table tab_rf_x should not be filtered'); + +# cleanup pub +$node_publisher->safe_psql('postgres', "DROP PUBLICATION tap_pub_forall"); +$node_publisher->safe_psql('postgres', "DROP PUBLICATION tap_pub_x"); +$node_publisher->safe_psql('postgres', "DROP TABLE tab_rf_x"); +# cleanup sub +$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub"); +$node_subscriber->safe_psql('postgres', "DROP TABLE tab_rf_x"); + +# Testcase end: FOR ALL TABLES +# ==================================================================== + +# ==================================================================== +# Testcase start: ALL TABLES IN SCHEMA +# +# The ALL TABLES IN SCHEMA test is independent of all other test cases so it +# cleans up after itself. + +# create tables pub and sub +$node_publisher->safe_psql('postgres', "CREATE SCHEMA schema_rf_x"); +$node_publisher->safe_psql('postgres', + "CREATE TABLE schema_rf_x.tab_rf_x (x int primary key)"); +$node_publisher->safe_psql('postgres', + "CREATE TABLE schema_rf_x.tab_rf_partitioned (x int primary key) PARTITION BY RANGE(x)" +); +$node_publisher->safe_psql('postgres', + "CREATE TABLE public.tab_rf_partition (LIKE schema_rf_x.tab_rf_partitioned)" +); +$node_publisher->safe_psql('postgres', + "ALTER TABLE schema_rf_x.tab_rf_partitioned ATTACH PARTITION public.tab_rf_partition DEFAULT" +); +$node_subscriber->safe_psql('postgres', "CREATE SCHEMA schema_rf_x"); +$node_subscriber->safe_psql('postgres', + "CREATE TABLE schema_rf_x.tab_rf_x (x int primary key)"); +$node_subscriber->safe_psql('postgres', + "CREATE TABLE schema_rf_x.tab_rf_partitioned (x int primary key) PARTITION BY RANGE(x)" +); +$node_subscriber->safe_psql('postgres', + "CREATE TABLE public.tab_rf_partition (LIKE schema_rf_x.tab_rf_partitioned)" +); +$node_subscriber->safe_psql('postgres', + "ALTER TABLE schema_rf_x.tab_rf_partitioned ATTACH PARTITION public.tab_rf_partition DEFAULT" +); + +# insert some initial data +$node_publisher->safe_psql('postgres', + "INSERT INTO schema_rf_x.tab_rf_x (x) VALUES (0), (5), (10), (15), (20)"); +$node_publisher->safe_psql('postgres', + "INSERT INTO schema_rf_x.tab_rf_partitioned (x) VALUES (1), (20)"); + +# create pub/sub +$node_publisher->safe_psql('postgres', + "CREATE PUBLICATION tap_pub_x FOR TABLE schema_rf_x.tab_rf_x WHERE (x > 10)" +); +$node_publisher->safe_psql('postgres', + "CREATE PUBLICATION tap_pub_allinschema FOR ALL TABLES IN SCHEMA schema_rf_x" +); +$node_publisher->safe_psql('postgres', + "ALTER PUBLICATION tap_pub_allinschema ADD TABLE public.tab_rf_partition WHERE (x > 10)" +); +$node_subscriber->safe_psql('postgres', + "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub_x, tap_pub_allinschema" +); + +$node_publisher->wait_for_catchup($appname); +# wait for initial table synchronization to finish +$node_subscriber->poll_query_until('postgres', $synced_query) + or die "Timed out while waiting for subscriber to synchronize data"; + +# The subscription of the ALL TABLES IN SCHEMA publication means there should be +# no filtering on the tablesync COPY, so expect all 5 will be present. 
+$result = $node_subscriber->safe_psql('postgres', + "SELECT count(x) FROM schema_rf_x.tab_rf_x"); +is($result, qq(5), + 'check initial data copy from table tab_rf_x should not be filtered'); + +# Similarly, the table filter for tab_rf_x (after the initial phase) has no +# effect when combined with the ALL TABLES IN SCHEMA. Meanwhile, the filter for +# the tab_rf_partition does work because that partition belongs to a different +# schema (and publish_via_partition_root = false). +# Expected: +# tab_rf_x : 5 initial rows + 2 new rows = 7 rows +# tab_rf_partition : 1 initial row + 1 new row = 2 rows +$node_publisher->safe_psql('postgres', + "INSERT INTO schema_rf_x.tab_rf_x (x) VALUES (-99), (99)"); +$node_publisher->safe_psql('postgres', + "INSERT INTO schema_rf_x.tab_rf_partitioned (x) VALUES (5), (25)"); +$node_publisher->wait_for_catchup($appname); +$result = $node_subscriber->safe_psql('postgres', + "SELECT count(x) FROM schema_rf_x.tab_rf_x"); +is($result, qq(7), 'check table tab_rf_x should not be filtered'); +$result = $node_subscriber->safe_psql('postgres', + "SELECT * FROM public.tab_rf_partition"); +is( $result, qq(20 +25), 'check table tab_rf_partition should be filtered'); + +# cleanup pub +$node_publisher->safe_psql('postgres', + "DROP PUBLICATION tap_pub_allinschema"); +$node_publisher->safe_psql('postgres', "DROP PUBLICATION tap_pub_x"); +$node_publisher->safe_psql('postgres', "DROP TABLE public.tab_rf_partition"); +$node_publisher->safe_psql('postgres', + "DROP TABLE schema_rf_x.tab_rf_partitioned"); +$node_publisher->safe_psql('postgres', "DROP TABLE schema_rf_x.tab_rf_x"); +$node_publisher->safe_psql('postgres', "DROP SCHEMA schema_rf_x"); +# cleanup sub +$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub"); +$node_subscriber->safe_psql('postgres', "DROP TABLE public.tab_rf_partition"); +$node_subscriber->safe_psql('postgres', + "DROP TABLE schema_rf_x.tab_rf_partitioned"); +$node_subscriber->safe_psql('postgres', "DROP TABLE schema_rf_x.tab_rf_x"); +$node_subscriber->safe_psql('postgres', "DROP SCHEMA schema_rf_x"); + +# Testcase end: ALL TABLES IN SCHEMA +# ==================================================================== + +# ====================================================== +# Testcase start: FOR TABLE with row filter publications + +# setup structure on publisher +$node_publisher->safe_psql('postgres', + "CREATE TABLE tab_rowfilter_1 (a int primary key, b text)"); +$node_publisher->safe_psql('postgres', + "ALTER TABLE tab_rowfilter_1 REPLICA IDENTITY FULL;"); +$node_publisher->safe_psql('postgres', + "CREATE TABLE tab_rowfilter_2 (c int primary key)"); +$node_publisher->safe_psql('postgres', + "CREATE TABLE tab_rowfilter_3 (a int primary key, b boolean)"); +$node_publisher->safe_psql('postgres', + "CREATE TABLE tab_rowfilter_4 (c int primary key)"); +$node_publisher->safe_psql('postgres', + "CREATE TABLE tab_rowfilter_partitioned (a int primary key, b integer) PARTITION BY RANGE(a)" +); +$node_publisher->safe_psql('postgres', + "CREATE TABLE tab_rowfilter_less_10k (LIKE tab_rowfilter_partitioned)"); +$node_publisher->safe_psql('postgres', + "ALTER TABLE tab_rowfilter_partitioned ATTACH PARTITION tab_rowfilter_less_10k FOR VALUES FROM (MINVALUE) TO (10000)" +); +$node_publisher->safe_psql('postgres', + "CREATE TABLE tab_rowfilter_greater_10k (LIKE tab_rowfilter_partitioned)" +); +$node_publisher->safe_psql('postgres', + "ALTER TABLE tab_rowfilter_partitioned ATTACH PARTITION tab_rowfilter_greater_10k FOR VALUES FROM (10000) TO (MAXVALUE)" +); 
+$node_publisher->safe_psql('postgres', + "CREATE TABLE tab_rowfilter_partitioned_2 (a int primary key, b integer) PARTITION BY RANGE(a)" +); +$node_publisher->safe_psql('postgres', + "CREATE TABLE tab_rowfilter_partition (LIKE tab_rowfilter_partitioned_2)" +); +$node_publisher->safe_psql('postgres', + "ALTER TABLE tab_rowfilter_partitioned_2 ATTACH PARTITION tab_rowfilter_partition DEFAULT" +); +$node_publisher->safe_psql('postgres', + "CREATE TABLE tab_rowfilter_toast (a text NOT NULL, b text NOT NULL)"); +$node_publisher->safe_psql('postgres', + "ALTER TABLE tab_rowfilter_toast ALTER COLUMN a SET STORAGE EXTERNAL"); +$node_publisher->safe_psql('postgres', + "CREATE UNIQUE INDEX tab_rowfilter_toast_ri_index on tab_rowfilter_toast (a, b)" +); +$node_publisher->safe_psql('postgres', + "ALTER TABLE tab_rowfilter_toast REPLICA IDENTITY USING INDEX tab_rowfilter_toast_ri_index" +); +$node_publisher->safe_psql('postgres', + "CREATE TABLE tab_rowfilter_inherited (a int)"); +$node_publisher->safe_psql('postgres', + "CREATE TABLE tab_rowfilter_child (b text) INHERITS (tab_rowfilter_inherited)" +); + +# setup structure on subscriber +$node_subscriber->safe_psql('postgres', + "CREATE TABLE tab_rowfilter_1 (a int primary key, b text)"); +$node_subscriber->safe_psql('postgres', + "CREATE TABLE tab_rowfilter_2 (c int primary key)"); +$node_subscriber->safe_psql('postgres', + "CREATE TABLE tab_rowfilter_3 (a int primary key, b boolean)"); +$node_subscriber->safe_psql('postgres', + "CREATE TABLE tab_rowfilter_4 (c int primary key)"); +$node_subscriber->safe_psql('postgres', + "CREATE TABLE tab_rowfilter_partitioned (a int primary key, b integer) PARTITION BY RANGE(a)" +); +$node_subscriber->safe_psql('postgres', + "CREATE TABLE tab_rowfilter_less_10k (LIKE tab_rowfilter_partitioned)"); +$node_subscriber->safe_psql('postgres', + "ALTER TABLE tab_rowfilter_partitioned ATTACH PARTITION tab_rowfilter_less_10k FOR VALUES FROM (MINVALUE) TO (10000)" +); +$node_subscriber->safe_psql('postgres', + "CREATE TABLE tab_rowfilter_greater_10k (LIKE tab_rowfilter_partitioned)" +); +$node_subscriber->safe_psql('postgres', + "ALTER TABLE tab_rowfilter_partitioned ATTACH PARTITION tab_rowfilter_greater_10k FOR VALUES FROM (10000) TO (MAXVALUE)" +); +$node_subscriber->safe_psql('postgres', + "CREATE TABLE tab_rowfilter_partitioned_2 (a int primary key, b integer) PARTITION BY RANGE(a)" +); +$node_subscriber->safe_psql('postgres', + "CREATE TABLE tab_rowfilter_partition (LIKE tab_rowfilter_partitioned_2)" +); +$node_subscriber->safe_psql('postgres', + "ALTER TABLE tab_rowfilter_partitioned_2 ATTACH PARTITION tab_rowfilter_partition DEFAULT" +); +$node_subscriber->safe_psql('postgres', + "CREATE TABLE tab_rowfilter_toast (a text NOT NULL, b text NOT NULL)"); +$node_subscriber->safe_psql('postgres', + "CREATE UNIQUE INDEX tab_rowfilter_toast_ri_index on tab_rowfilter_toast (a, b)" +); +$node_subscriber->safe_psql('postgres', + "ALTER TABLE tab_rowfilter_toast REPLICA IDENTITY USING INDEX tab_rowfilter_toast_ri_index" +); +$node_subscriber->safe_psql('postgres', + "CREATE TABLE tab_rowfilter_inherited (a int)"); +$node_subscriber->safe_psql('postgres', + "CREATE TABLE tab_rowfilter_child (b text) INHERITS (tab_rowfilter_inherited)" +); + +# setup logical replication +$node_publisher->safe_psql('postgres', + "CREATE PUBLICATION tap_pub_1 FOR TABLE tab_rowfilter_1 WHERE (a > 1000 AND b <> 'filtered')" +); + +$node_publisher->safe_psql('postgres', + "ALTER PUBLICATION tap_pub_1 ADD TABLE tab_rowfilter_2 WHERE (c % 7 = 0)" +); + 
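+# Note: SET TABLE below replaces the publication's entire table list, so the
+# (c % 7 = 0) filter added above is superseded by (c % 2 = 0).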
+$node_publisher->safe_psql('postgres',
+	"ALTER PUBLICATION tap_pub_1 SET TABLE tab_rowfilter_1 WHERE (a > 1000 AND b <> 'filtered'), tab_rowfilter_2 WHERE (c % 2 = 0), tab_rowfilter_3"
+);
+
+$node_publisher->safe_psql('postgres',
+	"CREATE PUBLICATION tap_pub_2 FOR TABLE tab_rowfilter_2 WHERE (c % 3 = 0)"
+);
+
+$node_publisher->safe_psql('postgres',
+	"CREATE PUBLICATION tap_pub_3 FOR TABLE tab_rowfilter_partitioned WHERE (a < 5000)"
+);
+$node_publisher->safe_psql('postgres',
+	"ALTER PUBLICATION tap_pub_3 ADD TABLE tab_rowfilter_less_10k WHERE (a < 6000)"
+);
+$node_publisher->safe_psql('postgres',
+	"CREATE PUBLICATION tap_pub_not_used FOR TABLE tab_rowfilter_1 WHERE (a < 0)"
+);
+
+$node_publisher->safe_psql('postgres',
+	"CREATE PUBLICATION tap_pub_4a FOR TABLE tab_rowfilter_4 WHERE (c % 2 = 0)"
+);
+$node_publisher->safe_psql('postgres',
+	"CREATE PUBLICATION tap_pub_4b FOR TABLE tab_rowfilter_4");
+
+$node_publisher->safe_psql('postgres',
+	"CREATE PUBLICATION tap_pub_5a FOR TABLE tab_rowfilter_partitioned_2");
+$node_publisher->safe_psql('postgres',
+	"CREATE PUBLICATION tap_pub_5b FOR TABLE tab_rowfilter_partition WHERE (a > 10)"
+);
+
+$node_publisher->safe_psql('postgres',
+	"CREATE PUBLICATION tap_pub_toast FOR TABLE tab_rowfilter_toast WHERE (a = repeat('1234567890', 200) AND b < '10')"
+);
+
+$node_publisher->safe_psql('postgres',
+	"CREATE PUBLICATION tap_pub_inherits FOR TABLE tab_rowfilter_inherited WHERE (a > 15)"
+);
+
+#
+# The following INSERTs are executed before the CREATE SUBSCRIPTION, so these
+# SQL commands are for testing the initial data copy using logical replication.
+#
+$node_publisher->safe_psql('postgres',
+	"INSERT INTO tab_rowfilter_1 (a, b) VALUES (1, 'not replicated')");
+$node_publisher->safe_psql('postgres',
+	"INSERT INTO tab_rowfilter_1 (a, b) VALUES (1500, 'filtered')");
+$node_publisher->safe_psql('postgres',
+	"INSERT INTO tab_rowfilter_1 (a, b) VALUES (1980, 'not filtered')");
+$node_publisher->safe_psql('postgres',
+	"INSERT INTO tab_rowfilter_1 (a, b) SELECT x, 'test ' || x FROM generate_series(990,1002) x"
+);
+$node_publisher->safe_psql('postgres',
+	"INSERT INTO tab_rowfilter_2 (c) SELECT generate_series(1, 20)");
+$node_publisher->safe_psql('postgres',
+	"INSERT INTO tab_rowfilter_3 (a, b) SELECT x, (x % 3 = 0) FROM generate_series(1, 10) x"
+);
+$node_publisher->safe_psql('postgres',
+	"INSERT INTO tab_rowfilter_4 (c) SELECT generate_series(1, 10)");
+
+# insert data into partitioned table and directly on the partition
+$node_publisher->safe_psql('postgres',
+	"INSERT INTO tab_rowfilter_partitioned (a, b) VALUES(1, 100),(7000, 101),(15000, 102),(5500, 300)"
+);
+$node_publisher->safe_psql('postgres',
+	"INSERT INTO tab_rowfilter_less_10k (a, b) VALUES(2, 200),(6005, 201)");
+$node_publisher->safe_psql('postgres',
+	"INSERT INTO tab_rowfilter_greater_10k (a, b) VALUES(16000, 103)");
+
+# insert data into partitioned table.
+$node_publisher->safe_psql('postgres',
+	"INSERT INTO tab_rowfilter_partitioned_2 (a, b) VALUES(1, 1),(20, 20)");
+
+$node_publisher->safe_psql('postgres',
+	"INSERT INTO tab_rowfilter_toast(a, b) VALUES(repeat('1234567890', 200), '1234567890')"
+);
+
+# insert data into parent and child table.
+$node_publisher->safe_psql('postgres',
+	"INSERT INTO tab_rowfilter_inherited(a) VALUES(10),(20)");
+$node_publisher->safe_psql('postgres',
+	"INSERT INTO tab_rowfilter_child(a, b) VALUES(0,'0'),(30,'30'),(40,'40')"
+);
+
+$node_subscriber->safe_psql('postgres',
+	"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub_1, tap_pub_2, tap_pub_3, tap_pub_4a, tap_pub_4b, tap_pub_5a, tap_pub_5b, tap_pub_toast, tap_pub_inherits"
+);
+
+$node_publisher->wait_for_catchup($appname);
+
+# wait for initial table synchronization to finish
+$node_subscriber->poll_query_until('postgres', $synced_query)
+  or die "Timed out while waiting for subscriber to synchronize data";
+
+# Check expected replicated rows for tab_rowfilter_1
+# tap_pub_1 filter is: (a > 1000 AND b <> 'filtered')
+# - INSERT (1, 'not replicated') NO, because a is not > 1000
+# - INSERT (1500, 'filtered') NO, because b == 'filtered'
+# - INSERT (1980, 'not filtered') YES
+# - generate_series(990,1002) YES, only for 1001,1002 because a > 1000
+#
+$result =
+  $node_subscriber->safe_psql('postgres',
+	"SELECT a, b FROM tab_rowfilter_1 ORDER BY 1, 2");
+is( $result, qq(1001|test 1001
+1002|test 1002
+1980|not filtered), 'check initial data copy from table tab_rowfilter_1');
+
+# Check expected replicated rows for tab_rowfilter_2
+# tap_pub_1 filter is: (c % 2 = 0)
+# tap_pub_2 filter is: (c % 3 = 0)
+# When there are multiple publications for the same table, the filter
+# expressions are OR'ed together. In this case, rows are replicated if the
+# c value is divisible by 2 OR 3 (2, 3, 4, 6, 8, 9, 10, 12, 14, 15, 16, 18, 20)
+#
+$result =
+  $node_subscriber->safe_psql('postgres',
+	"SELECT count(c), min(c), max(c) FROM tab_rowfilter_2");
+is($result, qq(13|2|20),
+	'check initial data copy from table tab_rowfilter_2');
+
+# Check expected replicated rows for tab_rowfilter_4
+# (same table in two publications but only one has a filter).
+# tap_pub_4a filter is: (c % 2 = 0)
+# tap_pub_4b filter is:
+# Expressions are OR'ed together but when there is no filter it just means
+# OR everything - i.e. the same as no filter at all.
+# Expect all rows: (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
+$result =
+  $node_subscriber->safe_psql('postgres',
+	"SELECT count(c), min(c), max(c) FROM tab_rowfilter_4");
+is($result, qq(10|1|10),
+	'check initial data copy from table tab_rowfilter_4');
+
+# Check expected replicated rows for tab_rowfilter_3
+# There is no filter. 10 rows are inserted, so 10 rows are replicated.
+$result =
+  $node_subscriber->safe_psql('postgres',
+	"SELECT count(a) FROM tab_rowfilter_3");
+is($result, qq(10), 'check initial data copy from table tab_rowfilter_3');
+
+# Check expected replicated rows for partitions
+# publication option publish_via_partition_root is false so use the row filter
+# from a partition
+# tab_rowfilter_partitioned filter: (a < 5000)
+# tab_rowfilter_less_10k filter: (a < 6000)
+# tab_rowfilter_greater_10k filter: no filter
+#
+# INSERT into tab_rowfilter_partitioned:
+# - INSERT (1,100) YES, because 1 < 6000
+# - INSERT (7000, 101) NO, because 7000 is not < 6000
+# - INSERT (15000, 102) YES, because tab_rowfilter_greater_10k has no filter
+# - INSERT (5500, 300) YES, because 5500 < 6000
+#
+# INSERT directly into tab_rowfilter_less_10k:
+# - INSERT (2, 200) YES, because 2 < 6000
+# - INSERT (6005, 201) NO, because 6005 is not < 6000
+#
+# INSERT directly into tab_rowfilter_greater_10k:
+# - INSERT (16000, 103) YES, because tab_rowfilter_greater_10k has no filter
+#
+$result =
+  $node_subscriber->safe_psql('postgres',
+	"SELECT a, b FROM tab_rowfilter_less_10k ORDER BY 1, 2");
+is( $result, qq(1|100
+2|200
+5500|300), 'check initial data copy from partition tab_rowfilter_less_10k');
+
+$result =
+  $node_subscriber->safe_psql('postgres',
+	"SELECT a, b FROM tab_rowfilter_greater_10k ORDER BY 1, 2");
+is( $result, qq(15000|102
+16000|103), 'check initial data copy from partition tab_rowfilter_greater_10k'
+);
+
+# Check expected replicated rows for partitions
+# publication option publish_via_partition_root is false so use the row filter
+# from a partition
+# tap_pub_5a filter:
+# tap_pub_5b filter: (a > 10)
+# The parent table for this partition is published via tap_pub_5a, so there is
+# no filter for the partition. And expressions are OR'ed together so it means
+# OR everything - i.e. the same as no filter at all.
+# Expect all rows: (1, 1) and (20, 20)
+#
+$result =
+  $node_subscriber->safe_psql('postgres',
+	"SELECT a, b FROM tab_rowfilter_partition ORDER BY 1, 2");
+is( $result, qq(1|1
+20|20), 'check initial data copy from partition tab_rowfilter_partition');
+
+# Check expected replicated rows for tab_rowfilter_toast
+# tab_rowfilter_toast filter: (a = repeat('1234567890', 200) AND b < '10')
+# INSERT (repeat('1234567890', 200) ,'1234567890') NO
+$result =
+  $node_subscriber->safe_psql('postgres',
+	"SELECT count(*) FROM tab_rowfilter_toast");
+is($result, qq(0), 'check initial data copy from table tab_rowfilter_toast');
+
+# Check expected replicated rows for tab_rowfilter_inherited
+# tab_rowfilter_inherited filter is: (a > 15)
+# - INSERT (10) NO, 10 < 15
+# - INSERT (20) YES, 20 > 15
+# - INSERT (0, '0') NO, 0 < 15
+# - INSERT (30, '30') YES, 30 > 15
+# - INSERT (40, '40') YES, 40 > 15
+$result =
+  $node_subscriber->safe_psql('postgres',
+	"SELECT a FROM tab_rowfilter_inherited ORDER BY a");
+is( $result, qq(20
+30
+40), 'check initial data copy from table tab_rowfilter_inherited');
+
+# The following commands are executed after CREATE SUBSCRIPTION, so these SQL
+# commands are for testing normal logical replication behavior.
+#
+# test row filter (INSERT, UPDATE, DELETE)
+$node_publisher->safe_psql('postgres',
+	"INSERT INTO tab_rowfilter_1 (a, b) VALUES (800, 'test 800')");
+$node_publisher->safe_psql('postgres',
+	"INSERT INTO tab_rowfilter_1 (a, b) VALUES (1600, 'test 1600')");
+$node_publisher->safe_psql('postgres',
+	"INSERT INTO tab_rowfilter_1 (a, b) VALUES (1601, 'test 1601')");
+$node_publisher->safe_psql('postgres',
+	"INSERT INTO tab_rowfilter_1 (a, b) VALUES (1602, 'filtered')");
+$node_publisher->safe_psql('postgres',
+	"INSERT INTO tab_rowfilter_1 (a, b) VALUES (1700, 'test 1700')");
+$node_publisher->safe_psql('postgres',
+	"UPDATE tab_rowfilter_1 SET b = NULL WHERE a = 1600");
+$node_publisher->safe_psql('postgres',
+	"UPDATE tab_rowfilter_1 SET b = 'test 1601 updated' WHERE a = 1601");
+$node_publisher->safe_psql('postgres',
+	"UPDATE tab_rowfilter_1 SET b = 'test 1602 updated' WHERE a = 1602");
+$node_publisher->safe_psql('postgres',
+	"DELETE FROM tab_rowfilter_1 WHERE a = 1700");
+$node_publisher->safe_psql('postgres',
+	"INSERT INTO tab_rowfilter_2 (c) VALUES (21), (22), (23), (24), (25)");
+$node_publisher->safe_psql('postgres',
+	"INSERT INTO tab_rowfilter_4 (c) VALUES (0), (11), (12)");
+$node_publisher->safe_psql('postgres',
+	"INSERT INTO tab_rowfilter_inherited (a) VALUES (14), (16)");
+$node_publisher->safe_psql('postgres',
+	"INSERT INTO tab_rowfilter_child (a, b) VALUES (13, '13'), (17, '17')");
+
+$node_publisher->wait_for_catchup($appname);
+
+# Check expected replicated rows for tab_rowfilter_2
+# tap_pub_1 filter is: (c % 2 = 0)
+# tap_pub_2 filter is: (c % 3 = 0)
+# When there are multiple publications for the same table, the filter
+# expressions are OR'ed together. In this case, rows are replicated if the
+# c value is divisible by 2 OR 3.
+#
+# Expect original rows (2, 3, 4, 6, 8, 9, 10, 12, 14, 15, 16, 18, 20)
+# Plus (21, 22, 24)
+#
+$result =
+  $node_subscriber->safe_psql('postgres',
+	"SELECT count(c), min(c), max(c) FROM tab_rowfilter_2");
+is($result, qq(16|2|24), 'check replicated rows to tab_rowfilter_2');
+
+# Check expected replicated rows for tab_rowfilter_4
+# (same table in two publications but only one has a filter).
+# tap_pub_4a filter is: (c % 2 = 0)
+# tap_pub_4b filter is:
+# Expressions are OR'ed together but when there is no filter it just means
+# OR everything - i.e. the same as no filter at all.
+# Expect all rows from initial copy: (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
+# And also (0, 11, 12)
+$result =
+  $node_subscriber->safe_psql('postgres',
+	"SELECT count(c), min(c), max(c) FROM tab_rowfilter_4");
+is($result, qq(13|0|12), 'check replicated rows to tab_rowfilter_4');
+
+# Check expected replicated rows for tab_rowfilter_1
+# tap_pub_1 filter is: (a > 1000 AND b <> 'filtered')
+#
+# - 1001, 1002, 1980 already exist from initial data copy
+# - INSERT (800, 'test 800') NO, because 800 is not > 1000
+# - INSERT (1600, 'test 1600') YES, because 1600 > 1000 and 'test 1600' <> 'filtered',
+# but the row is deleted by the UPDATE below.
+# - INSERT (1601, 'test 1601') YES, because 1601 > 1000 and 'test 1601' <> 'filtered' +# - INSERT (1602, 'filtered') NO, because b == 'filtered' +# - INSERT (1700, 'test 1700') YES, because 1700 > 1000 and 'test 1700' <> 'filtered' +# - UPDATE (1600, NULL) NO, row filter evaluates to false because NULL is not <> 'filtered' +# - UPDATE (1601, 'test 1601 updated') YES, because 1601 > 1000 and 'test 1601 updated' <> 'filtered' +# - UPDATE (1602, 'test 1602 updated') YES, because 1602 > 1000 and 'test 1602 updated' <> 'filtered' +# - DELETE (1700) YES, because 1700 > 1000 and 'test 1700' <> 'filtered' +# +$result = + $node_subscriber->safe_psql('postgres', + "SELECT a, b FROM tab_rowfilter_1 ORDER BY 1, 2"); +is( $result, qq(1001|test 1001 +1002|test 1002 +1601|test 1601 updated +1602|test 1602 updated +1980|not filtered), 'check replicated rows to table tab_rowfilter_1'); + +# Publish using root partitioned table +# Use a different partitioned table layout (exercise publish_via_partition_root) +$node_publisher->safe_psql('postgres', + "ALTER PUBLICATION tap_pub_3 SET (publish_via_partition_root = true)"); +$node_publisher->safe_psql('postgres', + "ALTER PUBLICATION tap_pub_3 SET TABLE tab_rowfilter_partitioned WHERE (a < 5000), tab_rowfilter_less_10k WHERE (a < 6000)" +); +$node_subscriber->safe_psql('postgres', + "TRUNCATE TABLE tab_rowfilter_partitioned"); +$node_subscriber->safe_psql('postgres', + "ALTER SUBSCRIPTION tap_sub REFRESH PUBLICATION WITH (copy_data = true)"); +$node_publisher->safe_psql('postgres', + "INSERT INTO tab_rowfilter_partitioned (a, b) VALUES(4000, 400),(4001, 401),(4002, 402)" +); +$node_publisher->safe_psql('postgres', + "INSERT INTO tab_rowfilter_less_10k (a, b) VALUES(4500, 450)"); +$node_publisher->safe_psql('postgres', + "INSERT INTO tab_rowfilter_less_10k (a, b) VALUES(5600, 123)"); +$node_publisher->safe_psql('postgres', + "INSERT INTO tab_rowfilter_greater_10k (a, b) VALUES(14000, 1950)"); +$node_publisher->safe_psql('postgres', + "UPDATE tab_rowfilter_less_10k SET b = 30 WHERE a = 4001"); +$node_publisher->safe_psql('postgres', + "DELETE FROM tab_rowfilter_less_10k WHERE a = 4002"); + +$node_publisher->wait_for_catchup($appname); + +# Check expected replicated rows for partitions +# publication option publish_via_partition_root is true so use the row filter +# from the root partitioned table +# tab_rowfilter_partitioned filter: (a < 5000) +# tab_rowfilter_less_10k filter: (a < 6000) +# tab_rowfilter_greater_10k filter: no filter +# +# After TRUNCATE, REFRESH PUBLICATION, the initial data copy will apply the +# partitioned table row filter. +# - INSERT (1, 100) YES, 1 < 5000 +# - INSERT (7000, 101) NO, 7000 is not < 5000 +# - INSERT (15000, 102) NO, 15000 is not < 5000 +# - INSERT (5500, 300) NO, 5500 is not < 5000 +# - INSERT (2, 200) YES, 2 < 5000 +# - INSERT (6005, 201) NO, 6005 is not < 5000 +# - INSERT (16000, 103) NO, 16000 is not < 5000 +# +# Execute SQL commands after initial data copy for testing the logical +# replication behavior. 
+# - INSERT (4000, 400) YES, 4000 < 5000
+# - INSERT (4001, 401) YES, 4001 < 5000
+# - INSERT (4002, 402) YES, 4002 < 5000
+# - INSERT (4500, 450) YES, 4500 < 5000
+# - INSERT (5600, 123) NO, 5600 is not < 5000
+# - INSERT (14000, 1950) NO, 14000 is not < 5000
+# - UPDATE (4001) YES, 4001 < 5000
+# - DELETE (4002) YES, 4002 < 5000
+$result =
+  $node_subscriber->safe_psql('postgres',
+	"SELECT a, b FROM tab_rowfilter_partitioned ORDER BY 1, 2");
+is( $result, qq(1|100
+2|200
+4000|400
+4001|30
+4500|450), 'check publish_via_partition_root behavior');
+
+# Check expected replicated rows for tab_rowfilter_inherited and
+# tab_rowfilter_child.
+# tab_rowfilter_inherited filter is: (a > 15)
+# - INSERT (14) NO, 14 < 15
+# - INSERT (16) YES, 16 > 15
+#
+# tab_rowfilter_child filter is: (a > 15)
+# - INSERT (13, '13') NO, 13 < 15
+# - INSERT (17, '17') YES, 17 > 15
+
+$result =
+  $node_subscriber->safe_psql('postgres',
+	"SELECT a FROM tab_rowfilter_inherited ORDER BY a");
+is( $result, qq(16
+17
+20
+30
+40),
+	'check replicated rows to tab_rowfilter_inherited and tab_rowfilter_child'
+);
+
+# UPDATE the non-toasted column for table tab_rowfilter_toast
+$node_publisher->safe_psql('postgres',
+	"UPDATE tab_rowfilter_toast SET b = '1'");
+
+# Check expected replicated rows for tab_rowfilter_toast
+# tab_rowfilter_toast filter: (a = repeat('1234567890', 200) AND b < '10')
+# UPDATE old (repeat('1234567890', 200) ,'1234567890') NO
+#        new: (repeat('1234567890', 200) ,'1') YES
+$result =
+  $node_subscriber->safe_psql('postgres',
+	"SELECT a = repeat('1234567890', 200), b FROM tab_rowfilter_toast");
+is($result, qq(t|1), 'check replicated rows to tab_rowfilter_toast');
+
+# Testcase end: FOR TABLE with row filter publications
+# ======================================================
+
+$node_subscriber->stop('fast');
+$node_publisher->stop('fast');
+
+done_testing();
diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list
index 15684f53ba..c6b302c7b2 100644
--- a/src/tools/pgindent/typedefs.list
+++ b/src/tools/pgindent/typedefs.list
@@ -2053,6 +2053,7 @@ PsqlScanStateData
 PsqlSettings
 Publication
 PublicationActions
+PublicationDesc
 PublicationInfo
 PublicationObjSpec
 PublicationObjSpecType
@@ -2199,6 +2200,7 @@ ReorderBufferApplyChangeCB
 ReorderBufferApplyTruncateCB
 ReorderBufferBeginCB
 ReorderBufferChange
+ReorderBufferChangeType
 ReorderBufferCommitCB
 ReorderBufferCommitPreparedCB
 ReorderBufferDiskChange
@@ -3506,6 +3508,7 @@ replace_rte_variables_context
 ret_type
 rewind_source
 rewrite_event
+rf_context
 rijndael_ctx
 rm_detail_t
 role_auth_extra
From 9467321649efc1fec28603d4ba35d03202c4ead1 Mon Sep 17 00:00:00 2001
From: Peter Eisentraut
Date: Tue, 22 Feb 2022 10:08:11 +0100
Subject: [PATCH 036/108] Put typtype letters back into consistent order

---
 src/test/regress/expected/type_sanity.out | 2 +-
 src/test/regress/sql/type_sanity.sql      | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/test/regress/expected/type_sanity.out b/src/test/regress/expected/type_sanity.out
index 81c39a0112..d3ac08c9ee 100644
--- a/src/test/regress/expected/type_sanity.out
+++ b/src/test/regress/expected/type_sanity.out
@@ -17,7 +17,7 @@ SELECT t1.oid, t1.typname
 FROM pg_type as t1
 WHERE t1.typnamespace = 0 OR
     (t1.typlen <= 0 AND t1.typlen != -1 AND t1.typlen != -2) OR
-    (t1.typtype not in ('b', 'c', 'd', 'e', 'p', 'r', 'm')) OR
+    (t1.typtype not in ('b', 'c', 'd', 'e', 'm', 'p', 'r')) OR
     NOT t1.typisdefined OR
     (t1.typalign not in ('c', 's', 'i', 'd')) OR
    (t1.typstorage not in ('p',
'x', 'e', 'm')); diff --git a/src/test/regress/sql/type_sanity.sql b/src/test/regress/sql/type_sanity.sql index 50a885eb44..5edc1f1f6e 100644 --- a/src/test/regress/sql/type_sanity.sql +++ b/src/test/regress/sql/type_sanity.sql @@ -20,7 +20,7 @@ SELECT t1.oid, t1.typname FROM pg_type as t1 WHERE t1.typnamespace = 0 OR (t1.typlen <= 0 AND t1.typlen != -1 AND t1.typlen != -2) OR - (t1.typtype not in ('b', 'c', 'd', 'e', 'p', 'r', 'm')) OR + (t1.typtype not in ('b', 'c', 'd', 'e', 'm', 'p', 'r')) OR NOT t1.typisdefined OR (t1.typalign not in ('c', 's', 'i', 'd')) OR (t1.typstorage not in ('p', 'x', 'e', 'm')); From afdeff10526e29e3fc63b18c08100458780489d9 Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Tue, 22 Feb 2022 18:02:34 -0800 Subject: [PATCH 037/108] Add temporary debug info to help debug 019_replslot_limit.pl failures. I have not been able to reproduce the occasional failures of 019_replslot_limit.pl we are seeing in the buildfarm and not for lack of trying. The additional logging and increased log level will hopefully help. Will be reverted once the cause is identified. Discussion: https://postgr.es/m/20220218231415.c4plkp4i3reqcwip@alap3.anarazel.de --- src/backend/replication/slot.c | 21 +++++++++++++++++++++ src/bin/pg_basebackup/pg_basebackup.c | 10 +++++++++- src/test/recovery/t/019_replslot_limit.pl | 5 ++++- 3 files changed, 34 insertions(+), 2 deletions(-) diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c index 5da5fa825a..3d39fddaae 100644 --- a/src/backend/replication/slot.c +++ b/src/backend/replication/slot.c @@ -177,6 +177,10 @@ ReplicationSlotInitialize(void) static void ReplicationSlotShmemExit(int code, Datum arg) { + /* temp debugging aid to analyze 019_replslot_limit failures */ + elog(DEBUG3, "replication slot exit hook, %s active slot", + MyReplicationSlot != NULL ? "with" : "without"); + /* Make sure active replication slots are released */ if (MyReplicationSlot != NULL) ReplicationSlotRelease(); @@ -554,6 +558,9 @@ ReplicationSlotCleanup(void) Assert(MyReplicationSlot == NULL); restart: + /* temp debugging aid to analyze 019_replslot_limit failures */ + elog(DEBUG3, "temporary replication slot cleanup: begin"); + LWLockAcquire(ReplicationSlotControlLock, LW_SHARED); for (i = 0; i < max_replication_slots; i++) { @@ -579,6 +586,8 @@ ReplicationSlotCleanup(void) } LWLockRelease(ReplicationSlotControlLock); + + elog(DEBUG3, "temporary replication slot cleanup: done"); } /* @@ -1284,6 +1293,12 @@ InvalidatePossiblyObsoleteSlot(ReplicationSlot *s, XLogRecPtr oldestLSN, (void) kill(active_pid, SIGTERM); last_signaled_pid = active_pid; } + else + { + /* temp debugging aid to analyze 019_replslot_limit failures */ + elog(DEBUG3, "not signalling process %d during invalidation of slot \"%s\"", + active_pid, NameStr(slotname)); + } /* Wait until the slot is released. 
 */
 		ConditionVariableSleep(&s->active_cv,
@@ -1347,6 +1362,10 @@ InvalidateObsoleteReplicationSlots(XLogSegNo oldestSegno)
 	XLogSegNoOffsetToRecPtr(oldestSegno, 0, wal_segment_size, oldestLSN);
 
 restart:
+	/* temp debugging aid to analyze 019_replslot_limit failures */
+	elog(DEBUG3, "begin invalidating obsolete replication slots older than %X/%X",
+		 LSN_FORMAT_ARGS(oldestLSN));
+
 	LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
 	for (int i = 0; i < max_replication_slots; i++)
 	{
@@ -1372,6 +1391,8 @@ InvalidateObsoleteReplicationSlots(XLogSegNo oldestSegno)
 		ReplicationSlotsComputeRequiredLSN();
 	}
 
+	elog(DEBUG3, "done invalidating obsolete replication slots");
+
 	return invalidated;
 }
 
diff --git a/src/bin/pg_basebackup/pg_basebackup.c b/src/bin/pg_basebackup/pg_basebackup.c
index 08b07d5a06..8c77c533e6 100644
--- a/src/bin/pg_basebackup/pg_basebackup.c
+++ b/src/bin/pg_basebackup/pg_basebackup.c
@@ -700,8 +700,16 @@ StartLogStreamer(char *startpos, uint32 timeline, char *sysidentifier)
 	bgchild = fork();
 	if (bgchild == 0)
 	{
+		int			ret;
+
 		/* in child process */
-		exit(LogStreamerMain(param));
+		ret = LogStreamerMain(param);
+
+		/* temp debugging aid to analyze 019_replslot_limit failures */
+		if (verbose)
+			pg_log_info("log streamer with pid %d exiting", getpid());
+
+		exit(ret);
 	}
 	else if (bgchild < 0)
 	{
diff --git a/src/test/recovery/t/019_replslot_limit.pl b/src/test/recovery/t/019_replslot_limit.pl
index 4257bd4d35..0c9da9bf27 100644
--- a/src/test/recovery/t/019_replslot_limit.pl
+++ b/src/test/recovery/t/019_replslot_limit.pl
@@ -316,13 +316,16 @@
 max_wal_size = 2MB
 log_checkpoints = yes
 max_slot_wal_keep_size = 1MB
+
+# temp debugging aid to analyze 019_replslot_limit failures
+log_min_messages=debug3
 ));
 $node_primary3->start;
 $node_primary3->safe_psql('postgres',
 	"SELECT pg_create_physical_replication_slot('rep3')");
 # Take backup
 $backup_name = 'my_backup';
-$node_primary3->backup($backup_name);
+$node_primary3->backup($backup_name, backup_options => ['--verbose']);
 # Create standby
 my $node_standby3 = PostgreSQL::Test::Cluster->new('standby_3');
 $node_standby3->init_from_backup($node_primary3, $backup_name,
From 2313a3ee22eb3c63a987b496df64c67443763a5a Mon Sep 17 00:00:00 2001
From: Daniel Gustafsson
Date: Wed, 23 Feb 2022 10:54:03 +0100
Subject: [PATCH 038/108] Fix statenames in mergejoin comments

For a few states, the names used in the comments were not consistent
with the documented state names.

Author: Zhihong Yu
Discussion: https://postgr.es/m/CALNJ-vQVthfQXVqmrHR8BKHtC4fMGbhM1xbvJNJAPexTq_dH=w@mail.gmail.com
---
 src/backend/executor/nodeMergejoin.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c
index a049bc4ae0..edb8972c5b 100644
--- a/src/backend/executor/nodeMergejoin.c
+++ b/src/backend/executor/nodeMergejoin.c
@@ -1143,7 +1143,7 @@ ExecMergeJoin(PlanState *pstate)
 			break;
 
 			/*----------------------------------------------------------
-			 * EXEC_MJ_SKIP means compare tuples and if they do not
+			 * EXEC_MJ_SKIP_TEST means compare tuples and if they do not
 			 * match, skip whichever is lesser.
 			 *
 			 * For example:
@@ -1199,8 +1199,8 @@ ExecMergeJoin(PlanState *pstate)
 			break;
 
 			/*
-			 * SKIPOUTER_ADVANCE: advance over an outer tuple that is
-			 * known not to join to any inner tuple.
+			 * EXEC_MJ_SKIPOUTER_ADVANCE: advance over an outer tuple that
+			 * is known not to join to any inner tuple.
 			 *
 			 * Before advancing, we check to see if we must emit an
 			 * outer-join fill tuple for this outer tuple.
@@ -1261,8 +1261,8 @@ ExecMergeJoin(PlanState *pstate)
 			break;
 
 			/*
-			 * SKIPINNER_ADVANCE: advance over an inner tuple that is
-			 * known not to join to any outer tuple.
+			 * EXEC_MJ_SKIPINNER_ADVANCE: advance over an inner tuple that
+			 * is known not to join to any outer tuple.
 			 *
 			 * Before advancing, we check to see if we must emit an
 			 * outer-join fill tuple for this inner tuple.
From 91d3580535238abf93c67a6d3dce64f0e8c3cc6d Mon Sep 17 00:00:00 2001
From: Daniel Gustafsson
Date: Wed, 23 Feb 2022 11:22:46 +0100
Subject: [PATCH 039/108] Use test functions in pg_rewind test module

Commit 61081e75c introduced pg_rewind along with its test suite, which
was written such that no subroutine incurred more than one test in the
plan.  Now that we no longer explicitly plan tests (since 549ec201d),
we can use the usual Test::More functions.

Reviewed-by: Andrew Dunstan
Discussion: https://postgr.es/m/AA527525-F0CC-4AA2-AF98-543CABFDAF59@yesql.se
---
 src/bin/pg_rewind/t/RewindTest.pm | 19 ++++---------------
 1 file changed, 4 insertions(+), 15 deletions(-)

diff --git a/src/bin/pg_rewind/t/RewindTest.pm b/src/bin/pg_rewind/t/RewindTest.pm
index 2fedc626cc..5651602858 100644
--- a/src/bin/pg_rewind/t/RewindTest.pm
+++ b/src/bin/pg_rewind/t/RewindTest.pm
@@ -102,21 +102,10 @@ sub check_query
 		],
 		'>', \$stdout, '2>', \$stderr;
 
-	# We don't use ok() for the exit code and stderr, because we want this
-	# check to be just a single test.
-	if (!$result)
-	{
-		fail("$test_name: psql exit code");
-	}
-	elsif ($stderr ne '')
-	{
-		diag $stderr;
-		fail("$test_name: psql no stderr");
-	}
-	else
-	{
-		is($stdout, $expected_stdout, "$test_name: query result matches");
-	}
+	is($result, 1, "$test_name: psql exit code");
+	is($stderr, '', "$test_name: psql no stderr");
+	is($stdout, $expected_stdout, "$test_name: query result matches");
+
 	return;
 }
 
From 6da65a3f9a9deae4fdcc768c612b0c8f52759f75 Mon Sep 17 00:00:00 2001
From: Daniel Gustafsson
Date: Wed, 23 Feb 2022 14:22:16 +0100
Subject: [PATCH 040/108] Add function to pump IPC process until string match

Refactor the recovery tests to not carry a local duplicated copy of the
pump_until function which pumps a process until a defined string is
seen on a stream.  This reduces duplication, and is in preparation for
another patch which will also use this functionality.

Reviewed-by: Michael Paquier
Discussion: https://postgr.es/m/YgynUafCyIu3jIhC@paquier.xyz
---
 src/test/perl/PostgreSQL/Test/Utils.pm      | 23 +++++++++++
 src/test/recovery/t/013_crash_restart.pl    | 46 +++++----------------
 src/test/recovery/t/022_crash_temp_files.pl | 45 ++++----------------
 3 files changed, 41 insertions(+), 73 deletions(-)

diff --git a/src/test/perl/PostgreSQL/Test/Utils.pm b/src/test/perl/PostgreSQL/Test/Utils.pm
index 2c0c72f57a..46cd746796 100644
--- a/src/test/perl/PostgreSQL/Test/Utils.pm
+++ b/src/test/perl/PostgreSQL/Test/Utils.pm
@@ -73,6 +73,7 @@ our @EXPORT = qw(
   system_log
   run_log
   run_command
+  pump_until
 
   command_ok
   command_fails
@@ -408,6 +409,28 @@ sub run_command
 
 =pod
 
+=item pump_until(proc, timeout, stream, until)
+
+Pump until string is matched on the specified stream, or timeout occurs.
+
+=cut
+
+sub pump_until
+{
+	my ($proc, $timeout, $stream, $until) = @_;
+	$proc->pump_nb();
+	while (1)
+	{
+		last if $$stream =~ /$until/;
+		return 0 if ($timeout->is_expired);
+		return 0 if (not $proc->pumpable());
+		$proc->pump();
+	}
+	return 1;
+}
+
+=pod
+
=item generate_ascii_string(from_char, to_char)

Generate a string made of the given range of ASCII characters.
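For illustration, a minimal sketch of a caller of the shared helper might look
as follows; $node is assumed to be a PostgreSQL::Test::Cluster instance and the
marker query is made up, so this is not part of the patch itself:

    my ($stdin, $stdout, $stderr) = ('', '', '');
    my $timer = IPC::Run::timer(60);
    my $psql  = IPC::Run::start(
        [ 'psql', '-X', '-d', $node->connstr('postgres') ],
        '<', \$stdin, '>', \$stdout, '2>', \$stderr, $timer);
    # feed a query and wait for its output to show up on stdout
    $stdin .= "SELECT 'marker';\n";
    ok(pump_until($psql, $timer, \$stdout, qr/marker/m), 'saw psql marker');
    $psql->finish;

Passing the timeout explicitly, instead of closing over a variable in each
script, is what allows the function to live in PostgreSQL::Test::Utils; the
call-site changes below all add that extra argument.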
diff --git a/src/test/recovery/t/013_crash_restart.pl b/src/test/recovery/t/013_crash_restart.pl index 3b740eb6f3..be31de37c5 100644 --- a/src/test/recovery/t/013_crash_restart.pl +++ b/src/test/recovery/t/013_crash_restart.pl @@ -71,7 +71,7 @@ INSERT INTO alive VALUES($$committed-before-sigquit$$); SELECT pg_backend_pid(); ]; -ok(pump_until($killme, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m), +ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m), 'acquired pid for SIGQUIT'); my $pid = $killme_stdout; chomp($pid); @@ -83,7 +83,7 @@ BEGIN; INSERT INTO alive VALUES($$in-progress-before-sigquit$$) RETURNING status; ]; -ok(pump_until($killme, \$killme_stdout, qr/in-progress-before-sigquit/m), +ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/in-progress-before-sigquit/m), 'inserted in-progress-before-sigquit'); $killme_stdout = ''; $killme_stderr = ''; @@ -96,7 +96,7 @@ SELECT $$psql-connected$$; SELECT pg_sleep(3600); ]; -ok(pump_until($monitor, \$monitor_stdout, qr/psql-connected/m), +ok(pump_until($monitor, $psql_timeout, \$monitor_stdout, qr/psql-connected/m), 'monitor connected'); $monitor_stdout = ''; $monitor_stderr = ''; @@ -113,6 +113,7 @@ ]; ok( pump_until( $killme, + $psql_timeout, \$killme_stderr, qr/WARNING: terminating connection because of crash of another server process|server closed the connection unexpectedly|connection to server was lost/m ), @@ -126,6 +127,7 @@ # sending. ok( pump_until( $monitor, + $psql_timeout, \$monitor_stderr, qr/WARNING: terminating connection because of crash of another server process|server closed the connection unexpectedly|connection to server was lost/m ), @@ -148,7 +150,7 @@ $killme_stdin .= q[ SELECT pg_backend_pid(); ]; -ok(pump_until($killme, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m), +ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m), "acquired pid for SIGKILL"); $pid = $killme_stdout; chomp($pid); @@ -161,7 +163,7 @@ BEGIN; INSERT INTO alive VALUES($$in-progress-before-sigkill$$) RETURNING status; ]; -ok(pump_until($killme, \$killme_stdout, qr/in-progress-before-sigkill/m), +ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/in-progress-before-sigkill/m), 'inserted in-progress-before-sigkill'); $killme_stdout = ''; $killme_stderr = ''; @@ -173,7 +175,7 @@ SELECT $$psql-connected$$; SELECT pg_sleep(3600); ]; -ok(pump_until($monitor, \$monitor_stdout, qr/psql-connected/m), +ok(pump_until($monitor, $psql_timeout, \$monitor_stdout, qr/psql-connected/m), 'monitor connected'); $monitor_stdout = ''; $monitor_stderr = ''; @@ -191,6 +193,7 @@ ]; ok( pump_until( $killme, + $psql_timeout, \$killme_stderr, qr/server closed the connection unexpectedly|connection to server was lost/m ), @@ -202,6 +205,7 @@ # sending. 
ok( pump_until( $monitor, + $psql_timeout, \$monitor_stderr, qr/WARNING: terminating connection because of crash of another server process|server closed the connection unexpectedly|connection to server was lost/m ), @@ -240,34 +244,4 @@ $node->stop(); -# Pump until string is matched, or timeout occurs -sub pump_until -{ - my ($proc, $stream, $untl) = @_; - $proc->pump_nb(); - while (1) - { - last if $$stream =~ /$untl/; - if ($psql_timeout->is_expired) - { - diag("aborting wait: program timed out"); - diag("stream contents: >>", $$stream, "<<"); - diag("pattern searched for: ", $untl); - - return 0; - } - if (not $proc->pumpable()) - { - diag("aborting wait: program died"); - diag("stream contents: >>", $$stream, "<<"); - diag("pattern searched for: ", $untl); - - return 0; - } - $proc->pump(); - } - return 1; - -} - done_testing(); diff --git a/src/test/recovery/t/022_crash_temp_files.pl b/src/test/recovery/t/022_crash_temp_files.pl index 6ab3092874..49dd86e848 100644 --- a/src/test/recovery/t/022_crash_temp_files.pl +++ b/src/test/recovery/t/022_crash_temp_files.pl @@ -57,7 +57,7 @@ $killme_stdin .= q[ SELECT pg_backend_pid(); ]; -ok(pump_until($killme, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m), +ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m), 'acquired pid for SIGKILL'); my $pid = $killme_stdout; chomp($pid); @@ -86,7 +86,7 @@ INSERT INTO tab_crash (a) VALUES(1); SELECT $$insert-tuple-to-lock-next-insert$$; ]; -pump_until($killme2, \$killme_stdout2, qr/insert-tuple-to-lock-next-insert/m); +pump_until($killme2, $psql_timeout, \$killme_stdout2, qr/insert-tuple-to-lock-next-insert/m); $killme_stdout2 = ''; $killme_stderr2 = ''; @@ -99,7 +99,7 @@ SELECT $$in-progress-before-sigkill$$; INSERT INTO tab_crash (a) SELECT i FROM generate_series(1, 5000) s(i); ]; -ok(pump_until($killme, \$killme_stdout, qr/in-progress-before-sigkill/m), +ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/in-progress-before-sigkill/m), 'insert in-progress-before-sigkill'); $killme_stdout = ''; $killme_stderr = ''; @@ -121,7 +121,7 @@ BEGIN SELECT $$insert-tuple-lock-waiting$$; ]; -pump_until($killme2, \$killme_stdout2, qr/insert-tuple-lock-waiting/m); +pump_until($killme2, $psql_timeout, \$killme_stdout2, qr/insert-tuple-lock-waiting/m); $killme_stdout2 = ''; $killme_stderr2 = ''; @@ -158,7 +158,7 @@ BEGIN $killme_stdin .= q[ SELECT pg_backend_pid(); ]; -ok(pump_until($killme, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m), +ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m), 'acquired pid for SIGKILL'); $pid = $killme_stdout; chomp($pid); @@ -175,7 +175,7 @@ BEGIN INSERT INTO tab_crash (a) VALUES(1); SELECT $$insert-tuple-to-lock-next-insert$$; ]; -pump_until($killme2, \$killme_stdout2, qr/insert-tuple-to-lock-next-insert/m); +pump_until($killme2, $psql_timeout, \$killme_stdout2, qr/insert-tuple-to-lock-next-insert/m); $killme_stdout2 = ''; $killme_stderr2 = ''; @@ -188,7 +188,7 @@ BEGIN SELECT $$in-progress-before-sigkill$$; INSERT INTO tab_crash (a) SELECT i FROM generate_series(1, 5000) s(i); ]; -ok(pump_until($killme, \$killme_stdout, qr/in-progress-before-sigkill/m), +ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/in-progress-before-sigkill/m), 'insert in-progress-before-sigkill'); $killme_stdout = ''; $killme_stderr = ''; @@ -210,7 +210,7 @@ BEGIN SELECT $$insert-tuple-lock-waiting$$; ]; -pump_until($killme2, \$killme_stdout2, qr/insert-tuple-lock-waiting/m); +pump_until($killme2, $psql_timeout, \$killme_stdout2, 
qr/insert-tuple-lock-waiting/m); $killme_stdout2 = ''; $killme_stderr2 = ''; @@ -242,33 +242,4 @@ BEGIN $node->stop(); -# Pump until string is matched, or timeout occurs -sub pump_until -{ - my ($proc, $stream, $untl) = @_; - $proc->pump_nb(); - while (1) - { - last if $$stream =~ /$untl/; - if ($psql_timeout->is_expired) - { - diag("aborting wait: program timed out"); - diag("stream contents: >>", $$stream, "<<"); - diag("pattern searched for: ", $untl); - - return 0; - } - if (not $proc->pumpable()) - { - diag("aborting wait: program died"); - diag("stream contents: >>", $$stream, "<<"); - diag("pattern searched for: ", $untl); - - return 0; - } - $proc->pump(); - } - return 1; -} - done_testing(); From c7d7e1203958952e0ef67d336c58f1e7094e7634 Mon Sep 17 00:00:00 2001 From: Daniel Gustafsson Date: Wed, 23 Feb 2022 14:23:50 +0100 Subject: [PATCH 041/108] Remove duplicated word in comment Reviewed-by: Michael Paquier Discussion: https://postgr.es/m/B7C15416-BD61-4926-9843-5C557BCD7007@yesql.se --- src/test/recovery/t/013_crash_restart.pl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/recovery/t/013_crash_restart.pl b/src/test/recovery/t/013_crash_restart.pl index be31de37c5..3976e339c0 100644 --- a/src/test/recovery/t/013_crash_restart.pl +++ b/src/test/recovery/t/013_crash_restart.pl @@ -28,7 +28,7 @@ $node->init(allows_streaming => 1); $node->start(); -# by default PostgreSQL::Test::Cluster doesn't doesn't restart after a crash +# by default PostgreSQL::Test::Cluster doesn't restart after a crash $node->safe_psql( 'postgres', q[ALTER SYSTEM SET restart_after_crash = 1; From 0475a97f744d2fea3676b2e69405d20358eac07a Mon Sep 17 00:00:00 2001 From: Daniel Gustafsson Date: Wed, 23 Feb 2022 14:24:43 +0100 Subject: [PATCH 042/108] Quick exit on log stream child exit in pg_basebackup If the log streaming child process (thread on Windows) dies during backup then the whole backup will be aborted at the end of the backup. Instead, trap ungraceful termination of the log streaming child and exit early. This also adds a TAP test for simulating this by terminating the responsible backend. Reviewed-by: Michael Paquier Reviewed-by: Bharath Rupireddy Reviewed-by: Magnus Hagander Discussion: https://postgr.es/m/0F69E282-97F9-4DB7-8D6D-F927AA6340C8@yesql.se Discussion: https://postgr.es/m/VI1PR83MB0189818B82C19059CB62E26199A89@VI1PR83MB0189.EURPRD83.prod.outlook.com --- src/bin/pg_basebackup/pg_basebackup.c | 47 +++++++++++++++++++- src/bin/pg_basebackup/t/010_pg_basebackup.pl | 34 ++++++++++++++ 2 files changed, 79 insertions(+), 2 deletions(-) diff --git a/src/bin/pg_basebackup/pg_basebackup.c b/src/bin/pg_basebackup/pg_basebackup.c index 8c77c533e6..c1ed7aeeee 100644 --- a/src/bin/pg_basebackup/pg_basebackup.c +++ b/src/bin/pg_basebackup/pg_basebackup.c @@ -174,6 +174,8 @@ static int bgpipe[2] = {-1, -1}; /* Handle to child process */ static pid_t bgchild = -1; static bool in_log_streamer = false; +/* Flag to indicate if child process exited unexpectedly */ +static volatile sig_atomic_t bgchild_exited = false; /* End position for xlog streaming, empty string if unknown yet */ static XLogRecPtr xlogendptr; @@ -277,6 +279,18 @@ disconnect_atexit(void) } #ifndef WIN32 +/* + * If the bgchild exits prematurely and raises a SIGCHLD signal, we can abort + * processing rather than wait until the backup has finished and error out at + * that time. On Windows, we use a background thread which can communicate + * without the need for a signal handler. 
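+ * (On Windows, LogStreamerMain reports an ungraceful exit by setting
+ * bgchild_exited directly.)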
+ */ +static void +sigchld_handler(SIGNAL_ARGS) +{ + bgchild_exited = true; +} + /* * On windows, our background thread dies along with the process. But on * Unix, if we have started a subprocess, we want to kill it off so it @@ -285,7 +299,7 @@ disconnect_atexit(void) static void kill_bgchild_atexit(void) { - if (bgchild > 0) + if (bgchild > 0 && !bgchild_exited) kill(bgchild, SIGTERM); } #endif @@ -572,17 +586,28 @@ LogStreamerMain(logstreamer_param *param) stream.do_sync); if (!ReceiveXlogStream(param->bgconn, &stream)) - + { /* * Any errors will already have been reported in the function process, * but we need to tell the parent that we didn't shutdown in a nice * way. */ +#ifdef WIN32 + /* + * In order to signal the main thread of an ungraceful exit we + * set the same flag that we use on Unix to signal SIGCHLD. + */ + bgchild_exited = true; +#endif return 1; + } if (!stream.walmethod->finish()) { pg_log_error("could not finish writing WAL files: %m"); +#ifdef WIN32 + bgchild_exited = true; +#endif return 1; } @@ -1134,6 +1159,12 @@ ReceiveCopyData(PGconn *conn, WriteDataCallback callback, exit(1); } + if (bgchild_exited) + { + pg_log_error("background process terminated unexpectedly"); + exit(1); + } + (*callback) (r, copybuf, callback_data); PQfreemem(copybuf); @@ -2882,6 +2913,18 @@ main(int argc, char **argv) } atexit(disconnect_atexit); +#ifndef WIN32 + /* + * Trap SIGCHLD to be able to handle the WAL stream process exiting. There + * is no SIGCHLD on Windows, there we rely on the background thread setting + * the signal variable on unexpected but graceful exit. If the WAL stream + * thread crashes on Windows it will bring down the entire process as it's + * a thread, so there is nothing to catch should that happen. A crash on + * UNIX will be caught by the signal handler. + */ + pqsignal(SIGCHLD, sigchld_handler); +#endif + /* * Set umask so that directories/files are created with the same * permissions as directories/files in the source data directory. diff --git a/src/bin/pg_basebackup/t/010_pg_basebackup.pl b/src/bin/pg_basebackup/t/010_pg_basebackup.pl index 75d6810d3e..8cb8cfe045 100644 --- a/src/bin/pg_basebackup/t/010_pg_basebackup.pl +++ b/src/bin/pg_basebackup/t/010_pg_basebackup.pl @@ -776,4 +776,38 @@ rmtree("$tempdir/backup_gzip3"); } +# Test background stream process terminating before the basebackup has +# finished, the main process should exit gracefully with an error message on +# stderr. To reduce the risk of timing related issues we invoke the base +# backup with rate throttling enabled. +$node->safe_psql('postgres', + q{CREATE TABLE t AS SELECT a FROM generate_series(1,10000) AS a;}); + +my $sigchld_bb_timeout = IPC::Run::timer(60); +my ($sigchld_bb_stdin, $sigchld_bb_stdout, $sigchld_bb_stderr) = ('', '', ''); +my $sigchld_bb = IPC::Run::start( + [ + @pg_basebackup_defs, '--wal-method=stream', '-D', "$tempdir/sigchld", + '--max-rate=32', '-d', $node->connstr('postgres') + ], + '<', + \$sigchld_bb_stdin, + '>', + \$sigchld_bb_stdout, + '2>', + \$sigchld_bb_stderr, + $sigchld_bb_timeout); + +is($node->poll_query_until('postgres', + "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE " . + "application_name = '010_pg_basebackup.pl' AND wait_event = 'WalSenderMain' " . 
+ "AND backend_type = 'walsender' AND query ~ 'START_REPLICATION'"), + "1", + "Walsender killed"); + +ok(pump_until($sigchld_bb, $sigchld_bb_timeout, \$sigchld_bb_stderr, + qr/background process terminated unexpectedly/), + 'background process exit message'); +$sigchld_bb->finish(); + done_testing(); From bd74c4037c4ee268db46e983bcc0f1e0a9f7ab72 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Wed, 23 Feb 2022 11:10:46 -0500 Subject: [PATCH 043/108] Re-allow underscore as first character of custom GUC names. Commit 3db826bd5 intended that valid_custom_variable_name's rules for valid identifiers match those of scan.l. However, I (tgl) had some kind of brain fade and put "_" in the wrong list. Fix by Japin Li, per bug #17415 from Daniel Polski. Discussion: https://postgr.es/m/17415-ebdb683d7e09a51c@postgresql.org --- src/backend/utils/misc/guc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index e4afd07bfe..bf7ec0d466 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -5474,13 +5474,13 @@ valid_custom_variable_name(const char *name) name_start = true; } else if (strchr("ABCDEFGHIJKLMNOPQRSTUVWXYZ" - "abcdefghijklmnopqrstuvwxyz", *p) != NULL || + "abcdefghijklmnopqrstuvwxyz_", *p) != NULL || IS_HIGHBIT_SET(*p)) { /* okay as first or non-first character */ name_start = false; } - else if (!name_start && strchr("0123456789_$", *p) != NULL) + else if (!name_start && strchr("0123456789$", *p) != NULL) /* okay as non-first character */ ; else return false; From cfb4e209ec15d4a0c44efa98b2788be806a43a92 Mon Sep 17 00:00:00 2001 From: Amit Kapila Date: Thu, 24 Feb 2022 08:54:39 +0530 Subject: [PATCH 044/108] Fix one of the tests introduced in commit 52e4f0cd47. In the Publisher-Subscriber setup, after performing a DML operation on the publisher, we need to wait for it to be replayed on the subscriber before querying the same data on the subscriber. One of the tests missed the wait step. As per buildfarm. Author: Peter Smith Discussion: https://postgr.es/m/CAHut+Pv=e9Qd1TSYo8Og6x6Abfz3b9_htwinLp4ENPgV45DACQ@mail.gmail.com --- src/test/subscription/t/028_row_filter.pl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/test/subscription/t/028_row_filter.pl b/src/test/subscription/t/028_row_filter.pl index 88dc865829..89bb364e9d 100644 --- a/src/test/subscription/t/028_row_filter.pl +++ b/src/test/subscription/t/028_row_filter.pl @@ -677,6 +677,8 @@ $node_publisher->safe_psql('postgres', "UPDATE tab_rowfilter_toast SET b = '1'"); +$node_publisher->wait_for_catchup($appname); + # Check expected replicated rows for tab_rowfilter_toast # tab_rowfilter_toast filter: (a = repeat('1234567890', 200) AND b < '10') # UPDATE old (repeat('1234567890', 200) ,'1234567890') NO From 04e706d4238f98a98e1c0b1a02db9d4280b96f04 Mon Sep 17 00:00:00 2001 From: Etsuro Fujita Date: Thu, 24 Feb 2022 14:30:00 +0900 Subject: [PATCH 045/108] postgres_fdw: Add support for parallel commit. postgres_fdw commits remote (sub)transactions opened on remote server(s) in a local (sub)transaction one by one when the local (sub)transaction commits. This patch allows it to commit the remote (sub)transactions in parallel to improve performance. This is enabled by the server option "parallel_commit". The default is false. Etsuro Fujita, reviewed by Fujii Masao and David Zhang. 
Discussion: http://postgr.es/m/CAPmGK17dAZCXvwnfpr1eTfknTGdt%3DhYTV9405Gt5SqPOX8K84w%40mail.gmail.com --- contrib/postgres_fdw/connection.c | 223 ++++++++++++++++-- .../postgres_fdw/expected/postgres_fdw.out | 78 +++++- contrib/postgres_fdw/option.c | 2 + contrib/postgres_fdw/sql/postgres_fdw.sql | 46 ++++ doc/src/sgml/postgres-fdw.sgml | 46 ++++ 5 files changed, 376 insertions(+), 19 deletions(-) diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c index f753c6e232..8c64d42dda 100644 --- a/contrib/postgres_fdw/connection.c +++ b/contrib/postgres_fdw/connection.c @@ -58,6 +58,7 @@ typedef struct ConnCacheEntry bool have_prep_stmt; /* have we prepared any stmts in this xact? */ bool have_error; /* have any subxacts aborted in this xact? */ bool changing_xact_state; /* xact state change in process */ + bool parallel_commit; /* do we commit (sub)xacts in parallel? */ bool invalidated; /* true if reconnect is pending */ bool keep_connections; /* setting value of keep_connections * server option */ @@ -92,6 +93,9 @@ static PGconn *connect_pg_server(ForeignServer *server, UserMapping *user); static void disconnect_pg_server(ConnCacheEntry *entry); static void check_conn_params(const char **keywords, const char **values, UserMapping *user); static void configure_remote_session(PGconn *conn); +static void do_sql_command_begin(PGconn *conn, const char *sql); +static void do_sql_command_end(PGconn *conn, const char *sql, + bool consume_input); static void begin_remote_xact(ConnCacheEntry *entry); static void pgfdw_xact_callback(XactEvent event, void *arg); static void pgfdw_subxact_callback(SubXactEvent event, @@ -100,6 +104,7 @@ static void pgfdw_subxact_callback(SubXactEvent event, void *arg); static void pgfdw_inval_callback(Datum arg, int cacheid, uint32 hashvalue); static void pgfdw_reject_incomplete_xact_state_change(ConnCacheEntry *entry); +static void pgfdw_reset_xact_state(ConnCacheEntry *entry, bool toplevel); static bool pgfdw_cancel_query(PGconn *conn); static bool pgfdw_exec_cleanup_query(PGconn *conn, const char *query, bool ignore_errors); @@ -107,6 +112,9 @@ static bool pgfdw_get_cleanup_result(PGconn *conn, TimestampTz endtime, PGresult **result, bool *timed_out); static void pgfdw_abort_cleanup(ConnCacheEntry *entry, const char *sql, bool toplevel); +static void pgfdw_finish_pre_commit_cleanup(List *pending_entries); +static void pgfdw_finish_pre_subcommit_cleanup(List *pending_entries, + int curlevel); static bool UserMappingPasswordRequired(UserMapping *user); static bool disconnect_cached_connections(Oid serverid); @@ -316,14 +324,20 @@ make_new_connection(ConnCacheEntry *entry, UserMapping *user) * is changed will be closed and re-made later. * * By default, all the connections to any foreign servers are kept open. + * + * Also determine whether to commit (sub)transactions opened on the remote + * server in parallel at (sub)transaction end. 
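+	 * (This is disabled by default; it is enabled by setting the
+	 * parallel_commit server option.)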
*/ entry->keep_connections = true; + entry->parallel_commit = false; foreach(lc, server->options) { DefElem *def = (DefElem *) lfirst(lc); if (strcmp(def->defname, "keep_connections") == 0) entry->keep_connections = defGetBoolean(def); + else if (strcmp(def->defname, "parallel_commit") == 0) + entry->parallel_commit = defGetBoolean(def); } /* Now try to make the connection */ @@ -623,10 +637,30 @@ configure_remote_session(PGconn *conn) void do_sql_command(PGconn *conn, const char *sql) { - PGresult *res; + do_sql_command_begin(conn, sql); + do_sql_command_end(conn, sql, false); +} +static void +do_sql_command_begin(PGconn *conn, const char *sql) +{ if (!PQsendQuery(conn, sql)) pgfdw_report_error(ERROR, NULL, conn, false, sql); +} + +static void +do_sql_command_end(PGconn *conn, const char *sql, bool consume_input) +{ + PGresult *res; + + /* + * If requested, consume whatever data is available from the socket. + * (Note that if all data is available, this allows pgfdw_get_result to + * call PQgetResult without forcing the overhead of WaitLatchOrSocket, + * which would be large compared to the overhead of PQconsumeInput.) + */ + if (consume_input && !PQconsumeInput(conn)) + pgfdw_report_error(ERROR, NULL, conn, false, sql); res = pgfdw_get_result(conn, sql); if (PQresultStatus(res) != PGRES_COMMAND_OK) pgfdw_report_error(ERROR, res, conn, true, sql); @@ -888,6 +922,7 @@ pgfdw_xact_callback(XactEvent event, void *arg) { HASH_SEQ_STATUS scan; ConnCacheEntry *entry; + List *pending_entries = NIL; /* Quick exit if no connections were touched in this transaction. */ if (!xact_got_connection) @@ -925,6 +960,12 @@ pgfdw_xact_callback(XactEvent event, void *arg) /* Commit all remote transactions during pre-commit */ entry->changing_xact_state = true; + if (entry->parallel_commit) + { + do_sql_command_begin(entry->conn, "COMMIT TRANSACTION"); + pending_entries = lappend(pending_entries, entry); + continue; + } do_sql_command(entry->conn, "COMMIT TRANSACTION"); entry->changing_xact_state = false; @@ -981,23 +1022,15 @@ pgfdw_xact_callback(XactEvent event, void *arg) } /* Reset state to show we're out of a transaction */ - entry->xact_depth = 0; + pgfdw_reset_xact_state(entry, true); + } - /* - * If the connection isn't in a good idle state, it is marked as - * invalid or keep_connections option of its server is disabled, then - * discard it to recover. Next GetConnection will open a new - * connection. - */ - if (PQstatus(entry->conn) != CONNECTION_OK || - PQtransactionStatus(entry->conn) != PQTRANS_IDLE || - entry->changing_xact_state || - entry->invalidated || - !entry->keep_connections) - { - elog(DEBUG3, "discarding connection %p", entry->conn); - disconnect_pg_server(entry); - } + /* If there are any pending connections, finish cleaning them up */ + if (pending_entries) + { + Assert(event == XACT_EVENT_PARALLEL_PRE_COMMIT || + event == XACT_EVENT_PRE_COMMIT); + pgfdw_finish_pre_commit_cleanup(pending_entries); } /* @@ -1021,6 +1054,7 @@ pgfdw_subxact_callback(SubXactEvent event, SubTransactionId mySubid, HASH_SEQ_STATUS scan; ConnCacheEntry *entry; int curlevel; + List *pending_entries = NIL; /* Nothing to do at subxact start, nor after commit. 
*/ if (!(event == SUBXACT_EVENT_PRE_COMMIT_SUB || @@ -1063,6 +1097,12 @@ pgfdw_subxact_callback(SubXactEvent event, SubTransactionId mySubid, /* Commit all remote subtransactions during pre-commit */ snprintf(sql, sizeof(sql), "RELEASE SAVEPOINT s%d", curlevel); entry->changing_xact_state = true; + if (entry->parallel_commit) + { + do_sql_command_begin(entry->conn, sql); + pending_entries = lappend(pending_entries, entry); + continue; + } do_sql_command(entry->conn, sql); entry->changing_xact_state = false; } @@ -1076,7 +1116,14 @@ pgfdw_subxact_callback(SubXactEvent event, SubTransactionId mySubid, } /* OK, we're outta that level of subtransaction */ - entry->xact_depth--; + pgfdw_reset_xact_state(entry, false); + } + + /* If there are any pending connections, finish cleaning them up */ + if (pending_entries) + { + Assert(event == SUBXACT_EVENT_PRE_COMMIT_SUB); + pgfdw_finish_pre_subcommit_cleanup(pending_entries, curlevel); } } @@ -1169,6 +1216,40 @@ pgfdw_reject_incomplete_xact_state_change(ConnCacheEntry *entry) server->servername))); } +/* + * Reset state to show we're out of a (sub)transaction. + */ +static void +pgfdw_reset_xact_state(ConnCacheEntry *entry, bool toplevel) +{ + if (toplevel) + { + /* Reset state to show we're out of a transaction */ + entry->xact_depth = 0; + + /* + * If the connection isn't in a good idle state, it is marked as + * invalid or keep_connections option of its server is disabled, then + * discard it to recover. Next GetConnection will open a new + * connection. + */ + if (PQstatus(entry->conn) != CONNECTION_OK || + PQtransactionStatus(entry->conn) != PQTRANS_IDLE || + entry->changing_xact_state || + entry->invalidated || + !entry->keep_connections) + { + elog(DEBUG3, "discarding connection %p", entry->conn); + disconnect_pg_server(entry); + } + } + else + { + /* Reset state to show we're out of a subtransaction */ + entry->xact_depth--; + } +} + /* * Cancel the currently-in-progress query (whose query text we do not have) * and ignore the result. Returns true if we successfully cancel the query @@ -1456,6 +1537,112 @@ pgfdw_abort_cleanup(ConnCacheEntry *entry, const char *sql, bool toplevel) entry->changing_xact_state = false; } +/* + * Finish pre-commit cleanup of connections on each of which we've sent a + * COMMIT command to the remote server. 
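+ *
+ * The COMMIT commands were already dispatched by do_sql_command_begin(), so
+ * the remote servers can process them concurrently while we collect the
+ * results here.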
+ */ +static void +pgfdw_finish_pre_commit_cleanup(List *pending_entries) +{ + ConnCacheEntry *entry; + List *pending_deallocs = NIL; + ListCell *lc; + + Assert(pending_entries); + + /* + * Get the result of the COMMIT command for each of the pending entries + */ + foreach(lc, pending_entries) + { + entry = (ConnCacheEntry *) lfirst(lc); + + Assert(entry->changing_xact_state); + /* + * We might already have received the result on the socket, so pass + * consume_input=true to try to consume it first + */ + do_sql_command_end(entry->conn, "COMMIT TRANSACTION", true); + entry->changing_xact_state = false; + + /* Do a DEALLOCATE ALL in parallel if needed */ + if (entry->have_prep_stmt && entry->have_error) + { + /* Ignore errors (see notes in pgfdw_xact_callback) */ + if (PQsendQuery(entry->conn, "DEALLOCATE ALL")) + { + pending_deallocs = lappend(pending_deallocs, entry); + continue; + } + } + entry->have_prep_stmt = false; + entry->have_error = false; + + pgfdw_reset_xact_state(entry, true); + } + + /* No further work if no pending entries */ + if (!pending_deallocs) + return; + + /* + * Get the result of the DEALLOCATE command for each of the pending + * entries + */ + foreach(lc, pending_deallocs) + { + PGresult *res; + + entry = (ConnCacheEntry *) lfirst(lc); + + /* Ignore errors (see notes in pgfdw_xact_callback) */ + while ((res = PQgetResult(entry->conn)) != NULL) + { + PQclear(res); + /* Stop if the connection is lost (else we'll loop infinitely) */ + if (PQstatus(entry->conn) == CONNECTION_BAD) + break; + } + entry->have_prep_stmt = false; + entry->have_error = false; + + pgfdw_reset_xact_state(entry, true); + } +} + +/* + * Finish pre-subcommit cleanup of connections on each of which we've sent a + * RELEASE command to the remote server. + */ +static void +pgfdw_finish_pre_subcommit_cleanup(List *pending_entries, int curlevel) +{ + ConnCacheEntry *entry; + char sql[100]; + ListCell *lc; + + Assert(pending_entries); + + /* + * Get the result of the RELEASE command for each of the pending entries + */ + snprintf(sql, sizeof(sql), "RELEASE SAVEPOINT s%d", curlevel); + foreach(lc, pending_entries) + { + entry = (ConnCacheEntry *) lfirst(lc); + + Assert(entry->changing_xact_state); + /* + * We might already have received the result on the socket, so pass + * consume_input=true to try to consume it first + */ + do_sql_command_end(entry->conn, sql, true); + entry->changing_xact_state = false; + + pgfdw_reset_xact_state(entry, false); + } +} + /* * List active foreign server connections. 
* diff --git a/contrib/postgres_fdw/expected/postgres_fdw.out b/contrib/postgres_fdw/expected/postgres_fdw.out index 057342083c..f210f91188 100644 --- a/contrib/postgres_fdw/expected/postgres_fdw.out +++ b/contrib/postgres_fdw/expected/postgres_fdw.out @@ -9509,7 +9509,7 @@ DO $d$ END; $d$; ERROR: invalid option "password" -HINT: Valid options in this context are: service, passfile, channel_binding, connect_timeout, dbname, host, hostaddr, port, options, application_name, keepalives, keepalives_idle, keepalives_interval, keepalives_count, tcp_user_timeout, sslmode, sslcompression, sslcert, sslkey, sslrootcert, sslcrl, sslcrldir, sslsni, requirepeer, ssl_min_protocol_version, ssl_max_protocol_version, gssencmode, krbsrvname, gsslib, target_session_attrs, use_remote_estimate, fdw_startup_cost, fdw_tuple_cost, extensions, updatable, truncatable, fetch_size, batch_size, async_capable, keep_connections +HINT: Valid options in this context are: service, passfile, channel_binding, connect_timeout, dbname, host, hostaddr, port, options, application_name, keepalives, keepalives_idle, keepalives_interval, keepalives_count, tcp_user_timeout, sslmode, sslcompression, sslcert, sslkey, sslrootcert, sslcrl, sslcrldir, sslsni, requirepeer, ssl_min_protocol_version, ssl_max_protocol_version, gssencmode, krbsrvname, gsslib, target_session_attrs, use_remote_estimate, fdw_startup_cost, fdw_tuple_cost, extensions, updatable, truncatable, fetch_size, batch_size, async_capable, parallel_commit, keep_connections CONTEXT: SQL statement "ALTER SERVER loopback_nopw OPTIONS (ADD password 'dummypw')" PL/pgSQL function inline_code_block line 3 at EXECUTE -- If we add a password for our user mapping instead, we should get a different @@ -10933,3 +10933,79 @@ SELECT pg_terminate_backend(pid, 180000) FROM pg_stat_activity --Clean up RESET postgres_fdw.application_name; RESET debug_discard_caches; +-- =================================================================== +-- test parallel commit +-- =================================================================== +ALTER SERVER loopback OPTIONS (ADD parallel_commit 'true'); +ALTER SERVER loopback2 OPTIONS (ADD parallel_commit 'true'); +CREATE TABLE ploc1 (f1 int, f2 text); +CREATE FOREIGN TABLE prem1 (f1 int, f2 text) + SERVER loopback OPTIONS (table_name 'ploc1'); +CREATE TABLE ploc2 (f1 int, f2 text); +CREATE FOREIGN TABLE prem2 (f1 int, f2 text) + SERVER loopback2 OPTIONS (table_name 'ploc2'); +BEGIN; +INSERT INTO prem1 VALUES (101, 'foo'); +INSERT INTO prem2 VALUES (201, 'bar'); +COMMIT; +SELECT * FROM prem1; + f1 | f2 +-----+----- + 101 | foo +(1 row) + +SELECT * FROM prem2; + f1 | f2 +-----+----- + 201 | bar +(1 row) + +BEGIN; +SAVEPOINT s; +INSERT INTO prem1 VALUES (102, 'foofoo'); +INSERT INTO prem2 VALUES (202, 'barbar'); +RELEASE SAVEPOINT s; +COMMIT; +SELECT * FROM prem1; + f1 | f2 +-----+-------- + 101 | foo + 102 | foofoo +(2 rows) + +SELECT * FROM prem2; + f1 | f2 +-----+-------- + 201 | bar + 202 | barbar +(2 rows) + +-- This tests executing DEALLOCATE ALL against foreign servers in parallel +-- during pre-commit +BEGIN; +SAVEPOINT s; +INSERT INTO prem1 VALUES (103, 'baz'); +INSERT INTO prem2 VALUES (203, 'qux'); +ROLLBACK TO SAVEPOINT s; +RELEASE SAVEPOINT s; +INSERT INTO prem1 VALUES (104, 'bazbaz'); +INSERT INTO prem2 VALUES (204, 'quxqux'); +COMMIT; +SELECT * FROM prem1; + f1 | f2 +-----+-------- + 101 | foo + 102 | foofoo + 104 | bazbaz +(3 rows) + +SELECT * FROM prem2; + f1 | f2 +-----+-------- + 201 | bar + 202 | barbar + 204 | quxqux +(3 rows) + 
+ALTER SERVER loopback OPTIONS (DROP parallel_commit); +ALTER SERVER loopback2 OPTIONS (DROP parallel_commit); diff --git a/contrib/postgres_fdw/option.c b/contrib/postgres_fdw/option.c index 2c6b2894b9..572591a558 100644 --- a/contrib/postgres_fdw/option.c +++ b/contrib/postgres_fdw/option.c @@ -121,6 +121,7 @@ postgres_fdw_validator(PG_FUNCTION_ARGS) strcmp(def->defname, "updatable") == 0 || strcmp(def->defname, "truncatable") == 0 || strcmp(def->defname, "async_capable") == 0 || + strcmp(def->defname, "parallel_commit") == 0 || strcmp(def->defname, "keep_connections") == 0) { /* these accept only boolean values */ @@ -249,6 +250,7 @@ InitPgFdwOptions(void) /* async_capable is available on both server and table */ {"async_capable", ForeignServerRelationId, false}, {"async_capable", ForeignTableRelationId, false}, + {"parallel_commit", ForeignServerRelationId, false}, {"keep_connections", ForeignServerRelationId, false}, {"password_required", UserMappingRelationId, false}, diff --git a/contrib/postgres_fdw/sql/postgres_fdw.sql b/contrib/postgres_fdw/sql/postgres_fdw.sql index 6c9f579c41..95b6b7192e 100644 --- a/contrib/postgres_fdw/sql/postgres_fdw.sql +++ b/contrib/postgres_fdw/sql/postgres_fdw.sql @@ -3515,3 +3515,49 @@ SELECT pg_terminate_backend(pid, 180000) FROM pg_stat_activity --Clean up RESET postgres_fdw.application_name; RESET debug_discard_caches; + +-- =================================================================== +-- test parallel commit +-- =================================================================== +ALTER SERVER loopback OPTIONS (ADD parallel_commit 'true'); +ALTER SERVER loopback2 OPTIONS (ADD parallel_commit 'true'); + +CREATE TABLE ploc1 (f1 int, f2 text); +CREATE FOREIGN TABLE prem1 (f1 int, f2 text) + SERVER loopback OPTIONS (table_name 'ploc1'); +CREATE TABLE ploc2 (f1 int, f2 text); +CREATE FOREIGN TABLE prem2 (f1 int, f2 text) + SERVER loopback2 OPTIONS (table_name 'ploc2'); + +BEGIN; +INSERT INTO prem1 VALUES (101, 'foo'); +INSERT INTO prem2 VALUES (201, 'bar'); +COMMIT; +SELECT * FROM prem1; +SELECT * FROM prem2; + +BEGIN; +SAVEPOINT s; +INSERT INTO prem1 VALUES (102, 'foofoo'); +INSERT INTO prem2 VALUES (202, 'barbar'); +RELEASE SAVEPOINT s; +COMMIT; +SELECT * FROM prem1; +SELECT * FROM prem2; + +-- This tests executing DEALLOCATE ALL against foreign servers in parallel +-- during pre-commit +BEGIN; +SAVEPOINT s; +INSERT INTO prem1 VALUES (103, 'baz'); +INSERT INTO prem2 VALUES (203, 'qux'); +ROLLBACK TO SAVEPOINT s; +RELEASE SAVEPOINT s; +INSERT INTO prem1 VALUES (104, 'bazbaz'); +INSERT INTO prem2 VALUES (204, 'quxqux'); +COMMIT; +SELECT * FROM prem1; +SELECT * FROM prem2; + +ALTER SERVER loopback OPTIONS (DROP parallel_commit); +ALTER SERVER loopback2 OPTIONS (DROP parallel_commit); diff --git a/doc/src/sgml/postgres-fdw.sgml b/doc/src/sgml/postgres-fdw.sgml index dc57fe4b0d..8ebf0dc3a0 100644 --- a/doc/src/sgml/postgres-fdw.sgml +++ b/doc/src/sgml/postgres-fdw.sgml @@ -456,6 +456,52 @@ OPTIONS (ADD password_required 'false'); + + Transaction Management Options + + + When multiple remote (sub)transactions are involved in a local + (sub)transaction, by default postgres_fdw commits + those remote (sub)transactions one by one when the local (sub)transaction + commits. + Performance can be improved with the following option: + + + + + + parallel_commit (boolean) + + + This option controls whether postgres_fdw commits + remote (sub)transactions opened on a foreign server in a local + (sub)transaction in parallel when the local (sub)transaction commits. 
+ This option can only be specified for foreign servers, not per-table. + The default is false. + + + + If multiple foreign servers with this option enabled are involved in + a local (sub)transaction, multiple remote (sub)transactions opened on + those foreign servers in the local (sub)transaction are committed in + parallel across those foreign servers when the local (sub)transaction + commits. + + + + For a foreign server with this option enabled, if many remote + (sub)transactions are opened on the foreign server in a local + (sub)transaction, this option might increase the remote server’s load + when the local (sub)transaction commits, so be careful when using this + option. + + + + + + + + Updatability Options From fcc28178c6943d7df72b484a87fdb7e06d0c1079 Mon Sep 17 00:00:00 2001 From: Michael Paquier Date: Thu, 24 Feb 2022 16:11:34 +0900 Subject: [PATCH 046/108] Clean up and simplify code in a couple of set-returning functions The following set-returning functions have their logic simplified, to be more consistent with other in-core areas: - pg_prepared_statement()'s tuple descriptor is now created with get_call_result_type() instead of being created from scratch, saving from some duplication with pg_proc.dat. - show_all_file_settings(), similarly, now uses get_call_result_type() to build its tuple descriptor instead of creating it from scratch. - pg_options_to_table() made use of a static routine called only once. This commit removes this internal routine to make the function easier to follow. - pg_config() was using a unique logic style, doing checks on the tuple descriptor passed down in expectedDesc, but it has no need to do so. This switches the function to use a tuplestore with a tuple descriptor retrieved from get_call_result_type(), instead. This simplifies an upcoming patch aimed at refactoring the way tuplestores are created and checked in set-returning functions, this change making sense as its own independent cleanup by shaving some code. Author: Melanie Plageman, Michael Paquier Reviewed-by: Justin Pryzby Discussion: https://postgr.es/m/CAAKRu_azyd1Z3W_r7Ou4sorTjRCs+PxeHw1CWJeXKofkE6TuZg@mail.gmail.com --- src/backend/commands/prepare.c | 31 ++++---------- src/backend/foreign/foreign.c | 37 +++++++---------- src/backend/utils/misc/guc.c | 20 ++------- src/backend/utils/misc/pg_config.c | 65 ++++++++---------------------- src/backend/utils/mmgr/portalmem.c | 23 +++-------- 5 files changed, 48 insertions(+), 128 deletions(-) diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c index e0c985ef8b..dce30aed6c 100644 --- a/src/backend/commands/prepare.c +++ b/src/backend/commands/prepare.c @@ -22,6 +22,7 @@ #include "catalog/pg_type.h" #include "commands/createas.h" #include "commands/prepare.h" +#include "funcapi.h" #include "miscadmin.h" #include "nodes/nodeFuncs.h" #include "parser/analyze.h" @@ -716,30 +717,13 @@ pg_prepared_statement(PG_FUNCTION_ARGS) (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("materialize mode required, but it is not allowed in this context"))); + if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) + elog(ERROR, "return type must be a row type"); + /* need to build tuplestore in query context */ per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; oldcontext = MemoryContextSwitchTo(per_query_ctx); - /* - * build tupdesc for result tuples. 
This must match the definition of the - * pg_prepared_statements view in system_views.sql - */ - tupdesc = CreateTemplateTupleDesc(7); - TupleDescInitEntry(tupdesc, (AttrNumber) 1, "name", - TEXTOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 2, "statement", - TEXTOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 3, "prepare_time", - TIMESTAMPTZOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 4, "parameter_types", - REGTYPEARRAYOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 5, "from_sql", - BOOLOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 6, "generic_plans", - INT8OID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 7, "custom_plans", - INT8OID, -1, 0); - /* * We put all the tuples into a tuplestore in one scan of the hashtable. * This avoids any issue of the hashtable possibly changing between calls. @@ -747,6 +731,9 @@ pg_prepared_statement(PG_FUNCTION_ARGS) tupstore = tuplestore_begin_heap(rsinfo->allowedModes & SFRM_Materialize_Random, false, work_mem); + rsinfo->returnMode = SFRM_Materialize; + rsinfo->setResult = tupstore; + rsinfo->setDesc = tupdesc; /* generate junk in short-term context */ MemoryContextSwitchTo(oldcontext); @@ -778,10 +765,6 @@ pg_prepared_statement(PG_FUNCTION_ARGS) } } - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - return (Datum) 0; } diff --git a/src/backend/foreign/foreign.c b/src/backend/foreign/foreign.c index d910bc2fbe..c3406c3b9d 100644 --- a/src/backend/foreign/foreign.c +++ b/src/backend/foreign/foreign.c @@ -499,17 +499,19 @@ IsImportableForeignTable(const char *tablename, /* - * deflist_to_tuplestore - Helper function to convert DefElem list to - * tuplestore usable in SRF. + * pg_options_to_table - Convert options array to name/value table + * + * This is useful to provide details for information_schema and pg_dump. */ -static void -deflist_to_tuplestore(ReturnSetInfo *rsinfo, List *options) +Datum +pg_options_to_table(PG_FUNCTION_ARGS) { + Datum array = PG_GETARG_DATUM(0); ListCell *cell; + List *options; + ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; TupleDesc tupdesc; Tuplestorestate *tupstore; - Datum values[2]; - bool nulls[2]; MemoryContext per_query_ctx; MemoryContext oldcontext; @@ -524,6 +526,9 @@ deflist_to_tuplestore(ReturnSetInfo *rsinfo, List *options) (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("materialize mode required, but it is not allowed in this context"))); + options = untransformRelOptions(array); + rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; + per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; oldcontext = MemoryContextSwitchTo(per_query_ctx); @@ -536,9 +541,13 @@ deflist_to_tuplestore(ReturnSetInfo *rsinfo, List *options) rsinfo->setResult = tupstore; rsinfo->setDesc = tupdesc; + MemoryContextSwitchTo(oldcontext); + foreach(cell, options) { DefElem *def = lfirst(cell); + Datum values[2]; + bool nulls[2]; values[0] = CStringGetTextDatum(def->defname); nulls[0] = false; @@ -555,22 +564,6 @@ deflist_to_tuplestore(ReturnSetInfo *rsinfo, List *options) tuplestore_putvalues(tupstore, tupdesc, values, nulls); } - MemoryContextSwitchTo(oldcontext); -} - - -/* - * Convert options array to name/value table. Useful for information - * schema and pg_dump. 
- */ -Datum -pg_options_to_table(PG_FUNCTION_ARGS) -{ - Datum array = PG_GETARG_DATUM(0); - - deflist_to_tuplestore((ReturnSetInfo *) fcinfo->resultinfo, - untransformRelOptions(array)); - return (Datum) 0; } diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index bf7ec0d466..1e3650184b 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -10174,6 +10174,9 @@ show_all_file_settings(PG_FUNCTION_ARGS) (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("materialize mode required, but it is not allowed in this context"))); + if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) + elog(ERROR, "return type must be a row type"); + /* Scan the config files using current context as workspace */ conf = ProcessConfigFileInternal(PGC_SIGHUP, false, DEBUG3); @@ -10181,23 +10184,6 @@ show_all_file_settings(PG_FUNCTION_ARGS) per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; oldcontext = MemoryContextSwitchTo(per_query_ctx); - /* Build a tuple descriptor for our result type */ - tupdesc = CreateTemplateTupleDesc(NUM_PG_FILE_SETTINGS_ATTS); - TupleDescInitEntry(tupdesc, (AttrNumber) 1, "sourcefile", - TEXTOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 2, "sourceline", - INT4OID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 3, "seqno", - INT4OID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 4, "name", - TEXTOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 5, "setting", - TEXTOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 6, "applied", - BOOLOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 7, "error", - TEXTOID, -1, 0); - /* Build a tuplestore to return our results in */ tupstore = tuplestore_begin_heap(true, false, work_mem); rsinfo->returnMode = SFRM_Materialize; diff --git a/src/backend/utils/misc/pg_config.c b/src/backend/utils/misc/pg_config.c index 2dc875ebfb..e646a41910 100644 --- a/src/backend/utils/misc/pg_config.c +++ b/src/backend/utils/misc/pg_config.c @@ -26,14 +26,10 @@ pg_config(PG_FUNCTION_ARGS) { ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; Tuplestorestate *tupstore; - HeapTuple tuple; TupleDesc tupdesc; - AttInMetadata *attinmeta; - MemoryContext per_query_ctx; MemoryContext oldcontext; ConfigData *configdata; size_t configdata_len; - char *values[2]; int i = 0; /* check to see if caller supports us returning a tuplestore */ @@ -41,65 +37,38 @@ pg_config(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize) || - rsinfo->expectedDesc == NULL) + if (!(rsinfo->allowedModes & SFRM_Materialize)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("materialize mode required, but it is not allowed in this context"))); - per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; - oldcontext = MemoryContextSwitchTo(per_query_ctx); + if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) + elog(ERROR, "return type must be a row type"); - /* get the requested return tuple description */ - tupdesc = CreateTupleDescCopy(rsinfo->expectedDesc); + /* Build tuplestore to hold the result rows */ + oldcontext = MemoryContextSwitchTo(rsinfo->econtext->ecxt_per_query_memory); - /* - * Check to make sure we have a reasonable tuple descriptor - */ - if (tupdesc->natts != 2 || - TupleDescAttr(tupdesc, 0)->atttypid != TEXTOID || - TupleDescAttr(tupdesc, 1)->atttypid != TEXTOID) - ereport(ERROR, - 
(errcode(ERRCODE_SYNTAX_ERROR), - errmsg("query-specified return tuple and " - "function return type are not compatible"))); - - /* OK to use it */ - attinmeta = TupleDescGetAttInMetadata(tupdesc); - - /* let the caller know we're sending back a tuplestore */ + tupstore = tuplestore_begin_heap(true, false, work_mem); rsinfo->returnMode = SFRM_Materialize; + rsinfo->setResult = tupstore; + rsinfo->setDesc = tupdesc; - /* initialize our tuplestore */ - tupstore = tuplestore_begin_heap(true, false, work_mem); + MemoryContextSwitchTo(oldcontext); configdata = get_configdata(my_exec_path, &configdata_len); for (i = 0; i < configdata_len; i++) { - values[0] = configdata[i].name; - values[1] = configdata[i].setting; - - tuple = BuildTupleFromCStrings(attinmeta, values); - tuplestore_puttuple(tupstore, tuple); - } + Datum values[2]; + bool nulls[2]; - /* - * no longer need the tuple descriptor reference created by - * TupleDescGetAttInMetadata() - */ - ReleaseTupleDesc(tupdesc); + memset(values, 0, sizeof(values)); + memset(nulls, 0, sizeof(nulls)); - rsinfo->setResult = tupstore; + values[0] = CStringGetTextDatum(configdata[i].name); + values[1] = CStringGetTextDatum(configdata[i].setting); - /* - * SFRM_Materialize mode expects us to return a NULL Datum. The actual - * tuples are in our tuplestore and passed back through rsinfo->setResult. - * rsinfo->setDesc is set to the tuple description that we actually used - * to build our tuples with, so the caller can verify we did what it was - * expecting. - */ - rsinfo->setDesc = tupdesc; - MemoryContextSwitchTo(oldcontext); + tuplestore_putvalues(tupstore, tupdesc, values, nulls); + } return (Datum) 0; } diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c index 7885344164..21ad87c024 100644 --- a/src/backend/utils/mmgr/portalmem.c +++ b/src/backend/utils/mmgr/portalmem.c @@ -21,6 +21,7 @@ #include "access/xact.h" #include "catalog/pg_type.h" #include "commands/portalcmds.h" +#include "funcapi.h" #include "miscadmin.h" #include "storage/ipc.h" #include "utils/builtins.h" @@ -1152,23 +1153,8 @@ pg_cursor(PG_FUNCTION_ARGS) per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; oldcontext = MemoryContextSwitchTo(per_query_ctx); - /* - * build tupdesc for result tuples. This must match the definition of the - * pg_cursors view in system_views.sql - */ - tupdesc = CreateTemplateTupleDesc(6); - TupleDescInitEntry(tupdesc, (AttrNumber) 1, "name", - TEXTOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 2, "statement", - TEXTOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 3, "is_holdable", - BOOLOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 4, "is_binary", - BOOLOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 5, "is_scrollable", - BOOLOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 6, "creation_time", - TIMESTAMPTZOID, -1, 0); + if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) + elog(ERROR, "return type must be a row type"); /* * We put all the tuples into a tuplestore in one scan of the hashtable. 
@@ -1177,6 +1163,9 @@ pg_cursor(PG_FUNCTION_ARGS) tupstore = tuplestore_begin_heap(rsinfo->allowedModes & SFRM_Materialize_Random, false, work_mem); + rsinfo->returnMode = SFRM_Materialize; + rsinfo->setResult = tupstore; + rsinfo->setDesc = tupdesc; /* generate junk in short-term context */ MemoryContextSwitchTo(oldcontext); From e77216fcb021bb19d83b348db084adfe8d918118 Mon Sep 17 00:00:00 2001 From: Michael Paquier Date: Thu, 24 Feb 2022 16:54:59 +0900 Subject: [PATCH 047/108] Simplify more checks related to set-returning functions This makes more consistent the SRF-related checks in the area of PL/pgSQL, PL/Perl, PL/Tcl, pageinspect and some of the JSON worker functions, making it easier to grep for the same error patterns through the code, reducing a bit the translation work. It is worth noting that each_worker_jsonb()/each_worker() in jsonfuncs.c and pageinspect's brin_page_items() were doing a check on expectedDesc that is not required as they fetch their tuple descriptor directly from get_call_result_type(). This looks like a set of copy-paste errors that have spread over the years. This commit is a continuation of the changes begun in 07daca5, for any remaining code paths on sight. Like fcc2817, this makes the code more consistent, easing the integration of a larger patch that will refactor the way tuplestores are created and checked in a good portion of the set-returning functions present in core. I have worked my way through the changes of this patch by myself, and Ranier has proposed the same changes in a different thread in parallel, though there were some inconsistencies related in expectedDesc in what was proposed by him. Author: Michael Paquier, Ranier Vilela Discussion: https://postgr.es/m/CAAKRu_azyd1Z3W_r7Ou4sorTjRCs+PxeHw1CWJeXKofkE6TuZg@mail.gmail.com Discussion: https://postgr.es/m/CAEudQApm=AFuJjEHLBjBcJbxcw4pBMwg2sHwXyCXYcbBOj3hpg@mail.gmail.com --- contrib/pageinspect/brinfuncs.c | 3 +- src/backend/utils/adt/jsonfuncs.c | 61 ++++++++++++++++++------------- src/pl/plperl/plperl.c | 11 ++++-- src/pl/plpgsql/src/pl_exec.c | 19 +++++++--- src/pl/tcl/pltcl.c | 8 +++- 5 files changed, 63 insertions(+), 39 deletions(-) diff --git a/contrib/pageinspect/brinfuncs.c b/contrib/pageinspect/brinfuncs.c index 50892b5cc2..683749a150 100644 --- a/contrib/pageinspect/brinfuncs.c +++ b/contrib/pageinspect/brinfuncs.c @@ -148,8 +148,7 @@ brin_page_items(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize) || - rsinfo->expectedDesc == NULL) + if (!(rsinfo->allowedModes & SFRM_Materialize)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("materialize mode required, but it is not allowed in this context"))); diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c index 0273f883d4..2457061f97 100644 --- a/src/backend/utils/adt/jsonfuncs.c +++ b/src/backend/utils/adt/jsonfuncs.c @@ -1927,21 +1927,19 @@ each_worker_jsonb(FunctionCallInfo fcinfo, const char *funcname, bool as_text) rsi = (ReturnSetInfo *) fcinfo->resultinfo; - if (!rsi || !IsA(rsi, ReturnSetInfo) || - (rsi->allowedModes & SFRM_Materialize) == 0 || - rsi->expectedDesc == NULL) + if (!rsi || !IsA(rsi, ReturnSetInfo)) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("set-valued function called in context that cannot accept a set"))); + if (!(rsi->allowedModes & SFRM_Materialize)) ereport(ERROR, 
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that " - "cannot accept a set"))); + errmsg("materialize mode required, but it is not allowed in this context"))); rsi->returnMode = SFRM_Materialize; if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("function returning record called in context " - "that cannot accept type record"))); + elog(ERROR, "return type must be a row type"); old_cxt = MemoryContextSwitchTo(rsi->econtext->ecxt_per_query_memory); @@ -2039,13 +2037,15 @@ each_worker(FunctionCallInfo fcinfo, bool as_text) rsi = (ReturnSetInfo *) fcinfo->resultinfo; - if (!rsi || !IsA(rsi, ReturnSetInfo) || - (rsi->allowedModes & SFRM_Materialize) == 0 || - rsi->expectedDesc == NULL) + if (!rsi || !IsA(rsi, ReturnSetInfo)) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("set-valued function called in context that cannot accept a set"))); + + if (!(rsi->allowedModes & SFRM_Materialize)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that " - "cannot accept a set"))); + errmsg("materialize mode required, but it is not allowed in this context"))); rsi->returnMode = SFRM_Materialize; @@ -2227,13 +2227,16 @@ elements_worker_jsonb(FunctionCallInfo fcinfo, const char *funcname, rsi = (ReturnSetInfo *) fcinfo->resultinfo; - if (!rsi || !IsA(rsi, ReturnSetInfo) || - (rsi->allowedModes & SFRM_Materialize) == 0 || + if (!rsi || !IsA(rsi, ReturnSetInfo)) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("set-valued function called in context that cannot accept a set"))); + + if (!(rsi->allowedModes & SFRM_Materialize) || rsi->expectedDesc == NULL) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that " - "cannot accept a set"))); + errmsg("materialize mode required, but it is not allowed in this context"))); rsi->returnMode = SFRM_Materialize; @@ -2336,13 +2339,16 @@ elements_worker(FunctionCallInfo fcinfo, const char *funcname, bool as_text) rsi = (ReturnSetInfo *) fcinfo->resultinfo; - if (!rsi || !IsA(rsi, ReturnSetInfo) || - (rsi->allowedModes & SFRM_Materialize) == 0 || + if (!rsi || !IsA(rsi, ReturnSetInfo)) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("set-valued function called in context that cannot accept a set"))); + + if (!(rsi->allowedModes & SFRM_Materialize) || rsi->expectedDesc == NULL) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that " - "cannot accept a set"))); + errmsg("materialize mode required, but it is not allowed in this context"))); rsi->returnMode = SFRM_Materialize; @@ -3798,12 +3804,15 @@ populate_recordset_worker(FunctionCallInfo fcinfo, const char *funcname, rsi = (ReturnSetInfo *) fcinfo->resultinfo; - if (!rsi || !IsA(rsi, ReturnSetInfo) || - (rsi->allowedModes & SFRM_Materialize) == 0) + if (!rsi || !IsA(rsi, ReturnSetInfo)) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("set-valued function called in context that cannot accept a set"))); + + if (!(rsi->allowedModes & SFRM_Materialize)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that " - "cannot accept a set"))); + errmsg("materialize mode required, but it is not allowed in this context"))); rsi->returnMode = SFRM_Materialize; diff --git a/src/pl/plperl/plperl.c 
b/src/pl/plperl/plperl.c index b5879c2947..81d9c46e00 100644 --- a/src/pl/plperl/plperl.c +++ b/src/pl/plperl/plperl.c @@ -2414,12 +2414,15 @@ plperl_func_handler(PG_FUNCTION_ARGS) if (prodesc->fn_retisset) { /* Check context before allowing the call to go through */ - if (!rsi || !IsA(rsi, ReturnSetInfo) || - (rsi->allowedModes & SFRM_Materialize) == 0) + if (!rsi || !IsA(rsi, ReturnSetInfo)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that " - "cannot accept a set"))); + errmsg("set-valued function called in context that cannot accept a set"))); + + if (!(rsi->allowedModes & SFRM_Materialize)) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("materialize mode required, but it is not allowed in this context"))); } activate_interpreter(prodesc->interp); diff --git a/src/pl/plpgsql/src/pl_exec.c b/src/pl/plpgsql/src/pl_exec.c index 70c4a75295..9674c29250 100644 --- a/src/pl/plpgsql/src/pl_exec.c +++ b/src/pl/plpgsql/src/pl_exec.c @@ -629,11 +629,16 @@ plpgsql_exec_function(PLpgSQL_function *func, FunctionCallInfo fcinfo, ReturnSetInfo *rsi = estate.rsi; /* Check caller can handle a set result */ - if (!rsi || !IsA(rsi, ReturnSetInfo) || - (rsi->allowedModes & SFRM_Materialize) == 0) + if (!rsi || !IsA(rsi, ReturnSetInfo)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("set-valued function called in context that cannot accept a set"))); + + if (!(rsi->allowedModes & SFRM_Materialize)) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("materialize mode required, but it is not allowed in this context"))); + rsi->returnMode = SFRM_Materialize; /* If we produced any tuples, send back the result */ @@ -3645,13 +3650,17 @@ exec_init_tuple_store(PLpgSQL_execstate *estate) /* * Check caller can handle a set result in the way we want */ - if (!rsi || !IsA(rsi, ReturnSetInfo) || - (rsi->allowedModes & SFRM_Materialize) == 0 || - rsi->expectedDesc == NULL) + if (!rsi || !IsA(rsi, ReturnSetInfo)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("set-valued function called in context that cannot accept a set"))); + if (!(rsi->allowedModes & SFRM_Materialize) || + rsi->expectedDesc == NULL) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("materialize mode required, but it is not allowed in this context"))); + /* * Switch to the right memory context and resource owner for storing the * tuplestore for return set. 
If we're within a subtransaction opened for
diff --git a/src/pl/tcl/pltcl.c b/src/pl/tcl/pltcl.c
index ab759833db..c5fad05e12 100644
--- a/src/pl/tcl/pltcl.c
+++ b/src/pl/tcl/pltcl.c
@@ -829,12 +829,16 @@ pltcl_func_handler(PG_FUNCTION_ARGS, pltcl_call_state *call_state,
 	{
 		ReturnSetInfo *rsi = (ReturnSetInfo *) fcinfo->resultinfo;
 
-		if (!rsi || !IsA(rsi, ReturnSetInfo) ||
-			(rsi->allowedModes & SFRM_Materialize) == 0)
+		if (!rsi || !IsA(rsi, ReturnSetInfo))
 			ereport(ERROR,
 					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
 					 errmsg("set-valued function called in context that cannot accept a set")));
 
+		if (!(rsi->allowedModes & SFRM_Materialize))
+			ereport(ERROR,
+					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+					 errmsg("materialize mode required, but it is not allowed in this context")));
+
 		call_state->rsi = rsi;
 		call_state->tuple_store_cxt = rsi->econtext->ecxt_per_query_memory;
 		call_state->tuple_store_owner = CurrentResourceOwner;
From 6c46e8a5dfc9f49e673d76fc6ae097b81d7740ef Mon Sep 17 00:00:00 2001
From: Heikki Linnakangas
Date: Thu, 24 Feb 2022 16:15:12 +0200
Subject: [PATCH 048/108] Fix data loss on crash after sorted GiST index build.

If a checkpoint happens during sorted GiST index build, and the system
crashes after the checkpoint and after the index build has finished, the
data written to the index before the checkpoint started could be lost.
The checkpoint won't fsync it, and it won't be replayed at crash recovery
either.

Fix by calling smgrimmedsync() after the index build, just like in B-tree
index build.

Backpatch to v14 where the sorted GiST index build was introduced.

Reported-by: Melanie Plageman
Discussion: https://www.postgresql.org/message-id/CAAKRu_ZJJynimxKj5xYBSziL62-iEtPE+fx-B=JzR=jUtP92mw@mail.gmail.com
---
 src/backend/access/gist/gistbuild.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/src/backend/access/gist/gistbuild.c b/src/backend/access/gist/gistbuild.c
index 4db896a533..e081e6571a 100644
--- a/src/backend/access/gist/gistbuild.c
+++ b/src/backend/access/gist/gistbuild.c
@@ -467,6 +467,18 @@ gist_indexsortbuild(GISTBuildState *state)
 
 	pfree(levelstate->pages[0]);
 	pfree(levelstate);
+
+	/*
+	 * When we WAL-logged index pages, we must nonetheless fsync index files.
+	 * Since we're building outside shared buffers, a CHECKPOINT occurring
+	 * during the build has no way to flush the previously written data to
+	 * disk (indeed it won't know the index even exists). A crash later on
+	 * would replay WAL from the checkpoint, therefore it wouldn't replay our
+	 * earlier WAL entries. If we do not fsync those pages here, they might
+	 * still not be on disk when the crash occurs.
+	 */
+	if (RelationNeedsWAL(state->indexrel))
+		smgrimmedsync(RelationGetSmgr(state->indexrel), MAIN_FORKNUM);
 }
 
 /*
From 31d8d4740ffb21c9898a21b5018c31e92af6935d Mon Sep 17 00:00:00 2001
From: Daniel Gustafsson
Date: Thu, 24 Feb 2022 20:58:18 +0100
Subject: [PATCH 049/108] Guard against reallocation failure in pg_regress

realloc() will return NULL on a failed reallocation, so the destination
pointer must be inspected to avoid a null pointer dereference. Further,
assigning the return value to the source pointer leaks the allocation in
the case of reallocation failure. Fix by using pg_realloc instead, which
has full error handling.
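
To make the hazard concrete, here is a minimal sketch of the safe pattern,
with the broken variant shown in a comment. The function and variable names
are hypothetical illustrations, not the pg_regress code itself:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical illustration of the realloc() pitfall being fixed. */
static char *
grow_buffer(char *buf, size_t newsize)
{
	/*
	 * Broken variant, for contrast:
	 *     buf = realloc(buf, newsize);
	 * On failure realloc() returns NULL, so the old pointer is lost
	 * (leaking that allocation) and any later write dereferences NULL.
	 */
	char	   *tmp = realloc(buf, newsize);

	if (tmp == NULL)
	{
		free(buf);				/* avoid leaking the old allocation */
		fprintf(stderr, "out of memory\n");
		exit(1);
	}
	return tmp;					/* safe to reassign only after the check */
}

int
main(void)
{
	char	   *buf = grow_buffer(NULL, 64);	/* realloc(NULL, n) acts as malloc(n) */

	buf[0] = '"';
	free(buf);
	return 0;
}

pg_realloc() packages this kind of check and bails out cleanly on failure,
which is why the one-line switch in the patch suffices.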
Reviewed-by: Tom Lane Discussion: https://postgr.es/m/9FC7E603-9246-4C62-B466-A39CFAF454AE@yesql.se --- src/test/regress/pg_regress.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/regress/pg_regress.c b/src/test/regress/pg_regress.c index e6f71c7582..db8427dd9b 100644 --- a/src/test/regress/pg_regress.c +++ b/src/test/regress/pg_regress.c @@ -774,7 +774,7 @@ fmtHba(const char *raw) const char *rp; char *wp; - wp = ret = realloc(ret, 3 + strlen(raw) * 2); + wp = ret = pg_realloc(ret, 3 + strlen(raw) * 2); *wp++ = '"'; for (rp = raw; *rp; rp++) From cf879d3069a3f025824b4a3fa3086137b34bad48 Mon Sep 17 00:00:00 2001 From: Peter Geoghegan Date: Thu, 24 Feb 2022 18:31:07 -0800 Subject: [PATCH 050/108] Remove unnecessary heap_tuple_needs_freeze argument. The buffer argument hasn't been used since the function was first added by commit bbb6e559c4. The sibling heap_prepare_freeze_tuple function doesn't have such an argument either. Remove it. --- src/backend/access/heap/heapam.c | 2 +- src/backend/access/heap/vacuumlazy.c | 2 +- src/include/access/heapam.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 2240cfd936..59d43e2ba9 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -7141,7 +7141,7 @@ heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple) */ bool heap_tuple_needs_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid, - MultiXactId cutoff_multi, Buffer buf) + MultiXactId cutoff_multi) { TransactionId xid; diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index 242511a235..f48e699b91 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -2015,7 +2015,7 @@ lazy_scan_noprune(LVRelState *vacrel, tupleheader = (HeapTupleHeader) PageGetItem(page, itemid); if (heap_tuple_needs_freeze(tupleheader, vacrel->FreezeLimit, - vacrel->MultiXactCutoff, buf)) + vacrel->MultiXactCutoff)) { if (vacrel->aggressive) { diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h index 0ad87730e1..b46ab7d739 100644 --- a/src/include/access/heapam.h +++ b/src/include/access/heapam.h @@ -168,7 +168,7 @@ extern bool heap_freeze_tuple(HeapTupleHeader tuple, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId cutoff_xid, TransactionId cutoff_multi); extern bool heap_tuple_needs_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid, - MultiXactId cutoff_multi, Buffer buf); + MultiXactId cutoff_multi); extern bool heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple); extern void simple_heap_insert(Relation relation, HeapTuple tup); From 22eb12cfff3e842bb35427e1ec819d64daabd5a1 Mon Sep 17 00:00:00 2001 From: Amit Kapila Date: Fri, 25 Feb 2022 07:51:21 +0530 Subject: [PATCH 051/108] Fix few values in pg_proc for pg_stat_get_replication_slot. The function pg_stat_get_replication_slot() is not a SRF but marked incorrectly in the pg_proc. 
Reported-by: Michael Paquier Discussion: https://postgr.es/m/YhMk4RjoMK3CCXy2@paquier.xyz --- src/include/catalog/catversion.h | 2 +- src/include/catalog/pg_proc.dat | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/src/include/catalog/catversion.h b/src/include/catalog/catversion.h index 1addb568ef..14194afe1c 100644 --- a/src/include/catalog/catversion.h +++ b/src/include/catalog/catversion.h @@ -53,6 +53,6 @@ */ /* yyyymmddN */ -#define CATALOG_VERSION_NO 202202221 +#define CATALOG_VERSION_NO 202202251 #endif diff --git a/src/include/catalog/pg_proc.dat b/src/include/catalog/pg_proc.dat index 7f1ee97f55..7de8cfc7e9 100644 --- a/src/include/catalog/pg_proc.dat +++ b/src/include/catalog/pg_proc.dat @@ -5370,9 +5370,8 @@ proargnames => '{pid,status,receive_start_lsn,receive_start_tli,written_lsn,flushed_lsn,received_tli,last_msg_send_time,last_msg_receipt_time,latest_end_lsn,latest_end_time,slot_name,sender_host,sender_port,conninfo}', prosrc => 'pg_stat_get_wal_receiver' }, { oid => '6169', descr => 'statistics: information about replication slot', - proname => 'pg_stat_get_replication_slot', prorows => '1', proisstrict => 'f', - proretset => 't', provolatile => 's', proparallel => 'r', - prorettype => 'record', proargtypes => 'text', + proname => 'pg_stat_get_replication_slot', proisstrict => 'f', provolatile => 's', + proparallel => 'r', prorettype => 'record', proargtypes => 'text', proallargtypes => '{text,text,int8,int8,int8,int8,int8,int8,int8,int8,timestamptz}', proargmodes => '{i,o,o,o,o,o,o,o,o,o,o}', proargnames => '{slot_name,slot_name,spill_txns,spill_count,spill_bytes,stream_txns,stream_count,stream_bytes,total_txns,total_bytes,stats_reset}', From 73c61a50a1555007001d29844dcdb10b4f982a73 Mon Sep 17 00:00:00 2001 From: Peter Geoghegan Date: Thu, 24 Feb 2022 19:01:54 -0800 Subject: [PATCH 052/108] vacuumlazy.c: Remove obsolete num_tuples field. Commit 49c9d9fc unified VACUUM VERBOSE and autovacuum logging. It neglected to remove an old vacrel field that was only used by the old VACUUM VERBOSE, so remove it now. The previous num_tuples approach doesn't seem to have any real advantage over the approach VACUUM VERBOSE takes now (also the approach used by the autovacuum logging code), which is to show new_rel_tuples. new_rel_tuples is the possibly-estimated total number of tuples left in the table, whereas num_tuples meant the number of tuples encountered during the VACUUM operation, after pruning, without regard for tuples from pages skipped via the visibility map. In passing, reorder a related vacrel field for consistency. 
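
In code form, the counters that survive this change and the end-of-page
rollup look roughly as follows. This is a condensed, hypothetical sketch of
the pattern visible in the diff below, with a pared-down stand-in for
LVRelState, not the full function:

typedef struct LVRelStateSketch	/* hypothetical, pared-down */
{
	long		tuples_deleted;
	long		lpdead_items;
	long		live_tuples;
	long		recently_dead_tuples;
} LVRelStateSketch;

static void
scan_one_page(LVRelStateSketch *vacrel)
{
	int			tuples_deleted = 0;	/* removed from the table by pruning */
	int			lpdead_items = 0;	/* LP_DEAD stubs left for index vacuuming */
	int			live_tuples = 0;	/* feeds the new_rel_tuples estimate */
	int			recently_dead_tuples = 0;	/* dead, but not yet removable */

	/* ... examine every item on the page, bumping the counters ... */

	/* fold the page-local counts into the whole-VACUUM totals */
	vacrel->tuples_deleted += tuples_deleted;
	vacrel->lpdead_items += lpdead_items;
	vacrel->live_tuples += live_tuples;
	vacrel->recently_dead_tuples += recently_dead_tuples;
}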
--- src/backend/access/heap/vacuumlazy.c | 27 ++++++++------------------- 1 file changed, 8 insertions(+), 19 deletions(-) diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index f48e699b91..40101e0cb8 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -213,10 +213,9 @@ typedef struct LVRelState /* Counters that follow are only for scanned_pages */ int64 tuples_deleted; /* # deleted from table */ int64 lpdead_items; /* # deleted from indexes */ + int64 live_tuples; /* # live tuples remaining */ int64 recently_dead_tuples; /* # dead, but not yet removable */ int64 missed_dead_tuples; /* # removable, but not removed */ - int64 num_tuples; /* total number of nonremovable tuples */ - int64 live_tuples; /* live tuples (reltuples estimate) */ } LVRelState; /* @@ -816,10 +815,9 @@ lazy_scan_heap(LVRelState *vacrel, int nworkers) vacrel->num_index_scans = 0; vacrel->tuples_deleted = 0; vacrel->lpdead_items = 0; + vacrel->live_tuples = 0; vacrel->recently_dead_tuples = 0; vacrel->missed_dead_tuples = 0; - vacrel->num_tuples = 0; - vacrel->live_tuples = 0; vistest = GlobalVisTestFor(vacrel->rel); @@ -1572,9 +1570,8 @@ lazy_scan_prune(LVRelState *vacrel, HTSV_Result res; int tuples_deleted, lpdead_items, - recently_dead_tuples, - num_tuples, - live_tuples; + live_tuples, + recently_dead_tuples; int nnewlpdead; int nfrozen; OffsetNumber deadoffsets[MaxHeapTuplesPerPage]; @@ -1589,9 +1586,8 @@ lazy_scan_prune(LVRelState *vacrel, /* Initialize (or reset) page-level counters */ tuples_deleted = 0; lpdead_items = 0; - recently_dead_tuples = 0; - num_tuples = 0; live_tuples = 0; + recently_dead_tuples = 0; /* * Prune all HOT-update chains in this page. @@ -1788,8 +1784,7 @@ lazy_scan_prune(LVRelState *vacrel, * Check tuple left behind after pruning to see if needs to be frozen * now. */ - num_tuples++; - prunestate->hastup = true; + prunestate->hastup = true; /* page won't be truncatable */ if (heap_prepare_freeze_tuple(tuple.t_data, vacrel->relfrozenxid, vacrel->relminmxid, @@ -1928,9 +1923,8 @@ lazy_scan_prune(LVRelState *vacrel, /* Finally, add page-local counts to whole-VACUUM counts */ vacrel->tuples_deleted += tuples_deleted; vacrel->lpdead_items += lpdead_items; - vacrel->recently_dead_tuples += recently_dead_tuples; - vacrel->num_tuples += num_tuples; vacrel->live_tuples += live_tuples; + vacrel->recently_dead_tuples += recently_dead_tuples; } /* @@ -1963,7 +1957,6 @@ lazy_scan_noprune(LVRelState *vacrel, OffsetNumber offnum, maxoff; int lpdead_items, - num_tuples, live_tuples, recently_dead_tuples, missed_dead_tuples; @@ -1976,7 +1969,6 @@ lazy_scan_noprune(LVRelState *vacrel, *recordfreespace = false; /* for now */ lpdead_items = 0; - num_tuples = 0; live_tuples = 0; recently_dead_tuples = 0; missed_dead_tuples = 0; @@ -2031,7 +2023,6 @@ lazy_scan_noprune(LVRelState *vacrel, vacrel->freeze_cutoffs_valid = false; } - num_tuples++; ItemPointerSet(&(tuple.t_self), blkno, offnum); tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid); tuple.t_len = ItemIdGetLength(itemid); @@ -2096,7 +2087,6 @@ lazy_scan_noprune(LVRelState *vacrel, * forever, for vanishingly little benefit.) 
 */
 		*hastup = true;
-		num_tuples += lpdead_items;
 		missed_dead_tuples += lpdead_items;
 	}
 
@@ -2146,10 +2136,9 @@ lazy_scan_noprune(LVRelState *vacrel,
 	/*
 	 * Finally, add relevant page-local counts to whole-VACUUM counts
 	 */
+	vacrel->live_tuples += live_tuples;
 	vacrel->recently_dead_tuples += recently_dead_tuples;
 	vacrel->missed_dead_tuples += missed_dead_tuples;
-	vacrel->num_tuples += num_tuples;
-	vacrel->live_tuples += live_tuples;
 	if (missed_dead_tuples > 0)
 		vacrel->missed_dead_pages++;
 
From cd83cb953606b94966981056e79dbb6c48751055 Mon Sep 17 00:00:00 2001
From: Andres Freund
Date: Fri, 25 Feb 2022 10:30:05 -0800
Subject: [PATCH 053/108] pg_waldump: Fix error message for WAL files smaller than XLOG_BLCKSZ.

When opening a WAL file smaller than XLOG_BLCKSZ (e.g. 0 bytes long) while
determining the wal_segment_size, pg_waldump checked errno, despite errno
not being set by the short read, resulting in a bogus error message.

Author: Kyotaro Horiguchi
Discussion: https://postgr.es/m/20220214.181847.775024684568733277.horikyota.ntt@gmail.com
Backpatch: 11-, the bug was introduced in fc49e24fa
---
 src/bin/pg_waldump/pg_waldump.c | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)

diff --git a/src/bin/pg_waldump/pg_waldump.c b/src/bin/pg_waldump/pg_waldump.c
index a6251e1a96..2340dc247b 100644
--- a/src/bin/pg_waldump/pg_waldump.c
+++ b/src/bin/pg_waldump/pg_waldump.c
@@ -222,15 +222,12 @@ search_directory(const char *directory, const char *fname)
 							 WalSegSz),
 					fname, WalSegSz);
 		}
+		else if (r < 0)
+			fatal_error("could not read file \"%s\": %m",
+						fname);
 		else
-		{
-			if (errno != 0)
-				fatal_error("could not read file \"%s\": %m",
-							fname);
-			else
-				fatal_error("could not read file \"%s\": read %d of %d",
-							fname, r, XLOG_BLCKSZ);
-		}
+			fatal_error("could not read file \"%s\": read %d of %d",
+						fname, r, XLOG_BLCKSZ);
 		close(fd);
 		return true;
 	}
From 638300fef541fb9393caa1ee8821a639816301d1 Mon Sep 17 00:00:00 2001
From: Tom Lane
Date: Fri, 25 Feb 2022 17:40:21 -0500
Subject: [PATCH 054/108] Disallow execution of SPI functions during plperl
 function compilation.

Perl can be convinced to execute user-defined code during compilation
of a plperl function (or at least a plperlu function). That's not such
a big problem as long as the activity is confined within the Perl
interpreter, and it's not clear we could do anything about that anyway.
However, if such code tries to use plperl's SPI functions, we have a
bigger problem. In the first place, those functions are likely to crash
because current_call_data->prodesc isn't set up yet. In the second
place, because it isn't set up, we lack critical info such as whether
the function is supposed to be read-only. And in the third place, this
path allows code execution during function validation, which is
strongly discouraged because of the potential for security exploits.
Hence, reject execution of the SPI functions until compilation is
finished.

While here, add check_spi_usage_allowed() calls to various functions
that hadn't gotten the memo about checking that. I think that perhaps
plperl_sv_to_literal may have been intentionally omitted on the grounds
that it was safe at the time; but if so, the addition of transforms
functionality changed that. The others are more recently added and seem
to be flat-out oversights.

Per report from Mark Murawski. Back-patch to all supported branches.
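
The core of the fix is a guard at the top of every SPI entry point. A
condensed sketch of that check follows; the complete version, including the
pre-existing END-block case, is in the diff below:

static void
check_spi_usage_allowed(void)
{
	/*
	 * A NULL current_call_data (or prodesc) means we are still compiling
	 * or validating the function, e.g. running code from a Perl BEGIN
	 * block, so SPI state such as the read-only flag does not exist yet.
	 */
	if (current_call_data == NULL || current_call_data->prodesc == NULL)
	{
		/* simple croak as we don't want to involve PostgreSQL code */
		croak("SPI functions can not be used during function compilation");
	}
}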
Discussion: https://postgr.es/m/9acdf918-7fff-4f40-f750-2ffa84f083d2@intellasoft.net --- src/pl/plperl/plperl.c | 33 +++++++++++++++++++++++++++++++-- 1 file changed, 31 insertions(+), 2 deletions(-) diff --git a/src/pl/plperl/plperl.c b/src/pl/plperl/plperl.c index 81d9c46e00..81bb480bc2 100644 --- a/src/pl/plperl/plperl.c +++ b/src/pl/plperl/plperl.c @@ -264,6 +264,7 @@ static plperl_proc_desc *compile_plperl_function(Oid fn_oid, static SV *plperl_hash_from_tuple(HeapTuple tuple, TupleDesc tupdesc, bool include_generated); static SV *plperl_hash_from_datum(Datum attr); +static void check_spi_usage_allowed(void); static SV *plperl_ref_from_pg_array(Datum arg, Oid typid); static SV *split_array(plperl_array_info *info, int first, int last, int nest); static SV *make_array_ref(plperl_array_info *info, int first, int last); @@ -1429,13 +1430,15 @@ plperl_sv_to_datum(SV *sv, Oid typid, int32 typmod, char * plperl_sv_to_literal(SV *sv, char *fqtypename) { - Datum str = CStringGetDatum(fqtypename); - Oid typid = DirectFunctionCall1(regtypein, str); + Oid typid; Oid typoutput; Datum datum; bool typisvarlena, isnull; + check_spi_usage_allowed(); + + typid = DirectFunctionCall1(regtypein, CStringGetDatum(fqtypename)); if (!OidIsValid(typid)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), @@ -3097,6 +3100,21 @@ check_spi_usage_allowed(void) /* simple croak as we don't want to involve PostgreSQL code */ croak("SPI functions can not be used in END blocks"); } + + /* + * Disallow SPI usage if we're not executing a fully-compiled plperl + * function. It might seem impossible to get here in that case, but there + * are cases where Perl will try to execute code during compilation. If + * we proceed we are likely to crash trying to dereference the prodesc + * pointer. Working around that might be possible, but it seems unwise + * because it'd allow code execution to happen while validating a + * function, which is undesirable. + */ + if (current_call_data == NULL || current_call_data->prodesc == NULL) + { + /* simple croak as we don't want to involve PostgreSQL code */ + croak("SPI functions can not be used during function compilation"); + } } @@ -3217,6 +3235,8 @@ plperl_return_next(SV *sv) { MemoryContext oldcontext = CurrentMemoryContext; + check_spi_usage_allowed(); + PG_TRY(); { plperl_return_next_internal(sv); @@ -3961,6 +3981,8 @@ plperl_spi_commit(void) { MemoryContext oldcontext = CurrentMemoryContext; + check_spi_usage_allowed(); + PG_TRY(); { SPI_commit(); @@ -3986,6 +4008,8 @@ plperl_spi_rollback(void) { MemoryContext oldcontext = CurrentMemoryContext; + check_spi_usage_allowed(); + PG_TRY(); { SPI_rollback(); @@ -4023,6 +4047,11 @@ plperl_util_elog(int level, SV *msg) MemoryContext oldcontext = CurrentMemoryContext; char *volatile cmsg = NULL; + /* + * We intentionally omit check_spi_usage_allowed() here, as this seems + * safe to allow even in the contexts that that function rejects. + */ + PG_TRY(); { cmsg = sv2cstr(msg); From fe0972ee5e6f8a663c5cf3f24ef98987c503da95 Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Fri, 25 Feb 2022 16:58:48 -0800 Subject: [PATCH 055/108] Add further debug info to help debug 019_replslot_limit.pl failures. See also afdeff10526. Failures after that commit provided a few more hints, but not yet enough to understand what's going on. In 019_replslot_limit.pl shut down nodes with fast instead of immediate mode if we observe the failure mode. That should tell us whether the failures we're observing are just a timing issue under high load. 
PGCTLTIMEOUT should prevent buildfarm animals from hanging endlessly. Also adds a bit more logging to replication slot drop and ShutdownPostgres(). Discussion: https://postgr.es/m/20220225192941.hqnvefgdzaro6gzg@alap3.anarazel.de --- src/backend/replication/slot.c | 13 +++++++++++++ src/backend/storage/lmgr/lwlock.c | 7 +++++++ src/backend/utils/init/postinit.c | 17 +++++++++++++++++ src/include/storage/lwlock.h | 1 + src/test/recovery/t/019_replslot_limit.pl | 18 +++++++++++++++++- 5 files changed, 55 insertions(+), 1 deletion(-) diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c index 3d39fddaae..f238a392ae 100644 --- a/src/backend/replication/slot.c +++ b/src/backend/replication/slot.c @@ -569,6 +569,10 @@ ReplicationSlotCleanup(void) if (!s->in_use) continue; + /* unlocked read of active_pid is ok for debugging purposes */ + elog(DEBUG3, "temporary replication slot cleanup: %d in use, active_pid: %d", + i, s->active_pid); + SpinLockAcquire(&s->mutex); if (s->active_pid == MyProcPid) { @@ -629,6 +633,9 @@ ReplicationSlotDropPtr(ReplicationSlot *slot) char path[MAXPGPATH]; char tmppath[MAXPGPATH]; + /* temp debugging aid to analyze 019_replslot_limit failures */ + elog(DEBUG3, "replication slot drop: %s: begin", NameStr(slot->data.name)); + /* * If some other backend ran this code concurrently with us, we might try * to delete a slot with a certain name while someone else was trying to @@ -679,6 +686,9 @@ ReplicationSlotDropPtr(ReplicationSlot *slot) path, tmppath))); } + elog(DEBUG3, "replication slot drop: %s: removed on-disk", + NameStr(slot->data.name)); + /* * The slot is definitely gone. Lock out concurrent scans of the array * long enough to kill it. It's OK to clear the active PID here without @@ -734,6 +744,9 @@ ReplicationSlotDropPtr(ReplicationSlot *slot) * a slot while we're still cleaning up the detritus of the old one. */ LWLockRelease(ReplicationSlotAllocationLock); + + elog(DEBUG3, "replication slot drop: %s: done", + NameStr(slot->data.name)); } /* diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c index 7b0dea4abe..8f7f1b2f7c 100644 --- a/src/backend/storage/lmgr/lwlock.c +++ b/src/backend/storage/lmgr/lwlock.c @@ -1945,3 +1945,10 @@ LWLockHeldByMeInMode(LWLock *l, LWLockMode mode) } return false; } + +/* temp debugging aid to analyze 019_replslot_limit failures */ +int +LWLockHeldCount(void) +{ + return num_held_lwlocks; +} diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c index a29fa0b3e6..86d193c89f 100644 --- a/src/backend/utils/init/postinit.c +++ b/src/backend/utils/init/postinit.c @@ -1262,6 +1262,23 @@ ShutdownPostgres(int code, Datum arg) * them explicitly. */ LockReleaseAll(USER_LOCKMETHOD, true); + + /* + * temp debugging aid to analyze 019_replslot_limit failures + * + * If an error were thrown outside of a transaction nothing up to now + * would have released lwlocks. We probably will add an + * LWLockReleaseAll(). But for now make it easier to understand such cases + * by warning if any lwlocks are held. 
+ */ +#ifdef USE_ASSERT_CHECKING + { + int held_lwlocks = LWLockHeldCount(); + if (held_lwlocks) + elog(WARNING, "holding %d lwlocks at the end of ShutdownPostgres()", + held_lwlocks); + } +#endif } diff --git a/src/include/storage/lwlock.h b/src/include/storage/lwlock.h index 124977cf7e..c3d5889d7b 100644 --- a/src/include/storage/lwlock.h +++ b/src/include/storage/lwlock.h @@ -121,6 +121,7 @@ extern void LWLockReleaseClearVar(LWLock *lock, uint64 *valptr, uint64 val); extern void LWLockReleaseAll(void); extern bool LWLockHeldByMe(LWLock *lock); extern bool LWLockHeldByMeInMode(LWLock *lock, LWLockMode mode); +extern int LWLockHeldCount(void); extern bool LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval, uint64 *newval); extern void LWLockUpdateVar(LWLock *lock, uint64 *valptr, uint64 value); diff --git a/src/test/recovery/t/019_replslot_limit.pl b/src/test/recovery/t/019_replslot_limit.pl index 0c9da9bf27..9bb71b62c0 100644 --- a/src/test/recovery/t/019_replslot_limit.pl +++ b/src/test/recovery/t/019_replslot_limit.pl @@ -335,7 +335,23 @@ $node_primary3->wait_for_catchup($node_standby3); my $senderpid = $node_primary3->safe_psql('postgres', "SELECT pid FROM pg_stat_activity WHERE backend_type = 'walsender'"); -like($senderpid, qr/^[0-9]+$/, "have walsender pid $senderpid"); + +# We've seen occasional cases where multiple walsender pids are active. An +# immediate shutdown may hide evidence of a locking bug. So if multiple +# walsenders are observed, shut down in fast mode, and collect some more +# information. +if (not like($senderpid, qr/^[0-9]+$/, "have walsender pid $senderpid")) +{ + my ($stdout, $stderr); + $node_primary3->psql('postgres', + "\\a\\t\nSELECT * FROM pg_stat_activity", + stdout => \$stdout, stderr => \$stderr); + diag $stdout, $stderr; + $node_primary3->stop('fast'); + $node_standby3->stop('fast'); + die "could not determine walsender pid, can't continue"; +} + my $receiverpid = $node_standby3->safe_psql('postgres', "SELECT pid FROM pg_stat_activity WHERE backend_type = 'walreceiver'"); like($receiverpid, qr/^[0-9]+$/, "have walreceiver pid $receiverpid"); From a89850a57e0557bd3faab32398eb2d9536f6e2a4 Mon Sep 17 00:00:00 2001 From: Amit Kapila Date: Sat, 26 Feb 2022 10:38:37 +0530 Subject: [PATCH 056/108] Fix typo in logicalfuncs.c. Author: Bharath Rupireddy Discussion: https://postgr.es/m/CALj2ACX1mVtw8LWEnZgnpPdk2bPFR1xX2ZN+8GfXCffyip_9=Q@mail.gmail.com --- src/backend/replication/logical/logicalfuncs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/backend/replication/logical/logicalfuncs.c b/src/backend/replication/logical/logicalfuncs.c index 3609fa7d5b..3bd770a3ba 100644 --- a/src/backend/replication/logical/logicalfuncs.c +++ b/src/backend/replication/logical/logicalfuncs.c @@ -40,7 +40,7 @@ #include "utils/regproc.h" #include "utils/resowner.h" -/* private date for writing out data */ +/* Private data for writing out data */ typedef struct DecodingOutputState { Tuplestorestate *tupstore; From d33aeefd9b7c8c76f584432717dc944505565e52 Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Sat, 26 Feb 2022 16:06:24 -0800 Subject: [PATCH 057/108] Fix warning on mingw due to pid_t width, introduced in fe0972ee5e6. 
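
The portable idiom here is worth spelling out: pid_t has no dedicated printf
conversion and can be wider than int on some platforms (as apparently on
MinGW here), so a value passed for %d should be cast explicitly. A minimal
standalone sketch:

#include <stdio.h>
#include <sys/types.h>

int
main(void)
{
	pid_t		pid = 42;		/* stand-in value */

	/* %d expects int; cast so the format and the argument always agree */
	printf("active_pid: %d\n", (int) pid);
	return 0;
}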
---
 src/backend/replication/slot.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c
index f238a392ae..caa6b29756 100644
--- a/src/backend/replication/slot.c
+++ b/src/backend/replication/slot.c
@@ -571,7 +571,7 @@ ReplicationSlotCleanup(void)
 
 		/* unlocked read of active_pid is ok for debugging purposes */
 		elog(DEBUG3, "temporary replication slot cleanup: %d in use, active_pid: %d",
-			 i, s->active_pid);
+			 i, (int) s->active_pid);
 
 		SpinLockAcquire(&s->mutex);
 		if (s->active_pid == MyProcPid)

From 1155d8b8d52ed8705fd8386eaa64fb05c04170c6 Mon Sep 17 00:00:00 2001
From: Andres Freund
Date: Sat, 26 Feb 2022 16:43:54 -0800
Subject: [PATCH 058/108] Fix use of wrong variable in pg_receivewal's
 get_destination_dir().

The global variable wrongly used is always the one passed to
get_destination_dir(), so there currently are no negative consequences.

Author: Bharath Rupireddy
Discussion: https://postgr.es/m/CALj2ACUT0C2LQwhyLXTQdj8T9SxZa5j7cmu-UOz0cZ8_D5edjg@mail.gmail.com
---
 src/bin/pg_basebackup/pg_receivewal.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/bin/pg_basebackup/pg_receivewal.c b/src/bin/pg_basebackup/pg_receivewal.c
index ccb215c398..ce661a9ce4 100644
--- a/src/bin/pg_basebackup/pg_receivewal.c
+++ b/src/bin/pg_basebackup/pg_receivewal.c
@@ -240,7 +240,7 @@ get_destination_dir(char *dest_folder)
 	dir = opendir(dest_folder);
 	if (dir == NULL)
 	{
-		pg_log_error("could not open directory \"%s\": %m", basedir);
+		pg_log_error("could not open directory \"%s\": %m", dest_folder);
 		exit(1);
 	}

From ac25173cdbc40b310a7e72d9557c45a699f1f7b3 Mon Sep 17 00:00:00 2001
From: Andres Freund
Date: Sat, 26 Feb 2022 16:51:47 -0800
Subject: [PATCH 059/108] Convert src/interfaces/libpq/test to a tap test.

The old form of the test needed a bunch of custom infrastructure. These days
tap tests provide the necessary infrastructure to do better.

We discussed whether to move this test to src/test/modules, alongside
libpq_pipeline, but concluded that the opposite direction would be better.
libpq_pipeline will be moved at a later date, once the buildfarm and msvc
build infrastructure is ready for it.

The invocation of the tap test will be added in the next commit. It involves
just enough build-system changes to be worth committing separately. Can't
happen the other way round because prove errors out when invoked without
tests.
Discussion: https://postgr.es/m/20220223203031.ezrd73ohvjgfksow@alap3.anarazel.de --- src/interfaces/libpq/t/001_uri.pl | 244 +++++++++++++++++++++++++ src/interfaces/libpq/test/.gitignore | 2 - src/interfaces/libpq/test/Makefile | 7 +- src/interfaces/libpq/test/README | 7 - src/interfaces/libpq/test/expected.out | 171 ----------------- src/interfaces/libpq/test/regress.in | 57 ------ src/interfaces/libpq/test/regress.pl | 65 ------- 7 files changed, 246 insertions(+), 307 deletions(-) create mode 100644 src/interfaces/libpq/t/001_uri.pl delete mode 100644 src/interfaces/libpq/test/README delete mode 100644 src/interfaces/libpq/test/expected.out delete mode 100644 src/interfaces/libpq/test/regress.in delete mode 100644 src/interfaces/libpq/test/regress.pl diff --git a/src/interfaces/libpq/t/001_uri.pl b/src/interfaces/libpq/t/001_uri.pl new file mode 100644 index 0000000000..90f370f8fd --- /dev/null +++ b/src/interfaces/libpq/t/001_uri.pl @@ -0,0 +1,244 @@ +# Copyright (c) 2021-2022, PostgreSQL Global Development Group +use strict; +use warnings; + +use PostgreSQL::Test::Utils; +use Test::More; +use IPC::Run; + + +# List of URIs tests. For each test the first element is the input string, the +# second the expected stdout and the third the expected stderr. +my @tests = ( + [ + q{postgresql://uri-user:secret@host:12345/db}, + q{user='uri-user' password='secret' dbname='db' host='host' port='12345' (inet)}, + q{}, + ], + [ + q{postgresql://uri-user@host:12345/db}, + q{user='uri-user' dbname='db' host='host' port='12345' (inet)}, q{}, + ], + [ + q{postgresql://uri-user@host/db}, + q{user='uri-user' dbname='db' host='host' (inet)}, q{}, + ], + [ + q{postgresql://host:12345/db}, + q{dbname='db' host='host' port='12345' (inet)}, q{}, + ], + [ q{postgresql://host/db}, q{dbname='db' host='host' (inet)}, q{}, ], + [ + q{postgresql://uri-user@host:12345/}, + q{user='uri-user' host='host' port='12345' (inet)}, + q{}, + ], + [ + q{postgresql://uri-user@host/}, + q{user='uri-user' host='host' (inet)}, + q{}, + ], + [ q{postgresql://uri-user@}, q{user='uri-user' (local)}, q{}, ], + [ q{postgresql://host:12345/}, q{host='host' port='12345' (inet)}, q{}, ], + [ q{postgresql://host:12345}, q{host='host' port='12345' (inet)}, q{}, ], + [ q{postgresql://host/db}, q{dbname='db' host='host' (inet)}, q{}, ], + [ q{postgresql://host/}, q{host='host' (inet)}, q{}, ], + [ q{postgresql://host}, q{host='host' (inet)}, q{}, ], + [ q{postgresql://}, q{(local)}, q{}, ], + [ + q{postgresql://?hostaddr=127.0.0.1}, q{hostaddr='127.0.0.1' (inet)}, + q{}, + ], + [ + q{postgresql://example.com?hostaddr=63.1.2.4}, + q{host='example.com' hostaddr='63.1.2.4' (inet)}, + q{}, + ], + [ q{postgresql://%68ost/}, q{host='host' (inet)}, q{}, ], + [ + q{postgresql://host/db?user=uri-user}, + q{user='uri-user' dbname='db' host='host' (inet)}, + q{}, + ], + [ + q{postgresql://host/db?user=uri-user&port=12345}, + q{user='uri-user' dbname='db' host='host' port='12345' (inet)}, + q{}, + ], + [ + q{postgresql://host/db?u%73er=someotheruser&port=12345}, + q{user='someotheruser' dbname='db' host='host' port='12345' (inet)}, + q{}, + ], + [ + q{postgresql://host/db?u%7aer=someotheruser&port=12345}, q{}, + q{uri-regress: invalid URI query parameter: "uzer"}, + ], + [ + q{postgresql://host:12345?user=uri-user}, + q{user='uri-user' host='host' port='12345' (inet)}, + q{}, + ], + [ + q{postgresql://host?user=uri-user}, + q{user='uri-user' host='host' (inet)}, + q{}, + ], + [ q{postgresql://host?}, q{host='host' (inet)}, q{}, ], + [ + 
q{postgresql://[::1]:12345/db}, + q{dbname='db' host='::1' port='12345' (inet)}, + q{}, + ], + [ q{postgresql://[::1]/db}, q{dbname='db' host='::1' (inet)}, q{}, ], + [ + q{postgresql://[2001:db8::1234]/}, q{host='2001:db8::1234' (inet)}, + q{}, + ], + [ + q{postgresql://[200z:db8::1234]/}, q{host='200z:db8::1234' (inet)}, + q{}, + ], + [ q{postgresql://[::1]}, q{host='::1' (inet)}, q{}, ], + [ q{postgres://}, q{(local)}, q{}, ], + [ q{postgres:///}, q{(local)}, q{}, ], + [ q{postgres:///db}, q{dbname='db' (local)}, q{}, ], + [ + q{postgres://uri-user@/db}, q{user='uri-user' dbname='db' (local)}, + q{}, + ], + [ + q{postgres://?host=/path/to/socket/dir}, + q{host='/path/to/socket/dir' (local)}, + q{}, + ], + [ + q{postgresql://host?uzer=}, q{}, + q{uri-regress: invalid URI query parameter: "uzer"}, + ], + [ + q{postgre://}, + q{}, + q{uri-regress: missing "=" after "postgre://" in connection info string}, + ], + [ + q{postgres://[::1}, + q{}, + q{uri-regress: end of string reached when looking for matching "]" in IPv6 host address in URI: "postgres://[::1"}, + ], + [ + q{postgres://[]}, + q{}, + q{uri-regress: IPv6 host address may not be empty in URI: "postgres://[]"}, + ], + [ + q{postgres://[::1]z}, + q{}, + q{uri-regress: unexpected character "z" at position 17 in URI (expected ":" or "/"): "postgres://[::1]z"}, + ], + [ + q{postgresql://host?zzz}, + q{}, + q{uri-regress: missing key/value separator "=" in URI query parameter: "zzz"}, + ], + [ + q{postgresql://host?value1&value2}, + q{}, + q{uri-regress: missing key/value separator "=" in URI query parameter: "value1"}, + ], + [ + q{postgresql://host?key=key=value}, + q{}, + q{uri-regress: extra key/value separator "=" in URI query parameter: "key"}, + ], + [ + q{postgres://host?dbname=%XXfoo}, q{}, + q{uri-regress: invalid percent-encoded token: "%XXfoo"}, + ], + [ + q{postgresql://a%00b}, + q{}, + q{uri-regress: forbidden value %00 in percent-encoded value: "a%00b"}, + ], + [ + q{postgresql://%zz}, q{}, + q{uri-regress: invalid percent-encoded token: "%zz"}, + ], + [ + q{postgresql://%1}, q{}, + q{uri-regress: invalid percent-encoded token: "%1"}, + ], + [ + q{postgresql://%}, q{}, + q{uri-regress: invalid percent-encoded token: "%"}, + ], + [ q{postgres://@host}, q{host='host' (inet)}, q{}, ], + [ q{postgres://host:/}, q{host='host' (inet)}, q{}, ], + [ q{postgres://:12345/}, q{port='12345' (local)}, q{}, ], + [ + q{postgres://otheruser@?host=/no/such/directory}, + q{user='otheruser' host='/no/such/directory' (local)}, + q{}, + ], + [ + q{postgres://otheruser@/?host=/no/such/directory}, + q{user='otheruser' host='/no/such/directory' (local)}, + q{}, + ], + [ + q{postgres://otheruser@:12345?host=/no/such/socket/path}, + q{user='otheruser' host='/no/such/socket/path' port='12345' (local)}, + q{}, + ], + [ + q{postgres://otheruser@:12345/db?host=/path/to/socket}, + q{user='otheruser' dbname='db' host='/path/to/socket' port='12345' (local)}, + q{}, + ], + [ + q{postgres://:12345/db?host=/path/to/socket}, + q{dbname='db' host='/path/to/socket' port='12345' (local)}, + q{}, + ], + [ + q{postgres://:12345?host=/path/to/socket}, + q{host='/path/to/socket' port='12345' (local)}, + q{}, + ], + [ + q{postgres://%2Fvar%2Flib%2Fpostgresql/dbname}, + q{dbname='dbname' host='/var/lib/postgresql' (local)}, + q{}, + ]); + +# test to run for each of the above test definitions +sub test_uri +{ + local $Test::Builder::Level = $Test::Builder::Level + 1; + + my $uri; + my %expect; + my %result; + + ($uri, $expect{stdout}, $expect{stderr}) = @$_; + + 
$expect{'exit'} = $expect{stderr} eq '';
+
+	my $cmd = [ 'uri-regress', $uri ];
+	$result{exit} = IPC::Run::run $cmd, '>', \$result{stdout}, '2>',
+	  \$result{stderr};
+
+	chomp($result{stdout});
+	chomp($result{stderr});
+
+	# use is_deeply so there's one test result for each test above, without
+	# losing the information whether stdout/stderr mismatched.
+	is_deeply(\%result, \%expect, $uri);
+}
+
+foreach (@tests)
+{
+	test_uri($_);
+}
+
+done_testing();
diff --git a/src/interfaces/libpq/test/.gitignore b/src/interfaces/libpq/test/.gitignore
index 5387b3b6d9..5e803d8816 100644
--- a/src/interfaces/libpq/test/.gitignore
+++ b/src/interfaces/libpq/test/.gitignore
@@ -1,3 +1 @@
 /uri-regress
-/regress.diff
-/regress.out
diff --git a/src/interfaces/libpq/test/Makefile b/src/interfaces/libpq/test/Makefile
index 4832fab9d2..5421215906 100644
--- a/src/interfaces/libpq/test/Makefile
+++ b/src/interfaces/libpq/test/Makefile
@@ -1,3 +1,5 @@
+# src/interfaces/libpq/test/Makefile
+
 subdir = src/interfaces/libpq/test
 top_builddir = ../../../..
 include $(top_builddir)/src/Makefile.global
@@ -13,10 +15,5 @@ PROGS = uri-regress
 
 all: $(PROGS)
 
-installcheck: all
-	SRCDIR='$(top_srcdir)' SUBDIR='$(subdir)' \
-		$(PERL) $(top_srcdir)/$(subdir)/regress.pl
-
 clean distclean maintainer-clean:
 	rm -f $(PROGS) *.o
-	rm -f regress.out regress.diff
diff --git a/src/interfaces/libpq/test/README b/src/interfaces/libpq/test/README
deleted file mode 100644
index a05eb6bb3b..0000000000
--- a/src/interfaces/libpq/test/README
+++ /dev/null
@@ -1,7 +0,0 @@
-This is a testsuite for testing libpq URI connection string syntax.
-
-To run the suite, use 'make installcheck' command. It works by
-running 'regress.pl' from this directory with appropriate environment
-set up, which in turn feeds up lines from 'regress.in' to
-'uri-regress' test program and compares the output against the correct
-one in 'expected.out' file.
diff --git a/src/interfaces/libpq/test/expected.out b/src/interfaces/libpq/test/expected.out deleted file mode 100644 index d375e82b4a..0000000000 --- a/src/interfaces/libpq/test/expected.out +++ /dev/null @@ -1,171 +0,0 @@ -trying postgresql://uri-user:secret@host:12345/db -user='uri-user' password='secret' dbname='db' host='host' port='12345' (inet) - -trying postgresql://uri-user@host:12345/db -user='uri-user' dbname='db' host='host' port='12345' (inet) - -trying postgresql://uri-user@host/db -user='uri-user' dbname='db' host='host' (inet) - -trying postgresql://host:12345/db -dbname='db' host='host' port='12345' (inet) - -trying postgresql://host/db -dbname='db' host='host' (inet) - -trying postgresql://uri-user@host:12345/ -user='uri-user' host='host' port='12345' (inet) - -trying postgresql://uri-user@host/ -user='uri-user' host='host' (inet) - -trying postgresql://uri-user@ -user='uri-user' (local) - -trying postgresql://host:12345/ -host='host' port='12345' (inet) - -trying postgresql://host:12345 -host='host' port='12345' (inet) - -trying postgresql://host/db -dbname='db' host='host' (inet) - -trying postgresql://host/ -host='host' (inet) - -trying postgresql://host -host='host' (inet) - -trying postgresql:// -(local) - -trying postgresql://?hostaddr=127.0.0.1 -hostaddr='127.0.0.1' (inet) - -trying postgresql://example.com?hostaddr=63.1.2.4 -host='example.com' hostaddr='63.1.2.4' (inet) - -trying postgresql://%68ost/ -host='host' (inet) - -trying postgresql://host/db?user=uri-user -user='uri-user' dbname='db' host='host' (inet) - -trying postgresql://host/db?user=uri-user&port=12345 -user='uri-user' dbname='db' host='host' port='12345' (inet) - -trying postgresql://host/db?u%73er=someotheruser&port=12345 -user='someotheruser' dbname='db' host='host' port='12345' (inet) - -trying postgresql://host/db?u%7aer=someotheruser&port=12345 -uri-regress: invalid URI query parameter: "uzer" - -trying postgresql://host:12345?user=uri-user -user='uri-user' host='host' port='12345' (inet) - -trying postgresql://host?user=uri-user -user='uri-user' host='host' (inet) - -trying postgresql://host? 
-host='host' (inet) - -trying postgresql://[::1]:12345/db -dbname='db' host='::1' port='12345' (inet) - -trying postgresql://[::1]/db -dbname='db' host='::1' (inet) - -trying postgresql://[2001:db8::1234]/ -host='2001:db8::1234' (inet) - -trying postgresql://[200z:db8::1234]/ -host='200z:db8::1234' (inet) - -trying postgresql://[::1] -host='::1' (inet) - -trying postgres:// -(local) - -trying postgres:/// -(local) - -trying postgres:///db -dbname='db' (local) - -trying postgres://uri-user@/db -user='uri-user' dbname='db' (local) - -trying postgres://?host=/path/to/socket/dir -host='/path/to/socket/dir' (local) - -trying postgresql://host?uzer= -uri-regress: invalid URI query parameter: "uzer" - -trying postgre:// -uri-regress: missing "=" after "postgre://" in connection info string - -trying postgres://[::1 -uri-regress: end of string reached when looking for matching "]" in IPv6 host address in URI: "postgres://[::1" - -trying postgres://[] -uri-regress: IPv6 host address may not be empty in URI: "postgres://[]" - -trying postgres://[::1]z -uri-regress: unexpected character "z" at position 17 in URI (expected ":" or "/"): "postgres://[::1]z" - -trying postgresql://host?zzz -uri-regress: missing key/value separator "=" in URI query parameter: "zzz" - -trying postgresql://host?value1&value2 -uri-regress: missing key/value separator "=" in URI query parameter: "value1" - -trying postgresql://host?key=key=value -uri-regress: extra key/value separator "=" in URI query parameter: "key" - -trying postgres://host?dbname=%XXfoo -uri-regress: invalid percent-encoded token: "%XXfoo" - -trying postgresql://a%00b -uri-regress: forbidden value %00 in percent-encoded value: "a%00b" - -trying postgresql://%zz -uri-regress: invalid percent-encoded token: "%zz" - -trying postgresql://%1 -uri-regress: invalid percent-encoded token: "%1" - -trying postgresql://% -uri-regress: invalid percent-encoded token: "%" - -trying postgres://@host -host='host' (inet) - -trying postgres://host:/ -host='host' (inet) - -trying postgres://:12345/ -port='12345' (local) - -trying postgres://otheruser@?host=/no/such/directory -user='otheruser' host='/no/such/directory' (local) - -trying postgres://otheruser@/?host=/no/such/directory -user='otheruser' host='/no/such/directory' (local) - -trying postgres://otheruser@:12345?host=/no/such/socket/path -user='otheruser' host='/no/such/socket/path' port='12345' (local) - -trying postgres://otheruser@:12345/db?host=/path/to/socket -user='otheruser' dbname='db' host='/path/to/socket' port='12345' (local) - -trying postgres://:12345/db?host=/path/to/socket -dbname='db' host='/path/to/socket' port='12345' (local) - -trying postgres://:12345?host=/path/to/socket -host='/path/to/socket' port='12345' (local) - -trying postgres://%2Fvar%2Flib%2Fpostgresql/dbname -dbname='dbname' host='/var/lib/postgresql' (local) - diff --git a/src/interfaces/libpq/test/regress.in b/src/interfaces/libpq/test/regress.in deleted file mode 100644 index de034f3914..0000000000 --- a/src/interfaces/libpq/test/regress.in +++ /dev/null @@ -1,57 +0,0 @@ -postgresql://uri-user:secret@host:12345/db -postgresql://uri-user@host:12345/db -postgresql://uri-user@host/db -postgresql://host:12345/db -postgresql://host/db -postgresql://uri-user@host:12345/ -postgresql://uri-user@host/ -postgresql://uri-user@ -postgresql://host:12345/ -postgresql://host:12345 -postgresql://host/db -postgresql://host/ -postgresql://host -postgresql:// -postgresql://?hostaddr=127.0.0.1 -postgresql://example.com?hostaddr=63.1.2.4 
-postgresql://%68ost/ -postgresql://host/db?user=uri-user -postgresql://host/db?user=uri-user&port=12345 -postgresql://host/db?u%73er=someotheruser&port=12345 -postgresql://host/db?u%7aer=someotheruser&port=12345 -postgresql://host:12345?user=uri-user -postgresql://host?user=uri-user -postgresql://host? -postgresql://[::1]:12345/db -postgresql://[::1]/db -postgresql://[2001:db8::1234]/ -postgresql://[200z:db8::1234]/ -postgresql://[::1] -postgres:// -postgres:/// -postgres:///db -postgres://uri-user@/db -postgres://?host=/path/to/socket/dir -postgresql://host?uzer= -postgre:// -postgres://[::1 -postgres://[] -postgres://[::1]z -postgresql://host?zzz -postgresql://host?value1&value2 -postgresql://host?key=key=value -postgres://host?dbname=%XXfoo -postgresql://a%00b -postgresql://%zz -postgresql://%1 -postgresql://% -postgres://@host -postgres://host:/ -postgres://:12345/ -postgres://otheruser@?host=/no/such/directory -postgres://otheruser@/?host=/no/such/directory -postgres://otheruser@:12345?host=/no/such/socket/path -postgres://otheruser@:12345/db?host=/path/to/socket -postgres://:12345/db?host=/path/to/socket -postgres://:12345?host=/path/to/socket -postgres://%2Fvar%2Flib%2Fpostgresql/dbname diff --git a/src/interfaces/libpq/test/regress.pl b/src/interfaces/libpq/test/regress.pl deleted file mode 100644 index 70691dabe6..0000000000 --- a/src/interfaces/libpq/test/regress.pl +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/perl - -# Copyright (c) 2021-2022, PostgreSQL Global Development Group - -use strict; -use warnings; - -# use of SRCDIR/SUBDIR is required for supporting VPath builds -my $srcdir = $ENV{'SRCDIR'} or die 'SRCDIR environment variable is not set'; -my $subdir = $ENV{'SUBDIR'} or die 'SUBDIR environment variable is not set'; - -my $regress_in = "$srcdir/$subdir/regress.in"; -my $expected_out = "$srcdir/$subdir/expected.out"; - -# the output file should land in the build_dir of VPath, or just in -# the current dir, if VPath isn't used -my $regress_out = "regress.out"; - -# open input file first, so possible error isn't sent to redirected STDERR -open(my $regress_in_fh, "<", $regress_in) - or die "can't open $regress_in for reading: $!"; - -# save STDOUT/ERR and redirect both to regress.out -open(my $oldout_fh, ">&", \*STDOUT) or die "can't dup STDOUT: $!"; -open(my $olderr_fh, ">&", \*STDERR) or die "can't dup STDERR: $!"; - -open(STDOUT, ">", $regress_out) - or die "can't open $regress_out for writing: $!"; -open(STDERR, ">&", \*STDOUT) or die "can't dup STDOUT: $!"; - -# read lines from regress.in and run uri-regress on them -while (<$regress_in_fh>) -{ - chomp; - print "trying $_\n"; - system("./uri-regress \"$_\""); - print "\n"; -} - -# restore STDOUT/ERR so we can print the outcome to the user -open(STDERR, ">&", $olderr_fh) - or die; # can't complain as STDERR is still duped -open(STDOUT, ">&", $oldout_fh) or die "can't restore STDOUT: $!"; - -# just in case -close $regress_in_fh; - -my $diff_status = system( - "diff -c \"$srcdir/$subdir/expected.out\" regress.out >regress.diff"); - -print "=" x 70, "\n"; -if ($diff_status == 0) -{ - print "All tests passed\n"; - exit 0; -} -else -{ - print < Date: Sat, 26 Feb 2022 16:51:47 -0800 Subject: [PATCH 060/108] Run tap tests in src/interfaces/libpq. To be able to run binaries in the test/ directory, prove_[install]check need to be executable in a single shell invocation, so that test/ can be added to PATH. 
Discussion: https://postgr.es/m/20220223203031.ezrd73ohvjgfksow@alap3.anarazel.de --- src/Makefile.global.in | 12 ++++++------ src/interfaces/libpq/.gitignore | 1 + src/interfaces/libpq/Makefile | 11 +++++++++-- 3 files changed, 16 insertions(+), 8 deletions(-) diff --git a/src/Makefile.global.in b/src/Makefile.global.in index c980444233..bbdc1c4bda 100644 --- a/src/Makefile.global.in +++ b/src/Makefile.global.in @@ -448,8 +448,8 @@ ifeq ($(enable_tap_tests),yes) ifndef PGXS define prove_installcheck -rm -rf '$(CURDIR)'/tmp_check -$(MKDIR_P) '$(CURDIR)'/tmp_check +rm -rf '$(CURDIR)'/tmp_check && \ +$(MKDIR_P) '$(CURDIR)'/tmp_check && \ cd $(srcdir) && \ TESTDIR='$(CURDIR)' PATH="$(bindir):$(CURDIR):$$PATH" \ PGPORT='6$(DEF_PGPORT)' top_builddir='$(CURDIR)/$(top_builddir)' \ @@ -458,8 +458,8 @@ cd $(srcdir) && \ endef else # PGXS case define prove_installcheck -rm -rf '$(CURDIR)'/tmp_check -$(MKDIR_P) '$(CURDIR)'/tmp_check +rm -rf '$(CURDIR)'/tmp_check && \ +$(MKDIR_P) '$(CURDIR)'/tmp_check && \ cd $(srcdir) && \ TESTDIR='$(CURDIR)' PATH="$(bindir):$(CURDIR):$$PATH" \ PGPORT='6$(DEF_PGPORT)' top_builddir='$(top_builddir)' \ @@ -469,8 +469,8 @@ endef endif # PGXS define prove_check -rm -rf '$(CURDIR)'/tmp_check -$(MKDIR_P) '$(CURDIR)'/tmp_check +rm -rf '$(CURDIR)'/tmp_check && \ +$(MKDIR_P) '$(CURDIR)'/tmp_check && \ cd $(srcdir) && \ TESTDIR='$(CURDIR)' $(with_temp_install) PGPORT='6$(DEF_PGPORT)' \ PG_REGRESS='$(CURDIR)/$(top_builddir)/src/test/regress/pg_regress' \ diff --git a/src/interfaces/libpq/.gitignore b/src/interfaces/libpq/.gitignore index 7478dc344a..829d683ed2 100644 --- a/src/interfaces/libpq/.gitignore +++ b/src/interfaces/libpq/.gitignore @@ -1,2 +1,3 @@ /exports.list /libpq-refs-stamp +/tmp_check/ diff --git a/src/interfaces/libpq/Makefile b/src/interfaces/libpq/Makefile index 844c95d47d..3c53393fa4 100644 --- a/src/interfaces/libpq/Makefile +++ b/src/interfaces/libpq/Makefile @@ -137,8 +137,14 @@ install: all installdirs install-lib $(INSTALL_DATA) $(srcdir)/pqexpbuffer.h '$(DESTDIR)$(includedir_internal)' $(INSTALL_DATA) $(srcdir)/pg_service.conf.sample '$(DESTDIR)$(datadir)/pg_service.conf.sample' -installcheck: - $(MAKE) -C test $@ +test-build: + $(MAKE) -C test all + +check: test-build all + PATH="$(CURDIR)/test:$$PATH" && $(prove_check) + +installcheck: test-build all + PATH="$(CURDIR)/test:$$PATH" && $(prove_installcheck) installdirs: installdirs-lib $(MKDIR_P) '$(DESTDIR)$(includedir)' '$(DESTDIR)$(includedir_internal)' '$(DESTDIR)$(datadir)' @@ -153,6 +159,7 @@ uninstall: uninstall-lib clean distclean: clean-lib $(MAKE) -C test $@ + rm -rf tmp_check rm -f $(OBJS) pthread.h libpq-refs-stamp # Might be left over from a Win32 client-only build rm -f pg_config_paths.h From e3d41d08a17549fdc60a8b9450c0511c11d666d7 Mon Sep 17 00:00:00 2001 From: Dean Rasheed Date: Sun, 27 Feb 2022 10:15:46 +0000 Subject: [PATCH 061/108] Apply auto-vectorization to the inner loop of div_var_fast(). This loop is basically the same as the inner loop of mul_var(), which was auto-vectorized in commit 8870917623, but the compiler will only consider auto-vectorizing the div_var_fast() loop if the assignment target div[qi + i] is replaced by div_qi[i], where div_qi = &div[qi]. Additionally, since the compiler doesn't know that qdigit is guaranteed to fit in a 16-bit NumericDigit, cast it to NumericDigit before multiplying to make the resulting auto-vectorized code more efficient (avoiding unnecessary multiplication of the high 16 bits). 
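
In outline, the rewritten inner loop looks like this (a standalone sketch
with a hypothetical helper name and simplified types; the real change is in
the div_var_fast() hunk below):

    #include <stdint.h>

    typedef int16_t NumericDigit;

    /* Hoisting &div[qi] into a local base pointer and narrowing qdigit
     * to NumericDigit is what lets the compiler auto-vectorize this loop
     * with 16-bit multiplies; the indexed form div[qi + i] defeats it. */
    static void
    subtract_multiple(int *div, int qi, int qdigit,
                      const NumericDigit *var2digits, int istop)
    {
        int *div_qi = &div[qi];

        for (int i = 0; i < istop; i++)
            div_qi[i] -= ((NumericDigit) qdigit) * var2digits[i];
    }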
While at it, per suggestion from Tom Lane, change var1digit in mul_var() to be a NumericDigit rather than an int for the same reason. This actually makes no difference with modern gcc, but it might help other compilers generate more efficient assembly. Dean Rasheed, reviewed by Tom Lane. Discussion: https://postgr.es/m/CAEZATCVwsBi-ND-t82Cuuh1=8ee6jdOpzsmGN+CUZB6yjLg9jw@mail.gmail.com --- src/backend/utils/adt/numeric.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c index 3208789f75..effc4b886c 100644 --- a/src/backend/utils/adt/numeric.c +++ b/src/backend/utils/adt/numeric.c @@ -8323,7 +8323,7 @@ mul_var(const NumericVar *var1, const NumericVar *var2, NumericVar *result, */ for (i1 = Min(var1ndigits - 1, res_ndigits - 3); i1 >= 0; i1--) { - int var1digit = var1digits[i1]; + NumericDigit var1digit = var1digits[i1]; if (var1digit == 0) continue; @@ -8908,13 +8908,22 @@ div_var_fast(const NumericVar *var1, const NumericVar *var2, * which would make the new value simply div[qi] mod vardigits[0]. * The lower-order terms in qdigit can change this result by not * more than about twice INT_MAX/NBASE, so overflow is impossible. + * + * This inner loop is the performance bottleneck for division, so + * code it in the same way as the inner loop of mul_var() so that + * it can be auto-vectorized. We cast qdigit to NumericDigit + * before multiplying to allow the compiler to generate more + * efficient code (using 16-bit multiplication), which is safe + * since we know that the quotient digit is off by at most one, so + * there is no overflow risk. */ if (qdigit != 0) { int istop = Min(var2ndigits, div_ndigits - qi + 1); + int *div_qi = &div[qi]; for (i = 0; i < istop; i++) - div[qi + i] -= qdigit * var2digits[i]; + div_qi[i] -= ((NumericDigit) qdigit) * var2digits[i]; } } From d996d648f333b04ae3da3c5853120f6f37601fb2 Mon Sep 17 00:00:00 2001 From: Dean Rasheed Date: Sun, 27 Feb 2022 10:41:12 +0000 Subject: [PATCH 062/108] Simplify the inner loop of numeric division in div_var(). In the standard numeric division algorithm, the inner loop multiplies the divisor by the next quotient digit and subtracts that from the working dividend. As suggested by the original code comment, the separate "carry" and "borrow" variables (from the multiplication and subtraction steps respectively) can be folded together into a single variable. Doing so significantly improves performance, as well as simplifying the code. Dean Rasheed, reviewed by Tom Lane. Discussion: https://postgr.es/m/CAEZATCVwsBi-ND-t82Cuuh1=8ee6jdOpzsmGN+CUZB6yjLg9jw@mail.gmail.com --- src/backend/utils/adt/numeric.c | 36 ++++++++++++++------------------- 1 file changed, 15 insertions(+), 21 deletions(-) diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c index effc4b886c..47475bf695 100644 --- a/src/backend/utils/adt/numeric.c +++ b/src/backend/utils/adt/numeric.c @@ -8605,31 +8605,25 @@ div_var(const NumericVar *var1, const NumericVar *var2, NumericVar *result, /* As above, need do nothing more when quotient digit is 0 */ if (qhat > 0) { + NumericDigit *dividend_j = ÷nd[j]; + /* * Multiply the divisor by qhat, and subtract that from the - * working dividend. "carry" tracks the multiplication, - * "borrow" the subtraction (could we fold these together?) + * working dividend. 
The multiplication and subtraction are + * folded together here, noting that qhat <= NBASE (since it + * might be one too large), and so the intermediate result + * "tmp_result" is in the range [-NBASE^2, NBASE - 1], and + * "borrow" is in the range [0, NBASE]. */ - carry = 0; borrow = 0; for (i = var2ndigits; i >= 0; i--) { - carry += divisor[i] * qhat; - borrow -= carry % NBASE; - carry = carry / NBASE; - borrow += dividend[j + i]; - if (borrow < 0) - { - dividend[j + i] = borrow + NBASE; - borrow = -1; - } - else - { - dividend[j + i] = borrow; - borrow = 0; - } + int tmp_result; + + tmp_result = dividend_j[i] - borrow - divisor[i] * qhat; + borrow = (NBASE - 1 - tmp_result) / NBASE; + dividend_j[i] = tmp_result + borrow * NBASE; } - Assert(carry == 0); /* * If we got a borrow out of the top dividend digit, then @@ -8645,15 +8639,15 @@ div_var(const NumericVar *var1, const NumericVar *var2, NumericVar *result, carry = 0; for (i = var2ndigits; i >= 0; i--) { - carry += dividend[j + i] + divisor[i]; + carry += dividend_j[i] + divisor[i]; if (carry >= NBASE) { - dividend[j + i] = carry - NBASE; + dividend_j[i] = carry - NBASE; carry = 1; } else { - dividend[j + i] = carry; + dividend_j[i] = carry; carry = 0; } } From d1b307eef2818fe24760cc2c168d7d65d59775a8 Mon Sep 17 00:00:00 2001 From: Dean Rasheed Date: Sun, 27 Feb 2022 11:12:30 +0000 Subject: [PATCH 063/108] Optimise numeric division for one and two base-NBASE digit divisors. Formerly div_var() had "fast path" short division code that was significantly faster when the divisor was just one base-NBASE digit, but otherwise used long division. This commit adds a new function div_var_int() that divides by an arbitrary 32-bit integer, using the fast short division algorithm, and updates both div_var() and div_var_fast() to use it for one and two digit divisors. In the case of div_var(), this is slightly faster in the one-digit case, because it avoids some digit array copying, and is much faster in the two-digit case where it replaces long division. For div_var_fast(), it is much faster in both cases because the main div_var_fast() algorithm is optimised for larger inputs. Additionally, optimise exp() and ln() by using div_var_int(), allowing a NumericVar to be replaced by an int in a couple of places, most notably in the Taylor series code. This produces a significant speedup of exp(), ln() and the numeric_big regression test. Dean Rasheed, reviewed by Tom Lane. 
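
For reference, the short-division recurrence at the heart of div_var_int()
is the classic one; the following standalone sketch (hypothetical names, no
sign handling or rounding) shows the 64-bit-carry variant:

    #include <stdint.h>

    #define NBASE 10000

    typedef int16_t NumericDigit;

    /* Divide a base-NBASE digit string by a 32-bit divisor. The carry
     * stays below divisor after each step, so the next carry*NBASE +
     * digit is at most divisor*NBASE - 1; a 64-bit carry covers the
     * case where that exceeds UINT_MAX. */
    static void
    short_division(const NumericDigit *digits, int ndigits,
                   uint32_t divisor, NumericDigit *quotient)
    {
        uint64_t carry = 0;

        for (int i = 0; i < ndigits; i++)
        {
            carry = carry * NBASE + digits[i];
            quotient[i] = (NumericDigit) (carry / divisor);
            carry = carry % divisor;
        }
    }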
Discussion: https://postgr.es/m/CAEZATCVwsBi-ND-t82Cuuh1=8ee6jdOpzsmGN+CUZB6yjLg9jw@mail.gmail.com --- src/backend/utils/adt/numeric.c | 223 ++++++++++++++++++++++++++------ 1 file changed, 180 insertions(+), 43 deletions(-) diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c index 47475bf695..975d7dcf47 100644 --- a/src/backend/utils/adt/numeric.c +++ b/src/backend/utils/adt/numeric.c @@ -551,6 +551,8 @@ static void div_var(const NumericVar *var1, const NumericVar *var2, int rscale, bool round); static void div_var_fast(const NumericVar *var1, const NumericVar *var2, NumericVar *result, int rscale, bool round); +static void div_var_int(const NumericVar *var, int ival, int ival_weight, + NumericVar *result, int rscale, bool round); static int select_div_scale(const NumericVar *var1, const NumericVar *var2); static void mod_var(const NumericVar *var1, const NumericVar *var2, NumericVar *result); @@ -8451,8 +8453,33 @@ div_var(const NumericVar *var1, const NumericVar *var2, NumericVar *result, errmsg("division by zero"))); /* - * Now result zero check + * If the divisor has just one or two digits, delegate to div_var_int(), + * which uses fast short division. */ + if (var2ndigits <= 2) + { + int idivisor; + int idivisor_weight; + + idivisor = var2->digits[0]; + idivisor_weight = var2->weight; + if (var2ndigits == 2) + { + idivisor = idivisor * NBASE + var2->digits[1]; + idivisor_weight--; + } + if (var2->sign == NUMERIC_NEG) + idivisor = -idivisor; + + div_var_int(var1, idivisor, idivisor_weight, result, rscale, round); + return; + } + + /* + * Otherwise, perform full long division. + */ + + /* Result zero check */ if (var1ndigits == 0) { zero_var(result); @@ -8510,23 +8537,6 @@ div_var(const NumericVar *var1, const NumericVar *var2, NumericVar *result, alloc_var(result, res_ndigits); res_digits = result->digits; - if (var2ndigits == 1) - { - /* - * If there's only a single divisor digit, we can use a fast path (cf. - * Knuth section 4.3.1 exercise 16). - */ - divisor1 = divisor[1]; - carry = 0; - for (i = 0; i < res_ndigits; i++) - { - carry = carry * NBASE + dividend[i + 1]; - res_digits[i] = carry / divisor1; - carry = carry % divisor1; - } - } - else - { /* * The full multiple-place algorithm is taken from Knuth volume 2, * Algorithm 4.3.1D. @@ -8659,7 +8669,6 @@ div_var(const NumericVar *var1, const NumericVar *var2, NumericVar *result, /* And we're done with this quotient digit */ res_digits[j] = qhat; } - } pfree(dividend); @@ -8735,8 +8744,33 @@ div_var_fast(const NumericVar *var1, const NumericVar *var2, errmsg("division by zero"))); /* - * Now result zero check + * If the divisor has just one or two digits, delegate to div_var_int(), + * which uses fast short division. */ + if (var2ndigits <= 2) + { + int idivisor; + int idivisor_weight; + + idivisor = var2->digits[0]; + idivisor_weight = var2->weight; + if (var2ndigits == 2) + { + idivisor = idivisor * NBASE + var2->digits[1]; + idivisor_weight--; + } + if (var2->sign == NUMERIC_NEG) + idivisor = -idivisor; + + div_var_int(var1, idivisor, idivisor_weight, result, rscale, round); + return; + } + + /* + * Otherwise, perform full long division. + */ + + /* Result zero check */ if (var1ndigits == 0) { zero_var(result); @@ -9008,6 +9042,118 @@ div_var_fast(const NumericVar *var1, const NumericVar *var2, } +/* + * div_var_int() - + * + * Divide a numeric variable by a 32-bit integer with the specified weight. + * The quotient var / (ival * NBASE^ival_weight) is stored in result. 
+ */ +static void +div_var_int(const NumericVar *var, int ival, int ival_weight, + NumericVar *result, int rscale, bool round) +{ + NumericDigit *var_digits = var->digits; + int var_ndigits = var->ndigits; + int res_sign; + int res_weight; + int res_ndigits; + NumericDigit *res_buf; + NumericDigit *res_digits; + uint32 divisor; + int i; + + /* Guard against division by zero */ + if (ival == 0) + ereport(ERROR, + errcode(ERRCODE_DIVISION_BY_ZERO), + errmsg("division by zero")); + + /* Result zero check */ + if (var_ndigits == 0) + { + zero_var(result); + result->dscale = rscale; + return; + } + + /* + * Determine the result sign, weight and number of digits to calculate. + * The weight figured here is correct if the emitted quotient has no + * leading zero digits; otherwise strip_var() will fix things up. + */ + if (var->sign == NUMERIC_POS) + res_sign = ival > 0 ? NUMERIC_POS : NUMERIC_NEG; + else + res_sign = ival > 0 ? NUMERIC_NEG : NUMERIC_POS; + res_weight = var->weight - ival_weight; + /* The number of accurate result digits we need to produce: */ + res_ndigits = res_weight + 1 + (rscale + DEC_DIGITS - 1) / DEC_DIGITS; + /* ... but always at least 1 */ + res_ndigits = Max(res_ndigits, 1); + /* If rounding needed, figure one more digit to ensure correct result */ + if (round) + res_ndigits++; + + res_buf = digitbuf_alloc(res_ndigits + 1); + res_buf[0] = 0; /* spare digit for later rounding */ + res_digits = res_buf + 1; + + /* + * Now compute the quotient digits. This is the short division algorithm + * described in Knuth volume 2, section 4.3.1 exercise 16, except that we + * allow the divisor to exceed the internal base. + * + * In this algorithm, the carry from one digit to the next is at most + * divisor - 1. Therefore, while processing the next digit, carry may + * become as large as divisor * NBASE - 1, and so it requires a 64-bit + * integer if this exceeds UINT_MAX. + */ + divisor = Abs(ival); + + if (divisor <= UINT_MAX / NBASE) + { + /* carry cannot overflow 32 bits */ + uint32 carry = 0; + + for (i = 0; i < res_ndigits; i++) + { + carry = carry * NBASE + (i < var_ndigits ? var_digits[i] : 0); + res_digits[i] = (NumericDigit) (carry / divisor); + carry = carry % divisor; + } + } + else + { + /* carry may exceed 32 bits */ + uint64 carry = 0; + + for (i = 0; i < res_ndigits; i++) + { + carry = carry * NBASE + (i < var_ndigits ? var_digits[i] : 0); + res_digits[i] = (NumericDigit) (carry / divisor); + carry = carry % divisor; + } + } + + /* Store the quotient in result */ + digitbuf_free(result->buf); + result->ndigits = res_ndigits; + result->buf = res_buf; + result->digits = res_digits; + result->weight = res_weight; + result->sign = res_sign; + + /* Round or truncate to target rscale (and set result->dscale) */ + if (round) + round_var(result, rscale); + else + trunc_var(result, rscale); + + /* Strip leading/trailing zeroes */ + strip_var(result); +} + + /* * Default scale selection for division * @@ -9783,7 +9929,7 @@ exp_var(const NumericVar *arg, NumericVar *result, int rscale) { NumericVar x; NumericVar elem; - NumericVar ni; + int ni; double val; int dweight; int ndiv2; @@ -9792,7 +9938,6 @@ exp_var(const NumericVar *arg, NumericVar *result, int rscale) init_var(&x); init_var(&elem); - init_var(&ni); set_var_from_var(arg, &x); @@ -9820,15 +9965,13 @@ exp_var(const NumericVar *arg, NumericVar *result, int rscale) /* * Reduce x to the range -0.01 <= x <= 0.01 (approximately) by dividing by - * 2^n, to improve the convergence rate of the Taylor series. 
+ * 2^ndiv2, to improve the convergence rate of the Taylor series. + * + * Note that the overflow check above ensures that Abs(x) < 6000, which + * means that ndiv2 <= 20 here. */ if (Abs(val) > 0.01) { - NumericVar tmp; - - init_var(&tmp); - set_var_from_var(&const_two, &tmp); - ndiv2 = 1; val /= 2; @@ -9836,13 +9979,10 @@ exp_var(const NumericVar *arg, NumericVar *result, int rscale) { ndiv2++; val /= 2; - add_var(&tmp, &tmp, &tmp); } local_rscale = x.dscale + ndiv2; - div_var_fast(&x, &tmp, &x, local_rscale, true); - - free_var(&tmp); + div_var_int(&x, 1 << ndiv2, 0, &x, local_rscale, true); } else ndiv2 = 0; @@ -9870,16 +10010,16 @@ exp_var(const NumericVar *arg, NumericVar *result, int rscale) add_var(&const_one, &x, result); mul_var(&x, &x, &elem, local_rscale); - set_var_from_var(&const_two, &ni); - div_var_fast(&elem, &ni, &elem, local_rscale, true); + ni = 2; + div_var_int(&elem, ni, 0, &elem, local_rscale, true); while (elem.ndigits != 0) { add_var(result, &elem, result); mul_var(&elem, &x, &elem, local_rscale); - add_var(&ni, &const_one, &ni); - div_var_fast(&elem, &ni, &elem, local_rscale, true); + ni++; + div_var_int(&elem, ni, 0, &elem, local_rscale, true); } /* @@ -9899,7 +10039,6 @@ exp_var(const NumericVar *arg, NumericVar *result, int rscale) free_var(&x); free_var(&elem); - free_var(&ni); } @@ -9993,7 +10132,7 @@ ln_var(const NumericVar *arg, NumericVar *result, int rscale) { NumericVar x; NumericVar xx; - NumericVar ni; + int ni; NumericVar elem; NumericVar fact; int nsqrt; @@ -10012,7 +10151,6 @@ ln_var(const NumericVar *arg, NumericVar *result, int rscale) init_var(&x); init_var(&xx); - init_var(&ni); init_var(&elem); init_var(&fact); @@ -10073,13 +10211,13 @@ ln_var(const NumericVar *arg, NumericVar *result, int rscale) set_var_from_var(result, &xx); mul_var(result, result, &x, local_rscale); - set_var_from_var(&const_one, &ni); + ni = 1; for (;;) { - add_var(&ni, &const_two, &ni); + ni += 2; mul_var(&xx, &x, &xx, local_rscale); - div_var_fast(&xx, &ni, &elem, local_rscale, true); + div_var_int(&xx, ni, 0, &elem, local_rscale, true); if (elem.ndigits == 0) break; @@ -10095,7 +10233,6 @@ ln_var(const NumericVar *arg, NumericVar *result, int rscale) free_var(&x); free_var(&xx); - free_var(&ni); free_var(&elem); free_var(&fact); } From 667726fbe50f21d7d3ce5d5c5949a45c2496b60f Mon Sep 17 00:00:00 2001 From: Michael Paquier Date: Mon, 28 Feb 2022 10:53:56 +0900 Subject: [PATCH 064/108] pg_stat_statements: Remove unnecessary call to GetUserId() The same is done a couple of lines above, so there is no need for the same, extra, call. 
Author: Dong Wook Lee Reviewed-by: Julien Rouhaud Discussion: https://postgr.es/m/CAAcBya+szDd1Y6dJU4_dbH_Ye3=G=8O1oQGG01kv3Tpie7wELQ@mail.gmail.com --- contrib/pg_stat_statements/pg_stat_statements.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c index 38d92a89cc..d803253cea 100644 --- a/contrib/pg_stat_statements/pg_stat_statements.c +++ b/contrib/pg_stat_statements/pg_stat_statements.c @@ -1508,7 +1508,7 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo, pgssEntry *entry; /* Superusers or members of pg_read_all_stats members are allowed */ - is_allowed_role = is_member_of_role(GetUserId(), ROLE_PG_READ_ALL_STATS); + is_allowed_role = is_member_of_role(userid, ROLE_PG_READ_ALL_STATS); /* hash table must exist already */ if (!pgss || !pgss_hash) From fbee60f6a4ff2561f5a5af23959a29967f53fbde Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Tue, 22 Feb 2022 13:42:38 +0100 Subject: [PATCH 065/108] Improve some psql test code Split psql_like() into two functions psql_like() and psql_fails_like() and make them mirror the existing command_like() and command_fails_like() more closely. In particular, follow the universal convention that the test name is the last argument. Discussion: https://www.postgresql.org/message-id/3199e176-424e-1bef-f180-c1548466c2da@enterprisedb.com --- src/bin/psql/t/001_basic.pl | 59 ++++++++++++++++++------------------- 1 file changed, 29 insertions(+), 30 deletions(-) diff --git a/src/bin/psql/t/001_basic.pl b/src/bin/psql/t/001_basic.pl index ba3dd846ba..f416e0ab5e 100644 --- a/src/bin/psql/t/001_basic.pl +++ b/src/bin/psql/t/001_basic.pl @@ -12,40 +12,36 @@ program_version_ok('psql'); program_options_handling_ok('psql'); -my ($stdout, $stderr); -my $result; - -# Execute a psql command and check its result patterns. +# Execute a psql command and check its output. sub psql_like { local $Test::Builder::Level = $Test::Builder::Level + 1; - my $node = shift; - my $test_name = shift; - my $query = shift; - my $expected_stdout = shift; - my $expected_stderr = shift; + my ($node, $sql, $expected_stdout, $test_name) = @_; + + my ($ret, $stdout, $stderr) = $node->psql('postgres', $sql); + + is($ret, 0, "$test_name: exit code 0"); + is($stderr, '', "$test_name: no stderr"); + like($stdout, $expected_stdout, "$test_name: matches"); + + return; +} + +# Execute a psql command and check that it fails and check the stderr. +sub psql_fails_like +{ + local $Test::Builder::Level = $Test::Builder::Level + 1; - die "cannot specify both expected stdout and stderr here" - if (defined($expected_stdout) && defined($expected_stderr)); + my ($node, $sql, $expected_stderr, $test_name) = @_; # Use the context of a WAL sender, some of the tests rely on that. 
my ($ret, $stdout, $stderr) = $node->psql( - 'postgres', $query, - on_error_die => 0, + 'postgres', $sql, replication => 'database'); - if (defined($expected_stdout)) - { - is($ret, 0, "$test_name: expected result code"); - is($stderr, '', "$test_name: no stderr"); - like($stdout, $expected_stdout, "$test_name: stdout matches"); - } - if (defined($expected_stderr)) - { - isnt($ret, 0, "$test_name: expected result code"); - like($stderr, $expected_stderr, "$test_name: stderr matches"); - } + isnt($ret, 0, "$test_name: exit code not 0"); + like($stderr, $expected_stderr, "$test_name: matches"); return; } @@ -53,6 +49,9 @@ sub psql_like # test --help=foo, analogous to program_help_ok() foreach my $arg (qw(commands variables)) { + my ($stdout, $stderr); + my $result; + $result = IPC::Run::run [ 'psql', "--help=$arg" ], '>', \$stdout, '2>', \$stderr; ok($result, "psql --help=$arg exit code 0"); @@ -70,15 +69,15 @@ sub psql_like }); $node->start; -psql_like($node, '\copyright', '\copyright', qr/Copyright/, undef); -psql_like($node, '\help without arguments', '\help', qr/ALTER/, undef); -psql_like($node, '\help with argument', '\help SELECT', qr/SELECT/, undef); +psql_like($node, '\copyright', qr/Copyright/, '\copyright'); +psql_like($node, '\help', qr/ALTER/, '\help without arguments'); +psql_like($node, '\help SELECT', qr/SELECT/, '\help with argument'); # Test clean handling of unsupported replication command responses -psql_like( +psql_fails_like( $node, - 'handling of unexpected PQresultStatus', 'START_REPLICATION 0/0', - undef, qr/unexpected PQresultStatus: 8$/); + qr/unexpected PQresultStatus: 8$/, + 'handling of unexpected PQresultStatus'); done_testing(); From b15f254466aefbabcbed001929f6e09db59fd158 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Mon, 28 Feb 2022 11:31:30 -0500 Subject: [PATCH 066/108] Adjust interaction of libpq pipeline mode with errorMessage resets. Since commit ffa2e4670, libpq resets conn->errorMessage only when starting a new query. However, the later introduction of pipelining requires a further refinement: the "start of query" isn't necessarily when it's submitted to PQsendQueryStart. If we clear at that point then we risk dropping text for an error that the application has not noticed yet. Instead, when queuing a query while a previous query is still in flight, leave errorMessage alone; reset it when we begin to process the next query in pqPipelineProcessQueue. Perhaps this should be back-patched to v14 where ffa2e4670 came in. However I'm uncertain about whether it interacts with 618c16707. In the absence of user complaints, leave v14 alone. Discussion: https://postgr.es/m/1421785.1645723238@sss.pgh.pa.us --- src/interfaces/libpq/fe-exec.c | 51 +++++++++++++++++++++------------- 1 file changed, 32 insertions(+), 19 deletions(-) diff --git a/src/interfaces/libpq/fe-exec.c b/src/interfaces/libpq/fe-exec.c index 45dddaf556..0c39bc9abf 100644 --- a/src/interfaces/libpq/fe-exec.c +++ b/src/interfaces/libpq/fe-exec.c @@ -1380,10 +1380,7 @@ pqAppendCmdQueueEntry(PGconn *conn, PGcmdQueueEntry *entry) * state, we don't have to do anything. */ if (conn->asyncStatus == PGASYNC_IDLE) - { - pqClearConnErrorState(conn); pqPipelineProcessQueue(conn); - } break; } } @@ -1730,8 +1727,10 @@ PQsendQueryStart(PGconn *conn, bool newQuery) /* * If this is the beginning of a query cycle, reset the error state. + * However, in pipeline mode with something already queued, the error + * buffer belongs to that command and we shouldn't clear it. 
*/ - if (newQuery) + if (newQuery && conn->cmd_queue_head == NULL) pqClearConnErrorState(conn); /* Don't try to send if we know there's no live connection. */ @@ -2149,11 +2148,8 @@ PQgetResult(PGconn *conn) /* * We're about to return the NULL that terminates the round of * results from the current query; prepare to send the results - * of the next query when we're called next. Also, since this - * is the start of the results of the next query, clear any - * prior error message. + * of the next query when we're called next. */ - pqClearConnErrorState(conn); pqPipelineProcessQueue(conn); } break; @@ -2362,6 +2358,14 @@ PQexecStart(PGconn *conn) if (!conn) return false; + /* + * Since this is the beginning of a query cycle, reset the error state. + * However, in pipeline mode with something already queued, the error + * buffer belongs to that command and we shouldn't clear it. + */ + if (conn->cmd_queue_head == NULL) + pqClearConnErrorState(conn); + if (conn->pipelineStatus != PQ_PIPELINE_OFF) { appendPQExpBufferStr(&conn->errorMessage, @@ -2369,11 +2373,6 @@ PQexecStart(PGconn *conn) return false; } - /* - * Since this is the beginning of a query cycle, reset the error state. - */ - pqClearConnErrorState(conn); - /* * Silently discard any prior query result that application didn't eat. * This is probably poor design, but it's here for backward compatibility. @@ -2928,8 +2927,11 @@ PQfn(PGconn *conn, /* * Since this is the beginning of a query cycle, reset the error state. + * However, in pipeline mode with something already queued, the error + * buffer belongs to that command and we shouldn't clear it. */ - pqClearConnErrorState(conn); + if (conn->cmd_queue_head == NULL) + pqClearConnErrorState(conn); if (conn->pipelineStatus != PQ_PIPELINE_OFF) { @@ -3099,6 +3101,12 @@ pqPipelineProcessQueue(PGconn *conn) conn->cmd_queue_head == NULL) return; + /* + * Reset the error state. This and the next couple of steps correspond to + * what PQsendQueryStart didn't do for this query. + */ + pqClearConnErrorState(conn); + /* Initialize async result-accumulation state */ pqClearAsyncResult(conn); @@ -3809,9 +3817,11 @@ PQsetnonblocking(PGconn *conn, int arg) * behavior. this is ok because either they are making a transition _from_ * or _to_ blocking mode, either way we can block them. * - * Clear error state in case pqFlush adds to it. + * Clear error state in case pqFlush adds to it, unless we're actively + * pipelining, in which case it seems best not to. */ - pqClearConnErrorState(conn); + if (conn->cmd_queue_head == NULL) + pqClearConnErrorState(conn); /* if we are going from blocking to non-blocking flush here */ if (pqFlush(conn)) @@ -4003,7 +4013,8 @@ PQescapeStringConn(PGconn *conn, return 0; } - pqClearConnErrorState(conn); + if (conn->cmd_queue_head == NULL) + pqClearConnErrorState(conn); return PQescapeStringInternal(conn, to, from, length, error, conn->client_encoding, @@ -4041,7 +4052,8 @@ PQescapeInternal(PGconn *conn, const char *str, size_t len, bool as_ident) if (!conn) return NULL; - pqClearConnErrorState(conn); + if (conn->cmd_queue_head == NULL) + pqClearConnErrorState(conn); /* Scan the string for characters that must be escaped. 
*/ for (s = str; (s - str) < len && *s != '\0'; ++s) @@ -4306,7 +4318,8 @@ PQescapeByteaConn(PGconn *conn, if (!conn) return NULL; - pqClearConnErrorState(conn); + if (conn->cmd_queue_head == NULL) + pqClearConnErrorState(conn); return PQescapeByteaInternal(conn, from, from_length, to_length, conn->std_strings, From 2e517818f4af4abe93bf56442469944544f10d4b Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Mon, 28 Feb 2022 12:45:36 -0500 Subject: [PATCH 067/108] Fix SPI's handling of errors during transaction commit. SPI_commit previously left it up to the caller to recover from any error occurring during commit. Since that's complicated and requires use of low-level xact.c facilities, it's not too surprising that no caller got it right. Let's move the responsibility for cleanup into spi.c. Doing that requires redefining SPI_commit as starting a new transaction, so that it becomes equivalent to SPI_commit_and_chain except that you get default transaction characteristics instead of preserving the prior transaction's characteristics. We can make this pretty transparent API-wise by redefining SPI_start_transaction() as a no-op. Callers that expect to do something in between might be surprised, but available evidence is that no callers do so. Having made that API redefinition, we can fix this mess by having SPI_commit[_and_chain] trap errors and start a new, clean transaction before re-throwing the error. Likewise for SPI_rollback[_and_chain]. Some cleanup is also needed in AtEOXact_SPI, which was nowhere near smart enough to deal with SPI contexts nested inside a committing context. While plperl and pltcl need no changes beyond removing their now-useless SPI_start_transaction() calls, plpython needs some more work because it hadn't gotten the memo about catching commit/rollback errors in the first place. Such an error resulted in longjmp'ing out of the Python interpreter, which leaks Python stack entries at present and is reported to crash Python 3.11 altogether. Add the missing logic to catch such errors and convert them into Python exceptions. We are probably going to have to back-patch this once Python 3.11 ships, but it's a sufficiently basic change that I'm a bit nervous about doing so immediately. Let's let it bake awhile in HEAD first. 
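
In outline, the new commit path looks like this (simplified from the spi.c
changes below; the rollback path recovers symmetrically):

    PG_TRY();
    {
        CommitTransactionCommand();
        /* immediately give the caller a fresh transaction to work in */
        StartTransactionCommand();
    }
    PG_CATCH();
    {
        ErrorData  *edata = CopyErrorData();

        FlushErrorState();
        /* clean up the failed commit, then re-throw in a sane state */
        AbortCurrentTransaction();
        StartTransactionCommand();
        ReThrowError(edata);
    }
    PG_END_TRY();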
Peter Eisentraut and Tom Lane Discussion: https://postgr.es/m/3375ffd8-d71c-2565-e348-a597d6e739e3@enterprisedb.com Discussion: https://postgr.es/m/17416-ed8fe5d7213d6c25@postgresql.org --- doc/src/sgml/spi.sgml | 51 ++-- src/backend/executor/spi.c | 221 +++++++++++++----- src/backend/tcop/postgres.c | 2 - src/backend/utils/mmgr/portalmem.c | 2 +- src/include/executor/spi.h | 1 - src/pl/plperl/expected/plperl_transaction.out | 48 ++++ src/pl/plperl/plperl.c | 2 - src/pl/plperl/sql/plperl_transaction.sql | 32 +++ src/pl/plpgsql/src/pl_exec.c | 6 - .../expected/plpython_transaction.out | 67 +++++- src/pl/plpython/plpy_plpymodule.c | 30 --- src/pl/plpython/plpy_spi.c | 94 ++++++++ src/pl/plpython/plpy_spi.h | 3 + src/pl/plpython/sql/plpython_transaction.sql | 30 +++ src/pl/tcl/expected/pltcl_transaction.out | 49 ++++ src/pl/tcl/pltcl.c | 2 - src/pl/tcl/sql/pltcl_transaction.sql | 37 +++ 17 files changed, 535 insertions(+), 142 deletions(-) diff --git a/doc/src/sgml/spi.sgml b/doc/src/sgml/spi.sgml index d710e2d0df..7581661fc4 100644 --- a/doc/src/sgml/spi.sgml +++ b/doc/src/sgml/spi.sgml @@ -99,10 +99,9 @@ int SPI_connect_ext(int options) Sets the SPI connection to be nonatomic, which - means that transaction control calls SPI_commit, - SPI_rollback, and - SPI_start_transaction are allowed. Otherwise, - calling these functions will result in an immediate error. + means that transaction control calls (SPI_commit, + SPI_rollback) are allowed. Otherwise, + calling those functions will result in an immediate error. @@ -5040,15 +5039,17 @@ void SPI_commit_and_chain(void) SPI_commit commits the current transaction. It is approximately equivalent to running the SQL - command COMMIT. After a transaction is committed, a new - transaction has to be started - using SPI_start_transaction before further database - actions can be executed. + command COMMIT. After the transaction is committed, a + new transaction is automatically started using default transaction + characteristics, so that the caller can continue using SPI facilities. + If there is a failure during commit, the current transaction is instead + rolled back and a new transaction is started, after which the error is + thrown in the usual way. - SPI_commit_and_chain is the same, but a new - transaction is immediately started with the same transaction + SPI_commit_and_chain is the same, but the new + transaction is started with the same transaction characteristics as the just finished one, like with the SQL command COMMIT AND CHAIN. @@ -5093,14 +5094,13 @@ void SPI_rollback_and_chain(void) SPI_rollback rolls back the current transaction. It is approximately equivalent to running the SQL - command ROLLBACK. After a transaction is rolled back, a - new transaction has to be started - using SPI_start_transaction before further database - actions can be executed. + command ROLLBACK. After the transaction is rolled back, + a new transaction is automatically started using default transaction + characteristics, so that the caller can continue using SPI facilities. - SPI_rollback_and_chain is the same, but a new - transaction is immediately started with the same transaction + SPI_rollback_and_chain is the same, but the new + transaction is started with the same transaction characteristics as the just finished one, like with the SQL command ROLLBACK AND CHAIN. 
@@ -5124,7 +5124,7 @@ void SPI_rollback_and_chain(void) SPI_start_transaction - start a new transaction + obsolete function @@ -5137,17 +5137,12 @@ void SPI_start_transaction(void) Description - SPI_start_transaction starts a new transaction. It - can only be called after SPI_commit - or SPI_rollback, as there is no transaction active at - that point. Normally, when an SPI-using procedure is called, there is already a - transaction active, so attempting to start another one before closing out - the current one will result in an error. - - - - This function can only be executed if the SPI connection has been set as - nonatomic in the call to SPI_connect_ext. + SPI_start_transaction does nothing, and exists + only for code compatibility with + earlier PostgreSQL releases. It used to + be required after calling SPI_commit + or SPI_rollback, but now those functions start + a new transaction automatically. diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c index c93f90de9b..7971050746 100644 --- a/src/backend/executor/spi.c +++ b/src/backend/executor/spi.c @@ -156,7 +156,8 @@ SPI_connect_ext(int options) * XXX It could be better to use PortalContext as the parent context in * all cases, but we may not be inside a portal (consider deferred-trigger * execution). Perhaps CurTransactionContext could be an option? For now - * it doesn't matter because we clean up explicitly in AtEOSubXact_SPI(). + * it doesn't matter because we clean up explicitly in AtEOSubXact_SPI(); + * but see also AtEOXact_SPI(). */ _SPI_current->procCxt = AllocSetContextCreate(_SPI_current->atomic ? TopTransactionContext : PortalContext, "SPI Proc", @@ -214,13 +215,13 @@ SPI_finish(void) return SPI_OK_FINISH; } +/* + * SPI_start_transaction is a no-op, kept for backwards compatibility. + * SPI callers are *always* inside a transaction. + */ void SPI_start_transaction(void) { - MemoryContext oldcontext = CurrentMemoryContext; - - StartTransactionCommand(); - MemoryContextSwitchTo(oldcontext); } static void @@ -228,6 +229,12 @@ _SPI_commit(bool chain) { MemoryContext oldcontext = CurrentMemoryContext; + /* + * Complain if we are in a context that doesn't permit transaction + * termination. (Note: here and _SPI_rollback should be the only places + * that throw ERRCODE_INVALID_TRANSACTION_TERMINATION, so that callers can + * test for that with security that they know what happened.) + */ if (_SPI_current->atomic) ereport(ERROR, (errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION), @@ -240,40 +247,74 @@ _SPI_commit(bool chain) * top-level transaction in such a block violates that idea. A future PL * implementation might have different ideas about this, in which case * this restriction would have to be refined or the check possibly be - * moved out of SPI into the PLs. + * moved out of SPI into the PLs. Note however that the code below relies + * on not being within a subtransaction. */ if (IsSubTransaction()) ereport(ERROR, (errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION), errmsg("cannot commit while a subtransaction is active"))); - /* - * Hold any pinned portals that any PLs might be using. We have to do - * this before changing transaction state, since this will run - * user-defined code that might throw an error. 
- */ - HoldPinnedPortals(); + /* XXX this ain't re-entrant enough for my taste */ + if (chain) + SaveTransactionCharacteristics(); - /* Start the actual commit */ - _SPI_current->internal_xact = true; + /* Catch any error occurring during the COMMIT */ + PG_TRY(); + { + /* Protect current SPI stack entry against deletion */ + _SPI_current->internal_xact = true; - /* Release snapshots associated with portals */ - ForgetPortalSnapshots(); + /* + * Hold any pinned portals that any PLs might be using. We have to do + * this before changing transaction state, since this will run + * user-defined code that might throw an error. + */ + HoldPinnedPortals(); - if (chain) - SaveTransactionCharacteristics(); + /* Release snapshots associated with portals */ + ForgetPortalSnapshots(); - CommitTransactionCommand(); + /* Do the deed */ + CommitTransactionCommand(); - if (chain) - { + /* Immediately start a new transaction */ StartTransactionCommand(); - RestoreTransactionCharacteristics(); + if (chain) + RestoreTransactionCharacteristics(); + + MemoryContextSwitchTo(oldcontext); + + _SPI_current->internal_xact = false; } + PG_CATCH(); + { + ErrorData *edata; - MemoryContextSwitchTo(oldcontext); + /* Save error info in caller's context */ + MemoryContextSwitchTo(oldcontext); + edata = CopyErrorData(); + FlushErrorState(); - _SPI_current->internal_xact = false; + /* + * Abort the failed transaction. If this fails too, we'll just + * propagate the error out ... there's not that much we can do. + */ + AbortCurrentTransaction(); + + /* ... and start a new one */ + StartTransactionCommand(); + if (chain) + RestoreTransactionCharacteristics(); + + MemoryContextSwitchTo(oldcontext); + + _SPI_current->internal_xact = false; + + /* Now that we've cleaned up the transaction, re-throw the error */ + ReThrowError(edata); + } + PG_END_TRY(); } void @@ -293,6 +334,7 @@ _SPI_rollback(bool chain) { MemoryContext oldcontext = CurrentMemoryContext; + /* see under SPI_commit() */ if (_SPI_current->atomic) ereport(ERROR, (errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION), @@ -304,34 +346,68 @@ _SPI_rollback(bool chain) (errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION), errmsg("cannot roll back while a subtransaction is active"))); - /* - * Hold any pinned portals that any PLs might be using. We have to do - * this before changing transaction state, since this will run - * user-defined code that might throw an error, and in any case couldn't - * be run in an already-aborted transaction. - */ - HoldPinnedPortals(); + /* XXX this ain't re-entrant enough for my taste */ + if (chain) + SaveTransactionCharacteristics(); - /* Start the actual rollback */ - _SPI_current->internal_xact = true; + /* Catch any error occurring during the ROLLBACK */ + PG_TRY(); + { + /* Protect current SPI stack entry against deletion */ + _SPI_current->internal_xact = true; - /* Release snapshots associated with portals */ - ForgetPortalSnapshots(); + /* + * Hold any pinned portals that any PLs might be using. We have to do + * this before changing transaction state, since this will run + * user-defined code that might throw an error, and in any case + * couldn't be run in an already-aborted transaction. 
+ */ + HoldPinnedPortals(); - if (chain) - SaveTransactionCharacteristics(); + /* Release snapshots associated with portals */ + ForgetPortalSnapshots(); - AbortCurrentTransaction(); + /* Do the deed */ + AbortCurrentTransaction(); - if (chain) - { + /* Immediately start a new transaction */ StartTransactionCommand(); - RestoreTransactionCharacteristics(); + if (chain) + RestoreTransactionCharacteristics(); + + MemoryContextSwitchTo(oldcontext); + + _SPI_current->internal_xact = false; } + PG_CATCH(); + { + ErrorData *edata; - MemoryContextSwitchTo(oldcontext); + /* Save error info in caller's context */ + MemoryContextSwitchTo(oldcontext); + edata = CopyErrorData(); + FlushErrorState(); - _SPI_current->internal_xact = false; + /* + * Try again to abort the failed transaction. If this fails too, + * we'll just propagate the error out ... there's not that much we can + * do. + */ + AbortCurrentTransaction(); + + /* ... and start a new one */ + StartTransactionCommand(); + if (chain) + RestoreTransactionCharacteristics(); + + MemoryContextSwitchTo(oldcontext); + + _SPI_current->internal_xact = false; + + /* Now that we've cleaned up the transaction, re-throw the error */ + ReThrowError(edata); + } + PG_END_TRY(); } void @@ -346,38 +422,55 @@ SPI_rollback_and_chain(void) _SPI_rollback(true); } -/* - * Clean up SPI state. Called on transaction end (of non-SPI-internal - * transactions) and when returning to the main loop on error. - */ -void -SPICleanup(void) -{ - _SPI_current = NULL; - _SPI_connected = -1; - /* Reset API global variables, too */ - SPI_processed = 0; - SPI_tuptable = NULL; - SPI_result = 0; -} - /* * Clean up SPI state at transaction commit or abort. */ void AtEOXact_SPI(bool isCommit) { - /* Do nothing if the transaction end was initiated by SPI. */ - if (_SPI_current && _SPI_current->internal_xact) - return; + bool found = false; - if (isCommit && _SPI_connected != -1) + /* + * Pop stack entries, stopping if we find one marked internal_xact (that + * one belongs to the caller of SPI_commit or SPI_abort). + */ + while (_SPI_connected >= 0) + { + _SPI_connection *connection = &(_SPI_stack[_SPI_connected]); + + if (connection->internal_xact) + break; + + found = true; + + /* + * We need not release the procedure's memory contexts explicitly, as + * they'll go away automatically when their parent context does; see + * notes in SPI_connect_ext. + */ + + /* + * Restore outer global variables and pop the stack entry. Unlike + * SPI_finish(), we don't risk switching to memory contexts that might + * be already gone. + */ + SPI_processed = connection->outer_processed; + SPI_tuptable = connection->outer_tuptable; + SPI_result = connection->outer_result; + + _SPI_connected--; + if (_SPI_connected < 0) + _SPI_current = NULL; + else + _SPI_current = &(_SPI_stack[_SPI_connected]); + } + + /* We should only find entries to pop during an ABORT. 
*/ + if (found && isCommit) ereport(WARNING, (errcode(ERRCODE_WARNING), errmsg("transaction left non-empty SPI stack"), errhint("Check for missing \"SPI_finish\" calls."))); - - SPICleanup(); } /* diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c index 3c7d08209f..34c13a1113 100644 --- a/src/backend/tcop/postgres.c +++ b/src/backend/tcop/postgres.c @@ -43,7 +43,6 @@ #include "commands/async.h" #include "commands/prepare.h" #include "common/pg_prng.h" -#include "executor/spi.h" #include "jit/jit.h" #include "libpq/libpq.h" #include "libpq/pqformat.h" @@ -4263,7 +4262,6 @@ PostgresMain(const char *dbname, const char *username) WalSndErrorCleanup(); PortalErrorCleanup(); - SPICleanup(); /* * We can't release replication slots inside AbortTransaction() as we diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c index 21ad87c024..afc03682d9 100644 --- a/src/backend/utils/mmgr/portalmem.c +++ b/src/backend/utils/mmgr/portalmem.c @@ -1261,7 +1261,7 @@ HoldPinnedPortals(void) */ if (portal->strategy != PORTAL_ONE_SELECT) ereport(ERROR, - (errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION), + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("cannot perform transaction commands inside a cursor loop that is not read-only"))); /* Verify it's in a suitable state to be held */ diff --git a/src/include/executor/spi.h b/src/include/executor/spi.h index e20e7df780..6ec3851444 100644 --- a/src/include/executor/spi.h +++ b/src/include/executor/spi.h @@ -205,7 +205,6 @@ extern void SPI_commit_and_chain(void); extern void SPI_rollback(void); extern void SPI_rollback_and_chain(void); -extern void SPICleanup(void); extern void AtEOXact_SPI(bool isCommit); extern void AtEOSubXact_SPI(bool isCommit, SubTransactionId mySubid); extern bool SPI_inside_nonatomic_context(void); diff --git a/src/pl/plperl/expected/plperl_transaction.out b/src/pl/plperl/expected/plperl_transaction.out index 7ca0ef35fb..da4283cbce 100644 --- a/src/pl/plperl/expected/plperl_transaction.out +++ b/src/pl/plperl/expected/plperl_transaction.out @@ -192,5 +192,53 @@ SELECT * FROM pg_cursors; ------+-----------+-------------+-----------+---------------+--------------- (0 rows) +-- check handling of an error during COMMIT +CREATE TABLE testpk (id int PRIMARY KEY); +CREATE TABLE testfk(f1 int REFERENCES testpk DEFERRABLE INITIALLY DEFERRED); +DO LANGUAGE plperl $$ +# this insert will fail during commit: +spi_exec_query("INSERT INTO testfk VALUES (0)"); +spi_commit(); +elog(WARNING, 'should not get here'); +$$; +ERROR: insert or update on table "testfk" violates foreign key constraint "testfk_f1_fkey" at line 4. +CONTEXT: PL/Perl anonymous code block +SELECT * FROM testpk; + id +---- +(0 rows) + +SELECT * FROM testfk; + f1 +---- +(0 rows) + +DO LANGUAGE plperl $$ +# this insert will fail during commit: +spi_exec_query("INSERT INTO testfk VALUES (0)"); +eval { + spi_commit(); +}; +if ($@) { + elog(INFO, $@); +} +# these inserts should work: +spi_exec_query("INSERT INTO testpk VALUES (1)"); +spi_exec_query("INSERT INTO testfk VALUES (1)"); +$$; +INFO: insert or update on table "testfk" violates foreign key constraint "testfk_f1_fkey" at line 5. 
+ +SELECT * FROM testpk; + id +---- + 1 +(1 row) + +SELECT * FROM testfk; + f1 +---- + 1 +(1 row) + DROP TABLE test1; DROP TABLE test2; diff --git a/src/pl/plperl/plperl.c b/src/pl/plperl/plperl.c index 81bb480bc2..edb93ec1c4 100644 --- a/src/pl/plperl/plperl.c +++ b/src/pl/plperl/plperl.c @@ -3986,7 +3986,6 @@ plperl_spi_commit(void) PG_TRY(); { SPI_commit(); - SPI_start_transaction(); } PG_CATCH(); { @@ -4013,7 +4012,6 @@ plperl_spi_rollback(void) PG_TRY(); { SPI_rollback(); - SPI_start_transaction(); } PG_CATCH(); { diff --git a/src/pl/plperl/sql/plperl_transaction.sql b/src/pl/plperl/sql/plperl_transaction.sql index 0a60799805..d10c8bee89 100644 --- a/src/pl/plperl/sql/plperl_transaction.sql +++ b/src/pl/plperl/sql/plperl_transaction.sql @@ -159,5 +159,37 @@ SELECT * FROM test1; SELECT * FROM pg_cursors; +-- check handling of an error during COMMIT +CREATE TABLE testpk (id int PRIMARY KEY); +CREATE TABLE testfk(f1 int REFERENCES testpk DEFERRABLE INITIALLY DEFERRED); + +DO LANGUAGE plperl $$ +# this insert will fail during commit: +spi_exec_query("INSERT INTO testfk VALUES (0)"); +spi_commit(); +elog(WARNING, 'should not get here'); +$$; + +SELECT * FROM testpk; +SELECT * FROM testfk; + +DO LANGUAGE plperl $$ +# this insert will fail during commit: +spi_exec_query("INSERT INTO testfk VALUES (0)"); +eval { + spi_commit(); +}; +if ($@) { + elog(INFO, $@); +} +# these inserts should work: +spi_exec_query("INSERT INTO testpk VALUES (1)"); +spi_exec_query("INSERT INTO testfk VALUES (1)"); +$$; + +SELECT * FROM testpk; +SELECT * FROM testfk; + + DROP TABLE test1; DROP TABLE test2; diff --git a/src/pl/plpgsql/src/pl_exec.c b/src/pl/plpgsql/src/pl_exec.c index 9674c29250..915139378e 100644 --- a/src/pl/plpgsql/src/pl_exec.c +++ b/src/pl/plpgsql/src/pl_exec.c @@ -4916,10 +4916,7 @@ exec_stmt_commit(PLpgSQL_execstate *estate, PLpgSQL_stmt_commit *stmt) if (stmt->chain) SPI_commit_and_chain(); else - { SPI_commit(); - SPI_start_transaction(); - } /* * We need to build new simple-expression infrastructure, since the old @@ -4943,10 +4940,7 @@ exec_stmt_rollback(PLpgSQL_execstate *estate, PLpgSQL_stmt_rollback *stmt) if (stmt->chain) SPI_rollback_and_chain(); else - { SPI_rollback(); - SPI_start_transaction(); - } /* * We need to build new simple-expression infrastructure, since the old diff --git a/src/pl/plpython/expected/plpython_transaction.out b/src/pl/plpython/expected/plpython_transaction.out index 14152993c7..72d1e45a76 100644 --- a/src/pl/plpython/expected/plpython_transaction.out +++ b/src/pl/plpython/expected/plpython_transaction.out @@ -55,8 +55,11 @@ for i in range(0, 10): return 1 $$; SELECT transaction_test2(); -ERROR: invalid transaction termination -CONTEXT: PL/Python function "transaction_test2" +ERROR: spiexceptions.InvalidTransactionTermination: invalid transaction termination +CONTEXT: Traceback (most recent call last): + PL/Python function "transaction_test2", line 5, in + plpy.commit() +PL/Python function "transaction_test2" SELECT * FROM test1; a | b ---+--- @@ -70,7 +73,7 @@ plpy.execute("CALL transaction_test1()") return 1 $$; SELECT transaction_test3(); -ERROR: spiexceptions.InvalidTransactionTermination: invalid transaction termination +ERROR: spiexceptions.InvalidTransactionTermination: spiexceptions.InvalidTransactionTermination: invalid transaction termination CONTEXT: Traceback (most recent call last): PL/Python function "transaction_test3", line 2, in plpy.execute("CALL transaction_test1()") @@ -88,7 +91,7 @@ plpy.execute("DO LANGUAGE plpythonu $x$ plpy.commit() 
$x$") return 1 $$; SELECT transaction_test4(); -ERROR: spiexceptions.InvalidTransactionTermination: invalid transaction termination +ERROR: spiexceptions.InvalidTransactionTermination: spiexceptions.InvalidTransactionTermination: invalid transaction termination CONTEXT: Traceback (most recent call last): PL/Python function "transaction_test4", line 2, in plpy.execute("DO LANGUAGE plpythonu $x$ plpy.commit() $x$") @@ -100,8 +103,11 @@ s.enter() plpy.commit() $$; WARNING: forcibly aborting a subtransaction that has not been exited -ERROR: cannot commit while a subtransaction is active -CONTEXT: PL/Python anonymous code block +ERROR: spiexceptions.InvalidTransactionTermination: cannot commit while a subtransaction is active +CONTEXT: Traceback (most recent call last): + PL/Python anonymous code block, line 4, in + plpy.commit() +PL/Python anonymous code block -- commit inside cursor loop CREATE TABLE test2 (x int); INSERT INTO test2 VALUES (0), (1), (2), (3), (4); @@ -191,5 +197,54 @@ SELECT * FROM pg_cursors; ------+-----------+-------------+-----------+---------------+--------------- (0 rows) +-- check handling of an error during COMMIT +CREATE TABLE testpk (id int PRIMARY KEY); +CREATE TABLE testfk(f1 int REFERENCES testpk DEFERRABLE INITIALLY DEFERRED); +DO LANGUAGE plpythonu $$ +# this insert will fail during commit: +plpy.execute("INSERT INTO testfk VALUES (0)") +plpy.commit() +plpy.warning('should not get here') +$$; +ERROR: spiexceptions.ForeignKeyViolation: insert or update on table "testfk" violates foreign key constraint "testfk_f1_fkey" +DETAIL: Key (f1)=(0) is not present in table "testpk". +CONTEXT: Traceback (most recent call last): + PL/Python anonymous code block, line 4, in + plpy.commit() +PL/Python anonymous code block +SELECT * FROM testpk; + id +---- +(0 rows) + +SELECT * FROM testfk; + f1 +---- +(0 rows) + +DO LANGUAGE plpythonu $$ +# this insert will fail during commit: +plpy.execute("INSERT INTO testfk VALUES (0)") +try: + plpy.commit() +except Exception as e: + plpy.info('sqlstate: %s' % (e.sqlstate)) +# these inserts should work: +plpy.execute("INSERT INTO testpk VALUES (1)") +plpy.execute("INSERT INTO testfk VALUES (1)") +$$; +INFO: sqlstate: 23503 +SELECT * FROM testpk; + id +---- + 1 +(1 row) + +SELECT * FROM testfk; + f1 +---- + 1 +(1 row) + DROP TABLE test1; DROP TABLE test2; diff --git a/src/pl/plpython/plpy_plpymodule.c b/src/pl/plpython/plpy_plpymodule.c index 0365acc95b..907f89d153 100644 --- a/src/pl/plpython/plpy_plpymodule.c +++ b/src/pl/plpython/plpy_plpymodule.c @@ -40,8 +40,6 @@ static PyObject *PLy_fatal(PyObject *self, PyObject *args, PyObject *kw); static PyObject *PLy_quote_literal(PyObject *self, PyObject *args); static PyObject *PLy_quote_nullable(PyObject *self, PyObject *args); static PyObject *PLy_quote_ident(PyObject *self, PyObject *args); -static PyObject *PLy_commit(PyObject *self, PyObject *args); -static PyObject *PLy_rollback(PyObject *self, PyObject *args); /* A list of all known exceptions, generated from backend/utils/errcodes.txt */ @@ -577,31 +575,3 @@ PLy_output(volatile int level, PyObject *self, PyObject *args, PyObject *kw) */ Py_RETURN_NONE; } - -static PyObject * -PLy_commit(PyObject *self, PyObject *args) -{ - PLyExecutionContext *exec_ctx = PLy_current_execution_context(); - - SPI_commit(); - SPI_start_transaction(); - - /* was cleared at transaction end, reset pointer */ - exec_ctx->scratch_ctx = NULL; - - Py_RETURN_NONE; -} - -static PyObject * -PLy_rollback(PyObject *self, PyObject *args) -{ - PLyExecutionContext 
*exec_ctx = PLy_current_execution_context(); - - SPI_rollback(); - SPI_start_transaction(); - - /* was cleared at transaction end, reset pointer */ - exec_ctx->scratch_ctx = NULL; - - Py_RETURN_NONE; -} diff --git a/src/pl/plpython/plpy_spi.c b/src/pl/plpython/plpy_spi.c index 99c1b4f28f..86d70470a7 100644 --- a/src/pl/plpython/plpy_spi.c +++ b/src/pl/plpython/plpy_spi.c @@ -456,6 +456,100 @@ PLy_spi_execute_fetch_result(SPITupleTable *tuptable, uint64 rows, int status) return (PyObject *) result; } +PyObject * +PLy_commit(PyObject *self, PyObject *args) +{ + MemoryContext oldcontext = CurrentMemoryContext; + PLyExecutionContext *exec_ctx = PLy_current_execution_context(); + + PG_TRY(); + { + SPI_commit(); + + /* was cleared at transaction end, reset pointer */ + exec_ctx->scratch_ctx = NULL; + } + PG_CATCH(); + { + ErrorData *edata; + PLyExceptionEntry *entry; + PyObject *exc; + + /* Save error info */ + MemoryContextSwitchTo(oldcontext); + edata = CopyErrorData(); + FlushErrorState(); + + /* was cleared at transaction end, reset pointer */ + exec_ctx->scratch_ctx = NULL; + + /* Look up the correct exception */ + entry = hash_search(PLy_spi_exceptions, &(edata->sqlerrcode), + HASH_FIND, NULL); + + /* + * This could be a custom error code, if that's the case fallback to + * SPIError + */ + exc = entry ? entry->exc : PLy_exc_spi_error; + /* Make Python raise the exception */ + PLy_spi_exception_set(exc, edata); + FreeErrorData(edata); + + return NULL; + } + PG_END_TRY(); + + Py_RETURN_NONE; +} + +PyObject * +PLy_rollback(PyObject *self, PyObject *args) +{ + MemoryContext oldcontext = CurrentMemoryContext; + PLyExecutionContext *exec_ctx = PLy_current_execution_context(); + + PG_TRY(); + { + SPI_rollback(); + + /* was cleared at transaction end, reset pointer */ + exec_ctx->scratch_ctx = NULL; + } + PG_CATCH(); + { + ErrorData *edata; + PLyExceptionEntry *entry; + PyObject *exc; + + /* Save error info */ + MemoryContextSwitchTo(oldcontext); + edata = CopyErrorData(); + FlushErrorState(); + + /* was cleared at transaction end, reset pointer */ + exec_ctx->scratch_ctx = NULL; + + /* Look up the correct exception */ + entry = hash_search(PLy_spi_exceptions, &(edata->sqlerrcode), + HASH_FIND, NULL); + + /* + * This could be a custom error code, if that's the case fallback to + * SPIError + */ + exc = entry ? entry->exc : PLy_exc_spi_error; + /* Make Python raise the exception */ + PLy_spi_exception_set(exc, edata); + FreeErrorData(edata); + + return NULL; + } + PG_END_TRY(); + + Py_RETURN_NONE; +} + /* * Utilities for running SPI functions in subtransactions. 
* diff --git a/src/pl/plpython/plpy_spi.h b/src/pl/plpython/plpy_spi.h index a5e2e60da7..98ccd21093 100644 --- a/src/pl/plpython/plpy_spi.h +++ b/src/pl/plpython/plpy_spi.h @@ -12,6 +12,9 @@ extern PyObject *PLy_spi_prepare(PyObject *self, PyObject *args); extern PyObject *PLy_spi_execute(PyObject *self, PyObject *args); extern PyObject *PLy_spi_execute_plan(PyObject *ob, PyObject *list, long limit); +extern PyObject *PLy_commit(PyObject *self, PyObject *args); +extern PyObject *PLy_rollback(PyObject *self, PyObject *args); + typedef struct PLyExceptionEntry { int sqlstate; /* hash key, must be first */ diff --git a/src/pl/plpython/sql/plpython_transaction.sql b/src/pl/plpython/sql/plpython_transaction.sql index 33b37e5b7f..68588d9fb0 100644 --- a/src/pl/plpython/sql/plpython_transaction.sql +++ b/src/pl/plpython/sql/plpython_transaction.sql @@ -148,5 +148,35 @@ SELECT * FROM test1; SELECT * FROM pg_cursors; +-- check handling of an error during COMMIT +CREATE TABLE testpk (id int PRIMARY KEY); +CREATE TABLE testfk(f1 int REFERENCES testpk DEFERRABLE INITIALLY DEFERRED); + +DO LANGUAGE plpythonu $$ +# this insert will fail during commit: +plpy.execute("INSERT INTO testfk VALUES (0)") +plpy.commit() +plpy.warning('should not get here') +$$; + +SELECT * FROM testpk; +SELECT * FROM testfk; + +DO LANGUAGE plpythonu $$ +# this insert will fail during commit: +plpy.execute("INSERT INTO testfk VALUES (0)") +try: + plpy.commit() +except Exception as e: + plpy.info('sqlstate: %s' % (e.sqlstate)) +# these inserts should work: +plpy.execute("INSERT INTO testpk VALUES (1)") +plpy.execute("INSERT INTO testfk VALUES (1)") +$$; + +SELECT * FROM testpk; +SELECT * FROM testfk; + + DROP TABLE test1; DROP TABLE test2; diff --git a/src/pl/tcl/expected/pltcl_transaction.out b/src/pl/tcl/expected/pltcl_transaction.out index 007204b99a..f557b79138 100644 --- a/src/pl/tcl/expected/pltcl_transaction.out +++ b/src/pl/tcl/expected/pltcl_transaction.out @@ -96,5 +96,54 @@ SELECT * FROM test1; ---+--- (0 rows) +-- check handling of an error during COMMIT +CREATE TABLE testpk (id int PRIMARY KEY); +CREATE TABLE testfk(f1 int REFERENCES testpk DEFERRABLE INITIALLY DEFERRED); +CREATE PROCEDURE transaction_testfk() +LANGUAGE pltcl +AS $$ +# this insert will fail during commit: +spi_exec "INSERT INTO testfk VALUES (0)" +commit +elog WARNING "should not get here" +$$; +CALL transaction_testfk(); +ERROR: insert or update on table "testfk" violates foreign key constraint "testfk_f1_fkey" +SELECT * FROM testpk; + id +---- +(0 rows) + +SELECT * FROM testfk; + f1 +---- +(0 rows) + +CREATE OR REPLACE PROCEDURE transaction_testfk() +LANGUAGE pltcl +AS $$ +# this insert will fail during commit: +spi_exec "INSERT INTO testfk VALUES (0)" +if [catch {commit} msg] { + elog INFO $msg +} +# these inserts should work: +spi_exec "INSERT INTO testpk VALUES (1)" +spi_exec "INSERT INTO testfk VALUES (1)" +$$; +CALL transaction_testfk(); +INFO: insert or update on table "testfk" violates foreign key constraint "testfk_f1_fkey" +SELECT * FROM testpk; + id +---- + 1 +(1 row) + +SELECT * FROM testfk; + f1 +---- + 1 +(1 row) + DROP TABLE test1; DROP TABLE test2; diff --git a/src/pl/tcl/pltcl.c b/src/pl/tcl/pltcl.c index c5fad05e12..68c9bd1970 100644 --- a/src/pl/tcl/pltcl.c +++ b/src/pl/tcl/pltcl.c @@ -2935,7 +2935,6 @@ pltcl_commit(ClientData cdata, Tcl_Interp *interp, PG_TRY(); { SPI_commit(); - SPI_start_transaction(); } PG_CATCH(); { @@ -2975,7 +2974,6 @@ pltcl_rollback(ClientData cdata, Tcl_Interp *interp, PG_TRY(); { SPI_rollback(); - 
SPI_start_transaction(); } PG_CATCH(); { diff --git a/src/pl/tcl/sql/pltcl_transaction.sql b/src/pl/tcl/sql/pltcl_transaction.sql index c752faf665..bd759850a7 100644 --- a/src/pl/tcl/sql/pltcl_transaction.sql +++ b/src/pl/tcl/sql/pltcl_transaction.sql @@ -94,5 +94,42 @@ CALL transaction_test4b(); SELECT * FROM test1; +-- check handling of an error during COMMIT +CREATE TABLE testpk (id int PRIMARY KEY); +CREATE TABLE testfk(f1 int REFERENCES testpk DEFERRABLE INITIALLY DEFERRED); + +CREATE PROCEDURE transaction_testfk() +LANGUAGE pltcl +AS $$ +# this insert will fail during commit: +spi_exec "INSERT INTO testfk VALUES (0)" +commit +elog WARNING "should not get here" +$$; + +CALL transaction_testfk(); + +SELECT * FROM testpk; +SELECT * FROM testfk; + +CREATE OR REPLACE PROCEDURE transaction_testfk() +LANGUAGE pltcl +AS $$ +# this insert will fail during commit: +spi_exec "INSERT INTO testfk VALUES (0)" +if [catch {commit} msg] { + elog INFO $msg +} +# these inserts should work: +spi_exec "INSERT INTO testpk VALUES (1)" +spi_exec "INSERT INTO testfk VALUES (1)" +$$; + +CALL transaction_testfk(); + +SELECT * FROM testpk; +SELECT * FROM testfk; + + DROP TABLE test1; DROP TABLE test2; From 12d768e70497afc5a57acf73c251316997b5175a Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Mon, 28 Feb 2022 12:54:12 -0500 Subject: [PATCH 068/108] Don't use static storage for SaveTransactionCharacteristics(). This is pretty queasy-making on general principles, and the more so once you notice that CommitTransactionCommand() is actually stomping on the values saved by _SPI_commit(). It's okay as long as the active values didn't change during HoldPinnedPortals(); but that's a larger assumption than I think we want to make, especially since the fix is so simple. Discussion: https://postgr.es/m/1533956.1645731245@sss.pgh.pa.us --- src/backend/access/transam/xact.c | 32 ++++++++++++++----------------- src/backend/executor/spi.c | 16 ++++++++-------- src/include/access/xact.h | 12 ++++++++++-- 3 files changed, 32 insertions(+), 28 deletions(-) diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c index bb1f106946..adf763a8ea 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -2983,24 +2983,20 @@ StartTransactionCommand(void) * GUC system resets the characteristics at transaction end, so for example * just skipping the reset in StartTransaction() won't work.) 
*/ -static int save_XactIsoLevel; -static bool save_XactReadOnly; -static bool save_XactDeferrable; - void -SaveTransactionCharacteristics(void) +SaveTransactionCharacteristics(SavedTransactionCharacteristics *s) { - save_XactIsoLevel = XactIsoLevel; - save_XactReadOnly = XactReadOnly; - save_XactDeferrable = XactDeferrable; + s->save_XactIsoLevel = XactIsoLevel; + s->save_XactReadOnly = XactReadOnly; + s->save_XactDeferrable = XactDeferrable; } void -RestoreTransactionCharacteristics(void) +RestoreTransactionCharacteristics(const SavedTransactionCharacteristics *s) { - XactIsoLevel = save_XactIsoLevel; - XactReadOnly = save_XactReadOnly; - XactDeferrable = save_XactDeferrable; + XactIsoLevel = s->save_XactIsoLevel; + XactReadOnly = s->save_XactReadOnly; + XactDeferrable = s->save_XactDeferrable; } @@ -3011,9 +3007,9 @@ void CommitTransactionCommand(void) { TransactionState s = CurrentTransactionState; + SavedTransactionCharacteristics savetc; - if (s->chain) - SaveTransactionCharacteristics(); + SaveTransactionCharacteristics(&savetc); switch (s->blockState) { @@ -3071,7 +3067,7 @@ CommitTransactionCommand(void) StartTransaction(); s->blockState = TBLOCK_INPROGRESS; s->chain = false; - RestoreTransactionCharacteristics(); + RestoreTransactionCharacteristics(&savetc); } break; @@ -3097,7 +3093,7 @@ CommitTransactionCommand(void) StartTransaction(); s->blockState = TBLOCK_INPROGRESS; s->chain = false; - RestoreTransactionCharacteristics(); + RestoreTransactionCharacteristics(&savetc); } break; @@ -3115,7 +3111,7 @@ CommitTransactionCommand(void) StartTransaction(); s->blockState = TBLOCK_INPROGRESS; s->chain = false; - RestoreTransactionCharacteristics(); + RestoreTransactionCharacteristics(&savetc); } break; @@ -3182,7 +3178,7 @@ CommitTransactionCommand(void) StartTransaction(); s->blockState = TBLOCK_INPROGRESS; s->chain = false; - RestoreTransactionCharacteristics(); + RestoreTransactionCharacteristics(&savetc); } } else if (s->blockState == TBLOCK_PREPARE) diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c index 7971050746..5b353cb93a 100644 --- a/src/backend/executor/spi.c +++ b/src/backend/executor/spi.c @@ -228,6 +228,7 @@ static void _SPI_commit(bool chain) { MemoryContext oldcontext = CurrentMemoryContext; + SavedTransactionCharacteristics savetc; /* * Complain if we are in a context that doesn't permit transaction @@ -255,9 +256,8 @@ _SPI_commit(bool chain) (errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION), errmsg("cannot commit while a subtransaction is active"))); - /* XXX this ain't re-entrant enough for my taste */ if (chain) - SaveTransactionCharacteristics(); + SaveTransactionCharacteristics(&savetc); /* Catch any error occurring during the COMMIT */ PG_TRY(); @@ -281,7 +281,7 @@ _SPI_commit(bool chain) /* Immediately start a new transaction */ StartTransactionCommand(); if (chain) - RestoreTransactionCharacteristics(); + RestoreTransactionCharacteristics(&savetc); MemoryContextSwitchTo(oldcontext); @@ -305,7 +305,7 @@ _SPI_commit(bool chain) /* ... 
and start a new one */ StartTransactionCommand(); if (chain) - RestoreTransactionCharacteristics(); + RestoreTransactionCharacteristics(&savetc); MemoryContextSwitchTo(oldcontext); @@ -333,6 +333,7 @@ static void _SPI_rollback(bool chain) { MemoryContext oldcontext = CurrentMemoryContext; + SavedTransactionCharacteristics savetc; /* see under SPI_commit() */ if (_SPI_current->atomic) @@ -346,9 +347,8 @@ _SPI_rollback(bool chain) (errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION), errmsg("cannot roll back while a subtransaction is active"))); - /* XXX this ain't re-entrant enough for my taste */ if (chain) - SaveTransactionCharacteristics(); + SaveTransactionCharacteristics(&savetc); /* Catch any error occurring during the ROLLBACK */ PG_TRY(); @@ -373,7 +373,7 @@ _SPI_rollback(bool chain) /* Immediately start a new transaction */ StartTransactionCommand(); if (chain) - RestoreTransactionCharacteristics(); + RestoreTransactionCharacteristics(&savetc); MemoryContextSwitchTo(oldcontext); @@ -398,7 +398,7 @@ _SPI_rollback(bool chain) /* ... and start a new one */ StartTransactionCommand(); if (chain) - RestoreTransactionCharacteristics(); + RestoreTransactionCharacteristics(&savetc); MemoryContextSwitchTo(oldcontext); diff --git a/src/include/access/xact.h b/src/include/access/xact.h index 17a6fa4abd..062cc7e17d 100644 --- a/src/include/access/xact.h +++ b/src/include/access/xact.h @@ -135,6 +135,14 @@ typedef enum typedef void (*SubXactCallback) (SubXactEvent event, SubTransactionId mySubid, SubTransactionId parentSubid, void *arg); +/* Data structure for Save/RestoreTransactionCharacteristics */ +typedef struct SavedTransactionCharacteristics +{ + int save_XactIsoLevel; + bool save_XactReadOnly; + bool save_XactDeferrable; +} SavedTransactionCharacteristics; + /* ---------------- * transaction-related XLOG entries @@ -399,8 +407,8 @@ extern bool TransactionIdIsCurrentTransactionId(TransactionId xid); extern void CommandCounterIncrement(void); extern void ForceSyncCommit(void); extern void StartTransactionCommand(void); -extern void SaveTransactionCharacteristics(void); -extern void RestoreTransactionCharacteristics(void); +extern void SaveTransactionCharacteristics(SavedTransactionCharacteristics *s); +extern void RestoreTransactionCharacteristics(const SavedTransactionCharacteristics *s); extern void CommitTransactionCommand(void); extern void AbortCurrentTransaction(void); extern void BeginTransactionBlock(void); From a59c79564bdc209a5bc7b02d706f0d7352eb82fa Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Mon, 28 Feb 2022 14:12:52 -0500 Subject: [PATCH 069/108] Allow root-owned SSL private keys in libpq, not only the backend. This change makes libpq apply the same private-key-file ownership and permissions checks that we have used in the backend since commit 9a83564c5. Namely, that the private key can be owned by either the current user or root (with different file permissions allowed in the two cases). This allows system-wide management of key files, which is just as sensible on the client side as the server, particularly when the client is itself some application daemon. Sync the comments about this between libpq and the backend, too. 
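The shared rule is simple enough to restate in one place. The sketch below is a condensed, standalone version of the check that both sides now apply (it is not the patch's exact code, which also reports detailed error messages): the key must be a regular file, owned by the current user or by root, with mode 0600 or less when self-owned, or 0640 or less when root-owned.

#include <stdbool.h>
#include <sys/stat.h>
#include <unistd.h>

/* Condensed restatement of the key-file check used on both sides. */
static bool
key_file_ownership_ok(const char *path)
{
	struct stat st;

	if (stat(path, &st) != 0)
		return false;			/* cannot stat the file */
	if (!S_ISREG(st.st_mode))
		return false;			/* must be a regular file */
	if (st.st_uid != geteuid() && st.st_uid != 0)
		return false;			/* owner must be us or root */
	if (st.st_uid == geteuid() && (st.st_mode & (S_IRWXG | S_IRWXO)))
		return false;			/* self-owned: 0600 or less */
	if (st.st_uid == 0 && (st.st_mode & (S_IWGRP | S_IXGRP | S_IRWXO)))
		return false;			/* root-owned: 0640 or less */
	return true;
}

The root-owned case deliberately still allows group read (but not group write or execute), so a key can be shared through a system-wide location with the group that the server or client runs under.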
David Steele Discussion: https://postgr.es/m/f4b7bc55-97ac-9e69-7398-335e212f7743@pgmasters.net --- src/backend/libpq/be-secure-common.c | 28 ++++++++--------- src/interfaces/libpq/fe-secure-openssl.c | 40 ++++++++++++++++++++++-- 2 files changed, 50 insertions(+), 18 deletions(-) diff --git a/src/backend/libpq/be-secure-common.c b/src/backend/libpq/be-secure-common.c index 7e9a64d08e..7a9de524db 100644 --- a/src/backend/libpq/be-secure-common.c +++ b/src/backend/libpq/be-secure-common.c @@ -143,6 +143,7 @@ check_ssl_key_file_permissions(const char *ssl_key_file, bool isServerStart) return false; } + /* Key file must be a regular file */ if (!S_ISREG(buf.st_mode)) { ereport(loglevel, @@ -153,9 +154,19 @@ check_ssl_key_file_permissions(const char *ssl_key_file, bool isServerStart) } /* - * Refuse to load key files owned by users other than us or root. + * Refuse to load key files owned by users other than us or root, and + * require no public access to the key file. If the file is owned by us, + * require mode 0600 or less. If owned by root, require 0640 or less to + * allow read access through either our gid or a supplementary gid that + * allows us to read system-wide certificates. * - * XXX surely we can check this on Windows somehow, too. + * Note that similar checks are performed in + * src/interfaces/libpq/fe-secure-openssl.c so any changes here may need + * to be made there as well. + * + * Ideally we would do similar permissions checks on Windows, but it is + * not clear how that would work since Unix-style permissions may not be + * available. */ #if !defined(WIN32) && !defined(__CYGWIN__) if (buf.st_uid != geteuid() && buf.st_uid != 0) @@ -166,20 +177,7 @@ check_ssl_key_file_permissions(const char *ssl_key_file, bool isServerStart) ssl_key_file))); return false; } -#endif - /* - * Require no public access to key file. If the file is owned by us, - * require mode 0600 or less. If owned by root, require 0640 or less to - * allow read access through our gid, or a supplementary gid that allows - * to read system-wide certificates. - * - * XXX temporarily suppress check when on Windows, because there may not - * be proper support for Unix-y file permissions. Need to think of a - * reasonable check to apply on Windows. (See also the data directory - * permission check in postmaster.c) - */ -#if !defined(WIN32) && !defined(__CYGWIN__) if ((buf.st_uid == geteuid() && buf.st_mode & (S_IRWXG | S_IRWXO)) || (buf.st_uid == 0 && buf.st_mode & (S_IWGRP | S_IXGRP | S_IRWXO))) { diff --git a/src/interfaces/libpq/fe-secure-openssl.c b/src/interfaces/libpq/fe-secure-openssl.c index f6e563a2e5..d81218a4cc 100644 --- a/src/interfaces/libpq/fe-secure-openssl.c +++ b/src/interfaces/libpq/fe-secure-openssl.c @@ -1245,11 +1245,45 @@ initialize_SSL(PGconn *conn) fnbuf); return -1; } -#ifndef WIN32 - if (!S_ISREG(buf.st_mode) || buf.st_mode & (S_IRWXG | S_IRWXO)) + + /* Key file must be a regular file */ + if (!S_ISREG(buf.st_mode)) + { + appendPQExpBuffer(&conn->errorMessage, + libpq_gettext("private key file \"%s\" is not a regular file"), + fnbuf); + return -1; + } + + /* + * Refuse to load key files owned by users other than us or root, and + * require no public access to the key file. If the file is owned by + * us, require mode 0600 or less. If owned by root, require 0640 or + * less to allow read access through either our gid or a supplementary + * gid that allows us to read system-wide certificates. 
+ * + * Note that similar checks are performed in + * src/backend/libpq/be-secure-common.c so any changes here may need + * to be made there as well. + * + * Ideally we would do similar permissions checks on Windows, but it + * is not clear how that would work since Unix-style permissions may + * not be available. + */ +#if !defined(WIN32) && !defined(__CYGWIN__) + if (buf.st_uid != geteuid() && buf.st_uid != 0) + { + appendPQExpBuffer(&conn->errorMessage, + libpq_gettext("private key file \"%s\" must be owned by the current user or root\n"), + fnbuf); + return -1; + } + + if ((buf.st_uid == geteuid() && buf.st_mode & (S_IRWXG | S_IRWXO)) || + (buf.st_uid == 0 && buf.st_mode & (S_IWGRP | S_IXGRP | S_IRWXO))) { appendPQExpBuffer(&conn->errorMessage, - libpq_gettext("private key file \"%s\" has group or world access; permissions should be u=rw (0600) or less\n"), + libpq_gettext("private key file \"%s\" has group or world access; file must have permissions u=rw (0600) or less if owned by the current user, or permissions u=rw,g=r (0640) or less if owned by root\n"), fnbuf); return -1; } From 54bd1e43ca56e323aef309dc2dc0e1391825ce68 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Mon, 28 Feb 2022 15:36:54 -0500 Subject: [PATCH 070/108] Handle integer overflow in interval justification functions. justify_interval, justify_hours, and justify_days didn't check for overflow when promoting hours to days or days to months; but that's possible when the upper field's value is already large. Detect and report any such overflow. Also, we can avoid unnecessary overflow in some cases in justify_interval by pre-justifying the days field. (Thanks to Nathan Bossart for this idea.) Joe Koshakow Discussion: https://postgr.es/m/CAAvxfHeNqsJ2xYFbPUf_8nNQUiJqkag04NW6aBQQ0dbZsxfWHA@mail.gmail.com --- src/backend/utils/adt/timestamp.c | 35 ++++++++++++++++++++++--- src/test/regress/expected/interval.out | 36 ++++++++++++++++++++++++++ src/test/regress/sql/interval.sql | 12 +++++++++ 3 files changed, 79 insertions(+), 4 deletions(-) diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c index 36f8a84bcc..ae36ff3328 100644 --- a/src/backend/utils/adt/timestamp.c +++ b/src/backend/utils/adt/timestamp.c @@ -2717,12 +2717,33 @@ interval_justify_interval(PG_FUNCTION_ARGS) result->day = span->day; result->time = span->time; + /* pre-justify days if it might prevent overflow */ + if ((result->day > 0 && result->time > 0) || + (result->day < 0 && result->time < 0)) + { + wholemonth = result->day / DAYS_PER_MONTH; + result->day -= wholemonth * DAYS_PER_MONTH; + if (pg_add_s32_overflow(result->month, wholemonth, &result->month)) + ereport(ERROR, + (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), + errmsg("interval out of range"))); + } + + /* + * Since TimeOffset is int64, abs(wholeday) can't exceed about 1.07e8. If + * we pre-justified then abs(result->day) is less than DAYS_PER_MONTH, so + * this addition can't overflow. If we didn't pre-justify, then day and + * time are of different signs, so it still can't overflow. + */ TMODULO(result->time, wholeday, USECS_PER_DAY); - result->day += wholeday; /* could overflow... 
*/ + result->day += wholeday; wholemonth = result->day / DAYS_PER_MONTH; result->day -= wholemonth * DAYS_PER_MONTH; - result->month += wholemonth; + if (pg_add_s32_overflow(result->month, wholemonth, &result->month)) + ereport(ERROR, + (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), + errmsg("interval out of range"))); if (result->month > 0 && (result->day < 0 || (result->day == 0 && result->time < 0))) @@ -2772,7 +2793,10 @@ interval_justify_hours(PG_FUNCTION_ARGS) result->time = span->time; TMODULO(result->time, wholeday, USECS_PER_DAY); - result->day += wholeday; /* could overflow... */ + if (pg_add_s32_overflow(result->day, wholeday, &result->day)) + ereport(ERROR, + (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), + errmsg("interval out of range"))); if (result->day > 0 && result->time < 0) { @@ -2808,7 +2832,10 @@ interval_justify_days(PG_FUNCTION_ARGS) wholemonth = result->day / DAYS_PER_MONTH; result->day -= wholemonth * DAYS_PER_MONTH; - result->month += wholemonth; + if (pg_add_s32_overflow(result->month, wholemonth, &result->month)) + ereport(ERROR, + (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), + errmsg("interval out of range"))); if (result->month > 0 && result->day < 0) { diff --git a/src/test/regress/expected/interval.out b/src/test/regress/expected/interval.out index accd4a7d90..146f7c55d0 100644 --- a/src/test/regress/expected/interval.out +++ b/src/test/regress/expected/interval.out @@ -396,6 +396,10 @@ SELECT justify_days(interval '6 months 36 days 5 hours 4 minutes 3 seconds') as @ 7 mons 6 days 5 hours 4 mins 3 secs (1 row) +SELECT justify_hours(interval '2147483647 days 24 hrs'); +ERROR: interval out of range +SELECT justify_days(interval '2147483647 months 30 days'); +ERROR: interval out of range -- test justify_interval() SELECT justify_interval(interval '1 month -1 hour') as "1 month -1 hour"; 1 month -1 hour @@ -403,6 +407,38 @@ SELECT justify_interval(interval '1 month -1 hour') as "1 month -1 hour"; @ 29 days 23 hours (1 row) +SELECT justify_interval(interval '2147483647 days 24 hrs'); + justify_interval +------------------------------- + @ 5965232 years 4 mons 8 days +(1 row) + +SELECT justify_interval(interval '-2147483648 days -24 hrs'); + justify_interval +----------------------------------- + @ 5965232 years 4 mons 9 days ago +(1 row) + +SELECT justify_interval(interval '2147483647 months 30 days'); +ERROR: interval out of range +SELECT justify_interval(interval '-2147483648 months -30 days'); +ERROR: interval out of range +SELECT justify_interval(interval '2147483647 months 30 days -24 hrs'); + justify_interval +---------------------------------- + @ 178956970 years 7 mons 29 days +(1 row) + +SELECT justify_interval(interval '-2147483648 months -30 days 24 hrs'); + justify_interval +-------------------------------------- + @ 178956970 years 8 mons 29 days ago +(1 row) + +SELECT justify_interval(interval '2147483647 months -30 days 1440 hrs'); +ERROR: interval out of range +SELECT justify_interval(interval '-2147483648 months 30 days -1440 hrs'); +ERROR: interval out of range -- test fractional second input, and detection of duplicate units SET DATESTYLE = 'ISO'; SET IntervalStyle TO postgres; diff --git a/src/test/regress/sql/interval.sql b/src/test/regress/sql/interval.sql index 6d532398bd..c31f0eec05 100644 --- a/src/test/regress/sql/interval.sql +++ b/src/test/regress/sql/interval.sql @@ -149,10 +149,22 @@ select '100000000y 10mon -1000000000d -100000h -10min -10.000001s ago'::interval SELECT justify_hours(interval '6 months 3 days 52 hours 3 minutes 2 
seconds') as "6 mons 5 days 4 hours 3 mins 2 seconds"; SELECT justify_days(interval '6 months 36 days 5 hours 4 minutes 3 seconds') as "7 mons 6 days 5 hours 4 mins 3 seconds"; +SELECT justify_hours(interval '2147483647 days 24 hrs'); +SELECT justify_days(interval '2147483647 months 30 days'); + -- test justify_interval() SELECT justify_interval(interval '1 month -1 hour') as "1 month -1 hour"; +SELECT justify_interval(interval '2147483647 days 24 hrs'); +SELECT justify_interval(interval '-2147483648 days -24 hrs'); +SELECT justify_interval(interval '2147483647 months 30 days'); +SELECT justify_interval(interval '-2147483648 months -30 days'); +SELECT justify_interval(interval '2147483647 months 30 days -24 hrs'); +SELECT justify_interval(interval '-2147483648 months -30 days 24 hrs'); +SELECT justify_interval(interval '2147483647 months -30 days 1440 hrs'); +SELECT justify_interval(interval '-2147483648 months 30 days -1440 hrs'); + -- test fractional second input, and detection of duplicate units SET DATESTYLE = 'ISO'; SET IntervalStyle TO postgres; From 7a85073290856554416353a89799a4c04d09b74b Mon Sep 17 00:00:00 2001 From: Amit Kapila Date: Tue, 1 Mar 2022 06:17:52 +0530 Subject: [PATCH 071/108] Reconsider pg_stat_subscription_workers view. It was decided (refer to the Discussion link below) that the stats collector is not an appropriate place to store the error information of subscription workers. This patch changes the pg_stat_subscription_workers view (introduced by commit 8d74fc96db) so that it stores only statistics counters: apply_error_count and sync_error_count, and has one entry for each subscription. The removed error information such as error-XID and the error message would be stored in another way in the future which is more reliable and persistent. After removing these error details, there is no longer any relation information, so the subscription statistics are now a cluster-wide statistics. The patch also changes the view name to pg_stat_subscription_stats since the word "worker" is an implementation detail that we use one worker for one tablesync and one apply. Author: Masahiko Sawada, based on suggestions by Andres Freund Reviewed-by: Peter Smith, Haiying Tang, Takamichi Osumi, Amit Kapila Discussion: https://postgr.es/m/20220125063131.4cmvsxbz2tdg6g65@alap3.anarazel.de --- doc/src/sgml/logical-replication.sgml | 4 +- doc/src/sgml/monitoring.sgml | 99 +-- src/backend/catalog/system_functions.sql | 4 +- src/backend/catalog/system_views.sql | 27 +- src/backend/postmaster/pgstat.c | 656 +++++++++----------- src/backend/replication/logical/worker.c | 44 +- src/backend/utils/adt/pgstatfuncs.c | 156 ++--- src/include/catalog/catversion.h | 2 +- src/include/catalog/pg_proc.dat | 27 +- src/include/pgstat.h | 129 ++-- src/test/regress/expected/rules.out | 23 +- src/test/subscription/t/026_stats.pl | 102 +++ src/test/subscription/t/026_worker_stats.pl | 165 ----- src/tools/pgindent/typedefs.list | 8 +- 14 files changed, 582 insertions(+), 864 deletions(-) create mode 100644 src/test/subscription/t/026_stats.pl delete mode 100644 src/test/subscription/t/026_worker_stats.pl diff --git a/doc/src/sgml/logical-replication.sgml b/doc/src/sgml/logical-replication.sgml index 96b4886e08..fb4472356d 100644 --- a/doc/src/sgml/logical-replication.sgml +++ b/doc/src/sgml/logical-replication.sgml @@ -346,9 +346,7 @@ A conflict will produce an error and will stop the replication; it must be resolved manually by the user. 
Details about the conflict can be found in - - pg_stat_subscription_workers and the - subscriber's server log. + the subscriber's server log. diff --git a/doc/src/sgml/monitoring.sgml b/doc/src/sgml/monitoring.sgml index bf7625d988..9fb62fec8e 100644 --- a/doc/src/sgml/monitoring.sgml +++ b/doc/src/sgml/monitoring.sgml @@ -628,11 +628,10 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser - pg_stat_subscription_workerspg_stat_subscription_workers - One row per subscription worker, showing statistics about errors - that occurred on that subscription worker. - See - pg_stat_subscription_workers for details. + pg_stat_subscription_statspg_stat_subscription_stats + One row per subscription, showing statistics about errors. + See + pg_stat_subscription_stats for details. @@ -3063,23 +3062,20 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i - - <structname>pg_stat_subscription_workers</structname> + + <structname>pg_stat_subscription_stats</structname> - pg_stat_subscription_workers + pg_stat_subscription_stats - The pg_stat_subscription_workers view will contain - one row per subscription worker on which errors have occurred, for workers - applying logical replication changes and workers handling the initial data - copy of the subscribed tables. The statistics entry is removed when the - corresponding subscription is dropped. + The pg_stat_subscription_stats view will contain + one row per subscription. -
- <structname>pg_stat_subscription_workers</structname> View +
+ <structname>pg_stat_subscription_stats</structname> View @@ -3113,72 +3109,31 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i - subrelid oid + apply_error_count bigint - OID of the relation that the worker is synchronizing; null for the - main apply worker - - - - - - last_error_relid oid - - - OID of the relation that the worker was processing when the - error occurred + Number of times an error occurred while applying changes - last_error_command text + sync_error_count bigint - Name of command being applied when the error occurred. This field - is null if the error was reported during the initial data copy. + Number of times an error occurred during the initial table + synchronization - last_error_xid xid - - - Transaction ID of the publisher node being applied when the error - occurred. This field is null if the error was reported - during the initial data copy. - - - - - - last_error_count uint8 - - - Number of consecutive times the error occurred - - - - - - last_error_message text - - - The error message - - - - - - last_error_time timestamp with time zone + stats_reset timestamp with time zone - Last time at which this error occurred + Time at which these statistics were last reset -
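The two counters in this view are driven from the worker side. The worker.c diff is not reproduced in this excerpt, but the reporting pattern has the shape sketched below; the wrapper function is hypothetical, and the point is that the worker aborts any open transaction first, so the stats message is sent from an idle state, and then re-throws the original error.

#include "postgres.h"

#include "access/xact.h"
#include "pgstat.h"

/*
 * Hypothetical wrapper: run the apply loop or table sync ('body'); on
 * error, bump apply_error_count or sync_error_count for the subscription
 * and propagate the error.
 */
static void
run_with_error_reporting(Oid subid, bool is_apply, void (*body) (void))
{
	PG_TRY();
	{
		body();					/* apply loop or table sync, not shown */
	}
	PG_CATCH();
	{
		/* leave the transaction so the message goes out while idle */
		AbortOutOfAnyTransaction();
		pgstat_report_subscription_error(subid, is_apply);
		PG_RE_THROW();
	}
	PG_END_TRY();
}

Here is_apply would be true for the main apply worker and false for a tablesync worker, matching the two columns described above.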
@@ -5320,22 +5275,16 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i - pg_stat_reset_subscription_worker + pg_stat_reset_subscription_stats - pg_stat_reset_subscription_worker ( subid oid , relid oid ) + pg_stat_reset_subscription_stats ( oid ) void - Resets the statistics of subscription workers running on the - subscription with subid shown in the - pg_stat_subscription_workers view. If the - argument relid is not NULL, - resets statistics of the subscription worker handling the initial data - copy of the relation with relid. Otherwise, - resets the subscription worker statistics of the main apply worker. - If the argument relid is omitted, resets the - statistics of all subscription workers running on the subscription - with subid. + Resets statistics for a single subscription shown in the + pg_stat_subscription_stats view to zero. If + the argument is NULL, reset statistics for all + subscriptions. This function is restricted to superusers by default, but other users diff --git a/src/backend/catalog/system_functions.sql b/src/backend/catalog/system_functions.sql index fd1421788e..758ab6e25a 100644 --- a/src/backend/catalog/system_functions.sql +++ b/src/backend/catalog/system_functions.sql @@ -639,9 +639,7 @@ REVOKE EXECUTE ON FUNCTION pg_stat_reset_single_function_counters(oid) FROM publ REVOKE EXECUTE ON FUNCTION pg_stat_reset_replication_slot(text) FROM public; -REVOKE EXECUTE ON FUNCTION pg_stat_reset_subscription_worker(oid) FROM public; - -REVOKE EXECUTE ON FUNCTION pg_stat_reset_subscription_worker(oid, oid) FROM public; +REVOKE EXECUTE ON FUNCTION pg_stat_reset_subscription_stats(oid) FROM public; REVOKE EXECUTE ON FUNCTION lo_import(text) FROM public; diff --git a/src/backend/catalog/system_views.sql b/src/backend/catalog/system_views.sql index 3cb69b1f87..40b7bca5a9 100644 --- a/src/backend/catalog/system_views.sql +++ b/src/backend/catalog/system_views.sql @@ -1264,25 +1264,12 @@ GRANT SELECT (oid, subdbid, subname, subowner, subenabled, subbinary, substream, subtwophasestate, subslotname, subsynccommit, subpublications) ON pg_subscription TO public; -CREATE VIEW pg_stat_subscription_workers AS +CREATE VIEW pg_stat_subscription_stats AS SELECT - w.subid, + ss.subid, s.subname, - w.subrelid, - w.last_error_relid, - w.last_error_command, - w.last_error_xid, - w.last_error_count, - w.last_error_message, - w.last_error_time - FROM (SELECT - oid as subid, - NULL as relid - FROM pg_subscription - UNION ALL - SELECT - srsubid as subid, - srrelid as relid - FROM pg_subscription_rel) sr, - LATERAL pg_stat_get_subscription_worker(sr.subid, sr.relid) w - JOIN pg_subscription s ON (w.subid = s.oid); + ss.apply_error_count, + ss.sync_error_count, + ss.stats_reset + FROM pg_subscription as s, + pg_stat_get_subscription_stats(s.oid) as ss; diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c index 0646f53098..53ddd930e6 100644 --- a/src/backend/postmaster/pgstat.c +++ b/src/backend/postmaster/pgstat.c @@ -106,7 +106,7 @@ #define PGSTAT_DB_HASH_SIZE 16 #define PGSTAT_TAB_HASH_SIZE 512 #define PGSTAT_FUNCTION_HASH_SIZE 512 -#define PGSTAT_SUBWORKER_HASH_SIZE 32 +#define PGSTAT_SUBSCRIPTION_HASH_SIZE 32 #define PGSTAT_REPLSLOT_HASH_SIZE 32 @@ -284,6 +284,7 @@ static PgStat_GlobalStats globalStats; static PgStat_WalStats walStats; static PgStat_SLRUStats slruStats[SLRU_NUM_ELEMENTS]; static HTAB *replSlotStatHash = NULL; +static HTAB *subscriptionStatHash = NULL; /* * List of OIDs of databases we need to write out. 
If an entry is InvalidOid, @@ -322,14 +323,13 @@ NON_EXEC_STATIC void PgstatCollectorMain(int argc, char *argv[]) pg_attribute_no static PgStat_StatDBEntry *pgstat_get_db_entry(Oid databaseid, bool create); static PgStat_StatTabEntry *pgstat_get_tab_entry(PgStat_StatDBEntry *dbentry, Oid tableoid, bool create); -static PgStat_StatSubWorkerEntry *pgstat_get_subworker_entry(PgStat_StatDBEntry *dbentry, - Oid subid, Oid subrelid, - bool create); +static PgStat_StatSubEntry *pgstat_get_subscription_entry(Oid subid, bool create); +static void pgstat_reset_subscription(PgStat_StatSubEntry *subentry, TimestampTz ts); static void pgstat_write_statsfiles(bool permanent, bool allDbs); static void pgstat_write_db_statsfile(PgStat_StatDBEntry *dbentry, bool permanent); static HTAB *pgstat_read_statsfiles(Oid onlydb, bool permanent, bool deep); static void pgstat_read_db_statsfile(Oid databaseid, HTAB *tabhash, HTAB *funchash, - HTAB *subworkerhash, bool permanent); + bool permanent); static void backend_read_statsfile(void); static bool pgstat_write_statsfile_needed(void); @@ -341,7 +341,6 @@ static void pgstat_reset_replslot(PgStat_StatReplSlotEntry *slotstats, Timestamp static void pgstat_send_tabstat(PgStat_MsgTabstat *tsmsg, TimestampTz now); static void pgstat_send_funcstats(void); static void pgstat_send_slru(void); -static void pgstat_send_subscription_purge(PgStat_MsgSubscriptionPurge *msg); static HTAB *pgstat_collect_oids(Oid catalogid, AttrNumber anum_oid); static bool pgstat_should_report_connstat(void); static void pgstat_report_disconnect(Oid dboid); @@ -363,6 +362,7 @@ static void pgstat_recv_resetsharedcounter(PgStat_MsgResetsharedcounter *msg, in static void pgstat_recv_resetsinglecounter(PgStat_MsgResetsinglecounter *msg, int len); static void pgstat_recv_resetslrucounter(PgStat_MsgResetslrucounter *msg, int len); static void pgstat_recv_resetreplslotcounter(PgStat_MsgResetreplslotcounter *msg, int len); +static void pgstat_recv_resetsubcounter(PgStat_MsgResetsubcounter *msg, int len); static void pgstat_recv_autovac(PgStat_MsgAutovacStart *msg, int len); static void pgstat_recv_vacuum(PgStat_MsgVacuum *msg, int len); static void pgstat_recv_analyze(PgStat_MsgAnalyze *msg, int len); @@ -380,8 +380,8 @@ static void pgstat_recv_connect(PgStat_MsgConnect *msg, int len); static void pgstat_recv_disconnect(PgStat_MsgDisconnect *msg, int len); static void pgstat_recv_replslot(PgStat_MsgReplSlot *msg, int len); static void pgstat_recv_tempfile(PgStat_MsgTempFile *msg, int len); -static void pgstat_recv_subscription_purge(PgStat_MsgSubscriptionPurge *msg, int len); -static void pgstat_recv_subworker_error(PgStat_MsgSubWorkerError *msg, int len); +static void pgstat_recv_subscription_drop(PgStat_MsgSubscriptionDrop *msg, int len); +static void pgstat_recv_subscription_error(PgStat_MsgSubscriptionError *msg, int len); /* ------------------------------------------------------------ * Public functions called from postmaster follow @@ -1187,6 +1187,32 @@ pgstat_vacuum_stat(void) } } + /* + * Repeat the above steps for subscriptions, if subscription stats are + * being collected. + */ + if (subscriptionStatHash) + { + PgStat_StatSubEntry *subentry; + + /* + * Read pg_subscription and make a list of OIDs of all existing + * subscriptions. 
+ */ + htab = pgstat_collect_oids(SubscriptionRelationId, Anum_pg_subscription_oid); + + hash_seq_init(&hstat, subscriptionStatHash); + while ((subentry = (PgStat_StatSubEntry *) hash_seq_search(&hstat)) != NULL) + { + CHECK_FOR_INTERRUPTS(); + + if (hash_search(htab, (void *) &(subentry->subid), HASH_FIND, NULL) == NULL) + pgstat_report_subscription_drop(subentry->subid); + } + + hash_destroy(htab); + } + /* * Lookup our own database entry; if not found, nothing more to do. */ @@ -1311,74 +1337,6 @@ pgstat_vacuum_stat(void) hash_destroy(htab); } - - /* - * Repeat for subscription workers. Similarly, we needn't bother in the - * common case where no subscription workers' stats are being collected. - */ - if (dbentry->subworkers != NULL && - hash_get_num_entries(dbentry->subworkers) > 0) - { - PgStat_StatSubWorkerEntry *subwentry; - PgStat_MsgSubscriptionPurge spmsg; - - /* - * Read pg_subscription and make a list of OIDs of all existing - * subscriptions - */ - htab = pgstat_collect_oids(SubscriptionRelationId, Anum_pg_subscription_oid); - - spmsg.m_databaseid = MyDatabaseId; - spmsg.m_nentries = 0; - - hash_seq_init(&hstat, dbentry->subworkers); - while ((subwentry = (PgStat_StatSubWorkerEntry *) hash_seq_search(&hstat)) != NULL) - { - bool exists = false; - Oid subid = subwentry->key.subid; - - CHECK_FOR_INTERRUPTS(); - - if (hash_search(htab, (void *) &subid, HASH_FIND, NULL) != NULL) - continue; - - /* - * It is possible that we have multiple entries for the - * subscription corresponding to apply worker and tablesync - * workers. In such cases, we don't need to add the same subid - * again. - */ - for (int i = 0; i < spmsg.m_nentries; i++) - { - if (spmsg.m_subids[i] == subid) - { - exists = true; - break; - } - } - - if (exists) - continue; - - /* This subscription is dead, add the subid to the message */ - spmsg.m_subids[spmsg.m_nentries++] = subid; - - /* - * If the message is full, send it out and reinitialize to empty - */ - if (spmsg.m_nentries >= PGSTAT_NUM_SUBSCRIPTIONPURGE) - { - pgstat_send_subscription_purge(&spmsg); - spmsg.m_nentries = 0; - } - } - - /* Send the rest of dead subscriptions */ - if (spmsg.m_nentries > 0) - pgstat_send_subscription_purge(&spmsg); - - hash_destroy(htab); - } } @@ -1551,8 +1509,7 @@ pgstat_reset_shared_counters(const char *target) * ---------- */ void -pgstat_reset_single_counter(Oid objoid, Oid subobjoid, - PgStat_Single_Reset_Type type) +pgstat_reset_single_counter(Oid objoid, PgStat_Single_Reset_Type type) { PgStat_MsgResetsinglecounter msg; @@ -1563,7 +1520,6 @@ pgstat_reset_single_counter(Oid objoid, Oid subobjoid, msg.m_databaseid = MyDatabaseId; msg.m_resettype = type; msg.m_objectid = objoid; - msg.m_subobjectid = subobjoid; pgstat_send(&msg, sizeof(msg)); } @@ -1623,6 +1579,30 @@ pgstat_reset_replslot_counter(const char *name) pgstat_send(&msg, sizeof(msg)); } +/* ---------- + * pgstat_reset_subscription_counter() - + * + * Tell the statistics collector to reset a single subscription + * counter, or all subscription counters (when subid is InvalidOid). + * + * Permission checking for this function is managed through the normal + * GRANT system. 
+ * ---------- + */ +void +pgstat_reset_subscription_counter(Oid subid) +{ + PgStat_MsgResetsubcounter msg; + + if (pgStatSock == PGINVALID_SOCKET) + return; + + msg.m_subid = subid; + pgstat_setheader(&msg.m_hdr, PGSTAT_MTYPE_RESETSUBCOUNTER); + + pgstat_send(&msg, sizeof(msg)); +} + /* ---------- * pgstat_report_autovac() - * @@ -1949,31 +1929,20 @@ pgstat_report_replslot_drop(const char *slotname) } /* ---------- - * pgstat_report_subworker_error() - + * pgstat_report_subscription_error() - * - * Tell the collector about the subscription worker error. + * Tell the collector about the subscription error. * ---------- */ void -pgstat_report_subworker_error(Oid subid, Oid subrelid, Oid relid, - LogicalRepMsgType command, TransactionId xid, - const char *errmsg) +pgstat_report_subscription_error(Oid subid, bool is_apply_error) { - PgStat_MsgSubWorkerError msg; - int len; + PgStat_MsgSubscriptionError msg; - pgstat_setheader(&msg.m_hdr, PGSTAT_MTYPE_SUBWORKERERROR); - msg.m_databaseid = MyDatabaseId; + pgstat_setheader(&msg.m_hdr, PGSTAT_MTYPE_SUBSCRIPTIONERROR); msg.m_subid = subid; - msg.m_subrelid = subrelid; - msg.m_relid = relid; - msg.m_command = command; - msg.m_xid = xid; - msg.m_timestamp = GetCurrentTimestamp(); - strlcpy(msg.m_message, errmsg, PGSTAT_SUBWORKERERROR_MSGLEN); - - len = offsetof(PgStat_MsgSubWorkerError, m_message) + strlen(msg.m_message) + 1; - pgstat_send(&msg, len); + msg.m_is_apply_error = is_apply_error; + pgstat_send(&msg, sizeof(PgStat_MsgSubscriptionError)); } /* ---------- @@ -1985,12 +1954,11 @@ pgstat_report_subworker_error(Oid subid, Oid subrelid, Oid relid, void pgstat_report_subscription_drop(Oid subid) { - PgStat_MsgSubscriptionPurge msg; + PgStat_MsgSubscriptionDrop msg; - msg.m_databaseid = MyDatabaseId; - msg.m_subids[0] = subid; - msg.m_nentries = 1; - pgstat_send_subscription_purge(&msg); + pgstat_setheader(&msg.m_hdr, PGSTAT_MTYPE_SUBSCRIPTIONDROP); + msg.m_subid = subid; + pgstat_send(&msg, sizeof(PgStat_MsgSubscriptionDrop)); } /* ---------- @@ -2998,36 +2966,6 @@ pgstat_fetch_stat_funcentry(Oid func_id) return funcentry; } -/* - * --------- - * pgstat_fetch_stat_subworker_entry() - - * - * Support function for the SQL-callable pgstat* functions. Returns - * the collected statistics for subscription worker or NULL. - * --------- - */ -PgStat_StatSubWorkerEntry * -pgstat_fetch_stat_subworker_entry(Oid subid, Oid subrelid) -{ - PgStat_StatDBEntry *dbentry; - PgStat_StatSubWorkerEntry *wentry = NULL; - - /* Load the stats file if needed */ - backend_read_statsfile(); - - /* - * Lookup our database, then find the requested subscription worker stats. - */ - dbentry = pgstat_fetch_stat_dbentry(MyDatabaseId); - if (dbentry != NULL && dbentry->subworkers != NULL) - { - wentry = pgstat_get_subworker_entry(dbentry, subid, subrelid, - false); - } - - return wentry; -} - /* * --------- * pgstat_fetch_stat_archiver() - @@ -3140,6 +3078,23 @@ pgstat_fetch_replslot(NameData slotname) return pgstat_get_replslot_entry(slotname, false); } +/* + * --------- + * pgstat_fetch_stat_subscription() - + * + * Support function for the SQL-callable pgstat* functions. Returns + * the collected statistics for one subscription or NULL. + * --------- + */ +PgStat_StatSubEntry * +pgstat_fetch_stat_subscription(Oid subid) +{ + /* Load the stats file if needed */ + backend_read_statsfile(); + + return pgstat_get_subscription_entry(subid, false); +} + /* * Shut down a single backend's statistics reporting at process exit. 
* @@ -3465,24 +3420,6 @@ pgstat_send_slru(void) } } -/* -------- - * pgstat_send_subscription_purge() - - * - * Send a subscription purge message to the collector - * -------- - */ -static void -pgstat_send_subscription_purge(PgStat_MsgSubscriptionPurge *msg) -{ - int len; - - len = offsetof(PgStat_MsgSubscriptionPurge, m_subids[0]) - + msg->m_nentries * sizeof(Oid); - - pgstat_setheader(&msg->m_hdr, PGSTAT_MTYPE_SUBSCRIPTIONPURGE); - pgstat_send(msg, len); -} - /* ---------- * PgstatCollectorMain() - * @@ -3668,6 +3605,10 @@ PgstatCollectorMain(int argc, char *argv[]) len); break; + case PGSTAT_MTYPE_RESETSUBCOUNTER: + pgstat_recv_resetsubcounter(&msg.msg_resetsubcounter, len); + break; + case PGSTAT_MTYPE_AUTOVAC_START: pgstat_recv_autovac(&msg.msg_autovacuum_start, len); break; @@ -3738,12 +3679,12 @@ PgstatCollectorMain(int argc, char *argv[]) pgstat_recv_disconnect(&msg.msg_disconnect, len); break; - case PGSTAT_MTYPE_SUBSCRIPTIONPURGE: - pgstat_recv_subscription_purge(&msg.msg_subscriptionpurge, len); + case PGSTAT_MTYPE_SUBSCRIPTIONDROP: + pgstat_recv_subscription_drop(&msg.msg_subscriptiondrop, len); break; - case PGSTAT_MTYPE_SUBWORKERERROR: - pgstat_recv_subworker_error(&msg.msg_subworkererror, len); + case PGSTAT_MTYPE_SUBSCRIPTIONERROR: + pgstat_recv_subscription_error(&msg.msg_subscriptionerror, len); break; default: @@ -3791,8 +3732,7 @@ PgstatCollectorMain(int argc, char *argv[]) /* * Subroutine to clear stats in a database entry * - * Tables, functions, and subscription workers hashes are initialized - * to empty. + * Tables and functions hashes are initialized to empty. */ static void reset_dbentry_counters(PgStat_StatDBEntry *dbentry) @@ -3845,13 +3785,6 @@ reset_dbentry_counters(PgStat_StatDBEntry *dbentry) PGSTAT_FUNCTION_HASH_SIZE, &hash_ctl, HASH_ELEM | HASH_BLOBS); - - hash_ctl.keysize = sizeof(PgStat_StatSubWorkerKey); - hash_ctl.entrysize = sizeof(PgStat_StatSubWorkerEntry); - dbentry->subworkers = hash_create("Per-database subscription worker", - PGSTAT_SUBWORKER_HASH_SIZE, - &hash_ctl, - HASH_ELEM | HASH_BLOBS); } /* @@ -3876,7 +3809,7 @@ pgstat_get_db_entry(Oid databaseid, bool create) /* * If not found, initialize the new one. This creates empty hash tables - * for tables, functions, and subscription workers, too. + * for tables and functions, too. */ if (!found) reset_dbentry_counters(result); @@ -3934,48 +3867,6 @@ pgstat_get_tab_entry(PgStat_StatDBEntry *dbentry, Oid tableoid, bool create) return result; } -/* ---------- - * pgstat_get_subworker_entry - * - * Return subscription worker entry with the given subscription OID and - * relation OID. If subrelid is InvalidOid, it returns an entry of the - * apply worker otherwise returns an entry of the table sync worker - * associated with subrelid. If no subscription worker entry exists, - * initialize it, if the create parameter is true. Else, return NULL. - * ---------- - */ -static PgStat_StatSubWorkerEntry * -pgstat_get_subworker_entry(PgStat_StatDBEntry *dbentry, Oid subid, Oid subrelid, - bool create) -{ - PgStat_StatSubWorkerEntry *subwentry; - PgStat_StatSubWorkerKey key; - bool found; - HASHACTION action = (create ? 
HASH_ENTER : HASH_FIND); - - key.subid = subid; - key.subrelid = subrelid; - subwentry = (PgStat_StatSubWorkerEntry *) hash_search(dbentry->subworkers, - (void *) &key, - action, &found); - - if (!create && !found) - return NULL; - - /* If not found, initialize the new one */ - if (!found) - { - subwentry->last_error_relid = InvalidOid; - subwentry->last_error_command = 0; - subwentry->last_error_xid = InvalidTransactionId; - subwentry->last_error_count = 0; - subwentry->last_error_time = 0; - subwentry->last_error_message[0] = '\0'; - } - - return subwentry; -} - /* ---------- * pgstat_write_statsfiles() - * Write the global statistics file, as well as requested DB files. @@ -4059,8 +3950,8 @@ pgstat_write_statsfiles(bool permanent, bool allDbs) while ((dbentry = (PgStat_StatDBEntry *) hash_seq_search(&hstat)) != NULL) { /* - * Write out the table, function, and subscription-worker stats for - * this DB into the appropriate per-DB stat file, if required. + * Write out the table and function stats for this DB into the + * appropriate per-DB stat file, if required. */ if (allDbs || pgstat_db_requested(dbentry->databaseid)) { @@ -4095,6 +3986,22 @@ pgstat_write_statsfiles(bool permanent, bool allDbs) } } + /* + * Write subscription stats struct + */ + if (subscriptionStatHash) + { + PgStat_StatSubEntry *subentry; + + hash_seq_init(&hstat, subscriptionStatHash); + while ((subentry = (PgStat_StatSubEntry *) hash_seq_search(&hstat)) != NULL) + { + fputc('S', fpout); + rc = fwrite(subentry, sizeof(PgStat_StatSubEntry), 1, fpout); + (void) rc; /* we'll check for error with ferror */ + } + } + /* * No more output to be done. Close the temp file and replace the old * pgstat.stat with it. The ferror() check replaces testing for error @@ -4174,10 +4081,8 @@ pgstat_write_db_statsfile(PgStat_StatDBEntry *dbentry, bool permanent) { HASH_SEQ_STATUS tstat; HASH_SEQ_STATUS fstat; - HASH_SEQ_STATUS sstat; PgStat_StatTabEntry *tabentry; PgStat_StatFuncEntry *funcentry; - PgStat_StatSubWorkerEntry *subwentry; FILE *fpout; int32 format_id; Oid dbid = dbentry->databaseid; @@ -4232,17 +4137,6 @@ pgstat_write_db_statsfile(PgStat_StatDBEntry *dbentry, bool permanent) (void) rc; /* we'll check for error with ferror */ } - /* - * Walk through the database's subscription worker stats table. - */ - hash_seq_init(&sstat, dbentry->subworkers); - while ((subwentry = (PgStat_StatSubWorkerEntry *) hash_seq_search(&sstat)) != NULL) - { - fputc('S', fpout); - rc = fwrite(subwentry, sizeof(PgStat_StatSubWorkerEntry), 1, fpout); - (void) rc; /* we'll check for error with ferror */ - } - /* * No more output to be done. Close the temp file and replace the old * pgstat.stat with it. The ferror() check replaces testing for error @@ -4301,9 +4195,8 @@ pgstat_write_db_statsfile(PgStat_StatDBEntry *dbentry, bool permanent) * files after reading; the in-memory status is now authoritative, and the * files would be out of date in case somebody else reads them. * - * If a 'deep' read is requested, table/function/subscription-worker stats are - * read, otherwise the table/function/subscription-worker hash tables remain - * empty. + * If a 'deep' read is requested, table/function stats are read, otherwise + * the table/function hash tables remain empty. 
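+ *
+ * Subscription stats now live in the global stats file (as 'S' records),
+ * so they are read here rather than from the per-DB files.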
* ---------- */ static HTAB * @@ -4482,7 +4375,6 @@ pgstat_read_statsfiles(Oid onlydb, bool permanent, bool deep) memcpy(dbentry, &dbbuf, sizeof(PgStat_StatDBEntry)); dbentry->tables = NULL; dbentry->functions = NULL; - dbentry->subworkers = NULL; /* * In the collector, disregard the timestamp we read from the @@ -4494,8 +4386,8 @@ pgstat_read_statsfiles(Oid onlydb, bool permanent, bool deep) dbentry->stats_timestamp = 0; /* - * Don't create tables/functions/subworkers hashtables for - * uninteresting databases. + * Don't create tables/functions hashtables for uninteresting + * databases. */ if (onlydb != InvalidOid) { @@ -4520,14 +4412,6 @@ pgstat_read_statsfiles(Oid onlydb, bool permanent, bool deep) &hash_ctl, HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); - hash_ctl.keysize = sizeof(PgStat_StatSubWorkerKey); - hash_ctl.entrysize = sizeof(PgStat_StatSubWorkerEntry); - hash_ctl.hcxt = pgStatLocalContext; - dbentry->subworkers = hash_create("Per-database subscription worker", - PGSTAT_SUBWORKER_HASH_SIZE, - &hash_ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); - /* * If requested, read the data from the database-specific * file. Otherwise we just leave the hashtables empty. @@ -4536,7 +4420,6 @@ pgstat_read_statsfiles(Oid onlydb, bool permanent, bool deep) pgstat_read_db_statsfile(dbentry->databaseid, dbentry->tables, dbentry->functions, - dbentry->subworkers, permanent); break; @@ -4580,6 +4463,45 @@ pgstat_read_statsfiles(Oid onlydb, bool permanent, bool deep) break; } + /* + * 'S' A PgStat_StatSubEntry struct describing subscription + * statistics. + */ + case 'S': + { + PgStat_StatSubEntry subbuf; + PgStat_StatSubEntry *subentry; + + if (fread(&subbuf, 1, sizeof(PgStat_StatSubEntry), fpin) + != sizeof(PgStat_StatSubEntry)) + { + ereport(pgStatRunningInCollector ? LOG : WARNING, + (errmsg("corrupted statistics file \"%s\"", + statfile))); + goto done; + } + + if (subscriptionStatHash == NULL) + { + HASHCTL hash_ctl; + + hash_ctl.keysize = sizeof(Oid); + hash_ctl.entrysize = sizeof(PgStat_StatSubEntry); + hash_ctl.hcxt = pgStatLocalContext; + subscriptionStatHash = hash_create("Subscription hash", + PGSTAT_SUBSCRIPTION_HASH_SIZE, + &hash_ctl, + HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + } + + subentry = (PgStat_StatSubEntry *) hash_search(subscriptionStatHash, + (void *) &subbuf.subid, + HASH_ENTER, NULL); + + memcpy(subentry, &subbuf, sizeof(subbuf)); + break; + } + case 'E': goto done; @@ -4614,21 +4536,19 @@ pgstat_read_statsfiles(Oid onlydb, bool permanent, bool deep) * As in pgstat_read_statsfiles, if the permanent file is requested, it is * removed after reading. * - * Note: this code has the ability to skip storing per-table, per-function, or - * per-subscription-worker data, if NULL is passed for the corresponding hashtable. - * That's not used at the moment though. + * Note: this code has the ability to skip storing per-table or per-function + * data, if NULL is passed for the corresponding hashtable. That's not used + * at the moment though. 
* ---------- */ static void pgstat_read_db_statsfile(Oid databaseid, HTAB *tabhash, HTAB *funchash, - HTAB *subworkerhash, bool permanent) + bool permanent) { PgStat_StatTabEntry *tabentry; PgStat_StatTabEntry tabbuf; PgStat_StatFuncEntry funcbuf; PgStat_StatFuncEntry *funcentry; - PgStat_StatSubWorkerEntry subwbuf; - PgStat_StatSubWorkerEntry *subwentry; FILE *fpin; int32 format_id; bool found; @@ -4742,41 +4662,6 @@ pgstat_read_db_statsfile(Oid databaseid, HTAB *tabhash, HTAB *funchash, memcpy(funcentry, &funcbuf, sizeof(funcbuf)); break; - /* - * 'S' A PgStat_StatSubWorkerEntry struct describing - * subscription worker statistics. - */ - case 'S': - if (fread(&subwbuf, 1, sizeof(PgStat_StatSubWorkerEntry), - fpin) != sizeof(PgStat_StatSubWorkerEntry)) - { - ereport(pgStatRunningInCollector ? LOG : WARNING, - (errmsg("corrupted statistics file \"%s\"", - statfile))); - goto done; - } - - /* - * Skip if subscription worker data not wanted. - */ - if (subworkerhash == NULL) - break; - - subwentry = (PgStat_StatSubWorkerEntry *) hash_search(subworkerhash, - (void *) &subwbuf.key, - HASH_ENTER, &found); - - if (found) - { - ereport(pgStatRunningInCollector ? LOG : WARNING, - (errmsg("corrupted statistics file \"%s\"", - statfile))); - goto done; - } - - memcpy(subwentry, &subwbuf, sizeof(subwbuf)); - break; - /* * 'E' The EOF marker of a complete stats file. */ @@ -4829,6 +4714,7 @@ pgstat_read_db_statsfile_timestamp(Oid databaseid, bool permanent, PgStat_WalStats myWalStats; PgStat_SLRUStats mySLRUStats[SLRU_NUM_ELEMENTS]; PgStat_StatReplSlotEntry myReplSlotStats; + PgStat_StatSubEntry mySubStats; FILE *fpin; int32 format_id; const char *statfile = permanent ? PGSTAT_STAT_PERMANENT_FILENAME : pgstat_stat_filename; @@ -4959,6 +4845,22 @@ pgstat_read_db_statsfile_timestamp(Oid databaseid, bool permanent, } break; + /* + * 'S' A PgStat_StatSubEntry struct describing subscription + * statistics follows. + */ + case 'S': + if (fread(&mySubStats, 1, sizeof(PgStat_StatSubEntry), fpin) + != sizeof(PgStat_StatSubEntry)) + { + ereport(pgStatRunningInCollector ? LOG : WARNING, + (errmsg("corrupted statistics file \"%s\"", + statfile))); + FreeFile(fpin); + return false; + } + break; + case 'E': goto done; @@ -5164,6 +5066,7 @@ pgstat_clear_snapshot(void) pgStatLocalContext = NULL; pgStatDBHash = NULL; replSlotStatHash = NULL; + subscriptionStatHash = NULL; /* * Historically the backend_status.c facilities lived in this file, and @@ -5450,8 +5353,6 @@ pgstat_recv_dropdb(PgStat_MsgDropdb *msg, int len) hash_destroy(dbentry->tables); if (dbentry->functions != NULL) hash_destroy(dbentry->functions); - if (dbentry->subworkers != NULL) - hash_destroy(dbentry->subworkers); if (hash_search(pgStatDBHash, (void *) &dbid, @@ -5489,16 +5390,13 @@ pgstat_recv_resetcounter(PgStat_MsgResetcounter *msg, int len) hash_destroy(dbentry->tables); if (dbentry->functions != NULL) hash_destroy(dbentry->functions); - if (dbentry->subworkers != NULL) - hash_destroy(dbentry->subworkers); dbentry->tables = NULL; dbentry->functions = NULL; - dbentry->subworkers = NULL; /* * Reset database-level stats, too. This creates empty hash tables for - * tables, functions, and subscription workers. + * tables and functions. 
*/ reset_dbentry_counters(dbentry); } @@ -5567,14 +5465,6 @@ pgstat_recv_resetsinglecounter(PgStat_MsgResetsinglecounter *msg, int len) else if (msg->m_resettype == RESET_FUNCTION) (void) hash_search(dbentry->functions, (void *) &(msg->m_objectid), HASH_REMOVE, NULL); - else if (msg->m_resettype == RESET_SUBWORKER) - { - PgStat_StatSubWorkerKey key; - - key.subid = msg->m_objectid; - key.subrelid = msg->m_subobjectid; - (void) hash_search(dbentry->subworkers, (void *) &key, HASH_REMOVE, NULL); - } } /* ---------- @@ -5645,6 +5535,51 @@ pgstat_recv_resetreplslotcounter(PgStat_MsgResetreplslotcounter *msg, } } +/* ---------- + * pgstat_recv_resetsubcounter() - + * + * Reset some subscription statistics of the cluster. + * ---------- + */ +static void +pgstat_recv_resetsubcounter(PgStat_MsgResetsubcounter *msg, int len) +{ + PgStat_StatSubEntry *subentry; + TimestampTz ts; + + /* Return if we don't have replication subscription statistics */ + if (subscriptionStatHash == NULL) + return; + + ts = GetCurrentTimestamp(); + if (!OidIsValid(msg->m_subid)) + { + HASH_SEQ_STATUS sstat; + + /* Clear all subscription counters */ + hash_seq_init(&sstat, subscriptionStatHash); + while ((subentry = (PgStat_StatSubEntry *) hash_seq_search(&sstat)) != NULL) + pgstat_reset_subscription(subentry, ts); + } + else + { + /* Get the subscription statistics to reset */ + subentry = pgstat_get_subscription_entry(msg->m_subid, false); + + /* + * Nothing to do if the given subscription entry is not found. This + * could happen when the subscription with the subid is removed and + * the corresponding statistics entry is also removed before receiving + * the reset message. + */ + if (!subentry) + return; + + /* Reset the stats for the requested subscription */ + pgstat_reset_subscription(subentry, ts); + } +} + /* ---------- * pgstat_recv_autovac() - @@ -6118,81 +6053,42 @@ pgstat_recv_funcpurge(PgStat_MsgFuncpurge *msg, int len) } /* ---------- - * pgstat_recv_subscription_purge() - + * pgstat_recv_subscription_drop() - * - * Process a SUBSCRIPTIONPURGE message. + * Process a SUBSCRIPTIONDROP message. * ---------- */ static void -pgstat_recv_subscription_purge(PgStat_MsgSubscriptionPurge *msg, int len) +pgstat_recv_subscription_drop(PgStat_MsgSubscriptionDrop *msg, int len) { - HASH_SEQ_STATUS hstat; - PgStat_StatDBEntry *dbentry; - PgStat_StatSubWorkerEntry *subwentry; - - dbentry = pgstat_get_db_entry(msg->m_databaseid, false); - - /* No need to purge if we don't even know the database */ - if (!dbentry || !dbentry->subworkers) + /* Return if we don't have replication subscription statistics */ + if (subscriptionStatHash == NULL) return; - /* Remove all subscription worker statistics for the given subscriptions */ - hash_seq_init(&hstat, dbentry->subworkers); - while ((subwentry = (PgStat_StatSubWorkerEntry *) hash_seq_search(&hstat)) != NULL) - { - for (int i = 0; i < msg->m_nentries; i++) - { - if (subwentry->key.subid == msg->m_subids[i]) - { - (void) hash_search(dbentry->subworkers, (void *) &(subwentry->key), - HASH_REMOVE, NULL); - break; - } - } - } + /* Remove from hashtable if present; we don't care if it's not */ + (void) hash_search(subscriptionStatHash, (void *) &(msg->m_subid), + HASH_REMOVE, NULL); } /* ---------- - * pgstat_recv_subworker_error() - + * pgstat_recv_subscription_error() - * - * Process a SUBWORKERERROR message. + * Process a SUBSCRIPTIONERROR message. 
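+ *
+ *	The message identifies only the subscription and whether the failing
+ *	worker was the apply worker; we just bump the matching error counter.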
* ---------- */ static void -pgstat_recv_subworker_error(PgStat_MsgSubWorkerError *msg, int len) +pgstat_recv_subscription_error(PgStat_MsgSubscriptionError *msg, int len) { - PgStat_StatDBEntry *dbentry; - PgStat_StatSubWorkerEntry *subwentry; - - dbentry = pgstat_get_db_entry(msg->m_databaseid, true); + PgStat_StatSubEntry *subentry; - /* Get the subscription worker stats */ - subwentry = pgstat_get_subworker_entry(dbentry, msg->m_subid, - msg->m_subrelid, true); - Assert(subwentry); - - if (subwentry->last_error_relid == msg->m_relid && - subwentry->last_error_command == msg->m_command && - subwentry->last_error_xid == msg->m_xid && - strcmp(subwentry->last_error_message, msg->m_message) == 0) - { - /* - * The same error occurred again in succession, just update its - * timestamp and count. - */ - subwentry->last_error_count++; - subwentry->last_error_time = msg->m_timestamp; - return; - } + /* Get the subscription stats */ + subentry = pgstat_get_subscription_entry(msg->m_subid, true); + Assert(subentry); - /* Otherwise, update the error information */ - subwentry->last_error_relid = msg->m_relid; - subwentry->last_error_command = msg->m_command; - subwentry->last_error_xid = msg->m_xid; - subwentry->last_error_count = 1; - subwentry->last_error_time = msg->m_timestamp; - strlcpy(subwentry->last_error_message, msg->m_message, - PGSTAT_SUBWORKERERROR_MSGLEN); + if (msg->m_is_apply_error) + subentry->apply_error_count++; + else + subentry->sync_error_count++; } /* ---------- @@ -6313,6 +6209,68 @@ pgstat_reset_replslot(PgStat_StatReplSlotEntry *slotent, TimestampTz ts) slotent->stat_reset_timestamp = ts; } +/* ---------- + * pgstat_get_subscription_entry + * + * Return the subscription statistics entry with the given subscription OID. + * If no subscription entry exists, initialize it, if the create parameter is + * true. Else, return NULL. + * ---------- + */ +static PgStat_StatSubEntry * +pgstat_get_subscription_entry(Oid subid, bool create) +{ + PgStat_StatSubEntry *subentry; + bool found; + HASHACTION action = (create ? HASH_ENTER : HASH_FIND); + + if (subscriptionStatHash == NULL) + { + HASHCTL hash_ctl; + + /* + * Quick return NULL if the hash table is empty and the caller didn't + * request to create the entry. + */ + if (!create) + return NULL; + + hash_ctl.keysize = sizeof(Oid); + hash_ctl.entrysize = sizeof(PgStat_StatSubEntry); + subscriptionStatHash = hash_create("Subscription hash", + PGSTAT_SUBSCRIPTION_HASH_SIZE, + &hash_ctl, + HASH_ELEM | HASH_BLOBS); + } + + subentry = (PgStat_StatSubEntry *) hash_search(subscriptionStatHash, + (void *) &subid, + action, &found); + + if (!create && !found) + return NULL; + + /* If not found, initialize the new one */ + if (!found) + pgstat_reset_subscription(subentry, 0); + + return subentry; +} + +/* ---------- + * pgstat_reset_subscription + * + * Reset the given subscription stats. 
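+ *
+ * A ts of 0 leaves stat_reset_timestamp unset, which is reported as a
+ * NULL stats_reset at the SQL level.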
+ * ---------- + */ +static void +pgstat_reset_subscription(PgStat_StatSubEntry *subentry, TimestampTz ts) +{ + subentry->apply_error_count = 0; + subentry->sync_error_count = 0; + subentry->stat_reset_timestamp = ts; +} + /* * pgstat_slru_index * diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c index 5d9acc6173..7e267f7960 100644 --- a/src/backend/replication/logical/worker.c +++ b/src/backend/replication/logical/worker.c @@ -3377,7 +3377,6 @@ void ApplyWorkerMain(Datum main_arg) { int worker_slot = DatumGetInt32(main_arg); - MemoryContext cctx = CurrentMemoryContext; MemoryContext oldctx; char originname[NAMEDATALEN]; XLogRecPtr origin_startpos; @@ -3485,20 +3484,15 @@ ApplyWorkerMain(Datum main_arg) } PG_CATCH(); { - MemoryContext ecxt = MemoryContextSwitchTo(cctx); - ErrorData *errdata = CopyErrorData(); - /* - * Report the table sync error. There is no corresponding message - * type for table synchronization. + * Abort the current transaction so that we send the stats message + * in an idle state. */ - pgstat_report_subworker_error(MyLogicalRepWorker->subid, - MyLogicalRepWorker->relid, - MyLogicalRepWorker->relid, - 0, /* message type */ - InvalidTransactionId, - errdata->message); - MemoryContextSwitchTo(ecxt); + AbortOutOfAnyTransaction(); + + /* Report the worker failed during table synchronization */ + pgstat_report_subscription_error(MySubscription->oid, false); + PG_RE_THROW(); } PG_END_TRY(); @@ -3625,22 +3619,14 @@ ApplyWorkerMain(Datum main_arg) } PG_CATCH(); { - /* report the apply error */ - if (apply_error_callback_arg.command != 0) - { - MemoryContext ecxt = MemoryContextSwitchTo(cctx); - ErrorData *errdata = CopyErrorData(); - - pgstat_report_subworker_error(MyLogicalRepWorker->subid, - MyLogicalRepWorker->relid, - apply_error_callback_arg.rel != NULL - ? apply_error_callback_arg.rel->localreloid - : InvalidOid, - apply_error_callback_arg.command, - apply_error_callback_arg.remote_xid, - errdata->message); - MemoryContextSwitchTo(ecxt); - } + /* + * Abort the current transaction so that we send the stats message in + * an idle state. + */ + AbortOutOfAnyTransaction(); + + /* Report the worker failed while applying changes */ + pgstat_report_subscription_error(MySubscription->oid, !am_tablesync_worker()); PG_RE_THROW(); } diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c index 30e8dfa7c1..fd993d0d5f 100644 --- a/src/backend/utils/adt/pgstatfuncs.c +++ b/src/backend/utils/adt/pgstatfuncs.c @@ -2163,7 +2163,7 @@ pg_stat_reset_single_table_counters(PG_FUNCTION_ARGS) { Oid taboid = PG_GETARG_OID(0); - pgstat_reset_single_counter(taboid, InvalidOid, RESET_TABLE); + pgstat_reset_single_counter(taboid, RESET_TABLE); PG_RETURN_VOID(); } @@ -2173,38 +2173,11 @@ pg_stat_reset_single_function_counters(PG_FUNCTION_ARGS) { Oid funcoid = PG_GETARG_OID(0); - pgstat_reset_single_counter(funcoid, InvalidOid, RESET_FUNCTION); + pgstat_reset_single_counter(funcoid, RESET_FUNCTION); PG_RETURN_VOID(); } -Datum -pg_stat_reset_subscription_worker_subrel(PG_FUNCTION_ARGS) -{ - Oid subid = PG_GETARG_OID(0); - Oid relid = PG_ARGISNULL(1) ? 
InvalidOid : PG_GETARG_OID(1); - - pgstat_reset_single_counter(subid, relid, RESET_SUBWORKER); - - PG_RETURN_VOID(); -} - -/* Reset all subscription worker stats associated with the given subscription */ -Datum -pg_stat_reset_subscription_worker_sub(PG_FUNCTION_ARGS) -{ - Oid subid = PG_GETARG_OID(0); - - /* - * Use subscription drop message to remove statistics of all subscription - * workers. - */ - pgstat_report_subscription_drop(subid); - - PG_RETURN_VOID(); -} - - /* Reset SLRU counters (a specific one or all of them). */ Datum pg_stat_reset_slru(PG_FUNCTION_ARGS) @@ -2258,6 +2231,32 @@ pg_stat_reset_replication_slot(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } +/* Reset subscription stats (a specific one or all of them) */ +Datum +pg_stat_reset_subscription_stats(PG_FUNCTION_ARGS) +{ + Oid subid; + + if (PG_ARGISNULL(0)) + { + /* Clear all subscription stats */ + subid = InvalidOid; + } + else + { + subid = PG_GETARG_OID(0); + + if (!OidIsValid(subid)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid subscription OID %u", subid))); + } + + pgstat_reset_subscription_counter(subid); + + PG_RETURN_VOID(); +} + Datum pg_stat_get_archiver(PG_FUNCTION_ARGS) { @@ -2400,50 +2399,32 @@ pg_stat_get_replication_slot(PG_FUNCTION_ARGS) } /* - * Get the subscription worker statistics for the given subscription - * (and relation). + * Get the subscription statistics for the given subscription. If the + * subscription statistics is not available, return all-zeros stats. */ Datum -pg_stat_get_subscription_worker(PG_FUNCTION_ARGS) +pg_stat_get_subscription_stats(PG_FUNCTION_ARGS) { -#define PG_STAT_GET_SUBSCRIPTION_WORKER_COLS 8 +#define PG_STAT_GET_SUBSCRIPTION_STATS_COLS 4 Oid subid = PG_GETARG_OID(0); - Oid subrelid; TupleDesc tupdesc; - Datum values[PG_STAT_GET_SUBSCRIPTION_WORKER_COLS]; - bool nulls[PG_STAT_GET_SUBSCRIPTION_WORKER_COLS]; - PgStat_StatSubWorkerEntry *wentry; - int i; - - if (PG_ARGISNULL(1)) - subrelid = InvalidOid; - else - subrelid = PG_GETARG_OID(1); + Datum values[PG_STAT_GET_SUBSCRIPTION_STATS_COLS]; + bool nulls[PG_STAT_GET_SUBSCRIPTION_STATS_COLS]; + PgStat_StatSubEntry *subentry; + PgStat_StatSubEntry allzero; - /* Get subscription worker stats */ - wentry = pgstat_fetch_stat_subworker_entry(subid, subrelid); - - /* Return NULL if there is no worker statistics */ - if (wentry == NULL) - PG_RETURN_NULL(); + /* Get subscription stats */ + subentry = pgstat_fetch_stat_subscription(subid); /* Initialise attributes information in the tuple descriptor */ - tupdesc = CreateTemplateTupleDesc(PG_STAT_GET_SUBSCRIPTION_WORKER_COLS); + tupdesc = CreateTemplateTupleDesc(PG_STAT_GET_SUBSCRIPTION_STATS_COLS); TupleDescInitEntry(tupdesc, (AttrNumber) 1, "subid", OIDOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 2, "subrelid", - OIDOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 3, "last_error_relid", - OIDOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 4, "last_error_command", - TEXTOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 5, "last_error_xid", - XIDOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 6, "last_error_count", + TupleDescInitEntry(tupdesc, (AttrNumber) 2, "apply_error_count", INT8OID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 7, "last_error_message", - TEXTOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 8, "last_error_time", + TupleDescInitEntry(tupdesc, (AttrNumber) 3, "sync_error_count", + INT8OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber) 4, "stats_reset", TIMESTAMPTZOID, -1, 0); 
BlessTupleDesc(tupdesc); @@ -2451,46 +2432,27 @@ pg_stat_get_subscription_worker(PG_FUNCTION_ARGS) MemSet(values, 0, sizeof(values)); MemSet(nulls, 0, sizeof(nulls)); - i = 0; - /* subid */ - values[i++] = ObjectIdGetDatum(subid); - - /* subrelid */ - if (OidIsValid(subrelid)) - values[i++] = ObjectIdGetDatum(subrelid); - else - nulls[i++] = true; - - /* last_error_relid */ - if (OidIsValid(wentry->last_error_relid)) - values[i++] = ObjectIdGetDatum(wentry->last_error_relid); - else - nulls[i++] = true; - - /* last_error_command */ - if (wentry->last_error_command != 0) - values[i++] = - CStringGetTextDatum(logicalrep_message_type(wentry->last_error_command)); - else - nulls[i++] = true; + if (!subentry) + { + /* If the subscription is not found, initialise its stats */ + memset(&allzero, 0, sizeof(PgStat_StatSubEntry)); + subentry = &allzero; + } - /* last_error_xid */ - if (TransactionIdIsValid(wentry->last_error_xid)) - values[i++] = TransactionIdGetDatum(wentry->last_error_xid); - else - nulls[i++] = true; + /* subid */ + values[0] = ObjectIdGetDatum(subid); - /* last_error_count */ - values[i++] = Int64GetDatum(wentry->last_error_count); + /* apply_error_count */ + values[1] = Int64GetDatum(subentry->apply_error_count); - /* last_error_message */ - values[i++] = CStringGetTextDatum(wentry->last_error_message); + /* sync_error_count */ + values[2] = Int64GetDatum(subentry->sync_error_count); - /* last_error_time */ - if (wentry->last_error_time != 0) - values[i++] = TimestampTzGetDatum(wentry->last_error_time); + /* stats_reset */ + if (subentry->stat_reset_timestamp == 0) + nulls[3] = true; else - nulls[i++] = true; + values[3] = TimestampTzGetDatum(subentry->stat_reset_timestamp); /* Returns the record as Datum */ PG_RETURN_DATUM(HeapTupleGetDatum(heap_form_tuple(tupdesc, values, nulls))); diff --git a/src/include/catalog/catversion.h b/src/include/catalog/catversion.h index 14194afe1c..5cf18059b8 100644 --- a/src/include/catalog/catversion.h +++ b/src/include/catalog/catversion.h @@ -53,6 +53,6 @@ */ /* yyyymmddN */ -#define CATALOG_VERSION_NO 202202251 +#define CATALOG_VERSION_NO 202203011 #endif diff --git a/src/include/catalog/pg_proc.dat b/src/include/catalog/pg_proc.dat index 7de8cfc7e9..bf88858171 100644 --- a/src/include/catalog/pg_proc.dat +++ b/src/include/catalog/pg_proc.dat @@ -5376,14 +5376,14 @@ proargmodes => '{i,o,o,o,o,o,o,o,o,o,o}', proargnames => '{slot_name,slot_name,spill_txns,spill_count,spill_bytes,stream_txns,stream_count,stream_bytes,total_txns,total_bytes,stats_reset}', prosrc => 'pg_stat_get_replication_slot' }, -{ oid => '8523', descr => 'statistics: information about subscription worker', - proname => 'pg_stat_get_subscription_worker', prorows => '1', proisstrict => 'f', - proretset => 't', provolatile => 's', proparallel => 'r', - prorettype => 'record', proargtypes => 'oid oid', - proallargtypes => '{oid,oid,oid,oid,oid,text,xid,int8,text,timestamptz}', - proargmodes => '{i,i,o,o,o,o,o,o,o,o}', - proargnames => '{subid,subrelid,subid,subrelid,last_error_relid,last_error_command,last_error_xid,last_error_count,last_error_message,last_error_time}', - prosrc => 'pg_stat_get_subscription_worker' }, +{ oid => '8523', descr => 'statistics: information about subscription stats', + proname => 'pg_stat_get_subscription_stats', proisstrict => 'f', + provolatile => 's', proparallel => 'r', + prorettype => 'record', proargtypes => 'oid', + proallargtypes => '{oid,oid,int8,int8,timestamptz}', + proargmodes => '{i,o,o,o,o}', + proargnames => 
'{subid,subid,apply_error_count,sync_error_count,stats_reset}', + prosrc => 'pg_stat_get_subscription_stats' }, { oid => '6118', descr => 'statistics: information about subscription', proname => 'pg_stat_get_subscription', prorows => '10', proisstrict => 'f', proretset => 't', provolatile => 's', proparallel => 'r', @@ -5772,15 +5772,10 @@ provolatile => 'v', prorettype => 'void', proargtypes => 'text', prosrc => 'pg_stat_reset_replication_slot' }, { oid => '8524', - descr => 'statistics: reset collected statistics for a single subscription worker', - proname => 'pg_stat_reset_subscription_worker', proisstrict => 'f', - provolatile => 'v', prorettype => 'void', proargtypes => 'oid oid', - prosrc => 'pg_stat_reset_subscription_worker_subrel' }, -{ oid => '8525', - descr => 'statistics: reset all collected statistics for a single subscription', - proname => 'pg_stat_reset_subscription_worker', + descr => 'statistics: reset collected statistics for a single subscription', + proname => 'pg_stat_reset_subscription_stats', proisstrict => 'f', provolatile => 'v', prorettype => 'void', proargtypes => 'oid', - prosrc => 'pg_stat_reset_subscription_worker_sub' }, + prosrc => 'pg_stat_reset_subscription_stats' }, { oid => '3163', descr => 'current trigger depth', proname => 'pg_trigger_depth', provolatile => 's', proparallel => 'r', diff --git a/src/include/pgstat.h b/src/include/pgstat.h index e10d20222a..be2f7e2bcc 100644 --- a/src/include/pgstat.h +++ b/src/include/pgstat.h @@ -67,6 +67,7 @@ typedef enum StatMsgType PGSTAT_MTYPE_RESETSINGLECOUNTER, PGSTAT_MTYPE_RESETSLRUCOUNTER, PGSTAT_MTYPE_RESETREPLSLOTCOUNTER, + PGSTAT_MTYPE_RESETSUBCOUNTER, PGSTAT_MTYPE_AUTOVAC_START, PGSTAT_MTYPE_VACUUM, PGSTAT_MTYPE_ANALYZE, @@ -84,8 +85,8 @@ typedef enum StatMsgType PGSTAT_MTYPE_REPLSLOT, PGSTAT_MTYPE_CONNECT, PGSTAT_MTYPE_DISCONNECT, - PGSTAT_MTYPE_SUBSCRIPTIONPURGE, - PGSTAT_MTYPE_SUBWORKERERROR, + PGSTAT_MTYPE_SUBSCRIPTIONDROP, + PGSTAT_MTYPE_SUBSCRIPTIONERROR, } StatMsgType; /* ---------- @@ -148,8 +149,7 @@ typedef enum PgStat_Shared_Reset_Target typedef enum PgStat_Single_Reset_Type { RESET_TABLE, - RESET_FUNCTION, - RESET_SUBWORKER + RESET_FUNCTION } PgStat_Single_Reset_Type; /* ------------------------------------------------------------ @@ -368,7 +368,6 @@ typedef struct PgStat_MsgResetsinglecounter Oid m_databaseid; PgStat_Single_Reset_Type m_resettype; Oid m_objectid; - Oid m_subobjectid; } PgStat_MsgResetsinglecounter; /* ---------- @@ -394,6 +393,19 @@ typedef struct PgStat_MsgResetreplslotcounter bool clearall; } PgStat_MsgResetreplslotcounter; +/* ---------- + * PgStat_MsgResetsubcounter Sent by the backend to tell the collector + * to reset subscription counter(s) + * ---------- + */ +typedef struct PgStat_MsgResetsubcounter +{ + PgStat_MsgHdr m_hdr; + Oid m_subid; /* InvalidOid means reset all subscription + * stats */ +} PgStat_MsgResetsubcounter; + + /* ---------- * PgStat_MsgAutovacStart Sent by the autovacuum daemon to signal * that a database is going to be processed @@ -542,53 +554,28 @@ typedef struct PgStat_MsgReplSlot } PgStat_MsgReplSlot; /* ---------- - * PgStat_MsgSubscriptionPurge Sent by the backend and autovacuum to tell the - * collector about the dead subscriptions. + * PgStat_MsgSubscriptionDrop Sent by the backend and autovacuum to tell the + * collector about the dead subscription. 
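+ *								This is sent when a subscription is dropped,
+ *								and from pgstat_vacuum_stat() for entries
+ *								whose subscription no longer exists.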
* ---------- */ -#define PGSTAT_NUM_SUBSCRIPTIONPURGE \ - ((PGSTAT_MSG_PAYLOAD - sizeof(Oid) - sizeof(int)) / sizeof(Oid)) - -typedef struct PgStat_MsgSubscriptionPurge +typedef struct PgStat_MsgSubscriptionDrop { PgStat_MsgHdr m_hdr; - Oid m_databaseid; - int m_nentries; - Oid m_subids[PGSTAT_NUM_SUBSCRIPTIONPURGE]; -} PgStat_MsgSubscriptionPurge; + Oid m_subid; +} PgStat_MsgSubscriptionDrop; /* ---------- - * PgStat_MsgSubWorkerError Sent by the apply worker or the table sync - * worker to report the error occurred while - * processing changes. + * PgStat_MsgSubscriptionError Sent by the apply worker or the table sync + * worker to report an error on the subscription. * ---------- */ -#define PGSTAT_SUBWORKERERROR_MSGLEN 256 -typedef struct PgStat_MsgSubWorkerError +typedef struct PgStat_MsgSubscriptionError { PgStat_MsgHdr m_hdr; - /* - * m_subid and m_subrelid are used to determine the subscription and the - * reporter of the error. m_subrelid is InvalidOid if reported by an apply - * worker otherwise reported by a table sync worker. - */ - Oid m_databaseid; Oid m_subid; - Oid m_subrelid; - - /* - * Oid of the table that the reporter was actually processing. m_relid can - * be InvalidOid if an error occurred during worker applying a - * non-data-modification message such as RELATION. - */ - Oid m_relid; - - LogicalRepMsgType m_command; - TransactionId m_xid; - TimestampTz m_timestamp; - char m_message[PGSTAT_SUBWORKERERROR_MSGLEN]; -} PgStat_MsgSubWorkerError; + bool m_is_apply_error; +} PgStat_MsgSubscriptionError; /* ---------- * PgStat_MsgRecoveryConflict Sent by the backend upon recovery conflict @@ -750,6 +737,7 @@ typedef union PgStat_Msg PgStat_MsgResetsinglecounter msg_resetsinglecounter; PgStat_MsgResetslrucounter msg_resetslrucounter; PgStat_MsgResetreplslotcounter msg_resetreplslotcounter; + PgStat_MsgResetsubcounter msg_resetsubcounter; PgStat_MsgAutovacStart msg_autovacuum_start; PgStat_MsgVacuum msg_vacuum; PgStat_MsgAnalyze msg_analyze; @@ -767,8 +755,8 @@ typedef union PgStat_Msg PgStat_MsgReplSlot msg_replslot; PgStat_MsgConnect msg_connect; PgStat_MsgDisconnect msg_disconnect; - PgStat_MsgSubscriptionPurge msg_subscriptionpurge; - PgStat_MsgSubWorkerError msg_subworkererror; + PgStat_MsgSubscriptionError msg_subscriptionerror; + PgStat_MsgSubscriptionDrop msg_subscriptiondrop; } PgStat_Msg; @@ -780,7 +768,7 @@ typedef union PgStat_Msg * ------------------------------------------------------------ */ -#define PGSTAT_FILE_FORMAT_ID 0x01A5BCA5 +#define PGSTAT_FILE_FORMAT_ID 0x01A5BCA6 /* ---------- * PgStat_StatDBEntry The collector's data per database @@ -823,16 +811,11 @@ typedef struct PgStat_StatDBEntry TimestampTz stats_timestamp; /* time of db stats file update */ /* - * tables, functions, and subscription workers must be last in the struct, - * because we don't write the pointers out to the stats file. - * - * subworkers is the hash table of PgStat_StatSubWorkerEntry which stores - * statistics of logical replication workers: apply worker and table sync - * worker. + * tables and functions must be last in the struct, because we don't write + * the pointers out to the stats file. 
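+	 *
+	 * (Subscription stats are no longer kept per-database; they live in
+	 * the separate subscriptionStatHash, keyed by subscription OID.)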
*/ HTAB *tables; HTAB *functions; - HTAB *subworkers; } PgStat_StatDBEntry; @@ -989,38 +972,17 @@ typedef struct PgStat_StatReplSlotEntry TimestampTz stat_reset_timestamp; } PgStat_StatReplSlotEntry; -/* The lookup key for subscription worker hash table */ -typedef struct PgStat_StatSubWorkerKey -{ - Oid subid; - - /* - * Oid of the table for which tablesync worker will copy the initial data. - * An InvalidOid will be assigned for apply workers. - */ - Oid subrelid; -} PgStat_StatSubWorkerKey; - /* - * Logical replication apply worker and table sync worker statistics kept in the - * stats collector. + * Subscription statistics kept in the stats collector. */ -typedef struct PgStat_StatSubWorkerEntry +typedef struct PgStat_StatSubEntry { - PgStat_StatSubWorkerKey key; /* hash key (must be first) */ + Oid subid; /* hash key (must be first) */ - /* - * Subscription worker error statistics representing an error that - * occurred during application of changes or the initial table - * synchronization. - */ - Oid last_error_relid; - LogicalRepMsgType last_error_command; - TransactionId last_error_xid; - PgStat_Counter last_error_count; - TimestampTz last_error_time; - char last_error_message[PGSTAT_SUBWORKERERROR_MSGLEN]; -} PgStat_StatSubWorkerEntry; + PgStat_Counter apply_error_count; + PgStat_Counter sync_error_count; + TimestampTz stat_reset_timestamp; +} PgStat_StatSubEntry; /* * Working state needed to accumulate per-function-call timing statistics. @@ -1111,10 +1073,10 @@ extern void pgstat_drop_database(Oid databaseid); extern void pgstat_clear_snapshot(void); extern void pgstat_reset_counters(void); extern void pgstat_reset_shared_counters(const char *); -extern void pgstat_reset_single_counter(Oid objectid, Oid subobjectid, - PgStat_Single_Reset_Type type); +extern void pgstat_reset_single_counter(Oid objectid, PgStat_Single_Reset_Type type); extern void pgstat_reset_slru_counter(const char *); extern void pgstat_reset_replslot_counter(const char *name); +extern void pgstat_reset_subscription_counter(Oid subid); extern void pgstat_report_connect(Oid dboid); extern void pgstat_report_autovac(Oid dboid); @@ -1131,9 +1093,7 @@ extern void pgstat_report_checksum_failure(void); extern void pgstat_report_replslot(const PgStat_StatReplSlotEntry *repSlotStat); extern void pgstat_report_replslot_create(const char *slotname); extern void pgstat_report_replslot_drop(const char *slotname); -extern void pgstat_report_subworker_error(Oid subid, Oid subrelid, Oid relid, - LogicalRepMsgType command, - TransactionId xid, const char *errmsg); +extern void pgstat_report_subscription_error(Oid subid, bool is_apply_error); extern void pgstat_report_subscription_drop(Oid subid); extern void pgstat_initialize(void); @@ -1226,8 +1186,7 @@ extern void pgstat_send_wal(bool force); extern PgStat_StatDBEntry *pgstat_fetch_stat_dbentry(Oid dbid); extern PgStat_StatTabEntry *pgstat_fetch_stat_tabentry(Oid relid); extern PgStat_StatFuncEntry *pgstat_fetch_stat_funcentry(Oid funcid); -extern PgStat_StatSubWorkerEntry *pgstat_fetch_stat_subworker_entry(Oid subid, - Oid subrelid); +extern PgStat_StatSubEntry *pgstat_fetch_stat_subscription(Oid subid); extern PgStat_ArchiverStats *pgstat_fetch_stat_archiver(void); extern PgStat_BgWriterStats *pgstat_fetch_stat_bgwriter(void); extern PgStat_CheckpointerStats *pgstat_fetch_stat_checkpointer(void); diff --git a/src/test/regress/expected/rules.out b/src/test/regress/expected/rules.out index 1420288d67..ac468568a1 100644 --- a/src/test/regress/expected/rules.out +++ 
b/src/test/regress/expected/rules.out @@ -2072,24 +2072,13 @@ pg_stat_subscription| SELECT su.oid AS subid, st.latest_end_time FROM (pg_subscription su LEFT JOIN pg_stat_get_subscription(NULL::oid) st(subid, relid, pid, received_lsn, last_msg_send_time, last_msg_receipt_time, latest_end_lsn, latest_end_time) ON ((st.subid = su.oid))); -pg_stat_subscription_workers| SELECT w.subid, +pg_stat_subscription_stats| SELECT ss.subid, s.subname, - w.subrelid, - w.last_error_relid, - w.last_error_command, - w.last_error_xid, - w.last_error_count, - w.last_error_message, - w.last_error_time - FROM ( SELECT pg_subscription.oid AS subid, - NULL::oid AS relid - FROM pg_subscription - UNION ALL - SELECT pg_subscription_rel.srsubid AS subid, - pg_subscription_rel.srrelid AS relid - FROM pg_subscription_rel) sr, - (LATERAL pg_stat_get_subscription_worker(sr.subid, sr.relid) w(subid, subrelid, last_error_relid, last_error_command, last_error_xid, last_error_count, last_error_message, last_error_time) - JOIN pg_subscription s ON ((w.subid = s.oid))); + ss.apply_error_count, + ss.sync_error_count, + ss.stats_reset + FROM pg_subscription s, + LATERAL pg_stat_get_subscription_stats(s.oid) ss(subid, apply_error_count, sync_error_count, stats_reset); pg_stat_sys_indexes| SELECT pg_stat_all_indexes.relid, pg_stat_all_indexes.indexrelid, pg_stat_all_indexes.schemaname, diff --git a/src/test/subscription/t/026_stats.pl b/src/test/subscription/t/026_stats.pl new file mode 100644 index 0000000000..a42ea3170e --- /dev/null +++ b/src/test/subscription/t/026_stats.pl @@ -0,0 +1,102 @@ + +# Copyright (c) 2021-2022, PostgreSQL Global Development Group + +# Tests for subscription stats. +use strict; +use warnings; +use PostgreSQL::Test::Cluster; +use PostgreSQL::Test::Utils; +use Test::More; + +# Create publisher node. +my $node_publisher = PostgreSQL::Test::Cluster->new('publisher'); +$node_publisher->init(allows_streaming => 'logical'); +$node_publisher->start; + +# Create subscriber node. +my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber'); +$node_subscriber->init(allows_streaming => 'logical'); +$node_subscriber->start; + +# Initial table setup on both publisher and subscriber. On subscriber we +# create the same tables but with primary keys. Also, insert some data that +# will conflict with the data replicated from publisher later. +$node_publisher->safe_psql( + 'postgres', + qq[ +BEGIN; +CREATE TABLE test_tab1 (a int); +INSERT INTO test_tab1 VALUES (1); +COMMIT; +]); +$node_subscriber->safe_psql( + 'postgres', + qq[ +BEGIN; +CREATE TABLE test_tab1 (a int primary key); +INSERT INTO test_tab1 VALUES (1); +COMMIT; +]); + +# Setup publication. +my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres'; +$node_publisher->safe_psql('postgres', + "CREATE PUBLICATION tap_pub FOR TABLE test_tab1;"); + +# There shouldn't be any subscription errors before starting logical replication. +my $result = $node_subscriber->safe_psql('postgres', + "SELECT count(1) FROM pg_stat_subscription_stats"); +is($result, qq(0), 'check no subscription error'); + +# Create subscription. The tablesync for test_tab1 on tap_sub will enter into +# infinite error loop due to violating the unique constraint. +$node_subscriber->safe_psql('postgres', + "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr' PUBLICATION tap_pub;" +); + +$node_publisher->wait_for_catchup('tap_sub'); + +# Wait for the tablesync error to be reported. 
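+# (poll_query_until retries the query until its result is true; the
+# trailing "or die" turns a timeout into a test failure.)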
+$node_subscriber->poll_query_until(
+	'postgres',
+	qq[
+SELECT sync_error_count > 0
+FROM pg_stat_subscription_stats
+WHERE subname = 'tap_sub'
+]) or die "Timed out while waiting for tablesync error";
+
+# Truncate test_tab1 so that tablesync worker can continue.
+$node_subscriber->safe_psql('postgres', "TRUNCATE test_tab1;");
+
+# Wait for initial tablesync for test_tab1 to finish.
+$node_subscriber->poll_query_until(
+	'postgres',
+	qq[
+SELECT count(1) = 1 FROM pg_subscription_rel
+WHERE srrelid = 'test_tab1'::regclass AND srsubstate in ('r', 's')
+]) or die "Timed out while waiting for subscriber to synchronize data";
+
+# Check test_tab1 on the subscriber has one row.
+$result = $node_subscriber->safe_psql('postgres', "SELECT a FROM test_tab1");
+is($result, qq(1), 'check the table has one row');
+
+# Insert data to test_tab1 on the publisher, raising an error on the subscriber
+# due to violation of the unique constraint on test_tab1.
+$node_publisher->safe_psql('postgres', "INSERT INTO test_tab1 VALUES (1)");
+
+# Wait for the apply error to be reported.
+$node_subscriber->poll_query_until(
+	'postgres',
+	qq[
+SELECT apply_error_count > 0
+FROM pg_stat_subscription_stats
+WHERE subname = 'tap_sub'
+]) or die "Timed out while waiting for apply error";
+
+# Truncate test_tab1 so that apply worker can continue.
+$node_subscriber->safe_psql('postgres', "TRUNCATE test_tab1;");
+
+$node_subscriber->stop('fast');
+$node_publisher->stop('fast');
+
+done_testing();
diff --git a/src/test/subscription/t/026_worker_stats.pl b/src/test/subscription/t/026_worker_stats.pl
deleted file mode 100644
index f72e4766e8..0000000000
--- a/src/test/subscription/t/026_worker_stats.pl
+++ /dev/null
@@ -1,165 +0,0 @@
-
-# Copyright (c) 2021-2022, PostgreSQL Global Development Group
-
-# Tests for subscription error stats.
-use strict;
-use warnings;
-use PostgreSQL::Test::Cluster;
-use PostgreSQL::Test::Utils;
-use Test::More;
-
-# Test if the error reported on pg_stat_subscription_workers view is expected.
-sub test_subscription_error
-{
-	my ($node, $relname, $command, $xid, $by_apply_worker, $errmsg_prefix, $msg)
-	  = @_;
-
-	my $check_sql = qq[
-SELECT count(1) > 0
-FROM pg_stat_subscription_workers
-WHERE last_error_relid = '$relname'::regclass
-  AND starts_with(last_error_message, '$errmsg_prefix')];
-
-	# subrelid
-	$check_sql .= $by_apply_worker
-	  ? qq[ AND subrelid IS NULL]
-	  : qq[ AND subrelid = '$relname'::regclass];
-
-	# last_error_command
-	$check_sql .= $command eq ''
-	  ? qq[ AND last_error_command IS NULL]
-	  : qq[ AND last_error_command = '$command'];
-
-	# last_error_xid
-	$check_sql .= $xid eq ''
-	  ? qq[ AND last_error_xid IS NULL]
-	  : qq[ AND last_error_xid = '$xid'::xid];
-
-	# Wait for the particular error statistics to be reported.
-	$node->poll_query_until('postgres', $check_sql,
-) or die "Timed out while waiting for " . $msg;
-}
-
-# Create publisher node.
-my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
-$node_publisher->init(allows_streaming => 'logical');
-$node_publisher->start;
-
-# Create subscriber node.
-my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
-$node_subscriber->init(allows_streaming => 'logical');
-
-# The subscriber will enter an infinite error loop, so we don't want
-# to overflow the server log with error messages.
-$node_subscriber->append_conf('postgresql.conf',
-	qq[
-wal_retrieve_retry_interval = 2s
-]);
-$node_subscriber->start;
-
-# Initial table setup on both publisher and subscriber. 
On subscriber we -# create the same tables but with primary keys. Also, insert some data that -# will conflict with the data replicated from publisher later. -$node_publisher->safe_psql( - 'postgres', - qq[ -BEGIN; -CREATE TABLE test_tab1 (a int); -CREATE TABLE test_tab2 (a int); -INSERT INTO test_tab1 VALUES (1); -INSERT INTO test_tab2 VALUES (1); -COMMIT; -]); -$node_subscriber->safe_psql( - 'postgres', - qq[ -BEGIN; -CREATE TABLE test_tab1 (a int primary key); -CREATE TABLE test_tab2 (a int primary key); -INSERT INTO test_tab2 VALUES (1); -COMMIT; -]); - -# Setup publications. -my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres'; -$node_publisher->safe_psql( - 'postgres', - "CREATE PUBLICATION tap_pub FOR TABLE test_tab1, test_tab2;"); - -# There shouldn't be any subscription errors before starting logical replication. -my $result = $node_subscriber->safe_psql( - 'postgres', - "SELECT count(1) FROM pg_stat_subscription_workers"); -is($result, qq(0), 'check no subscription error'); - -# Create subscription. The table sync for test_tab2 on tap_sub will enter into -# infinite error loop due to violating the unique constraint. -$node_subscriber->safe_psql( - 'postgres', - "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr' PUBLICATION tap_pub;"); - -$node_publisher->wait_for_catchup('tap_sub'); - -# Wait for initial table sync for test_tab1 to finish. -$node_subscriber->poll_query_until( - 'postgres', - qq[ -SELECT count(1) = 1 FROM pg_subscription_rel -WHERE srrelid = 'test_tab1'::regclass AND srsubstate in ('r', 's') -]) or die "Timed out while waiting for subscriber to synchronize data"; - -# Check the initial data. -$result = $node_subscriber->safe_psql( - 'postgres', - "SELECT count(a) FROM test_tab1"); -is($result, q(1), 'check initial data are copied to subscriber'); - -# Insert more data to test_tab1, raising an error on the subscriber due to -# violation of the unique constraint on test_tab1. -my $xid = $node_publisher->safe_psql( - 'postgres', - qq[ -BEGIN; -INSERT INTO test_tab1 VALUES (1); -SELECT pg_current_xact_id()::xid; -COMMIT; -]); -test_subscription_error($node_subscriber, 'test_tab1', 'INSERT', $xid, - 1, # check apply worker error - qq(duplicate key value violates unique constraint), - 'error reported by the apply worker'); - -# Check the table sync worker's error in the view. -test_subscription_error($node_subscriber, 'test_tab2', '', '', - 0, # check tablesync worker error - qq(duplicate key value violates unique constraint), - 'the error reported by the table sync worker'); - -# Test for resetting subscription worker statistics. -# Truncate test_tab1 and test_tab2 so that applying changes and table sync can -# continue, respectively. -$node_subscriber->safe_psql( - 'postgres', - "TRUNCATE test_tab1, test_tab2;"); - -# Wait for the data to be replicated. -$node_subscriber->poll_query_until( - 'postgres', - "SELECT count(1) > 0 FROM test_tab1"); -$node_subscriber->poll_query_until( - 'postgres', - "SELECT count(1) > 0 FROM test_tab2"); - -# There shouldn't be any errors in the view after dropping the subscription. 
-$node_subscriber->safe_psql( - 'postgres', - "DROP SUBSCRIPTION tap_sub;"); -$result = $node_subscriber->safe_psql( - 'postgres', - "SELECT count(1) FROM pg_stat_subscription_workers"); -is($result, q(0), 'no error after dropping subscription'); - -$node_subscriber->stop('fast'); -$node_publisher->stop('fast'); - -done_testing(); diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list index c6b302c7b2..d9b83f744f 100644 --- a/src/tools/pgindent/typedefs.list +++ b/src/tools/pgindent/typedefs.list @@ -1943,9 +1943,10 @@ PgStat_MsgResetreplslotcounter PgStat_MsgResetsharedcounter PgStat_MsgResetsinglecounter PgStat_MsgResetslrucounter +PgStat_MsgResetsubcounter PgStat_MsgSLRU -PgStat_MsgSubscriptionPurge -PgStat_MsgSubWorkerError +PgStat_MsgSubscriptionDrop +PgStat_MsgSubscriptionError PgStat_MsgTabpurge PgStat_MsgTabstat PgStat_MsgTempFile @@ -1957,8 +1958,7 @@ PgStat_Single_Reset_Type PgStat_StatDBEntry PgStat_StatFuncEntry PgStat_StatReplSlotEntry -PgStat_StatSubWorkerEntry -PgStat_StatSubWorkerKey +PgStat_StatSubEntry PgStat_StatTabEntry PgStat_SubXactStatus PgStat_TableCounts From a33e17f210547226ada52d2b8af851c3553bb4fa Mon Sep 17 00:00:00 2001 From: Michael Paquier Date: Tue, 1 Mar 2022 12:52:25 +0900 Subject: [PATCH 072/108] Rework internal command generation of pg_rewind pg_rewind generates and executes internally up to two commands to work on the target cluster, depending on the options given by its caller: - postgres -C to retrieve the value of restore_command, when using -c/--restore-target-wal. - postgres --single to enforce recovery once and get the target cluster in a clean shutdown state. Both commands have been applying incorrect quoting rules, which could lead to failures when for example using a target data directory with unexpected characters like CRLFs. Those commands are now generated with PQExpBuffer, making use of string_utils.h to quote those commands as they should. We may extend those commands in the future with more options, so this makes any upcoming additions easier. This is arguably a bug fix, but nobody has complained about the existing code being a problem either, so no backpatch is done. Extracted from a larger patch by the same author. Author: Gunnar "Nick" Bluth Discussion: https://postgr.es/m/7c59265d-ac50-b0aa-ca1e-65e8bd27642a@pro-open.de --- src/bin/pg_rewind/pg_rewind.c | 43 +++++++++++++++++++++++++++-------- 1 file changed, 33 insertions(+), 10 deletions(-) diff --git a/src/bin/pg_rewind/pg_rewind.c b/src/bin/pg_rewind/pg_rewind.c index efb82a4034..b39b5c1aac 100644 --- a/src/bin/pg_rewind/pg_rewind.c +++ b/src/bin/pg_rewind/pg_rewind.c @@ -23,6 +23,7 @@ #include "common/restricted_token.h" #include "common/string.h" #include "fe_utils/recovery_gen.h" +#include "fe_utils/string_utils.h" #include "file_ops.h" #include "filemap.h" #include "getopt_long.h" @@ -1016,8 +1017,8 @@ getRestoreCommand(const char *argv0) { int rc; char postgres_exec_path[MAXPGPATH], - postgres_cmd[MAXPGPATH], cmd_output[MAXPGPATH]; + PQExpBuffer postgres_cmd; if (!restore_wal) return; @@ -1051,11 +1052,19 @@ getRestoreCommand(const char *argv0) * Build a command able to retrieve the value of GUC parameter * restore_command, if set. 
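+	 *
+	 * With the quoting below, a data directory containing spaces comes out
+	 * roughly as (illustrative paths only):
+	 *     /usr/local/pgsql/bin/postgres -D '/path/with spaces' -C restore_command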
*/ - snprintf(postgres_cmd, sizeof(postgres_cmd), - "\"%s\" -D \"%s\" -C restore_command", - postgres_exec_path, datadir_target); + postgres_cmd = createPQExpBuffer(); - if (!pipe_read_line(postgres_cmd, cmd_output, sizeof(cmd_output))) + /* path to postgres, properly quoted */ + appendShellString(postgres_cmd, postgres_exec_path); + + /* add -D switch, with properly quoted data directory */ + appendPQExpBufferStr(postgres_cmd, " -D "); + appendShellString(postgres_cmd, datadir_target); + + /* add -C switch, for restore_command */ + appendPQExpBufferStr(postgres_cmd, " -C restore_command"); + + if (!pipe_read_line(postgres_cmd->data, cmd_output, sizeof(cmd_output))) exit(1); (void) pg_strip_crlf(cmd_output); @@ -1067,6 +1076,8 @@ getRestoreCommand(const char *argv0) pg_log_debug("using for rewind restore_command = \'%s\'", restore_command); + + destroyPQExpBuffer(postgres_cmd); } @@ -1080,7 +1091,7 @@ ensureCleanShutdown(const char *argv0) int ret; #define MAXCMDLEN (2 * MAXPGPATH) char exec_path[MAXPGPATH]; - char cmd[MAXCMDLEN]; + PQExpBuffer postgres_cmd; /* locate postgres binary */ if ((ret = find_other_exec(argv0, "postgres", @@ -1119,14 +1130,26 @@ ensureCleanShutdown(const char *argv0) * fsync here. This makes the recovery faster, and the target data folder * is synced at the end anyway. */ - snprintf(cmd, MAXCMDLEN, "\"%s\" --single -F -D \"%s\" template1 < \"%s\"", - exec_path, datadir_target, DEVNULL); + postgres_cmd = createPQExpBuffer(); - if (system(cmd) != 0) + /* path to postgres, properly quoted */ + appendShellString(postgres_cmd, exec_path); + + /* add set of options with properly quoted data directory */ + appendPQExpBufferStr(postgres_cmd, " --single -F -D "); + appendShellString(postgres_cmd, datadir_target); + + /* finish with the database name, and a properly quoted redirection */ + appendPQExpBufferStr(postgres_cmd, " template1 < "); + appendShellString(postgres_cmd, DEVNULL); + + if (system(postgres_cmd->data) != 0) { pg_log_error("postgres single-user mode in target cluster failed"); - pg_fatal("Command was: %s", cmd); + pg_fatal("Command was: %s", postgres_cmd->data); } + + destroyPQExpBuffer(postgres_cmd); } static void From 9028cce426ba6e08ee5ef8fcaedb2445e6c08c75 Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Tue, 1 Mar 2022 11:21:20 +0100 Subject: [PATCH 073/108] psql: Additional tests Add a few TAP tests for things that happen while a user query is being sent: - \timing - client encoding handling - notifications Discussion: https://www.postgresql.org/message-id/3199e176-424e-1bef-f180-c1548466c2da@enterprisedb.com --- src/bin/psql/t/001_basic.pl | 37 ++++++++++++++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/src/bin/psql/t/001_basic.pl b/src/bin/psql/t/001_basic.pl index f416e0ab5e..44ecd05add 100644 --- a/src/bin/psql/t/001_basic.pl +++ b/src/bin/psql/t/001_basic.pl @@ -60,7 +60,7 @@ sub psql_fails_like } my $node = PostgreSQL::Test::Cluster->new('main'); -$node->init; +$node->init(extra => [ '--locale=C', '--encoding=UTF8' ]); $node->append_conf( 'postgresql.conf', q{ wal_level = 'logical' @@ -80,4 +80,39 @@ sub psql_fails_like qr/unexpected PQresultStatus: 8$/, 'handling of unexpected PQresultStatus'); +# test \timing +psql_like( + $node, + '\timing on +SELECT 1', + qr/^1$ +^Time: \d+.\d\d\d ms/m, + '\timing'); + +# test that ENCODING variable is set and that it is updated when +# client encoding is changed +psql_like( + $node, + '\echo :ENCODING +set client_encoding = LATIN1; +\echo :ENCODING', + qr/^UTF8$ 
+^LATIN1$/m, + 'ENCODING variable is set and updated'); + +# test LISTEN/NOTIFY +psql_like( + $node, + 'LISTEN foo; +NOTIFY foo;', + qr/^Asynchronous notification "foo" received from server process with PID \d+\.$/, + 'notification'); + +psql_like( + $node, + "LISTEN foo; +NOTIFY foo, 'bar';", + qr/^Asynchronous notification "foo" with payload "bar" received from server process with PID \d+\.$/, + 'notification with payload'); + done_testing(); From dc57366c583685c4b2901f2ba69943f596b974ec Mon Sep 17 00:00:00 2001 From: Michael Paquier Date: Wed, 2 Mar 2022 07:37:07 +0900 Subject: [PATCH 074/108] Fix check for PGHOST[ADDR] in pg_upgrade with Windows and temporary paths The checks currently done at the startup of pg_upgrade on PGHOST and PGHOSTADDR to avoid any attempts to access to an external cluster fail setting those parameters to Windows paths or even temporary paths prefixed by an '@', as it only considers as a valid path strings beginning with a slash. As mentioned by Andres, is_unixsock_path() is designed to detect such cases, so, like any other code paths dealing with the same problem (psql and libpq), use it rather than assuming that all valid paths are prefixed with just a slash. This issue has been found while testing the TAP tests of pg_upgrade through the CI on Windows. This is a bug, but nobody has complained about it since pg_upgrade exists so no backpatch is done, at least for now. Analyzed-by: Andres Freund, Michael Paquier Discussion: https://postgr.es/m/YeYj4DU5qY/rtKXT@paquier.xyz --- src/bin/pg_upgrade/server.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/bin/pg_upgrade/server.c b/src/bin/pg_upgrade/server.c index 7878d233de..265137e86b 100644 --- a/src/bin/pg_upgrade/server.c +++ b/src/bin/pg_upgrade/server.c @@ -11,6 +11,7 @@ #include "common/connect.h" #include "fe_utils/string_utils.h" +#include "libpq/pqcomm.h" #include "pg_upgrade.h" static PGconn *get_db_conn(ClusterInfo *cluster, const char *db_name); @@ -368,7 +369,7 @@ check_pghost_envvar(void) if (value && strlen(value) > 0 && /* check for 'local' host values */ (strcmp(value, "localhost") != 0 && strcmp(value, "127.0.0.1") != 0 && - strcmp(value, "::1") != 0 && value[0] != '/')) + strcmp(value, "::1") != 0 && !is_unixsock_path(value))) pg_fatal("libpq environment variable %s has a non-local server value: %s\n", option->envvar, value); } From 506035b0b8323126823849483cee833e1de75330 Mon Sep 17 00:00:00 2001 From: Tatsuo Ishii Date: Wed, 2 Mar 2022 08:28:12 +0900 Subject: [PATCH 075/108] Fix typo in pgbench messages. 
Author: KAWAMOTO Masaya Reviewed-by: Fabien COELHO Discussion: https://postgr.es/m/20220224115622.41e671e3449ebd8c270e9103%40sraoss.co.jp --- src/bin/pgbench/pgbench.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c index f166a77e3a..4ebe5e6ea4 100644 --- a/src/bin/pgbench/pgbench.c +++ b/src/bin/pgbench/pgbench.c @@ -5598,11 +5598,11 @@ printResults(StatsData *total, return; if (throttle_delay && latency_limit) - printf("number of transactions skipped: " INT64_FORMAT " (%.3f %%)\n", + printf("number of transactions skipped: " INT64_FORMAT " (%.3f%%)\n", total->skipped, 100.0 * total->skipped / total->cnt); if (latency_limit) - printf("number of transactions above the %.1f ms latency limit: " INT64_FORMAT "/" INT64_FORMAT " (%.3f %%)\n", + printf("number of transactions above the %.1f ms latency limit: " INT64_FORMAT "/" INT64_FORMAT " (%.3f%%)\n", latency_limit / 1000.0, latency_late, ntx, (ntx > 0) ? 100.0 * latency_late / ntx : 0.0); From e58791c6ad317fddcb7f54d19f6a8a4c43fecf7b Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Wed, 2 Mar 2022 10:30:41 +0100 Subject: [PATCH 076/108] Add id's to various elements in protocol.sgml For easier direct linking. Author: Brar Piening Discussion: https://www.postgresql.org/message-id/flat/dbad4f77-4dce-1b05-2b65-831acb5d5b66@gmx.de --- doc/src/sgml/logicaldecoding.sgml | 2 +- doc/src/sgml/protocol.sgml | 172 +++++++++++++++--------------- 2 files changed, 87 insertions(+), 87 deletions(-) diff --git a/doc/src/sgml/logicaldecoding.sgml b/doc/src/sgml/logicaldecoding.sgml index 5ebf23e963..3d7d52a84d 100644 --- a/doc/src/sgml/logicaldecoding.sgml +++ b/doc/src/sgml/logicaldecoding.sgml @@ -342,7 +342,7 @@ postgres=# select * from pg_logical_slot_get_changes('regression_slot', NULL, NU Exported Snapshots When a new replication slot is created using the streaming replication - interface (see ), a + interface (see ), a snapshot is exported (see ), which will show exactly the state of the database after which all changes will be diff --git a/doc/src/sgml/protocol.sgml b/doc/src/sgml/protocol.sgml index 1c5ab00879..c51c4254a7 100644 --- a/doc/src/sgml/protocol.sgml +++ b/doc/src/sgml/protocol.sgml @@ -1810,7 +1810,7 @@ Replication commands are logged in the server log when The commands accepted in replication mode are: - + IDENTIFY_SYSTEM IDENTIFY_SYSTEM @@ -1875,7 +1875,7 @@ The commands accepted in replication mode are: - + SHOW name SHOW @@ -1899,7 +1899,7 @@ The commands accepted in replication mode are: - + TIMELINE_HISTORY tli TIMELINE_HISTORY @@ -1941,7 +1941,7 @@ The commands accepted in replication mode are: - + CREATE_REPLICATION_SLOT slot_name [ TEMPORARY ] { PHYSICAL | LOGICAL } [ ( option [, ...] 
) ] CREATE_REPLICATION_SLOT @@ -2084,7 +2084,7 @@ The commands accepted in replication mode are: - + CREATE_REPLICATION_SLOT slot_name [ TEMPORARY ] { PHYSICAL [ RESERVE_WAL ] | LOGICAL output_plugin [ EXPORT_SNAPSHOT | NOEXPORT_SNAPSHOT | USE_SNAPSHOT | TWO_PHASE ] } @@ -2095,7 +2095,7 @@ The commands accepted in replication mode are: - + READ_REPLICATION_SLOT slot_name READ_REPLICATION_SLOT @@ -2143,7 +2143,7 @@ The commands accepted in replication mode are: - + START_REPLICATION [ SLOT slot_name ] [ PHYSICAL ] XXX/XXX [ TIMELINE tli ] START_REPLICATION @@ -2201,7 +2201,7 @@ The commands accepted in replication mode are: - + XLogData (B) @@ -2270,7 +2270,7 @@ The commands accepted in replication mode are: - + Primary keepalive message (B) @@ -2334,7 +2334,7 @@ The commands accepted in replication mode are: - + Standby status update (F) @@ -2415,7 +2415,7 @@ The commands accepted in replication mode are: - + Hot Standby feedback message (F) @@ -2497,7 +2497,7 @@ The commands accepted in replication mode are: - + START_REPLICATION SLOT slot_name LOGICAL XXX/XXX [ ( option_name [ option_value ] [, ...] ) ] @@ -2572,7 +2572,7 @@ The commands accepted in replication mode are: - + DROP_REPLICATION_SLOT slot_name WAIT DROP_REPLICATION_SLOT @@ -3266,7 +3266,7 @@ of any individual CopyData message cannot be interpretable on their own.) - + AuthenticationOk (B) @@ -3311,7 +3311,7 @@ AuthenticationOk (B) - + AuthenticationKerberosV5 (B) @@ -3355,7 +3355,7 @@ AuthenticationKerberosV5 (B) - + AuthenticationCleartextPassword (B) @@ -3399,7 +3399,7 @@ AuthenticationCleartextPassword (B) - + AuthenticationMD5Password (B) @@ -3454,7 +3454,7 @@ AuthenticationMD5Password (B) - + AuthenticationSCMCredential (B) @@ -3499,7 +3499,7 @@ AuthenticationSCMCredential (B) - + AuthenticationGSS (B) @@ -3544,7 +3544,7 @@ AuthenticationGSS (B) - + AuthenticationGSSContinue (B) @@ -3599,7 +3599,7 @@ AuthenticationGSSContinue (B) - + AuthenticationSSPI (B) @@ -3644,7 +3644,7 @@ AuthenticationSSPI (B) - + AuthenticationSASL (B) @@ -3705,7 +3705,7 @@ following: - + AuthenticationSASLContinue (B) @@ -3760,7 +3760,7 @@ AuthenticationSASLContinue (B) - + AuthenticationSASLFinal (B) @@ -3816,7 +3816,7 @@ AuthenticationSASLFinal (B) - + BackendKeyData (B) @@ -3873,7 +3873,7 @@ BackendKeyData (B) - + Bind (F) @@ -4026,7 +4026,7 @@ Bind (F) - + BindComplete (B) @@ -4061,7 +4061,7 @@ BindComplete (B) - + CancelRequest (F) @@ -4119,7 +4119,7 @@ CancelRequest (F) - + Close (F) @@ -4176,7 +4176,7 @@ Close (F) - + CloseComplete (B) @@ -4211,7 +4211,7 @@ CloseComplete (B) - + CommandComplete (B) @@ -4310,7 +4310,7 @@ CommandComplete (B) - + CopyData (F & B) @@ -4356,7 +4356,7 @@ CopyData (F & B) - + CopyDone (F & B) @@ -4391,7 +4391,7 @@ CopyDone (F & B) - + CopyFail (F) @@ -4436,7 +4436,7 @@ CopyFail (F) - + CopyInResponse (B) @@ -4512,7 +4512,7 @@ CopyInResponse (B) - + CopyOutResponse (B) @@ -4585,7 +4585,7 @@ CopyOutResponse (B) - + CopyBothResponse (B) @@ -4658,7 +4658,7 @@ CopyBothResponse (B) - + DataRow (B) @@ -4730,7 +4730,7 @@ DataRow (B) - + Describe (F) @@ -4787,7 +4787,7 @@ Describe (F) - + EmptyQueryResponse (B) @@ -4823,7 +4823,7 @@ EmptyQueryResponse (B) - + ErrorResponse (B) @@ -4889,7 +4889,7 @@ ErrorResponse (B) - + Execute (F) @@ -4946,7 +4946,7 @@ Execute (F) - + Flush (F) @@ -4981,7 +4981,7 @@ Flush (F) - + FunctionCall (F) @@ -5106,7 +5106,7 @@ FunctionCall (F) - + FunctionCallResponse (B) @@ -5166,7 +5166,7 @@ FunctionCallResponse (B) - + GSSENCRequest (F) @@ -5204,7 +5204,7 @@ GSSENCRequest (F) - + GSSResponse 
(F) @@ -5249,7 +5249,7 @@ GSSResponse (F) - + NegotiateProtocolVersion (B) @@ -5318,7 +5318,7 @@ NegotiateProtocolVersion (B) - + NoData (B) @@ -5353,7 +5353,7 @@ NoData (B) - + NoticeResponse (B) @@ -5419,7 +5419,7 @@ NoticeResponse (B) - + NotificationResponse (B) @@ -5484,7 +5484,7 @@ NotificationResponse (B) - + ParameterDescription (B) @@ -5542,7 +5542,7 @@ ParameterDescription (B) - + ParameterStatus (B) @@ -5596,7 +5596,7 @@ ParameterStatus (B) - + Parse (F) @@ -5680,7 +5680,7 @@ Parse (F) - + ParseComplete (B) @@ -5715,7 +5715,7 @@ ParseComplete (B) - + PasswordMessage (F) @@ -5761,7 +5761,7 @@ PasswordMessage (F) - + PortalSuspended (B) @@ -5798,7 +5798,7 @@ PortalSuspended (B) - + Query (F) @@ -5843,7 +5843,7 @@ Query (F) - + ReadyForQuery (B) @@ -5893,7 +5893,7 @@ ReadyForQuery (B) - + RowDescription (B) @@ -6018,7 +6018,7 @@ RowDescription (B) - + SASLInitialResponse (F) @@ -6086,7 +6086,7 @@ SASLInitialResponse (F) - + SASLResponse (F) @@ -6132,7 +6132,7 @@ SASLResponse (F) - + SSLRequest (F) @@ -6170,7 +6170,7 @@ SSLRequest (F) - + StartupMessage (F) @@ -6299,7 +6299,7 @@ StartupMessage (F) - + Sync (F) @@ -6334,7 +6334,7 @@ Sync (F) - + Terminate (F) @@ -6665,7 +6665,7 @@ flow as physical replication. - + Begin @@ -6720,7 +6720,7 @@ Begin - + Message @@ -6808,7 +6808,7 @@ Message - + Commit @@ -6873,7 +6873,7 @@ Commit - + Origin @@ -6922,7 +6922,7 @@ Origin - + Relation @@ -7054,7 +7054,7 @@ Relation - + Type @@ -7119,7 +7119,7 @@ Type - + Insert @@ -7186,7 +7186,7 @@ Insert - + Update @@ -7300,7 +7300,7 @@ Update - + Delete @@ -7389,7 +7389,7 @@ Delete - + Truncate @@ -7467,7 +7467,7 @@ Stream Abort) are available since protocol version 2. - + Stream Start @@ -7512,7 +7512,7 @@ Stream Start - + Stream Stop @@ -7536,7 +7536,7 @@ Stream Stop - + Stream Commit @@ -7611,7 +7611,7 @@ Stream Commit - + Stream Abort @@ -7665,7 +7665,7 @@ are available since protocol version 3. - + Begin Prepare @@ -7730,7 +7730,7 @@ are available since protocol version 3. - + Prepare @@ -7804,7 +7804,7 @@ are available since protocol version 3. - + Commit Prepared @@ -7878,7 +7878,7 @@ are available since protocol version 3. - + Rollback Prepared @@ -7962,7 +7962,7 @@ are available since protocol version 3. - + Stream Prepare @@ -8046,7 +8046,7 @@ The following message parts are shared by the above messages. - + TupleData From 50f03473ed8132a43bf5c10764fb5b9eda71ac16 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Wed, 2 Mar 2022 11:29:11 -0500 Subject: [PATCH 077/108] Doc: update libpq.sgml for root-owned SSL private keys. My oversight in a59c79564. Discussion: https://postgr.es/m/f4b7bc55-97ac-9e69-7398-335e212f7743@pgmasters.net --- doc/src/sgml/libpq.sgml | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/doc/src/sgml/libpq.sgml b/doc/src/sgml/libpq.sgml index 64e17401cd..3998b1781b 100644 --- a/doc/src/sgml/libpq.sgml +++ b/doc/src/sgml/libpq.sgml @@ -8397,23 +8397,35 @@ ldap://ldap.acme.com/cn=dbserver,cn=hosts?pgconnectinfo?base?(objectclass=*) If the server attempts to verify the identity of the client by requesting the client's leaf certificate, - libpq will send the certificates stored in + libpq will send the certificate(s) stored in file ~/.postgresql/postgresql.crt in the user's home directory. The certificates must chain to the root certificate trusted by the server. A matching private key file ~/.postgresql/postgresql.key must also - be present. 
The private - key file must not allow any access to world or group; achieve this by the - command chmod 0600 ~/.postgresql/postgresql.key. + be present. On Microsoft Windows these files are named %APPDATA%\postgresql\postgresql.crt and - %APPDATA%\postgresql\postgresql.key, and there - is no special permissions check since the directory is presumed secure. + %APPDATA%\postgresql\postgresql.key. The location of the certificate and key files can be overridden by the - connection parameters sslcert and sslkey or the + connection parameters sslcert + and sslkey, or by the environment variables PGSSLCERT and PGSSLKEY. + + On Unix systems, the permissions on the private key file must disallow + any access to world or group; achieve this by a command such as + chmod 0600 ~/.postgresql/postgresql.key. + Alternatively, the file can be owned by root and have group read access + (that is, 0640 permissions). That setup is intended + for installations where certificate and key files are managed by the + operating system. The user of libpq should + then be made a member of the group that has access to those certificate + and key files. (On Microsoft Windows, there is no file permissions + check, since the %APPDATA%\postgresql directory is + presumed secure.) + + The first certificate in postgresql.crt must be the client's certificate because it must match the client's private key. From 62ce0c758d5d66092efbca7d037233e2ca9bdc78 Mon Sep 17 00:00:00 2001 From: Michael Paquier Date: Thu, 3 Mar 2022 10:51:57 +0900 Subject: [PATCH 078/108] Fix catalog data of pg_stop_backup(), labelled v2 This function has been incorrectly marked as a set-returning function with prorows (estimated number of rows) set to 1 since its creation in 7117685, that introduced non-exclusive backups. There is no need for that as the function is designed to return only one tuple. This commit fixes the catalog definition of pg_stop_backup_v2() so as it is not marked as proretset anymore, with prorows set to 0. This simplifies its internals by removing one tuplestore (used for one single record anyway) and by removing all the checks related to a set-returning function. Issue found during my quest to simplify some of the logic used in in-core system functions. Bump catalog version. 
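For illustration, a hedged sketch of the pattern the function now follows
(the function name demo_single_row, its module boilerplate, and its two
output columns are invented for the example, not part of this commit): a
non-SRF function builds one composite result with heap_form_tuple() and
returns it directly, with no tuplestore or ReturnSetInfo involved.

    #include "postgres.h"

    #include "access/htup_details.h"
    #include "funcapi.h"
    #include "utils/builtins.h"

    PG_MODULE_MAGIC;

    PG_FUNCTION_INFO_V1(demo_single_row);

    /* Assumes a SQL declaration with OUT columns (n int4, label text). */
    Datum
    demo_single_row(PG_FUNCTION_ARGS)
    {
        TupleDesc   tupdesc;
        Datum       values[2];
        bool        nulls[2] = {false, false};

        /* Initialize attributes information in the tuple descriptor */
        if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
            elog(ERROR, "return type must be a row type");

        values[0] = Int32GetDatum(42);
        values[1] = CStringGetTextDatum("done");

        /* Form the single tuple and return it as a Datum */
        PG_RETURN_DATUM(HeapTupleGetDatum(heap_form_tuple(tupdesc, values, nulls)));
    }
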
Reviewed-by: Aleksander Alekseev, Kyotaro Horiguchi Discussion: https://postgr.es/m/Yh8guT78f1Ercfzw@paquier.xyz --- src/backend/access/transam/xlogfuncs.c | 36 ++++-------------------- src/backend/catalog/system_functions.sql | 2 +- src/include/catalog/catversion.h | 2 +- src/include/catalog/pg_proc.dat | 6 ++-- 4 files changed, 11 insertions(+), 35 deletions(-) diff --git a/src/backend/access/transam/xlogfuncs.c b/src/backend/access/transam/xlogfuncs.c index 12e2bf4135..2752be63c1 100644 --- a/src/backend/access/transam/xlogfuncs.c +++ b/src/backend/access/transam/xlogfuncs.c @@ -165,43 +165,20 @@ pg_stop_backup(PG_FUNCTION_ARGS) Datum pg_stop_backup_v2(PG_FUNCTION_ARGS) { - ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; +#define PG_STOP_BACKUP_V2_COLS 3 TupleDesc tupdesc; - Tuplestorestate *tupstore; - MemoryContext per_query_ctx; - MemoryContext oldcontext; - Datum values[3]; - bool nulls[3]; + Datum values[PG_STOP_BACKUP_V2_COLS]; + bool nulls[PG_STOP_BACKUP_V2_COLS]; bool exclusive = PG_GETARG_BOOL(0); bool waitforarchive = PG_GETARG_BOOL(1); XLogRecPtr stoppoint; SessionBackupState status = get_backup_status(); - /* check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - - /* Build a tuple descriptor for our result type */ + /* Initialize attributes information in the tuple descriptor */ if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) elog(ERROR, "return type must be a row type"); - per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; - oldcontext = MemoryContextSwitchTo(per_query_ctx); - - tupstore = tuplestore_begin_heap(true, false, work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - - MemoryContextSwitchTo(oldcontext); - MemSet(values, 0, sizeof(values)); MemSet(nulls, 0, sizeof(nulls)); @@ -251,9 +228,8 @@ pg_stop_backup_v2(PG_FUNCTION_ARGS) /* Stoppoint is included on both exclusive and nonexclusive backups */ values[0] = LSNGetDatum(stoppoint); - tuplestore_putvalues(tupstore, tupdesc, values, nulls); - - return (Datum) 0; + /* Returns the record as Datum */ + PG_RETURN_DATUM(HeapTupleGetDatum(heap_form_tuple(tupdesc, values, nulls))); } /* diff --git a/src/backend/catalog/system_functions.sql b/src/backend/catalog/system_functions.sql index 758ab6e25a..81bac6f581 100644 --- a/src/backend/catalog/system_functions.sql +++ b/src/backend/catalog/system_functions.sql @@ -384,7 +384,7 @@ CREATE OR REPLACE FUNCTION CREATE OR REPLACE FUNCTION pg_stop_backup ( exclusive boolean, wait_for_archive boolean DEFAULT true, OUT lsn pg_lsn, OUT labelfile text, OUT spcmapfile text) - RETURNS SETOF record STRICT VOLATILE LANGUAGE internal as 'pg_stop_backup_v2' + RETURNS record STRICT VOLATILE LANGUAGE internal as 'pg_stop_backup_v2' PARALLEL RESTRICTED; CREATE OR REPLACE FUNCTION diff --git a/src/include/catalog/catversion.h b/src/include/catalog/catversion.h index 5cf18059b8..695990959e 100644 --- a/src/include/catalog/catversion.h +++ b/src/include/catalog/catversion.h @@ -53,6 +53,6 @@ */ /* yyyymmddN */ -#define CATALOG_VERSION_NO 202203011 +#define CATALOG_VERSION_NO 202203031 #endif diff --git 
a/src/include/catalog/pg_proc.dat b/src/include/catalog/pg_proc.dat index bf88858171..d8e8715ed1 100644 --- a/src/include/catalog/pg_proc.dat +++ b/src/include/catalog/pg_proc.dat @@ -6275,9 +6275,9 @@ proname => 'pg_stop_backup', provolatile => 'v', proparallel => 'r', prorettype => 'pg_lsn', proargtypes => '', prosrc => 'pg_stop_backup' }, { oid => '2739', descr => 'finish taking an online backup', - proname => 'pg_stop_backup', prorows => '1', proretset => 't', - provolatile => 'v', proparallel => 'r', prorettype => 'record', - proargtypes => 'bool bool', proallargtypes => '{bool,bool,pg_lsn,text,text}', + proname => 'pg_stop_backup', provolatile => 'v', proparallel => 'r', + prorettype => 'record', proargtypes => 'bool bool', + proallargtypes => '{bool,bool,pg_lsn,text,text}', proargmodes => '{i,i,o,o,o}', proargnames => '{exclusive,wait_for_archive,lsn,labelfile,spcmapfile}', prosrc => 'pg_stop_backup_v2' }, From 46ab07ffda9d6c8e63360ded2d4568aa160a7700 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Thu, 3 Mar 2022 18:13:24 -0500 Subject: [PATCH 079/108] Clean up assorted failures under clang's -fsanitize=undefined checks. Most of these are cases where we could call memcpy() or other libc functions with a NULL pointer and a zero count, which is forbidden by POSIX even though every production version of libc allows it. We've fixed such things before in a piecemeal way, but apparently never made an effort to try to get them all. I don't claim that this patch does so either, but it gets every failure I observe in check-world, using clang 12.0.1 on current RHEL8. numeric.c has a different issue that the sanitizer doesn't like: "ln(-1.0)" will compute log10(0) and then try to assign the resulting -Inf to an integer variable. We don't actually use the result in such a case, so there's no live bug. Back-patch to all supported branches, with the idea that we might start running a buildfarm member that tests this case. This includes back-patching c1132aae3 (Check the size in COPY_POINTER_FIELD), which previously silenced some of these issues in copyfuncs.c. 
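The recurring idiom is simply to guard the call. A hedged, self-contained
illustration (function and variable names invented for the example): passing
a NULL source to memcpy() is undefined even when the count is zero, so the
call is skipped entirely for empty inputs.

    #include <string.h>

    static void
    copy_xids_demo(unsigned int *dst, const unsigned int *src, size_t nxids)
    {
        /* src may legitimately be NULL when nxids == 0; skip the call then */
        if (nxids > 0)
            memcpy(dst, src, nxids * sizeof(unsigned int));
    }
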
Discussion: https://postgr.es/m/CALNJ-vT9r0DSsAOw9OXVJFxLENoVS_68kJ5x0p44atoYH+H4dg@mail.gmail.com --- contrib/pgcrypto/px.c | 2 +- src/backend/access/heap/heapam.c | 2 +- src/backend/access/heap/heapam_visibility.c | 4 ++-- src/backend/access/transam/clog.c | 5 +++-- src/backend/access/transam/xact.c | 5 +++-- src/backend/storage/ipc/shm_mq.c | 7 +++++-- src/backend/utils/adt/numeric.c | 8 ++++++++ src/backend/utils/time/snapmgr.c | 10 ++++++---- src/fe_utils/print.c | 3 ++- 9 files changed, 31 insertions(+), 15 deletions(-) diff --git a/contrib/pgcrypto/px.c b/contrib/pgcrypto/px.c index 75e2426e9f..0010addaf7 100644 --- a/contrib/pgcrypto/px.c +++ b/contrib/pgcrypto/px.c @@ -198,7 +198,7 @@ combo_init(PX_Combo *cx, const uint8 *key, unsigned klen, ivbuf = palloc0(ivs); if (ivlen > ivs) memcpy(ivbuf, iv, ivs); - else + else if (ivlen > 0) memcpy(ivbuf, iv, ivlen); } diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 59d43e2ba9..4e6aeba315 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -328,7 +328,7 @@ initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock) /* * copy the scan key, if appropriate */ - if (key != NULL) + if (key != NULL && scan->rs_base.rs_nkeys > 0) memcpy(scan->rs_base.rs_key, key, scan->rs_base.rs_nkeys * sizeof(ScanKeyData)); /* diff --git a/src/backend/access/heap/heapam_visibility.c b/src/backend/access/heap/heapam_visibility.c index ceadac70d5..ff0b8a688d 100644 --- a/src/backend/access/heap/heapam_visibility.c +++ b/src/backend/access/heap/heapam_visibility.c @@ -1564,8 +1564,8 @@ HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple) static bool TransactionIdInArray(TransactionId xid, TransactionId *xip, Size num) { - return bsearch(&xid, xip, num, - sizeof(TransactionId), xidComparator) != NULL; + return num > 0 && + bsearch(&xid, xip, num, sizeof(TransactionId), xidComparator) != NULL; } /* diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c index de787c3d37..3d9088a704 100644 --- a/src/backend/access/transam/clog.c +++ b/src/backend/access/transam/clog.c @@ -297,8 +297,9 @@ TransactionIdSetPageStatus(TransactionId xid, int nsubxids, if (all_xact_same_page && xid == MyProc->xid && nsubxids <= THRESHOLD_SUBTRANS_CLOG_OPT && nsubxids == MyProc->subxidStatus.count && - memcmp(subxids, MyProc->subxids.xids, - nsubxids * sizeof(TransactionId)) == 0) + (nsubxids == 0 || + memcmp(subxids, MyProc->subxids.xids, + nsubxids * sizeof(TransactionId)) == 0)) { /* * If we can immediately acquire XactSLRULock, we update the status of diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c index adf763a8ea..8964ddf3eb 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -5353,8 +5353,9 @@ SerializeTransactionState(Size maxsize, char *start_address) { if (FullTransactionIdIsValid(s->fullTransactionId)) workspace[i++] = XidFromFullTransactionId(s->fullTransactionId); - memcpy(&workspace[i], s->childXids, - s->nChildXids * sizeof(TransactionId)); + if (s->nChildXids > 0) + memcpy(&workspace[i], s->childXids, + s->nChildXids * sizeof(TransactionId)); i += s->nChildXids; } Assert(i == nxids); diff --git a/src/backend/storage/ipc/shm_mq.c b/src/backend/storage/ipc/shm_mq.c index 45b0dfc062..603cf9b0fa 100644 --- a/src/backend/storage/ipc/shm_mq.c +++ b/src/backend/storage/ipc/shm_mq.c @@ -773,8 +773,11 @@ shm_mq_receive(shm_mq_handle *mqh, Size *nbytesp, void **datap, bool nowait) /* Copy as much as we can. 
*/ Assert(mqh->mqh_partial_bytes + rb <= nbytes); - memcpy(&mqh->mqh_buffer[mqh->mqh_partial_bytes], rawdata, rb); - mqh->mqh_partial_bytes += rb; + if (rb > 0) + { + memcpy(&mqh->mqh_buffer[mqh->mqh_partial_bytes], rawdata, rb); + mqh->mqh_partial_bytes += rb; + } /* * Update count of bytes that can be consumed, accounting for diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c index 975d7dcf47..45547f6ae7 100644 --- a/src/backend/utils/adt/numeric.c +++ b/src/backend/utils/adt/numeric.c @@ -10048,12 +10048,20 @@ exp_var(const NumericVar *arg, NumericVar *result, int rscale) * * Essentially, we're approximating log10(abs(ln(var))). This is used to * determine the appropriate rscale when computing natural logarithms. + * + * Note: many callers call this before range-checking the input. Therefore, + * we must be robust against values that are invalid to apply ln() to. + * We don't wish to throw an error here, so just return zero in such cases. */ static int estimate_ln_dweight(const NumericVar *var) { int ln_dweight; + /* Caller should fail on ln(negative), but for the moment return zero */ + if (var->sign != NUMERIC_POS) + return 0; + if (cmp_var(var, &const_zero_point_nine) >= 0 && cmp_var(var, &const_one_point_one) <= 0) { diff --git a/src/backend/utils/time/snapmgr.c b/src/backend/utils/time/snapmgr.c index a0b81bf154..a0be0c411a 100644 --- a/src/backend/utils/time/snapmgr.c +++ b/src/backend/utils/time/snapmgr.c @@ -536,12 +536,14 @@ SetTransactionSnapshot(Snapshot sourcesnap, VirtualTransactionId *sourcevxid, CurrentSnapshot->xmax = sourcesnap->xmax; CurrentSnapshot->xcnt = sourcesnap->xcnt; Assert(sourcesnap->xcnt <= GetMaxSnapshotXidCount()); - memcpy(CurrentSnapshot->xip, sourcesnap->xip, - sourcesnap->xcnt * sizeof(TransactionId)); + if (sourcesnap->xcnt > 0) + memcpy(CurrentSnapshot->xip, sourcesnap->xip, + sourcesnap->xcnt * sizeof(TransactionId)); CurrentSnapshot->subxcnt = sourcesnap->subxcnt; Assert(sourcesnap->subxcnt <= GetMaxSnapshotSubxidCount()); - memcpy(CurrentSnapshot->subxip, sourcesnap->subxip, - sourcesnap->subxcnt * sizeof(TransactionId)); + if (sourcesnap->subxcnt > 0) + memcpy(CurrentSnapshot->subxip, sourcesnap->subxip, + sourcesnap->subxcnt * sizeof(TransactionId)); CurrentSnapshot->suboverflowed = sourcesnap->suboverflowed; CurrentSnapshot->takenDuringRecovery = sourcesnap->takenDuringRecovery; /* NB: curcid should NOT be copied, it's a local matter */ diff --git a/src/fe_utils/print.c b/src/fe_utils/print.c index 2c8e58ebf5..dcdb2e0d0c 100644 --- a/src/fe_utils/print.c +++ b/src/fe_utils/print.c @@ -966,7 +966,8 @@ print_aligned_text(const printTableContent *cont, FILE *fout, bool is_pager) more_col_wrapping = col_count; curr_nl_line = 0; - memset(header_done, false, col_count * sizeof(bool)); + if (col_count > 0) + memset(header_done, false, col_count * sizeof(bool)); while (more_col_wrapping) { if (opt_border == 2) From 0fbdfaf79d0bbfe1ede9d8ca2d85b2c9a8513082 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Thu, 3 Mar 2022 19:03:17 -0500 Subject: [PATCH 080/108] Fix bogus casting in BlockIdGetBlockNumber(). This macro cast the result to BlockNumber after shifting, not before, which is the wrong thing. Per the C spec, the uint16 fields would promote to int not unsigned int, so that (for 32-bit int) the shift potentially shifts a nonzero bit into the sign position. 
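As a hedged standalone demonstration of that promotion hazard (the values
are invented; on a typical platform with 32-bit int, clang's
-fsanitize=undefined flags the first form, which is exactly the shift this
commit rewrites):

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint16_t    bi_hi = 0x8000; /* top bit of the high half set */
        uint16_t    bi_lo = 0x0001;

        /* Old form: bi_hi promotes to signed int, so the shift moves a
         * nonzero bit into the sign position -- undefined behavior. */
        uint32_t    bad = (uint32_t) ((bi_hi << 16) | (uint16_t) bi_lo);

        /* Fixed form: widen to the unsigned result type before shifting. */
        uint32_t    good = (((uint32_t) bi_hi) << 16) | (uint32_t) bi_lo;

        printf("%08x %08x\n", (unsigned) bad, (unsigned) good);
        return 0;
    }
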
I doubt there are any production systems where this would actually end with the wrong answer, but it is undefined behavior per the C spec, and clang's -fsanitize=undefined option reputedly warns about it on some platforms. (I can't reproduce that right now, but the code is undeniably wrong per spec.) It's easy to fix by casting to BlockNumber (uint32) in the proper places. It's been wrong for ages, so back-patch to all supported branches. Report and patch by Zhihong Yu (cosmetic tweaking by me) Discussion: https://postgr.es/m/CALNJ-vT9r0DSsAOw9OXVJFxLENoVS_68kJ5x0p44atoYH+H4dg@mail.gmail.com --- src/include/storage/block.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/include/storage/block.h b/src/include/storage/block.h index 4c9d3ace8e..cf1fc499df 100644 --- a/src/include/storage/block.h +++ b/src/include/storage/block.h @@ -115,7 +115,7 @@ typedef BlockIdData *BlockId; /* block identifier */ #define BlockIdGetBlockNumber(blockId) \ ( \ AssertMacro(BlockIdIsValid(blockId)), \ - (BlockNumber) (((blockId)->bi_hi << 16) | ((uint16) (blockId)->bi_lo)) \ + ((((BlockNumber) (blockId)->bi_hi) << 16) | ((BlockNumber) (blockId)->bi_lo)) \ ) #endif /* BLOCK_H */ From 8134fe4ad80a1f9673770126ed7c45045b8ef467 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Thu, 3 Mar 2022 19:15:38 -0500 Subject: [PATCH 081/108] Remove some pointless code in block.h. There's no visible point in casting the result of a comparison to bool, because it already is that, at least on C99 compilers. I see no point in these assertions that a pointer we're about to dereference isn't null, either. If it is, the resulting SIGSEGV will notify us of the problem just fine. Noted while reviewing Zhihong Yu's patch. This is basically cosmetic, so no need for back-patch. Discussion: https://postgr.es/m/CALNJ-vT9r0DSsAOw9OXVJFxLENoVS_68kJ5x0p44atoYH+H4dg@mail.gmail.com --- src/include/storage/block.h | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/src/include/storage/block.h b/src/include/storage/block.h index cf1fc499df..d756e3fda5 100644 --- a/src/include/storage/block.h +++ b/src/include/storage/block.h @@ -68,14 +68,14 @@ typedef BlockIdData *BlockId; /* block identifier */ * True iff blockNumber is valid. */ #define BlockNumberIsValid(blockNumber) \ - ((bool) ((BlockNumber) (blockNumber) != InvalidBlockNumber)) + ((BlockNumber) (blockNumber) != InvalidBlockNumber) /* * BlockIdIsValid * True iff the block identifier is valid. */ #define BlockIdIsValid(blockId) \ - ((bool) PointerIsValid(blockId)) + PointerIsValid(blockId) /* * BlockIdSet @@ -83,7 +83,6 @@ typedef BlockIdData *BlockId; /* block identifier */ */ #define BlockIdSet(blockId, blockNumber) \ ( \ - AssertMacro(PointerIsValid(blockId)), \ (blockId)->bi_hi = (blockNumber) >> 16, \ (blockId)->bi_lo = (blockNumber) & 0xffff \ ) @@ -94,8 +93,6 @@ typedef BlockIdData *BlockId; /* block identifier */ */ #define BlockIdCopy(toBlockId, fromBlockId) \ ( \ - AssertMacro(PointerIsValid(toBlockId)), \ - AssertMacro(PointerIsValid(fromBlockId)), \ (toBlockId)->bi_hi = (fromBlockId)->bi_hi, \ (toBlockId)->bi_lo = (fromBlockId)->bi_lo \ ) @@ -113,9 +110,6 @@ typedef BlockIdData *BlockId; /* block identifier */ * Retrieve the block number from a block identifier. 
*/ #define BlockIdGetBlockNumber(blockId) \ -( \ - AssertMacro(BlockIdIsValid(blockId)), \ - ((((BlockNumber) (blockId)->bi_hi) << 16) | ((BlockNumber) (blockId)->bi_lo)) \ -) + ((((BlockNumber) (blockId)->bi_hi) << 16) | ((BlockNumber) (blockId)->bi_lo)) #endif /* BLOCK_H */ From b3c8aae00850384b1cec5311eb1864e2f5e80a44 Mon Sep 17 00:00:00 2001 From: Michael Paquier Date: Fri, 4 Mar 2022 09:51:12 +0900 Subject: [PATCH 082/108] doc: Fix description of pg_stop_backup() The function was still documented as returning a set of records, something not true as of 62ce0c7. Reported-by: Tom Lane Discussion: https://postgr.es/m/3159823.1646320180@sss.pgh.pa.us --- doc/src/sgml/func.sgml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml index df3cd5987b..8a802fb225 100644 --- a/doc/src/sgml/func.sgml +++ b/doc/src/sgml/func.sgml @@ -25726,7 +25726,7 @@ LOG: Grand total: 1651920 bytes in 201 blocks; 622360 free (88 chunks); 1029560 exclusive boolean , wait_for_archive boolean ) - setof record + record ( lsn pg_lsn, labelfile text, spcmapfile text ) From f7ea240aa7491b6ed2985bb50888bd432f3341df Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Thu, 3 Mar 2022 20:03:47 -0500 Subject: [PATCH 083/108] Tighten overflow checks in tidin(). This code seems to have been written on the assumption that "unsigned long" is 32 bits; or at any rate it ignored the possibility of conversion overflow. Rewrite, borrowing some logic from oidin(). Discussion: https://postgr.es/m/3441768.1646343914@sss.pgh.pa.us --- src/backend/utils/adt/tid.c | 28 +++++++++++++++++++++------- src/test/regress/expected/tid.out | 19 +++++++++++++++++++ src/test/regress/sql/tid.sql | 12 ++++++++++++ 3 files changed, 52 insertions(+), 7 deletions(-) diff --git a/src/backend/utils/adt/tid.c b/src/backend/utils/adt/tid.c index dcc1620afb..83ac589f95 100644 --- a/src/backend/utils/adt/tid.c +++ b/src/backend/utils/adt/tid.c @@ -64,10 +64,10 @@ tidin(PG_FUNCTION_ARGS) BlockNumber blockNumber; OffsetNumber offsetNumber; char *badp; - int hold_offset; + unsigned long cvt; for (i = 0, p = str; *p && i < NTIDARGS && *p != RDELIM; p++) - if (*p == DELIM || (*p == LDELIM && !i)) + if (*p == DELIM || (*p == LDELIM && i == 0)) coord[i++] = p + 1; if (i < NTIDARGS) @@ -77,22 +77,36 @@ tidin(PG_FUNCTION_ARGS) "tid", str))); errno = 0; - blockNumber = strtoul(coord[0], &badp, 10); + cvt = strtoul(coord[0], &badp, 10); if (errno || *badp != DELIM) ereport(ERROR, (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), errmsg("invalid input syntax for type %s: \"%s\"", "tid", str))); + blockNumber = (BlockNumber) cvt; - hold_offset = strtol(coord[1], &badp, 10); - if (errno || *badp != RDELIM || - hold_offset > USHRT_MAX || hold_offset < 0) + /* + * Cope with possibility that unsigned long is wider than BlockNumber, in + * which case strtoul will not raise an error for some values that are out + * of the range of BlockNumber. (See similar code in oidin().) 
+ */ +#if SIZEOF_LONG > 4 + if (cvt != (unsigned long) blockNumber && + cvt != (unsigned long) ((int32) blockNumber)) ereport(ERROR, (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), errmsg("invalid input syntax for type %s: \"%s\"", "tid", str))); +#endif - offsetNumber = hold_offset; + cvt = strtoul(coord[1], &badp, 10); + if (errno || *badp != RDELIM || + cvt > USHRT_MAX) + ereport(ERROR, + (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), + errmsg("invalid input syntax for type %s: \"%s\"", + "tid", str))); + offsetNumber = (OffsetNumber) cvt; result = (ItemPointer) palloc(sizeof(ItemPointerData)); diff --git a/src/test/regress/expected/tid.out b/src/test/regress/expected/tid.out index 8da1a45576..7d8957bd6f 100644 --- a/src/test/regress/expected/tid.out +++ b/src/test/regress/expected/tid.out @@ -1,3 +1,22 @@ +-- basic tests for the TID data type +SELECT + '(0,0)'::tid as tid00, + '(0,1)'::tid as tid01, + '(-1,0)'::tid as tidm10, + '(4294967295,65535)'::tid as tidmax; + tid00 | tid01 | tidm10 | tidmax +-------+-------+----------------+-------------------- + (0,0) | (0,1) | (4294967295,0) | (4294967295,65535) +(1 row) + +SELECT '(4294967296,1)'::tid; -- error +ERROR: invalid input syntax for type tid: "(4294967296,1)" +LINE 1: SELECT '(4294967296,1)'::tid; + ^ +SELECT '(1,65536)'::tid; -- error +ERROR: invalid input syntax for type tid: "(1,65536)" +LINE 1: SELECT '(1,65536)'::tid; + ^ -- tests for functions related to TID handling CREATE TABLE tid_tab (a int); -- min() and max() for TIDs diff --git a/src/test/regress/sql/tid.sql b/src/test/regress/sql/tid.sql index 34546a3cb7..990d314a5f 100644 --- a/src/test/regress/sql/tid.sql +++ b/src/test/regress/sql/tid.sql @@ -1,3 +1,15 @@ +-- basic tests for the TID data type + +SELECT + '(0,0)'::tid as tid00, + '(0,1)'::tid as tid01, + '(-1,0)'::tid as tidm10, + '(4294967295,65535)'::tid as tidmax; + +SELECT '(4294967296,1)'::tid; -- error +SELECT '(1,65536)'::tid; -- error + + -- tests for functions related to TID handling CREATE TABLE tid_tab (a int); From ceb57afd3ce177e897cb4c5b44aa683fc0036782 Mon Sep 17 00:00:00 2001 From: Amit Kapila Date: Fri, 4 Mar 2022 07:54:12 +0530 Subject: [PATCH 084/108] Add some additional tests for row filters in logical replication. Commit 52e4f0cd47 didn't add tests for pg_dump support, so add a few tests for it. Additionally, verify that catalogs are updated after few ALTER PUBLICATION commands that modify row filters by using \d. 
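The SQL surface being covered looks like this (a hedged sketch with invented
table and publication names, not the tests' own objects); \d on the table
then reports the publication together with its WHERE qualification, and
pg_dump reproduces it as ALTER PUBLICATION ... ADD TABLE ... WHERE (...):

    CREATE TABLE rf_demo (id int PRIMARY KEY, note text);
    CREATE PUBLICATION rf_pub;
    -- attach the table with a row filter; only rows satisfying the
    -- expression are published
    ALTER PUBLICATION rf_pub ADD TABLE rf_demo WHERE (id > 0);
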
Reported-by: Tomas Vondra Author: Shi yu, based on initial by Tomas Vondra Reviewed-by: Euler Taveira and Amit Kapila Discussion: https://postgr.es/m/6bdbd7fc-e81a-9a77-d963-24adeb95f29e@enterprisedb.com --- src/bin/pg_dump/t/002_pg_dump.pl | 34 +++++++++++++++++++++++ src/test/regress/expected/publication.out | 22 +++++++++++++++ src/test/regress/sql/publication.sql | 3 ++ 3 files changed, 59 insertions(+) diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl index dd065c758f..d9bc267f6d 100644 --- a/src/bin/pg_dump/t/002_pg_dump.pl +++ b/src/bin/pg_dump/t/002_pg_dump.pl @@ -2382,6 +2382,15 @@ like => { %full_runs, section_post_data => 1, }, }, + 'CREATE PUBLICATION pub4' => { + create_order => 50, + create_sql => 'CREATE PUBLICATION pub4;', + regexp => qr/^ + \QCREATE PUBLICATION pub4 WITH (publish = 'insert, update, delete, truncate');\E + /xm, + like => { %full_runs, section_post_data => 1, }, + }, + 'CREATE SUBSCRIPTION sub1' => { create_order => 50, create_sql => 'CREATE SUBSCRIPTION sub1 @@ -2439,6 +2448,31 @@ like => { %full_runs, section_post_data => 1, }, }, + 'ALTER PUBLICATION pub4 ADD TABLE test_table WHERE (col1 > 0);' => { + create_order => 51, + create_sql => + 'ALTER PUBLICATION pub4 ADD TABLE dump_test.test_table WHERE (col1 > 0);', + regexp => qr/^ + \QALTER PUBLICATION pub4 ADD TABLE ONLY dump_test.test_table WHERE ((col1 > 0));\E + /xm, + like => { %full_runs, section_post_data => 1, }, + unlike => { + exclude_dump_test_schema => 1, + exclude_test_table => 1, + }, + }, + + 'ALTER PUBLICATION pub4 ADD TABLE test_second_table WHERE (col2 = \'test\');' => { + create_order => 52, + create_sql => + 'ALTER PUBLICATION pub4 ADD TABLE dump_test.test_second_table WHERE (col2 = \'test\');', + regexp => qr/^ + \QALTER PUBLICATION pub4 ADD TABLE ONLY dump_test.test_second_table WHERE ((col2 = 'test'::text));\E + /xm, + like => { %full_runs, section_post_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, + }, + 'CREATE SCHEMA public' => { regexp => qr/^CREATE SCHEMA public;/m, diff --git a/src/test/regress/expected/publication.out b/src/test/regress/expected/publication.out index 3c382e520e..4e191c120a 100644 --- a/src/test/regress/expected/publication.out +++ b/src/test/regress/expected/publication.out @@ -263,6 +263,12 @@ Tables: "public.testpub_rf_tbl1" "public.testpub_rf_tbl2" WHERE ((c <> 'test'::text) AND (d < 5)) +\d testpub_rf_tbl3 + Table "public.testpub_rf_tbl3" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + e | integer | | | + ALTER PUBLICATION testpub5 ADD TABLE testpub_rf_tbl3 WHERE (e > 1000 AND e < 2000); \dRp+ testpub5 Publication testpub5 @@ -274,6 +280,14 @@ Tables: "public.testpub_rf_tbl2" WHERE ((c <> 'test'::text) AND (d < 5)) "public.testpub_rf_tbl3" WHERE ((e > 1000) AND (e < 2000)) +\d testpub_rf_tbl3 + Table "public.testpub_rf_tbl3" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + e | integer | | | +Publications: + "testpub5" WHERE ((e > 1000) AND (e < 2000)) + ALTER PUBLICATION testpub5 DROP TABLE testpub_rf_tbl2; \dRp+ testpub5 Publication testpub5 @@ -294,6 +308,14 @@ ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl3 WHERE (e > 300 AND e < 500) Tables: "public.testpub_rf_tbl3" WHERE ((e > 300) AND (e < 500)) +\d testpub_rf_tbl3 + Table "public.testpub_rf_tbl3" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + e | integer | | | +Publications: + 
"testpub5" WHERE ((e > 300) AND (e < 500)) + -- test \d (now it displays filter information) SET client_min_messages = 'ERROR'; CREATE PUBLICATION testpub_rf_yes FOR TABLE testpub_rf_tbl1 WHERE (a > 1) WITH (publish = 'insert'); diff --git a/src/test/regress/sql/publication.sql b/src/test/regress/sql/publication.sql index 3f04d34264..5457c56b33 100644 --- a/src/test/regress/sql/publication.sql +++ b/src/test/regress/sql/publication.sql @@ -150,13 +150,16 @@ SET client_min_messages = 'ERROR'; CREATE PUBLICATION testpub5 FOR TABLE testpub_rf_tbl1, testpub_rf_tbl2 WHERE (c <> 'test' AND d < 5) WITH (publish = 'insert'); RESET client_min_messages; \dRp+ testpub5 +\d testpub_rf_tbl3 ALTER PUBLICATION testpub5 ADD TABLE testpub_rf_tbl3 WHERE (e > 1000 AND e < 2000); \dRp+ testpub5 +\d testpub_rf_tbl3 ALTER PUBLICATION testpub5 DROP TABLE testpub_rf_tbl2; \dRp+ testpub5 -- remove testpub_rf_tbl1 and add testpub_rf_tbl3 again (another WHERE expression) ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl3 WHERE (e > 300 AND e < 500); \dRp+ testpub5 +\d testpub_rf_tbl3 -- test \d (now it displays filter information) SET client_min_messages = 'ERROR'; CREATE PUBLICATION testpub_rf_yes FOR TABLE testpub_rf_tbl1 WHERE (a > 1) WITH (publish = 'insert'); From d816f366bc427cacba29c1e4b1696afa620e73a7 Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Fri, 4 Mar 2022 08:47:30 +0100 Subject: [PATCH 085/108] psql: Make SSL info display more compact Remove the bits display, since that can be derived from the cipher suite. Reviewed-by: Daniel Gustafsson Discussion: https://www.postgresql.org/message-id/flat/aee28ee7-0ab3-c2e2-5bed-109feb0c089b%40enterprisedb.com --- src/bin/psql/command.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c index 292cff5df9..079f4a1a76 100644 --- a/src/bin/psql/command.c +++ b/src/bin/psql/command.c @@ -3667,7 +3667,6 @@ printSSLInfo(void) { const char *protocol; const char *cipher; - const char *bits; const char *compression; if (!PQsslInUse(pset.db)) @@ -3675,13 +3674,11 @@ printSSLInfo(void) protocol = PQsslAttribute(pset.db, "protocol"); cipher = PQsslAttribute(pset.db, "cipher"); - bits = PQsslAttribute(pset.db, "key_bits"); compression = PQsslAttribute(pset.db, "compression"); - printf(_("SSL connection (protocol: %s, cipher: %s, bits: %s, compression: %s)\n"), + printf(_("SSL connection (protocol: %s, cipher: %s, compression: %s)\n"), protocol ? protocol : _("unknown"), cipher ? cipher : _("unknown"), - bits ? bits : _("unknown"), (compression && strcmp(compression, "off") != 0) ? _("on") : _("off")); } From 791b1b71da35d9d4264f72a87e4078b85a2fcfb4 Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Fri, 4 Mar 2022 14:49:37 +0100 Subject: [PATCH 086/108] Parse/analyze function renaming There are three parallel ways to call parse/analyze: with fixed parameters, with variable parameters, and by supplying your own parser callback. Some of the involved functions were confusingly named and made this API structure more confusing. This patch renames some functions to make this clearer: parse_analyze() -> parse_analyze_fixedparams() pg_analyze_and_rewrite() -> pg_analyze_and_rewrite_fixedparams() (Otherwise one might think this variant doesn't accept parameters, but in fact all three ways accept parameters.) pg_analyze_and_rewrite_params() -> pg_analyze_and_rewrite_withcb() (Before, and also when considering pg_analyze_and_rewrite(), one might think this is the only way to pass parameters. 
Moreover, the parser callback doesn't necessarily need to parse only parameters, it's just one of the things it could do.) parse_fixed_parameters() -> setup_parse_fixed_parameters() parse_variable_parameters() -> setup_parse_variable_parameters() (These functions don't actually do any parsing, they just set up callbacks to use during parsing later.) This patch also adds some const decorations to the fixed-parameters API, so the distinction from the variable-parameters API is more clear. Reviewed-by: Nathan Bossart Discussion: https://www.postgresql.org/message-id/flat/c67ce276-52b4-0239-dc0e-39875bf81840@enterprisedb.com --- src/backend/catalog/pg_proc.c | 2 +- src/backend/commands/copyto.c | 2 +- src/backend/commands/extension.c | 2 +- src/backend/commands/schemacmds.c | 2 +- src/backend/commands/tablecmds.c | 2 +- src/backend/commands/view.c | 2 +- src/backend/executor/functions.c | 2 +- src/backend/executor/spi.c | 8 ++++---- src/backend/optimizer/util/clauses.c | 2 +- src/backend/parser/analyze.c | 10 +++++----- src/backend/parser/parse_param.c | 8 ++++---- src/backend/parser/parse_utilcmd.c | 2 +- src/backend/tcop/postgres.c | 17 +++++++++-------- src/backend/utils/cache/plancache.c | 4 ++-- src/include/parser/analyze.h | 4 ++-- src/include/parser/parse_param.h | 6 +++--- src/include/tcop/tcopprot.h | 6 +++--- 17 files changed, 41 insertions(+), 40 deletions(-) diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c index 12521c77c3..ac8aacbd59 100644 --- a/src/backend/catalog/pg_proc.c +++ b/src/backend/catalog/pg_proc.c @@ -947,7 +947,7 @@ fmgr_sql_validator(PG_FUNCTION_ARGS) RawStmt *parsetree = lfirst_node(RawStmt, lc); List *querytree_sublist; - querytree_sublist = pg_analyze_and_rewrite_params(parsetree, + querytree_sublist = pg_analyze_and_rewrite_withcb(parsetree, prosrc, (ParserSetupHook) sql_fn_parser_setup, pinfo, diff --git a/src/backend/commands/copyto.c b/src/backend/commands/copyto.c index 3283ef50d0..55c38b04c4 100644 --- a/src/backend/commands/copyto.c +++ b/src/backend/commands/copyto.c @@ -439,7 +439,7 @@ BeginCopyTo(ParseState *pstate, * Run parse analysis and rewrite. Note this also acquires sufficient * locks on the source table(s). */ - rewritten = pg_analyze_and_rewrite(raw_query, + rewritten = pg_analyze_and_rewrite_fixedparams(raw_query, pstate->p_sourcetext, NULL, 0, NULL); diff --git a/src/backend/commands/extension.c b/src/backend/commands/extension.c index 0e04304cb0..42503ef454 100644 --- a/src/backend/commands/extension.c +++ b/src/backend/commands/extension.c @@ -757,7 +757,7 @@ execute_sql_string(const char *sql) /* Be sure parser can see any DDL done so far */ CommandCounterIncrement(); - stmt_list = pg_analyze_and_rewrite(parsetree, + stmt_list = pg_analyze_and_rewrite_fixedparams(parsetree, sql, NULL, 0, diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c index 984000a5bc..be3925b3b4 100644 --- a/src/backend/commands/schemacmds.c +++ b/src/backend/commands/schemacmds.c @@ -172,7 +172,7 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString, /* * Execute each command contained in the CREATE SCHEMA. Since the grammar * allows only utility commands in CREATE SCHEMA, there is no need to pass - * them through parse_analyze() or the rewriter; we can just hand them + * them through parse_analyze_*() or the rewriter; we can just hand them * straight to ProcessUtility. 
*/ foreach(parsetree_item, parsetree_list) diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index 3e83f375b5..dc5872f988 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -13181,7 +13181,7 @@ ATPostAlterTypeParse(Oid oldId, Oid oldRelId, Oid refRelId, char *cmd, /* * We expect that we will get only ALTER TABLE and CREATE INDEX * statements. Hence, there is no need to pass them through - * parse_analyze() or the rewriter, but instead we need to pass them + * parse_analyze_*() or the rewriter, but instead we need to pass them * through parse_utilcmd.c to make them ready for execution. */ raw_parsetree_list = raw_parser(cmd, RAW_PARSE_DEFAULT); diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c index 459e9821d0..8690a3f3c6 100644 --- a/src/backend/commands/view.c +++ b/src/backend/commands/view.c @@ -439,7 +439,7 @@ DefineView(ViewStmt *stmt, const char *queryString, rawstmt->stmt_location = stmt_location; rawstmt->stmt_len = stmt_len; - viewParse = parse_analyze(rawstmt, queryString, NULL, 0, NULL); + viewParse = parse_analyze_fixedparams(rawstmt, queryString, NULL, 0, NULL); /* * The grammar should ensure that the result is a single SELECT Query. diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c index 29a68879ee..f9460ae506 100644 --- a/src/backend/executor/functions.c +++ b/src/backend/executor/functions.c @@ -718,7 +718,7 @@ init_sql_fcache(FunctionCallInfo fcinfo, Oid collation, bool lazyEvalOK) RawStmt *parsetree = lfirst_node(RawStmt, lc); List *queryTree_sublist; - queryTree_sublist = pg_analyze_and_rewrite_params(parsetree, + queryTree_sublist = pg_analyze_and_rewrite_withcb(parsetree, fcache->src, (ParserSetupHook) sql_fn_parser_setup, fcache->pinfo, diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c index 5b353cb93a..a82e986667 100644 --- a/src/backend/executor/spi.c +++ b/src/backend/executor/spi.c @@ -2258,7 +2258,7 @@ _SPI_prepare_plan(const char *src, SPIPlanPtr plan) if (plan->parserSetup != NULL) { Assert(plan->nargs == 0); - stmt_list = pg_analyze_and_rewrite_params(parsetree, + stmt_list = pg_analyze_and_rewrite_withcb(parsetree, src, plan->parserSetup, plan->parserSetupArg, @@ -2266,7 +2266,7 @@ _SPI_prepare_plan(const char *src, SPIPlanPtr plan) } else { - stmt_list = pg_analyze_and_rewrite(parsetree, + stmt_list = pg_analyze_and_rewrite_fixedparams(parsetree, src, plan->argtypes, plan->nargs, @@ -2495,7 +2495,7 @@ _SPI_execute_plan(SPIPlanPtr plan, const SPIExecuteOptions *options, else if (plan->parserSetup != NULL) { Assert(plan->nargs == 0); - stmt_list = pg_analyze_and_rewrite_params(parsetree, + stmt_list = pg_analyze_and_rewrite_withcb(parsetree, src, plan->parserSetup, plan->parserSetupArg, @@ -2503,7 +2503,7 @@ _SPI_execute_plan(SPIPlanPtr plan, const SPIExecuteOptions *options, } else { - stmt_list = pg_analyze_and_rewrite(parsetree, + stmt_list = pg_analyze_and_rewrite_fixedparams(parsetree, src, plan->argtypes, plan->nargs, diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c index a707dc9f26..413dcac036 100644 --- a/src/backend/optimizer/util/clauses.c +++ b/src/backend/optimizer/util/clauses.c @@ -5057,7 +5057,7 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte) if (list_length(raw_parsetree_list) != 1) goto fail; - querytree_list = pg_analyze_and_rewrite_params(linitial(raw_parsetree_list), + querytree_list = 
pg_analyze_and_rewrite_withcb(linitial(raw_parsetree_list), src, (ParserSetupHook) sql_fn_parser_setup, pinfo, NULL); diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c index 6ac2e9ce23..19d97fe731 100644 --- a/src/backend/parser/analyze.c +++ b/src/backend/parser/analyze.c @@ -96,7 +96,7 @@ static bool test_raw_expression_coverage(Node *node, void *context); /* - * parse_analyze + * parse_analyze_fixedparams * Analyze a raw parse tree and transform it to Query form. * * Optionally, information about $n parameter types can be supplied. @@ -107,8 +107,8 @@ static bool test_raw_expression_coverage(Node *node, void *context); * a dummy CMD_UTILITY Query node. */ Query * -parse_analyze(RawStmt *parseTree, const char *sourceText, - Oid *paramTypes, int numParams, +parse_analyze_fixedparams(RawStmt *parseTree, const char *sourceText, + const Oid *paramTypes, int numParams, QueryEnvironment *queryEnv) { ParseState *pstate = make_parsestate(NULL); @@ -120,7 +120,7 @@ parse_analyze(RawStmt *parseTree, const char *sourceText, pstate->p_sourcetext = sourceText; if (numParams > 0) - parse_fixed_parameters(pstate, paramTypes, numParams); + setup_parse_fixed_parameters(pstate, paramTypes, numParams); pstate->p_queryEnv = queryEnv; @@ -158,7 +158,7 @@ parse_analyze_varparams(RawStmt *parseTree, const char *sourceText, pstate->p_sourcetext = sourceText; - parse_variable_parameters(pstate, paramTypes, numParams); + setup_parse_variable_parameters(pstate, paramTypes, numParams); query = transformTopLevelStmt(pstate, parseTree); diff --git a/src/backend/parser/parse_param.c b/src/backend/parser/parse_param.c index 3100d890d2..31a43e034c 100644 --- a/src/backend/parser/parse_param.c +++ b/src/backend/parser/parse_param.c @@ -35,7 +35,7 @@ typedef struct FixedParamState { - Oid *paramTypes; /* array of parameter type OIDs */ + const Oid *paramTypes; /* array of parameter type OIDs */ int numParams; /* number of array entries */ } FixedParamState; @@ -64,8 +64,8 @@ static bool query_contains_extern_params_walker(Node *node, void *context); * Set up to process a query containing references to fixed parameters. */ void -parse_fixed_parameters(ParseState *pstate, - Oid *paramTypes, int numParams) +setup_parse_fixed_parameters(ParseState *pstate, + const Oid *paramTypes, int numParams) { FixedParamState *parstate = palloc(sizeof(FixedParamState)); @@ -80,7 +80,7 @@ parse_fixed_parameters(ParseState *pstate, * Set up to process a query containing references to variable parameters. */ void -parse_variable_parameters(ParseState *pstate, +setup_parse_variable_parameters(ParseState *pstate, Oid **paramTypes, int *numParams) { VarParamState *parstate = palloc(sizeof(VarParamState)); diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c index 99efa26ce4..cd946c7692 100644 --- a/src/backend/parser/parse_utilcmd.c +++ b/src/backend/parser/parse_utilcmd.c @@ -3,7 +3,7 @@ * parse_utilcmd.c * Perform parse analysis work for various utility commands * - * Formerly we did this work during parse_analyze() in analyze.c. However + * Formerly we did this work during parse_analyze_*() in analyze.c. 
However * that is fairly unsafe in the presence of querytree caching, since any * database state that we depend on in making the transformations might be * obsolete by the time the utility command is executed; and utility commands diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c index 34c13a1113..c087db4445 100644 --- a/src/backend/tcop/postgres.c +++ b/src/backend/tcop/postgres.c @@ -637,8 +637,8 @@ pg_parse_query(const char *query_string) * NOTE: for reasons mentioned above, this must be separate from raw parsing. */ List * -pg_analyze_and_rewrite(RawStmt *parsetree, const char *query_string, - Oid *paramTypes, int numParams, +pg_analyze_and_rewrite_fixedparams(RawStmt *parsetree, const char *query_string, + const Oid *paramTypes, int numParams, QueryEnvironment *queryEnv) { Query *query; @@ -652,7 +652,7 @@ pg_analyze_and_rewrite(RawStmt *parsetree, const char *query_string, if (log_parser_stats) ResetUsage(); - query = parse_analyze(parsetree, query_string, paramTypes, numParams, + query = parse_analyze_fixedparams(parsetree, query_string, paramTypes, numParams, queryEnv); if (log_parser_stats) @@ -669,12 +669,13 @@ pg_analyze_and_rewrite(RawStmt *parsetree, const char *query_string, } /* - * Do parse analysis and rewriting. This is the same as pg_analyze_and_rewrite - * except that external-parameter resolution is determined by parser callback - * hooks instead of a fixed list of parameter datatypes. + * Do parse analysis and rewriting. This is the same as + * pg_analyze_and_rewrite_fixedparams except that, instead of a fixed list of + * parameter datatypes, a parser callback is supplied that can do + * external-parameter resolution and possibly other things. */ List * -pg_analyze_and_rewrite_params(RawStmt *parsetree, +pg_analyze_and_rewrite_withcb(RawStmt *parsetree, const char *query_string, ParserSetupHook parserSetup, void *parserSetupArg, @@ -1125,7 +1126,7 @@ exec_simple_query(const char *query_string) else oldcontext = MemoryContextSwitchTo(MessageContext); - querytree_list = pg_analyze_and_rewrite(parsetree, query_string, + querytree_list = pg_analyze_and_rewrite_fixedparams(parsetree, query_string, NULL, 0, NULL); plantree_list = pg_plan_queries(querytree_list, query_string, diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c index 4a9055e6bb..4cf6db504f 100644 --- a/src/backend/utils/cache/plancache.c +++ b/src/backend/utils/cache/plancache.c @@ -682,13 +682,13 @@ RevalidateCachedQuery(CachedPlanSource *plansource, if (rawtree == NULL) tlist = NIL; else if (plansource->parserSetup != NULL) - tlist = pg_analyze_and_rewrite_params(rawtree, + tlist = pg_analyze_and_rewrite_withcb(rawtree, plansource->query_string, plansource->parserSetup, plansource->parserSetupArg, queryEnv); else - tlist = pg_analyze_and_rewrite(rawtree, + tlist = pg_analyze_and_rewrite_fixedparams(rawtree, plansource->query_string, plansource->param_types, plansource->num_params, diff --git a/src/include/parser/analyze.h b/src/include/parser/analyze.h index 0022184de0..ed989bb141 100644 --- a/src/include/parser/analyze.h +++ b/src/include/parser/analyze.h @@ -24,8 +24,8 @@ typedef void (*post_parse_analyze_hook_type) (ParseState *pstate, extern PGDLLIMPORT post_parse_analyze_hook_type post_parse_analyze_hook; -extern Query *parse_analyze(RawStmt *parseTree, const char *sourceText, - Oid *paramTypes, int numParams, QueryEnvironment *queryEnv); +extern Query *parse_analyze_fixedparams(RawStmt *parseTree, const char *sourceText, + const Oid *paramTypes, 
int numParams, QueryEnvironment *queryEnv); extern Query *parse_analyze_varparams(RawStmt *parseTree, const char *sourceText, Oid **paramTypes, int *numParams); diff --git a/src/include/parser/parse_param.h b/src/include/parser/parse_param.h index 18e608093c..d6f0b65649 100644 --- a/src/include/parser/parse_param.h +++ b/src/include/parser/parse_param.h @@ -15,9 +15,9 @@ #include "parser/parse_node.h" -extern void parse_fixed_parameters(ParseState *pstate, - Oid *paramTypes, int numParams); -extern void parse_variable_parameters(ParseState *pstate, +extern void setup_parse_fixed_parameters(ParseState *pstate, + const Oid *paramTypes, int numParams); +extern void setup_parse_variable_parameters(ParseState *pstate, Oid **paramTypes, int *numParams); extern void check_variable_parameters(ParseState *pstate, Query *query); extern bool query_contains_extern_params(Query *query); diff --git a/src/include/tcop/tcopprot.h b/src/include/tcop/tcopprot.h index 15a11bc3ff..00c20966ab 100644 --- a/src/include/tcop/tcopprot.h +++ b/src/include/tcop/tcopprot.h @@ -45,11 +45,11 @@ extern PGDLLIMPORT int log_statement; extern List *pg_parse_query(const char *query_string); extern List *pg_rewrite_query(Query *query); -extern List *pg_analyze_and_rewrite(RawStmt *parsetree, +extern List *pg_analyze_and_rewrite_fixedparams(RawStmt *parsetree, const char *query_string, - Oid *paramTypes, int numParams, + const Oid *paramTypes, int numParams, QueryEnvironment *queryEnv); -extern List *pg_analyze_and_rewrite_params(RawStmt *parsetree, +extern List *pg_analyze_and_rewrite_withcb(RawStmt *parsetree, const char *query_string, ParserSetupHook parserSetup, void *parserSetupArg, From 9240589798e02705dbe3e86549d064988c0f47d2 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Fri, 4 Mar 2022 13:23:58 -0500 Subject: [PATCH 087/108] Fix pg_regress to print the correct postmaster address on Windows. pg_regress reported "Unix socket" as the default location whenever HAVE_UNIX_SOCKETS is defined. However, that's not been accurate on Windows since 8f3ec75de. Update this logic to match what libpq actually does now. This is just cosmetic, but still it's potentially misleading. Back-patch to v13 where 8f3ec75de came in. Discussion: https://postgr.es/m/3894060.1646415641@sss.pgh.pa.us --- src/interfaces/libpq/fe-connect.c | 5 +++++ src/test/regress/pg_regress.c | 10 ++++++++-- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c index 1c5a2b43e9..cf554d389f 100644 --- a/src/interfaces/libpq/fe-connect.c +++ b/src/interfaces/libpq/fe-connect.c @@ -1115,6 +1115,11 @@ connectOptions2(PGconn *conn) { if (ch->host) free(ch->host); + + /* + * This bit selects the default host location. If you change + * this, see also pg_regress. 
+ */ #ifdef HAVE_UNIX_SOCKETS if (DEFAULT_PGSOCKET_DIR[0]) { diff --git a/src/test/regress/pg_regress.c b/src/test/regress/pg_regress.c index db8427dd9b..982801e029 100644 --- a/src/test/regress/pg_regress.c +++ b/src/test/regress/pg_regress.c @@ -746,10 +746,16 @@ initialize_environment(void) */ pghost = getenv("PGHOST"); pgport = getenv("PGPORT"); -#ifndef HAVE_UNIX_SOCKETS if (!pghost) - pghost = "localhost"; + { + /* Keep this bit in sync with libpq's default host location: */ +#ifdef HAVE_UNIX_SOCKETS + if (DEFAULT_PGSOCKET_DIR[0]) + /* do nothing, we'll print "Unix socket" below */ ; + else #endif + pghost = "localhost"; /* DefaultHost in fe-connect.c */ + } if (pghost && pgport) printf(_("(using postmaster on %s, port %s)\n"), pghost, pgport); From f2698ea02ca8a56f38935d2b300ac54936712558 Mon Sep 17 00:00:00 2001 From: Noah Misch Date: Fri, 4 Mar 2022 18:53:13 -0800 Subject: [PATCH 088/108] Introduce PG_TEST_TIMEOUT_DEFAULT for TAP suite non-elapsing timeouts. Slow hosts may avoid load-induced, spurious failures by setting environment variable PG_TEST_TIMEOUT_DEFAULT to some number of seconds greater than 180. Developers may see faster failures by setting that environment variable to some lesser number of seconds. In tests, write $PostgreSQL::Test::Utils::timeout_default wherever the convention has been to write 180. This change raises the default for some briefer timeouts. Back-patch to v10 (all supported versions). Discussion: https://postgr.es/m/20220218052842.GA3627003@rfd.leadboat.com --- contrib/amcheck/t/002_cic.pl | 3 +- contrib/amcheck/t/003_cic_2pc.pl | 14 +++++---- src/bin/pg_ctl/t/004_logrotate.pl | 4 +-- src/bin/pg_dump/t/002_pg_dump.pl | 3 +- src/bin/psql/t/010_tab_completion.pl | 6 ++-- src/bin/psql/t/020_cancel.pl | 5 ++-- src/bin/scripts/t/080_pg_isready.pl | 4 +-- src/test/perl/PostgreSQL/Test/Cluster.pm | 29 ++++++++++--------- src/test/perl/PostgreSQL/Test/Utils.pm | 8 +++-- src/test/perl/README | 6 ++++ src/test/recovery/t/003_recovery_targets.pl | 4 +-- src/test/recovery/t/006_logical_decoding.pl | 6 ++-- .../t/010_logical_decoding_timelines.pl | 4 +-- src/test/recovery/t/013_crash_restart.pl | 6 +--- src/test/recovery/t/017_shm.pl | 13 +++++---- src/test/recovery/t/019_replslot_limit.pl | 6 ++-- src/test/recovery/t/021_row_visibility.pl | 7 ++--- src/test/recovery/t/022_crash_temp_files.pl | 6 +--- src/test/recovery/t/024_archive_recovery.pl | 4 +-- src/test/subscription/t/015_stream.pl | 2 +- 20 files changed, 75 insertions(+), 65 deletions(-) diff --git a/contrib/amcheck/t/002_cic.pl b/contrib/amcheck/t/002_cic.pl index d604def0d0..b8e4ac7cf4 100644 --- a/contrib/amcheck/t/002_cic.pl +++ b/contrib/amcheck/t/002_cic.pl @@ -18,7 +18,8 @@ # $node = PostgreSQL::Test::Cluster->new('CIC_test'); $node->init; -$node->append_conf('postgresql.conf', 'lock_timeout = 180000'); +$node->append_conf('postgresql.conf', + 'lock_timeout = ' . 
(1000 * $PostgreSQL::Test::Utils::timeout_default)); $node->start; $node->safe_psql('postgres', q(CREATE EXTENSION amcheck)); $node->safe_psql('postgres', q(CREATE TABLE tbl(i int))); diff --git a/contrib/amcheck/t/003_cic_2pc.pl b/contrib/amcheck/t/003_cic_2pc.pl index f668ed3c40..e66ccd93f1 100644 --- a/contrib/amcheck/t/003_cic_2pc.pl +++ b/contrib/amcheck/t/003_cic_2pc.pl @@ -22,7 +22,8 @@ $node = PostgreSQL::Test::Cluster->new('CIC_2PC_test'); $node->init; $node->append_conf('postgresql.conf', 'max_prepared_transactions = 10'); -$node->append_conf('postgresql.conf', 'lock_timeout = 180000'); +$node->append_conf('postgresql.conf', + 'lock_timeout = ' . (1000 * $PostgreSQL::Test::Utils::timeout_default)); $node->start; $node->safe_psql('postgres', q(CREATE EXTENSION amcheck)); $node->safe_psql('postgres', q(CREATE TABLE tbl(i int))); @@ -38,7 +39,7 @@ my $main_in = ''; my $main_out = ''; -my $main_timer = IPC::Run::timeout(180); +my $main_timer = IPC::Run::timeout($PostgreSQL::Test::Utils::timeout_default); my $main_h = $node->background_psql('postgres', \$main_in, \$main_out, @@ -52,7 +53,7 @@ my $cic_in = ''; my $cic_out = ''; -my $cic_timer = IPC::Run::timeout(180); +my $cic_timer = IPC::Run::timeout($PostgreSQL::Test::Utils::timeout_default); my $cic_h = $node->background_psql('postgres', \$cic_in, \$cic_out, $cic_timer, on_error_stop => 1); @@ -113,9 +114,10 @@ )); $node->restart; -my $reindex_in = ''; -my $reindex_out = ''; -my $reindex_timer = IPC::Run::timeout(180); +my $reindex_in = ''; +my $reindex_out = ''; +my $reindex_timer = + IPC::Run::timeout($PostgreSQL::Test::Utils::timeout_default); my $reindex_h = $node->background_psql('postgres', \$reindex_in, \$reindex_out, $reindex_timer, on_error_stop => 1); diff --git a/src/bin/pg_ctl/t/004_logrotate.pl b/src/bin/pg_ctl/t/004_logrotate.pl index d290452100..d73ce034cd 100644 --- a/src/bin/pg_ctl/t/004_logrotate.pl +++ b/src/bin/pg_ctl/t/004_logrotate.pl @@ -39,7 +39,7 @@ sub check_log_pattern my $node = shift; my $lfname = fetch_file_name($logfiles, $format); - my $max_attempts = 180 * 10; + my $max_attempts = 10 * $PostgreSQL::Test::Utils::timeout_default; my $logcontents; for (my $attempts = 0; $attempts < $max_attempts; $attempts++) @@ -78,7 +78,7 @@ sub check_log_pattern $node->psql('postgres', 'SELECT 1/0'); # might need to retry if logging collector process is slow... -my $max_attempts = 180 * 10; +my $max_attempts = 10 * $PostgreSQL::Test::Utils::timeout_default; my $current_logfiles; for (my $attempts = 0; $attempts < $max_attempts; $attempts++) diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl index d9bc267f6d..3e55ff26f8 100644 --- a/src/bin/pg_dump/t/002_pg_dump.pl +++ b/src/bin/pg_dump/t/002_pg_dump.pl @@ -295,7 +295,8 @@ '--no-sync', "--file=$tempdir/only_dump_test_table.sql", '--table=dump_test.test_table', - '--lock-wait-timeout=1000000', + '--lock-wait-timeout=' + . 
(1000 * $PostgreSQL::Test::Utils::timeout_default), 'postgres', ], }, diff --git a/src/bin/psql/t/010_tab_completion.pl b/src/bin/psql/t/010_tab_completion.pl index 005961f34d..a54910680e 100644 --- a/src/bin/psql/t/010_tab_completion.pl +++ b/src/bin/psql/t/010_tab_completion.pl @@ -94,7 +94,7 @@ my $in = ''; my $out = ''; -my $timer = timer(5); +my $timer = timer($PostgreSQL::Test::Utils::timeout_default); my $h = $node->interactive_psql('postgres', \$in, \$out, $timer); @@ -111,7 +111,7 @@ sub check_completion # reset output collector $out = ""; # restart per-command timer - $timer->start(5); + $timer->start($PostgreSQL::Test::Utils::timeout_default); # send the data to be sent $in .= $send; # wait ... @@ -442,7 +442,7 @@ sub clear_line clear_query(); # send psql an explicit \q to shut it down, else pty won't close properly -$timer->start(5); +$timer->start($PostgreSQL::Test::Utils::timeout_default); $in .= "\\q\n"; finish $h or die "psql returned $?"; $timer->reset; diff --git a/src/bin/psql/t/020_cancel.pl b/src/bin/psql/t/020_cancel.pl index 3224f8e26a..d57d342952 100644 --- a/src/bin/psql/t/020_cancel.pl +++ b/src/bin/psql/t/020_cancel.pl @@ -46,12 +46,13 @@ my $psql_pid; until (-s "$tempdir/psql.pid" and ($psql_pid = PostgreSQL::Test::Utils::slurp_file("$tempdir/psql.pid")) =~ /^\d+\n/s) { - ($count++ < 180 * 100) or die "pid file did not appear"; + ($count++ < 100 * $PostgreSQL::Test::Utils::timeout_default) + or die "pid file did not appear"; usleep(10_000) } # Send sleep command and wait until the server has registered it - $stdin = "select pg_sleep(180);\n"; + $stdin = "select pg_sleep($PostgreSQL::Test::Utils::timeout_default);\n"; pump $h while length $stdin; $node->poll_query_until('postgres', q{SELECT (SELECT count(*) FROM pg_stat_activity WHERE query ~ '^select pg_sleep') > 0;}) or die "timed out"; diff --git a/src/bin/scripts/t/080_pg_isready.pl b/src/bin/scripts/t/080_pg_isready.pl index e8436dc7e8..c45ca6666f 100644 --- a/src/bin/scripts/t/080_pg_isready.pl +++ b/src/bin/scripts/t/080_pg_isready.pl @@ -18,8 +18,8 @@ $node->init; $node->start; -# use a long timeout for the benefit of very slow buildfarm machines -$node->command_ok([qw(pg_isready --timeout=60)], +$node->command_ok( + [ 'pg_isready', "--timeout=$PostgreSQL::Test::Utils::timeout_default" ], 'succeeds with server running'); done_testing(); diff --git a/src/test/perl/PostgreSQL/Test/Cluster.pm b/src/test/perl/PostgreSQL/Test/Cluster.pm index be05845248..4db52bc936 100644 --- a/src/test/perl/PostgreSQL/Test/Cluster.pm +++ b/src/test/perl/PostgreSQL/Test/Cluster.pm @@ -36,7 +36,8 @@ PostgreSQL::Test::Cluster - class representing PostgreSQL server instance my ($stdout, $stderr, $timed_out); my $cmdret = $node->psql('postgres', 'SELECT pg_sleep(600)', stdout => \$stdout, stderr => \$stderr, - timeout => 180, timed_out => \$timed_out, + timeout => $PostgreSQL::Test::Utils::timeout_default, + timed_out => \$timed_out, extra_params => ['--single-transaction'], on_error_die => 1) print "Sleep timed out" if $timed_out; @@ -1723,7 +1724,8 @@ e.g. my ($stdout, $stderr, $timed_out); my $cmdret = $node->psql('postgres', 'SELECT pg_sleep(600)', stdout => \$stdout, stderr => \$stderr, - timeout => 180, timed_out => \$timed_out, + timeout => $PostgreSQL::Test::Utils::timeout_default, + timed_out => \$timed_out, extra_params => ['--single-transaction']) will set $cmdret to undef and $timed_out to a true value. @@ -1897,7 +1899,8 @@ scalar reference. 
This allows the caller to act on other parts of the system while idling this backend. The specified timer object is attached to the harness, as well. It's caller's -responsibility to select the timeout length, and to restart the timer after +responsibility to set the timeout length (usually +$PostgreSQL::Test::Utils::timeout_default), and to restart the timer after each command if the timeout is per-command. psql is invoked in tuples-only unaligned mode with reading of B<.psqlrc> @@ -1985,9 +1988,10 @@ The process's stdin is sourced from the $stdin scalar reference, and its stdout and stderr go to the $stdout scalar reference. ptys are used so that psql thinks it's being called interactively. -The specified timer object is attached to the harness, as well. -It's caller's responsibility to select the timeout length, and to -restart the timer after each command if the timeout is per-command. +The specified timer object is attached to the harness, as well. It's caller's +responsibility to set the timeout length (usually +$PostgreSQL::Test::Utils::timeout_default), and to restart the timer after +each command if the timeout is per-command. psql is invoked in tuples-only unaligned mode with reading of B<.psqlrc> disabled. That may be overridden by passing extra psql parameters. @@ -2303,7 +2307,7 @@ sub connect_fails Run B<$query> repeatedly, until it returns the B<$expected> result ('t', or SQL boolean true, by default). Continues polling if B returns an error result. -Times out after 180 seconds. +Times out after $PostgreSQL::Test::Utils::timeout_default seconds. Returns 1 if successful, 0 if timed out. =cut @@ -2321,7 +2325,7 @@ sub poll_query_until '-d', $self->connstr($dbname) ]; my ($stdout, $stderr); - my $max_attempts = 180 * 10; + my $max_attempts = 10 * $PostgreSQL::Test::Utils::timeout_default; my $attempts = 0; while ($attempts < $max_attempts) @@ -2343,8 +2347,8 @@ sub poll_query_until $attempts++; } - # The query result didn't change in 180 seconds. Give up. Print the - # output from the last attempt, hopefully that's useful for debugging. + # Give up. Print the output from the last attempt, hopefully that's useful + # for debugging. diag qq(poll_query_until timed out executing this query: $query expecting this output: @@ -2657,7 +2661,7 @@ sub wait_for_slot_catchup Waits for the contents of the server log file, starting at the given offset, to match the supplied regular expression. Checks the entire log if no offset is -given. Times out after 180 seconds. +given. Times out after $PostgreSQL::Test::Utils::timeout_default seconds. If successful, returns the length of the entire log file, in bytes. @@ -2668,7 +2672,7 @@ sub wait_for_log my ($self, $regexp, $offset) = @_; $offset = 0 unless defined $offset; - my $max_attempts = 180 * 10; + my $max_attempts = 10 * $PostgreSQL::Test::Utils::timeout_default; my $attempts = 0; while ($attempts < $max_attempts) @@ -2683,7 +2687,6 @@ sub wait_for_log $attempts++; } - # The logs didn't match within 180 seconds. Give up. 
croak "timed out waiting for match: $regexp"; } diff --git a/src/test/perl/PostgreSQL/Test/Utils.pm b/src/test/perl/PostgreSQL/Test/Utils.pm index 46cd746796..15b314d1f8 100644 --- a/src/test/perl/PostgreSQL/Test/Utils.pm +++ b/src/test/perl/PostgreSQL/Test/Utils.pm @@ -91,8 +91,8 @@ our @EXPORT = qw( $use_unix_sockets ); -our ($windows_os, $is_msys2, $use_unix_sockets, $tmp_check, $log_path, - $test_logfile); +our ($windows_os, $is_msys2, $use_unix_sockets, $timeout_default, + $tmp_check, $log_path, $test_logfile); BEGIN { @@ -157,6 +157,10 @@ BEGIN # supported, but it can be overridden if desired. $use_unix_sockets = (!$windows_os || defined $ENV{PG_TEST_USE_UNIX_SOCKETS}); + + $timeout_default = $ENV{PG_TEST_TIMEOUT_DEFAULT}; + $timeout_default = 180 + if not defined $timeout_default or $timeout_default eq ''; } =pod diff --git a/src/test/perl/README b/src/test/perl/README index 0511c55e9a..4b160cce36 100644 --- a/src/test/perl/README +++ b/src/test/perl/README @@ -23,6 +23,12 @@ tmp_check/log/ to get more info. Files named 'regress_log_XXX' are log output from the perl test scripts themselves, and should be examined first. Other files are postmaster logs, and may be helpful as additional data. +The tests default to a timeout of 180 seconds for many individual operations. +Slow hosts may avoid load-induced, spurious failures by setting environment +variable PG_TEST_TIMEOUT_DEFAULT to some number of seconds greater than 180. +Developers may see faster failures by setting that environment variable to +some lesser number of seconds. + Data directories will also be left behind for analysis when a test fails; they are named according to the test filename. But if the environment variable PG_TEST_NOCLEAN is set, data directories will be retained diff --git a/src/test/recovery/t/003_recovery_targets.pl b/src/test/recovery/t/003_recovery_targets.pl index 25dd5ee7ec..e8e1a420bc 100644 --- a/src/test/recovery/t/003_recovery_targets.pl +++ b/src/test/recovery/t/003_recovery_targets.pl @@ -172,8 +172,8 @@ sub test_recovery_standby $node_standby->logfile, 'start' ]); -# wait up to 180s for postgres to terminate -foreach my $i (0 .. 1800) +# wait for postgres to terminate +foreach my $i (0 .. 10 * $PostgreSQL::Test::Utils::timeout_default) { last if !-f $node_standby->data_dir . 
'/postmaster.pid'; usleep(100_000); diff --git a/src/test/recovery/t/006_logical_decoding.pl b/src/test/recovery/t/006_logical_decoding.pl index fa6bd45332..9cec2792fc 100644 --- a/src/test/recovery/t/006_logical_decoding.pl +++ b/src/test/recovery/t/006_logical_decoding.pl @@ -107,7 +107,8 @@ ); my $stdout_recv = $node_primary->pg_recvlogical_upto( - 'postgres', 'test_slot', $endpos, 180, + 'postgres', 'test_slot', $endpos, + $PostgreSQL::Test::Utils::timeout_default, 'include-xids' => '0', 'skip-empty-xacts' => '1'); chomp($stdout_recv); @@ -119,7 +120,8 @@ ) or die "slot never became inactive"; $stdout_recv = $node_primary->pg_recvlogical_upto( - 'postgres', 'test_slot', $endpos, 180, + 'postgres', 'test_slot', $endpos, + $PostgreSQL::Test::Utils::timeout_default, 'include-xids' => '0', 'skip-empty-xacts' => '1'); chomp($stdout_recv); diff --git a/src/test/recovery/t/010_logical_decoding_timelines.pl b/src/test/recovery/t/010_logical_decoding_timelines.pl index 6e8b0b1b96..01ff31e61f 100644 --- a/src/test/recovery/t/010_logical_decoding_timelines.pl +++ b/src/test/recovery/t/010_logical_decoding_timelines.pl @@ -157,7 +157,7 @@ ($ret, $stdout, $stderr) = $node_replica->psql( 'postgres', "SELECT data FROM pg_logical_slot_peek_changes('before_basebackup', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');", - timeout => 180); + timeout => $PostgreSQL::Test::Utils::timeout_default); is($ret, 0, 'replay from slot before_basebackup succeeds'); my $final_expected_output_bb = q(BEGIN @@ -186,7 +186,7 @@ BEGIN $stdout = $node_replica->pg_recvlogical_upto( 'postgres', 'before_basebackup', - $endpos, 180, + $endpos, $PostgreSQL::Test::Utils::timeout_default, 'include-xids' => '0', 'skip-empty-xacts' => '1'); diff --git a/src/test/recovery/t/013_crash_restart.pl b/src/test/recovery/t/013_crash_restart.pl index 3976e339c0..44c4c62cb7 100644 --- a/src/test/recovery/t/013_crash_restart.pl +++ b/src/test/recovery/t/013_crash_restart.pl @@ -18,11 +18,7 @@ use Test::More; use Config; -# To avoid hanging while expecting some specific input from a psql -# instance being driven by us, add a timeout high enough that it -# should never trigger even on very slow machines, unless something -# is really wrong. -my $psql_timeout = IPC::Run::timer(60); +my $psql_timeout = IPC::Run::timer($PostgreSQL::Test::Utils::timeout_default); my $node = PostgreSQL::Test::Cluster->new('primary'); $node->init(allows_streaming => 1); diff --git a/src/test/recovery/t/017_shm.pl b/src/test/recovery/t/017_shm.pl index 88f9e2b9cd..713e6f068d 100644 --- a/src/test/recovery/t/017_shm.pl +++ b/src/test/recovery/t/017_shm.pl @@ -132,7 +132,7 @@ sub log_ipcs \$stdout, '2>', \$stderr, - IPC::Run::timeout(900)); # five times the poll_query_until timeout + IPC::Run::timeout(5 * $PostgreSQL::Test::Utils::timeout_default)); ok( $gnat->poll_query_until( 'postgres', "SELECT 1 FROM pg_stat_activity WHERE query = '$slow_query'", '1'), @@ -143,10 +143,11 @@ sub log_ipcs unlink($gnat->data_dir . '/postmaster.pid'); $gnat->rotate_logfile; # on Windows, can't open old log for writing log_ipcs(); -# Reject ordinary startup. Retry for the same reasons poll_start() does. +# Reject ordinary startup. Retry for the same reasons poll_start() does, +# every 0.1s for at least $PostgreSQL::Test::Utils::timeout_default seconds. my $pre_existing_msg = qr/pre-existing shared memory block/; { - my $max_attempts = 180 * 10; # Retry every 0.1s for at least 180s. 
+ my $max_attempts = 10 * $PostgreSQL::Test::Utils::timeout_default; my $attempts = 0; while ($attempts < $max_attempts) { @@ -193,7 +194,7 @@ sub poll_start { my ($node) = @_; - my $max_attempts = 180 * 10; + my $max_attempts = 10 * $PostgreSQL::Test::Utils::timeout_default; my $attempts = 0; while ($attempts < $max_attempts) @@ -209,8 +210,8 @@ sub poll_start $attempts++; } - # No success within 180 seconds. Try one last time without fail_ok, which - # will BAIL_OUT unless it succeeds. + # Try one last time without fail_ok, which will BAIL_OUT unless it + # succeeds. $node->start && return 1; return 0; } diff --git a/src/test/recovery/t/019_replslot_limit.pl b/src/test/recovery/t/019_replslot_limit.pl index 9bb71b62c0..f62b7b32f6 100644 --- a/src/test/recovery/t/019_replslot_limit.pl +++ b/src/test/recovery/t/019_replslot_limit.pl @@ -291,7 +291,7 @@ SELECT pg_switch_wal(); CHECKPOINT; SELECT 'finished';", - timeout => '60')); + timeout => $PostgreSQL::Test::Utils::timeout_default)); is($result[1], 'finished', 'check if checkpoint command is not blocked'); $node_primary2->stop; @@ -362,7 +362,7 @@ kill 'STOP', $senderpid, $receiverpid; advance_wal($node_primary3, 2); -my $max_attempts = 180; +my $max_attempts = $PostgreSQL::Test::Utils::timeout_default; while ($max_attempts-- >= 0) { if (find_in_log( @@ -385,7 +385,7 @@ "lost") or die "timed out waiting for slot to be lost"; -$max_attempts = 180; +$max_attempts = $PostgreSQL::Test::Utils::timeout_default; while ($max_attempts-- >= 0) { if (find_in_log( diff --git a/src/test/recovery/t/021_row_visibility.pl b/src/test/recovery/t/021_row_visibility.pl index 75cd487451..55d8c31b56 100644 --- a/src/test/recovery/t/021_row_visibility.pl +++ b/src/test/recovery/t/021_row_visibility.pl @@ -32,11 +32,8 @@ $node_standby->append_conf('postgresql.conf', 'max_prepared_transactions=10'); $node_standby->start; -# To avoid hanging while expecting some specific input from a psql -# instance being driven by us, add a timeout high enough that it -# should never trigger even on very slow machines, unless something -# is really wrong. -my $psql_timeout = IPC::Run::timer(300); +my $psql_timeout = + IPC::Run::timer(2 * $PostgreSQL::Test::Utils::timeout_default); # One psql to primary and standby each, for all queries. That allows # to check uncommitted changes being replicated and such. diff --git a/src/test/recovery/t/022_crash_temp_files.pl b/src/test/recovery/t/022_crash_temp_files.pl index 49dd86e848..36906b4aca 100644 --- a/src/test/recovery/t/022_crash_temp_files.pl +++ b/src/test/recovery/t/022_crash_temp_files.pl @@ -15,11 +15,7 @@ exit; } -# To avoid hanging while expecting some specific input from a psql -# instance being driven by us, add a timeout high enough that it -# should never trigger even on very slow machines, unless something -# is really wrong. -my $psql_timeout = IPC::Run::timer(60); +my $psql_timeout = IPC::Run::timer($PostgreSQL::Test::Utils::timeout_default); my $node = PostgreSQL::Test::Cluster->new('node_crash'); $node->init(); diff --git a/src/test/recovery/t/024_archive_recovery.pl b/src/test/recovery/t/024_archive_recovery.pl index c10bb5bf70..ce347e0cd1 100644 --- a/src/test/recovery/t/024_archive_recovery.pl +++ b/src/test/recovery/t/024_archive_recovery.pl @@ -81,8 +81,8 @@ sub test_recovery_wal_level_minimal $recovery_node->logfile, 'start' ]); - # Wait up to 180s for postgres to terminate - foreach my $i (0 .. 1800) + # wait for postgres to terminate + foreach my $i (0 .. 
10 * $PostgreSQL::Test::Utils::timeout_default) { last if !-f $recovery_node->data_dir . '/postmaster.pid'; usleep(100_000); diff --git a/src/test/subscription/t/015_stream.pl b/src/test/subscription/t/015_stream.pl index 9f221fc78c..6561b189de 100644 --- a/src/test/subscription/t/015_stream.pl +++ b/src/test/subscription/t/015_stream.pl @@ -58,7 +58,7 @@ my $in = ''; my $out = ''; -my $timer = IPC::Run::timeout(180); +my $timer = IPC::Run::timeout($PostgreSQL::Test::Utils::timeout_default); my $h = $node_publisher->background_psql('postgres', \$in, \$out, $timer, on_error_stop => 0); From 766075105c21442418359221e0a0da43040b273c Mon Sep 17 00:00:00 2001 From: Noah Misch Date: Fri, 4 Mar 2022 18:53:13 -0800 Subject: [PATCH 089/108] Use PG_TEST_TIMEOUT_DEFAULT for pg_regress suite non-elapsing timeouts. Currently, only contrib/test_decoding has this property. Use \getenv to load the timeout value. Discussion: https://postgr.es/m/20220218052842.GA3627003@rfd.leadboat.com --- contrib/test_decoding/expected/twophase.out | 5 ++++- contrib/test_decoding/sql/twophase.sql | 5 ++++- src/test/regress/expected/stats.out | 5 ++++- src/test/regress/sql/stats.sql | 5 ++++- 4 files changed, 16 insertions(+), 4 deletions(-) diff --git a/contrib/test_decoding/expected/twophase.out b/contrib/test_decoding/expected/twophase.out index e5e0f96896..e89dc74a5e 100644 --- a/contrib/test_decoding/expected/twophase.out +++ b/contrib/test_decoding/expected/twophase.out @@ -137,7 +137,10 @@ WHERE locktype = 'relation' (3 rows) -- The above CLUSTER command shouldn't cause a timeout on 2pc decoding. -SET statement_timeout = '180s'; +\set env_timeout '' +\getenv env_timeout PG_TEST_TIMEOUT_DEFAULT +SELECT COALESCE(NULLIF(:'env_timeout', ''), '180') || 's' AS timeout \gset +SET statement_timeout = :'timeout'; SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data --------------------------------------------------------------------------- diff --git a/contrib/test_decoding/sql/twophase.sql b/contrib/test_decoding/sql/twophase.sql index 05f18e8494..aff5114eb1 100644 --- a/contrib/test_decoding/sql/twophase.sql +++ b/contrib/test_decoding/sql/twophase.sql @@ -69,7 +69,10 @@ FROM pg_locks WHERE locktype = 'relation' AND relation = 'test_prepared1'::regclass; -- The above CLUSTER command shouldn't cause a timeout on 2pc decoding. -SET statement_timeout = '180s'; +\set env_timeout '' +\getenv env_timeout PG_TEST_TIMEOUT_DEFAULT +SELECT COALESCE(NULLIF(:'env_timeout', ''), '180') || 's' AS timeout \gset +SET statement_timeout = :'timeout'; SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); RESET statement_timeout; COMMIT PREPARED 'test_prepared_lock'; diff --git a/src/test/regress/expected/stats.out b/src/test/regress/expected/stats.out index 3e9ab0915f..b7416c8f8f 100644 --- a/src/test/regress/expected/stats.out +++ b/src/test/regress/expected/stats.out @@ -34,7 +34,10 @@ declare updated3 bool; updated4 bool; begin - -- we don't want to wait forever; loop will exit after 30 seconds + -- We don't want to wait forever. No timeout suffices if the OS drops our + -- stats traffic because an earlier test file left a full UDP buffer. + -- Hence, don't use PG_TEST_TIMEOUT_DEFAULT, which may be large for + -- can't-happen timeouts. Exit after 30 seconds. for i in 1 .. 
300 loop -- With parallel query, the seqscan and indexscan on tenk2 might be done diff --git a/src/test/regress/sql/stats.sql b/src/test/regress/sql/stats.sql index 82e6f24c39..dbc2dd28b6 100644 --- a/src/test/regress/sql/stats.sql +++ b/src/test/regress/sql/stats.sql @@ -33,7 +33,10 @@ declare updated3 bool; updated4 bool; begin - -- we don't want to wait forever; loop will exit after 30 seconds + -- We don't want to wait forever. No timeout suffices if the OS drops our + -- stats traffic because an earlier test file left a full UDP buffer. + -- Hence, don't use PG_TEST_TIMEOUT_DEFAULT, which may be large for + -- can't-happen timeouts. Exit after 30 seconds. for i in 1 .. 300 loop -- With parallel query, the seqscan and indexscan on tenk2 might be done From 770011e3f39f21f2095d3a044b72460c4efac345 Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Fri, 4 Mar 2022 21:58:57 -0800 Subject: [PATCH 090/108] ci: windows: Work around cirrus-ci bug causing test failures. Will be reverted once fixed on cirrus's side. See also https://github.com/cirruslabs/cirrus-ci-agent/issues/218 Discussion: https://postgr.es/m/CA+hUKGKx7k14n2nAALSvv6M_AB6oHasNBA65X6Dvo8hwfi9y0A@mail.gmail.com --- .cirrus.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.cirrus.yml b/.cirrus.yml index d10b0a82f9..40854046d6 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -370,6 +370,11 @@ task: # "suites". T_C: "\"C:/Program Files/Git/usr/bin/timeout.exe\" -v -k60s 15m" + # Temporarily work around cirrus-ci bug causing processes started from a + # script to be killed, even if intentionally running in background. See + # https://github.com/cirruslabs/cirrus-ci-agent/issues/218 + # https://postgr.es/m/CA%2BhUKGKx7k14n2nAALSvv6M_AB6oHasNBA65X6Dvo8hwfi9y0A%40mail.gmail.com + CIRRUS_AGENT_VERSION: 1.73.2 only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\nci-os-only:.*' || $CIRRUS_CHANGE_MESSAGE =~ '.*\nci-os-only:[^\n]*windows.*' From 9e98583898c347e007958c8a09911be2ea4acfb9 Mon Sep 17 00:00:00 2001 From: Michael Paquier Date: Mon, 7 Mar 2022 10:26:29 +0900 Subject: [PATCH 091/108] Create routine able to set single-call SRFs for Materialize mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Set-returning functions that use the Materialize mode, creating a tuplestore to include all the tuples returned in a set rather than doing so in multiple calls, use roughly the same set of steps to prepare ReturnSetInfo for this job: - Check if ReturnSetInfo supports returning a tuplestore and if the materialize mode is enabled. - Create a tuplestore for all the tuples that are part of the returned set in the per-query memory context, stored in ReturnSetInfo->setResult. - Build a tuple descriptor mostly from get_call_result_type(), then store it in ReturnSetInfo->setDesc. Note that there are some cases where the SRF's tuple descriptor has to be the one specified by the function caller. This refactoring is done so that there are (well, should be) no behavior changes in any of the in-core functions refactored, and the centralized function that checks and sets up the function's ReturnSetInfo can be controlled with a set of bits32 options. Two of them prove to be necessary now: - SRF_SINGLE_USE_EXPECTED to use expectedDesc as tuple descriptor, as expected by the function's caller. - SRF_SINGLE_BLESS to validate the tuple descriptor for the SRF. The same initialization pattern is simplified in 28 places per my count as of src/backend/, shaving up to ~900 lines of code.
These mostly come from the removal of the per-query initializations and the sanity checks now grouped in a single location. There are more locations that could be simplified in contrib/, that are left for a follow-up cleanup. fcc2817, 07daca5 and d61a361 have prepared the areas of the code related to this change, to ease this refactoring. Author: Melanie Plageman, Michael Paquier Reviewed-by: Álvaro Herrera, Justin Pryzby Discussion: https://postgr.es/m/CAAKRu_azyd1Z3W_r7Ou4sorTjRCs+PxeHw1CWJeXKofkE6TuZg@mail.gmail.com --- src/backend/commands/event_trigger.c | 62 +------- src/backend/commands/extension.c | 93 ++---------- src/backend/commands/prepare.c | 34 +---- src/backend/foreign/foreign.c | 34 +---- src/backend/libpq/hba.c | 46 ++---- src/backend/replication/logical/launcher.c | 31 +--- .../replication/logical/logicalfuncs.c | 21 +-- src/backend/replication/logical/origin.c | 32 +--- src/backend/replication/slotfuncs.c | 31 +--- src/backend/replication/walsender.c | 31 +--- src/backend/storage/ipc/shmem.c | 35 +---- src/backend/utils/adt/datetime.c | 30 +--- src/backend/utils/adt/genfile.c | 61 +------- src/backend/utils/adt/jsonfuncs.c | 141 ++---------------- src/backend/utils/adt/mcxtfuncs.c | 33 +--- src/backend/utils/adt/misc.c | 33 +--- src/backend/utils/adt/pgstatfuncs.c | 91 +---------- src/backend/utils/adt/varlena.c | 27 +--- src/backend/utils/fmgr/README | 4 + src/backend/utils/fmgr/funcapi.c | 69 +++++++++ src/backend/utils/misc/guc.c | 31 +--- src/backend/utils/misc/pg_config.c | 29 +--- src/backend/utils/mmgr/portalmem.c | 37 +---- src/include/funcapi.h | 12 +- 24 files changed, 176 insertions(+), 872 deletions(-) diff --git a/src/backend/commands/event_trigger.c b/src/backend/commands/event_trigger.c index 1e8587502e..3c3fc2515b 100644 --- a/src/backend/commands/event_trigger.c +++ b/src/backend/commands/event_trigger.c @@ -1290,10 +1290,6 @@ Datum pg_event_trigger_dropped_objects(PG_FUNCTION_ARGS) { ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - TupleDesc tupdesc; - Tuplestorestate *tupstore; - MemoryContext per_query_ctx; - MemoryContext oldcontext; slist_iter iter; /* @@ -1306,30 +1302,8 @@ pg_event_trigger_dropped_objects(PG_FUNCTION_ARGS) errmsg("%s can only be called in a sql_drop event trigger function", "pg_event_trigger_dropped_objects()"))); - /* check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - - /* Build a tuple descriptor for our result type */ - if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) - elog(ERROR, "return type must be a row type"); - /* Build tuplestore to hold the result rows */ - per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; - oldcontext = MemoryContextSwitchTo(per_query_ctx); - - tupstore = tuplestore_begin_heap(true, false, work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - - MemoryContextSwitchTo(oldcontext); + SetSingleFuncCall(fcinfo, 0); slist_foreach(iter, &(currentEventTriggerState->SQLDropList)) { @@ -1398,7 +1372,8 @@ pg_event_trigger_dropped_objects(PG_FUNCTION_ARGS) nulls[i++] = true; } - tuplestore_putvalues(tupstore, tupdesc, 
values, nulls); + tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, + values, nulls); } return (Datum) 0; @@ -1846,10 +1821,6 @@ Datum pg_event_trigger_ddl_commands(PG_FUNCTION_ARGS) { ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - TupleDesc tupdesc; - Tuplestorestate *tupstore; - MemoryContext per_query_ctx; - MemoryContext oldcontext; ListCell *lc; /* @@ -1861,30 +1832,8 @@ pg_event_trigger_ddl_commands(PG_FUNCTION_ARGS) errmsg("%s can only be called in an event trigger function", "pg_event_trigger_ddl_commands()"))); - /* check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - - /* Build a tuple descriptor for our result type */ - if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) - elog(ERROR, "return type must be a row type"); - /* Build tuplestore to hold the result rows */ - per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; - oldcontext = MemoryContextSwitchTo(per_query_ctx); - - tupstore = tuplestore_begin_heap(true, false, work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - - MemoryContextSwitchTo(oldcontext); + SetSingleFuncCall(fcinfo, 0); foreach(lc, currentEventTriggerState->commandList) { @@ -2055,7 +2004,8 @@ pg_event_trigger_ddl_commands(PG_FUNCTION_ARGS) break; } - tuplestore_putvalues(tupstore, tupdesc, values, nulls); + tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, + values, nulls); } PG_RETURN_VOID(); diff --git a/src/backend/commands/extension.c b/src/backend/commands/extension.c index 42503ef454..1013790dbb 100644 --- a/src/backend/commands/extension.c +++ b/src/backend/commands/extension.c @@ -1932,38 +1932,12 @@ Datum pg_available_extensions(PG_FUNCTION_ARGS) { ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - TupleDesc tupdesc; - Tuplestorestate *tupstore; - MemoryContext per_query_ctx; - MemoryContext oldcontext; char *location; DIR *dir; struct dirent *de; - /* check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - - /* Build a tuple descriptor for our result type */ - if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) - elog(ERROR, "return type must be a row type"); - /* Build tuplestore to hold the result rows */ - per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; - oldcontext = MemoryContextSwitchTo(per_query_ctx); - - tupstore = tuplestore_begin_heap(true, false, work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - - MemoryContextSwitchTo(oldcontext); + SetSingleFuncCall(fcinfo, 0); location = get_extension_control_directory(); dir = AllocateDir(location); @@ -2015,7 +1989,8 @@ pg_available_extensions(PG_FUNCTION_ARGS) else values[2] = 
CStringGetTextDatum(control->comment); - tuplestore_putvalues(tupstore, tupdesc, values, nulls); + tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, + values, nulls); } FreeDir(dir); @@ -2037,38 +2012,12 @@ Datum pg_available_extension_versions(PG_FUNCTION_ARGS) { ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - TupleDesc tupdesc; - Tuplestorestate *tupstore; - MemoryContext per_query_ctx; - MemoryContext oldcontext; char *location; DIR *dir; struct dirent *de; - /* check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - - /* Build a tuple descriptor for our result type */ - if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) - elog(ERROR, "return type must be a row type"); - /* Build tuplestore to hold the result rows */ - per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; - oldcontext = MemoryContextSwitchTo(per_query_ctx); - - tupstore = tuplestore_begin_heap(true, false, work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - - MemoryContextSwitchTo(oldcontext); + SetSingleFuncCall(fcinfo, 0); location = get_extension_control_directory(); dir = AllocateDir(location); @@ -2103,7 +2052,8 @@ pg_available_extension_versions(PG_FUNCTION_ARGS) control = read_extension_control_file(extname); /* scan extension's script directory for install scripts */ - get_available_versions_for_extension(control, tupstore, tupdesc); + get_available_versions_for_extension(control, rsinfo->setResult, + rsinfo->setDesc); } FreeDir(dir); @@ -2316,10 +2266,6 @@ pg_extension_update_paths(PG_FUNCTION_ARGS) { Name extname = PG_GETARG_NAME(0); ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - TupleDesc tupdesc; - Tuplestorestate *tupstore; - MemoryContext per_query_ctx; - MemoryContext oldcontext; List *evi_list; ExtensionControlFile *control; ListCell *lc1; @@ -2327,30 +2273,8 @@ pg_extension_update_paths(PG_FUNCTION_ARGS) /* Check extension name validity before any filesystem access */ check_valid_extension_name(NameStr(*extname)); - /* check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - - /* Build a tuple descriptor for our result type */ - if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) - elog(ERROR, "return type must be a row type"); - /* Build tuplestore to hold the result rows */ - per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; - oldcontext = MemoryContextSwitchTo(per_query_ctx); - - tupstore = tuplestore_begin_heap(true, false, work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - - MemoryContextSwitchTo(oldcontext); + SetSingleFuncCall(fcinfo, 0); /* Read the extension's control file */ control = 
read_extension_control_file(NameStr(*extname)); @@ -2407,7 +2331,8 @@ pg_extension_update_paths(PG_FUNCTION_ARGS) pfree(pathbuf.data); } - tuplestore_putvalues(tupstore, tupdesc, values, nulls); + tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, + values, nulls); } } diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c index dce30aed6c..d2d8ee120c 100644 --- a/src/backend/commands/prepare.c +++ b/src/backend/commands/prepare.c @@ -702,41 +702,12 @@ Datum pg_prepared_statement(PG_FUNCTION_ARGS) { ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - TupleDesc tupdesc; - Tuplestorestate *tupstore; - MemoryContext per_query_ctx; - MemoryContext oldcontext; - - /* check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - - if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) - elog(ERROR, "return type must be a row type"); - - /* need to build tuplestore in query context */ - per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; - oldcontext = MemoryContextSwitchTo(per_query_ctx); /* * We put all the tuples into a tuplestore in one scan of the hashtable. * This avoids any issue of the hashtable possibly changing between calls. */ - tupstore = - tuplestore_begin_heap(rsinfo->allowedModes & SFRM_Materialize_Random, - false, work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - - /* generate junk in short-term context */ - MemoryContextSwitchTo(oldcontext); + SetSingleFuncCall(fcinfo, 0); /* hash table might be uninitialized */ if (prepared_queries) @@ -761,7 +732,8 @@ pg_prepared_statement(PG_FUNCTION_ARGS) values[5] = Int64GetDatumFast(prep_stmt->plansource->num_generic_plans); values[6] = Int64GetDatumFast(prep_stmt->plansource->num_custom_plans); - tuplestore_putvalues(tupstore, tupdesc, values, nulls); + tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, + values, nulls); } } diff --git a/src/backend/foreign/foreign.c b/src/backend/foreign/foreign.c index c3406c3b9d..cf222fc3e9 100644 --- a/src/backend/foreign/foreign.c +++ b/src/backend/foreign/foreign.c @@ -20,6 +20,7 @@ #include "catalog/pg_user_mapping.h" #include "foreign/fdwapi.h" #include "foreign/foreign.h" +#include "funcapi.h" #include "lib/stringinfo.h" #include "miscadmin.h" #include "utils/builtins.h" @@ -510,38 +511,12 @@ pg_options_to_table(PG_FUNCTION_ARGS) ListCell *cell; List *options; ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - TupleDesc tupdesc; - Tuplestorestate *tupstore; - MemoryContext per_query_ctx; - MemoryContext oldcontext; - - /* check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize) || - rsinfo->expectedDesc == NULL) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); options = untransformRelOptions(array); rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - 
per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; - oldcontext = MemoryContextSwitchTo(per_query_ctx); - - /* - * Now prepare the result set. - */ - tupdesc = CreateTupleDescCopy(rsinfo->expectedDesc); - tupstore = tuplestore_begin_heap(true, false, work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - - MemoryContextSwitchTo(oldcontext); + /* prepare the result set */ + SetSingleFuncCall(fcinfo, SRF_SINGLE_USE_EXPECTED); foreach(cell, options) { @@ -561,7 +536,8 @@ pg_options_to_table(PG_FUNCTION_ARGS) values[1] = (Datum) 0; nulls[1] = true; } - tuplestore_putvalues(tupstore, tupdesc, values, nulls); + tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, + values, nulls); } return (Datum) 0; diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c index d84a40b726..90953c38f3 100644 --- a/src/backend/libpq/hba.c +++ b/src/backend/libpq/hba.c @@ -1685,8 +1685,8 @@ parse_hba_line(TokenizedLine *tok_line, int elevel) if (parsedline->auth_method == uaCert) { /* - * For auth method cert, client certificate validation is mandatory, and it implies - * the level of verify-full. + * For auth method cert, client certificate validation is mandatory, + * and it implies the level of verify-full. */ parsedline->clientcert = clientCertFull; } @@ -2703,47 +2703,19 @@ fill_hba_view(Tuplestorestate *tuple_store, TupleDesc tupdesc) Datum pg_hba_file_rules(PG_FUNCTION_ARGS) { - Tuplestorestate *tuple_store; - TupleDesc tupdesc; - MemoryContext old_cxt; ReturnSetInfo *rsi; /* - * We must use the Materialize mode to be safe against HBA file changes - * while the cursor is open. It's also more efficient than having to look - * up our current position in the parsed list every time. + * Build tuplestore to hold the result rows. We must use the Materialize + * mode to be safe against HBA file changes while the cursor is open. + * It's also more efficient than having to look up our current position in + * the parsed list every time. 
*/ - rsi = (ReturnSetInfo *) fcinfo->resultinfo; - - /* Check to see if caller supports us returning a tuplestore */ - if (rsi == NULL || !IsA(rsi, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsi->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - - rsi->returnMode = SFRM_Materialize; - - /* Build a tuple descriptor for our result type */ - if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) - elog(ERROR, "return type must be a row type"); - - /* Build tuplestore to hold the result rows */ - old_cxt = MemoryContextSwitchTo(rsi->econtext->ecxt_per_query_memory); - - tuple_store = - tuplestore_begin_heap(rsi->allowedModes & SFRM_Materialize_Random, - false, work_mem); - rsi->setDesc = tupdesc; - rsi->setResult = tuple_store; - - MemoryContextSwitchTo(old_cxt); + SetSingleFuncCall(fcinfo, 0); /* Fill the tuplestore */ - fill_hba_view(tuple_store, tupdesc); + rsi = (ReturnSetInfo *) fcinfo->resultinfo; + fill_hba_view(rsi->setResult, rsi->setDesc); PG_RETURN_NULL(); } diff --git a/src/backend/replication/logical/launcher.c b/src/backend/replication/logical/launcher.c index 5a68d6dead..6f25b2c2ad 100644 --- a/src/backend/replication/logical/launcher.c +++ b/src/backend/replication/logical/launcher.c @@ -930,34 +930,8 @@ pg_stat_get_subscription(PG_FUNCTION_ARGS) Oid subid = PG_ARGISNULL(0) ? InvalidOid : PG_GETARG_OID(0); int i; ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - TupleDesc tupdesc; - Tuplestorestate *tupstore; - MemoryContext per_query_ctx; - MemoryContext oldcontext; - /* check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - - /* Build a tuple descriptor for our result type */ - if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) - elog(ERROR, "return type must be a row type"); - - per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; - oldcontext = MemoryContextSwitchTo(per_query_ctx); - - tupstore = tuplestore_begin_heap(true, false, work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - - MemoryContextSwitchTo(oldcontext); + SetSingleFuncCall(fcinfo, 0); /* Make sure we get consistent view of the workers. 
*/ LWLockAcquire(LogicalRepWorkerLock, LW_SHARED); @@ -1010,7 +984,8 @@ pg_stat_get_subscription(PG_FUNCTION_ARGS) else values[7] = TimestampTzGetDatum(worker.reply_time); - tuplestore_putvalues(tupstore, tupdesc, values, nulls); + tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, + values, nulls); /* * If only a single subscription was requested, and we found it, diff --git a/src/backend/replication/logical/logicalfuncs.c b/src/backend/replication/logical/logicalfuncs.c index 3bd770a3ba..6058d36e0d 100644 --- a/src/backend/replication/logical/logicalfuncs.c +++ b/src/backend/replication/logical/logicalfuncs.c @@ -142,25 +142,11 @@ pg_logical_slot_get_changes_guts(FunctionCallInfo fcinfo, bool confirm, bool bin errmsg("options array must not be null"))); arr = PG_GETARG_ARRAYTYPE_P(3); - /* check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - /* state to write output to */ p = palloc0(sizeof(DecodingOutputState)); p->binary_output = binary; - /* Build a tuple descriptor for our result type */ - if (get_call_result_type(fcinfo, NULL, &p->tupdesc) != TYPEFUNC_COMPOSITE) - elog(ERROR, "return type must be a row type"); - per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; oldcontext = MemoryContextSwitchTo(per_query_ctx); @@ -203,10 +189,9 @@ pg_logical_slot_get_changes_guts(FunctionCallInfo fcinfo, bool confirm, bool bin } } - p->tupstore = tuplestore_begin_heap(true, false, work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = p->tupstore; - rsinfo->setDesc = p->tupdesc; + SetSingleFuncCall(fcinfo, 0); + p->tupstore = rsinfo->setResult; + p->tupdesc = rsinfo->setDesc; /* * Compute the current end-of-wal. 
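
The body of the new funcapi.c helper is not itself shown in this excerpt, but the commit message and the converted call sites pin down what it must do. The following is a rough reconstruction, not the committed code: the local variable names, exact error texts, and ordering of the flag checks are guesses, while each individual step is taken from the boilerplate being deleted above.

#include "postgres.h"
#include "funcapi.h"
#include "miscadmin.h"			/* work_mem */
#include "nodes/execnodes.h"	/* ReturnSetInfo, SFRM_* */
#include "utils/tuplestore.h"

void
SetSingleFuncCall(FunctionCallInfo fcinfo, bits32 flags)
{
	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
	MemoryContext oldcontext;
	TupleDesc	tupdesc;
	bool		random_access;

	/* check to see if caller supports us returning a tuplestore */
	if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("set-valued function called in context that cannot accept a set")));
	if (!(rsinfo->allowedModes & SFRM_Materialize) ||
		((flags & SRF_SINGLE_USE_EXPECTED) && rsinfo->expectedDesc == NULL))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("materialize mode required, but it is not allowed in this context")));

	/* tuple descriptor and tuplestore must live in the per-query context */
	oldcontext = MemoryContextSwitchTo(rsinfo->econtext->ecxt_per_query_memory);

	if (flags & SRF_SINGLE_USE_EXPECTED)
	{
		/* use the tuple descriptor the function's caller expects */
		tupdesc = CreateTupleDescCopy(rsinfo->expectedDesc);
	}
	else
	{
		/* build a tuple descriptor for our result type */
		if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
			elog(ERROR, "return type must be a row type");
	}

	/* if requested, validate ("bless") the descriptor */
	if (flags & SRF_SINGLE_BLESS)
		BlessTupleDesc(tupdesc);

	random_access = (rsinfo->allowedModes & SFRM_Materialize_Random) != 0;

	rsinfo->returnMode = SFRM_Materialize;
	rsinfo->setDesc = tupdesc;
	rsinfo->setResult = tuplestore_begin_heap(random_access, false, work_mem);

	MemoryContextSwitchTo(oldcontext);
}

Callers that need direct access to the tuplestore afterwards, such as pg_logical_slot_get_changes_guts() above, simply read it back from rsinfo->setResult and rsinfo->setDesc once the routine returns.
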
diff --git a/src/backend/replication/logical/origin.c b/src/backend/replication/logical/origin.c index 76055a8a03..0e38eff0f0 100644 --- a/src/backend/replication/logical/origin.c +++ b/src/backend/replication/logical/origin.c @@ -1482,40 +1482,13 @@ Datum pg_show_replication_origin_status(PG_FUNCTION_ARGS) { ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - TupleDesc tupdesc; - Tuplestorestate *tupstore; - MemoryContext per_query_ctx; - MemoryContext oldcontext; int i; #define REPLICATION_ORIGIN_PROGRESS_COLS 4 /* we want to return 0 rows if slot is set to zero */ replorigin_check_prerequisites(false, true); - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) - elog(ERROR, "return type must be a row type"); - - if (tupdesc->natts != REPLICATION_ORIGIN_PROGRESS_COLS) - elog(ERROR, "wrong function definition"); - - per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; - oldcontext = MemoryContextSwitchTo(per_query_ctx); - - tupstore = tuplestore_begin_heap(true, false, work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - - MemoryContextSwitchTo(oldcontext); - + SetSingleFuncCall(fcinfo, 0); /* prevent slots from being concurrently dropped */ LWLockAcquire(ReplicationOriginLock, LW_SHARED); @@ -1565,7 +1538,8 @@ pg_show_replication_origin_status(PG_FUNCTION_ARGS) LWLockRelease(&state->lock); - tuplestore_putvalues(tupstore, tupdesc, values, nulls); + tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, + values, nulls); } LWLockRelease(ReplicationOriginLock); diff --git a/src/backend/replication/slotfuncs.c b/src/backend/replication/slotfuncs.c index 886899afd2..ca945994ef 100644 --- a/src/backend/replication/slotfuncs.c +++ b/src/backend/replication/slotfuncs.c @@ -233,42 +233,16 @@ pg_get_replication_slots(PG_FUNCTION_ARGS) { #define PG_GET_REPLICATION_SLOTS_COLS 14 ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - TupleDesc tupdesc; - Tuplestorestate *tupstore; - MemoryContext per_query_ctx; - MemoryContext oldcontext; XLogRecPtr currlsn; int slotno; - /* check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - - /* Build a tuple descriptor for our result type */ - if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) - elog(ERROR, "return type must be a row type"); - /* * We don't require any special permission to see this function's data * because nothing should be sensitive. The most critical being the slot * name, which shouldn't contain anything particularly sensitive. 
*/ - per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; - oldcontext = MemoryContextSwitchTo(per_query_ctx); - - tupstore = tuplestore_begin_heap(true, false, work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - - MemoryContextSwitchTo(oldcontext); + SetSingleFuncCall(fcinfo, 0); currlsn = GetXLogWriteRecPtr(); @@ -431,7 +405,8 @@ pg_get_replication_slots(PG_FUNCTION_ARGS) Assert(i == PG_GET_REPLICATION_SLOTS_COLS); - tuplestore_putvalues(tupstore, tupdesc, values, nulls); + tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, + values, nulls); } LWLockRelease(ReplicationSlotControlLock); diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c index 5a718b1fe9..2d0292a092 100644 --- a/src/backend/replication/walsender.c +++ b/src/backend/replication/walsender.c @@ -3403,37 +3403,11 @@ pg_stat_get_wal_senders(PG_FUNCTION_ARGS) { #define PG_STAT_GET_WAL_SENDERS_COLS 12 ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - TupleDesc tupdesc; - Tuplestorestate *tupstore; - MemoryContext per_query_ctx; - MemoryContext oldcontext; SyncRepStandbyData *sync_standbys; int num_standbys; int i; - /* check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - - /* Build a tuple descriptor for our result type */ - if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) - elog(ERROR, "return type must be a row type"); - - per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; - oldcontext = MemoryContextSwitchTo(per_query_ctx); - - tupstore = tuplestore_begin_heap(true, false, work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - - MemoryContextSwitchTo(oldcontext); + SetSingleFuncCall(fcinfo, 0); /* * Get the currently active synchronous standbys. 
This could be out of @@ -3577,7 +3551,8 @@ pg_stat_get_wal_senders(PG_FUNCTION_ARGS) values[11] = TimestampTzGetDatum(replyTime); } - tuplestore_putvalues(tupstore, tupdesc, values, nulls); + tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, + values, nulls); } return (Datum) 0; diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c index 1f023a3460..c1279960cd 100644 --- a/src/backend/storage/ipc/shmem.c +++ b/src/backend/storage/ipc/shmem.c @@ -537,39 +537,13 @@ pg_get_shmem_allocations(PG_FUNCTION_ARGS) { #define PG_GET_SHMEM_SIZES_COLS 4 ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - TupleDesc tupdesc; - Tuplestorestate *tupstore; - MemoryContext per_query_ctx; - MemoryContext oldcontext; HASH_SEQ_STATUS hstat; ShmemIndexEnt *ent; Size named_allocated = 0; Datum values[PG_GET_SHMEM_SIZES_COLS]; bool nulls[PG_GET_SHMEM_SIZES_COLS]; - /* check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - - /* Build a tuple descriptor for our result type */ - if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) - elog(ERROR, "return type must be a row type"); - - per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; - oldcontext = MemoryContextSwitchTo(per_query_ctx); - - tupstore = tuplestore_begin_heap(true, false, work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - - MemoryContextSwitchTo(oldcontext); + SetSingleFuncCall(fcinfo, 0); LWLockAcquire(ShmemIndexLock, LW_SHARED); @@ -585,7 +559,8 @@ pg_get_shmem_allocations(PG_FUNCTION_ARGS) values[3] = Int64GetDatum(ent->allocated_size); named_allocated += ent->allocated_size; - tuplestore_putvalues(tupstore, tupdesc, values, nulls); + tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, + values, nulls); } /* output shared memory allocated but not counted via the shmem index */ @@ -593,7 +568,7 @@ pg_get_shmem_allocations(PG_FUNCTION_ARGS) nulls[1] = true; values[2] = Int64GetDatum(ShmemSegHdr->freeoffset - named_allocated); values[3] = values[2]; - tuplestore_putvalues(tupstore, tupdesc, values, nulls); + tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls); /* output as-of-yet unused shared memory */ nulls[0] = true; @@ -601,7 +576,7 @@ pg_get_shmem_allocations(PG_FUNCTION_ARGS) nulls[1] = false; values[2] = Int64GetDatum(ShmemSegHdr->totalsize - ShmemSegHdr->freeoffset); values[3] = values[2]; - tuplestore_putvalues(tupstore, tupdesc, values, nulls); + tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls); LWLockRelease(ShmemIndexLock); diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c index 7926258c06..ba0ec35ac5 100644 --- a/src/backend/utils/adt/datetime.c +++ b/src/backend/utils/adt/datetime.c @@ -4786,9 +4786,6 @@ Datum pg_timezone_names(PG_FUNCTION_ARGS) { ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - bool randomAccess; - TupleDesc tupdesc; - Tuplestorestate *tupstore; pg_tzenum *tzenum; pg_tz *tz; Datum values[4]; @@ -4799,31 +4796,8 @@ pg_timezone_names(PG_FUNCTION_ARGS) const char *tzn; Interval *resInterval; struct pg_tm itm; - 
MemoryContext oldcontext; - /* check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("materialize mode required, but it is not allowed in this context"))); - - /* The tupdesc and tuplestore must be created in ecxt_per_query_memory */ - oldcontext = MemoryContextSwitchTo(rsinfo->econtext->ecxt_per_query_memory); - - if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) - elog(ERROR, "return type must be a row type"); - - randomAccess = (rsinfo->allowedModes & SFRM_Materialize_Random) != 0; - tupstore = tuplestore_begin_heap(randomAccess, false, work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - - MemoryContextSwitchTo(oldcontext); + SetSingleFuncCall(fcinfo, 0); /* initialize timezone scanning code */ tzenum = pg_tzenumerate_start(); @@ -4865,7 +4839,7 @@ pg_timezone_names(PG_FUNCTION_ARGS) values[3] = BoolGetDatum(tm.tm_isdst > 0); - tuplestore_putvalues(tupstore, tupdesc, values, nulls); + tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls); } pg_tzenumerate_end(tzenum); diff --git a/src/backend/utils/adt/genfile.c b/src/backend/utils/adt/genfile.c index fe6863d8b4..1ed01620a1 100644 --- a/src/backend/utils/adt/genfile.c +++ b/src/backend/utils/adt/genfile.c @@ -477,12 +477,8 @@ pg_ls_dir(PG_FUNCTION_ARGS) char *location; bool missing_ok = false; bool include_dot_dirs = false; - bool randomAccess; - TupleDesc tupdesc; - Tuplestorestate *tupstore; DIR *dirdesc; struct dirent *de; - MemoryContext oldcontext; location = convert_and_check_filename(PG_GETARG_TEXT_PP(0)); @@ -495,29 +491,7 @@ pg_ls_dir(PG_FUNCTION_ARGS) include_dot_dirs = PG_GETARG_BOOL(2); } - /* check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("materialize mode required, but it is not allowed in this context"))); - - /* The tupdesc and tuplestore must be created in ecxt_per_query_memory */ - oldcontext = MemoryContextSwitchTo(rsinfo->econtext->ecxt_per_query_memory); - - tupdesc = CreateTemplateTupleDesc(1); - TupleDescInitEntry(tupdesc, (AttrNumber) 1, "pg_ls_dir", TEXTOID, -1, 0); - - randomAccess = (rsinfo->allowedModes & SFRM_Materialize_Random) != 0; - tupstore = tuplestore_begin_heap(randomAccess, false, work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - - MemoryContextSwitchTo(oldcontext); + SetSingleFuncCall(fcinfo, SRF_SINGLE_USE_EXPECTED); dirdesc = AllocateDir(location); if (!dirdesc) @@ -541,7 +515,8 @@ pg_ls_dir(PG_FUNCTION_ARGS) values[0] = CStringGetTextDatum(de->d_name); nulls[0] = false; - tuplestore_putvalues(tupstore, tupdesc, values, nulls); + tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, + values, nulls); } FreeDir(dirdesc); @@ -571,36 +546,10 @@ static Datum pg_ls_dir_files(FunctionCallInfo fcinfo, const char *dir, bool missing_ok) { ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - bool randomAccess; - 
TupleDesc tupdesc; - Tuplestorestate *tupstore; DIR *dirdesc; struct dirent *de; - MemoryContext oldcontext; - - /* check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("materialize mode required, but it is not allowed in this context"))); - - /* The tupdesc and tuplestore must be created in ecxt_per_query_memory */ - oldcontext = MemoryContextSwitchTo(rsinfo->econtext->ecxt_per_query_memory); - - if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) - elog(ERROR, "return type must be a row type"); - - randomAccess = (rsinfo->allowedModes & SFRM_Materialize_Random) != 0; - tupstore = tuplestore_begin_heap(randomAccess, false, work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - MemoryContextSwitchTo(oldcontext); + SetSingleFuncCall(fcinfo, 0); /* * Now walk the directory. Note that we must do this within a single SRF @@ -648,7 +597,7 @@ pg_ls_dir_files(FunctionCallInfo fcinfo, const char *dir, bool missing_ok) values[2] = TimestampTzGetDatum(time_t_to_timestamptz(attrib.st_mtime)); memset(nulls, 0, sizeof(nulls)); - tuplestore_putvalues(tupstore, tupdesc, values, nulls); + tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls); } FreeDir(dirdesc); diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c index 2457061f97..29664aa6e4 100644 --- a/src/backend/utils/adt/jsonfuncs.c +++ b/src/backend/utils/adt/jsonfuncs.c @@ -1909,9 +1909,6 @@ each_worker_jsonb(FunctionCallInfo fcinfo, const char *funcname, bool as_text) { Jsonb *jb = PG_GETARG_JSONB_P(0); ReturnSetInfo *rsi; - Tuplestorestate *tuple_store; - TupleDesc tupdesc; - TupleDesc ret_tdesc; MemoryContext old_cxt, tmp_cxt; bool skipNested = false; @@ -1926,30 +1923,7 @@ each_worker_jsonb(FunctionCallInfo fcinfo, const char *funcname, bool as_text) funcname))); rsi = (ReturnSetInfo *) fcinfo->resultinfo; - - if (!rsi || !IsA(rsi, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsi->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - - rsi->returnMode = SFRM_Materialize; - - if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) - elog(ERROR, "return type must be a row type"); - - old_cxt = MemoryContextSwitchTo(rsi->econtext->ecxt_per_query_memory); - - ret_tdesc = CreateTupleDescCopy(tupdesc); - BlessTupleDesc(ret_tdesc); - tuple_store = - tuplestore_begin_heap(rsi->allowedModes & SFRM_Materialize_Random, - false, work_mem); - - MemoryContextSwitchTo(old_cxt); + SetSingleFuncCall(fcinfo, SRF_SINGLE_BLESS); tmp_cxt = AllocSetContextCreate(CurrentMemoryContext, "jsonb_each temporary cxt", @@ -1964,7 +1938,6 @@ each_worker_jsonb(FunctionCallInfo fcinfo, const char *funcname, bool as_text) if (r == WJB_KEY) { text *key; - HeapTuple tuple; Datum values[2]; bool nulls[2] = {false, false}; @@ -2001,9 +1974,7 @@ each_worker_jsonb(FunctionCallInfo fcinfo, const char *funcname, bool as_text) values[1] = PointerGetDatum(val); } - tuple = heap_form_tuple(ret_tdesc, values, 
nulls); - - tuplestore_puttuple(tuple_store, tuple); + tuplestore_putvalues(rsi->setResult, rsi->setDesc, values, nulls); /* clean up and switch back */ MemoryContextSwitchTo(old_cxt); @@ -2013,9 +1984,6 @@ each_worker_jsonb(FunctionCallInfo fcinfo, const char *funcname, bool as_text) MemoryContextDelete(tmp_cxt); - rsi->setResult = tuple_store; - rsi->setDesc = ret_tdesc; - PG_RETURN_NULL(); } @@ -2027,8 +1995,6 @@ each_worker(FunctionCallInfo fcinfo, bool as_text) JsonLexContext *lex; JsonSemAction *sem; ReturnSetInfo *rsi; - MemoryContext old_cxt; - TupleDesc tupdesc; EachState *state; lex = makeJsonLexContext(json, true); @@ -2037,30 +2003,9 @@ each_worker(FunctionCallInfo fcinfo, bool as_text) rsi = (ReturnSetInfo *) fcinfo->resultinfo; - if (!rsi || !IsA(rsi, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - - if (!(rsi->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - - rsi->returnMode = SFRM_Materialize; - - (void) get_call_result_type(fcinfo, NULL, &tupdesc); - - /* make these in a sufficiently long-lived memory context */ - old_cxt = MemoryContextSwitchTo(rsi->econtext->ecxt_per_query_memory); - - state->ret_tdesc = CreateTupleDescCopy(tupdesc); - BlessTupleDesc(state->ret_tdesc); - state->tuple_store = - tuplestore_begin_heap(rsi->allowedModes & SFRM_Materialize_Random, - false, work_mem); - - MemoryContextSwitchTo(old_cxt); + SetSingleFuncCall(fcinfo, SRF_SINGLE_BLESS); + state->tuple_store = rsi->setResult; + state->ret_tdesc = rsi->setDesc; sem->semstate = (void *) state; sem->array_start = each_array_start; @@ -2079,9 +2024,6 @@ each_worker(FunctionCallInfo fcinfo, bool as_text) MemoryContextDelete(state->tmp_cxt); - rsi->setResult = state->tuple_store; - rsi->setDesc = state->ret_tdesc; - PG_RETURN_NULL(); } @@ -2206,9 +2148,6 @@ elements_worker_jsonb(FunctionCallInfo fcinfo, const char *funcname, { Jsonb *jb = PG_GETARG_JSONB_P(0); ReturnSetInfo *rsi; - Tuplestorestate *tuple_store; - TupleDesc tupdesc; - TupleDesc ret_tdesc; MemoryContext old_cxt, tmp_cxt; bool skipNested = false; @@ -2227,31 +2166,8 @@ elements_worker_jsonb(FunctionCallInfo fcinfo, const char *funcname, rsi = (ReturnSetInfo *) fcinfo->resultinfo; - if (!rsi || !IsA(rsi, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - - if (!(rsi->allowedModes & SFRM_Materialize) || - rsi->expectedDesc == NULL) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - - rsi->returnMode = SFRM_Materialize; - - /* it's a simple type, so don't use get_call_result_type() */ - tupdesc = rsi->expectedDesc; - - old_cxt = MemoryContextSwitchTo(rsi->econtext->ecxt_per_query_memory); - - ret_tdesc = CreateTupleDescCopy(tupdesc); - BlessTupleDesc(ret_tdesc); - tuple_store = - tuplestore_begin_heap(rsi->allowedModes & SFRM_Materialize_Random, - false, work_mem); - - MemoryContextSwitchTo(old_cxt); + SetSingleFuncCall(fcinfo, + SRF_SINGLE_USE_EXPECTED | SRF_SINGLE_BLESS); tmp_cxt = AllocSetContextCreate(CurrentMemoryContext, "jsonb_array_elements temporary cxt", @@ -2265,7 +2181,6 @@ elements_worker_jsonb(FunctionCallInfo fcinfo, const char *funcname, if (r == WJB_ELEM) { - HeapTuple tuple; Datum values[1]; bool 
nulls[1] = {false}; @@ -2291,9 +2206,7 @@ elements_worker_jsonb(FunctionCallInfo fcinfo, const char *funcname, values[0] = PointerGetDatum(val); } - tuple = heap_form_tuple(ret_tdesc, values, nulls); - - tuplestore_puttuple(tuple_store, tuple); + tuplestore_putvalues(rsi->setResult, rsi->setDesc, values, nulls); /* clean up and switch back */ MemoryContextSwitchTo(old_cxt); @@ -2303,9 +2216,6 @@ elements_worker_jsonb(FunctionCallInfo fcinfo, const char *funcname, MemoryContextDelete(tmp_cxt); - rsi->setResult = tuple_store; - rsi->setDesc = ret_tdesc; - PG_RETURN_NULL(); } @@ -2330,41 +2240,15 @@ elements_worker(FunctionCallInfo fcinfo, const char *funcname, bool as_text) JsonLexContext *lex = makeJsonLexContext(json, as_text); JsonSemAction *sem; ReturnSetInfo *rsi; - MemoryContext old_cxt; - TupleDesc tupdesc; ElementsState *state; state = palloc0(sizeof(ElementsState)); sem = palloc0(sizeof(JsonSemAction)); + SetSingleFuncCall(fcinfo, SRF_SINGLE_USE_EXPECTED | SRF_SINGLE_BLESS); rsi = (ReturnSetInfo *) fcinfo->resultinfo; - - if (!rsi || !IsA(rsi, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - - if (!(rsi->allowedModes & SFRM_Materialize) || - rsi->expectedDesc == NULL) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - - rsi->returnMode = SFRM_Materialize; - - /* it's a simple type, so don't use get_call_result_type() */ - tupdesc = rsi->expectedDesc; - - /* make these in a sufficiently long-lived memory context */ - old_cxt = MemoryContextSwitchTo(rsi->econtext->ecxt_per_query_memory); - - state->ret_tdesc = CreateTupleDescCopy(tupdesc); - BlessTupleDesc(state->ret_tdesc); - state->tuple_store = - tuplestore_begin_heap(rsi->allowedModes & SFRM_Materialize_Random, - false, work_mem); - - MemoryContextSwitchTo(old_cxt); + state->tuple_store = rsi->setResult; + state->ret_tdesc = rsi->setDesc; sem->semstate = (void *) state; sem->object_start = elements_object_start; @@ -2384,9 +2268,6 @@ elements_worker(FunctionCallInfo fcinfo, const char *funcname, bool as_text) MemoryContextDelete(state->tmp_cxt); - rsi->setResult = state->tuple_store; - rsi->setDesc = state->ret_tdesc; - PG_RETURN_NULL(); } diff --git a/src/backend/utils/adt/mcxtfuncs.c b/src/backend/utils/adt/mcxtfuncs.c index c7c95adf97..bb7cc94024 100644 --- a/src/backend/utils/adt/mcxtfuncs.c +++ b/src/backend/utils/adt/mcxtfuncs.c @@ -120,36 +120,9 @@ Datum pg_get_backend_memory_contexts(PG_FUNCTION_ARGS) { ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - TupleDesc tupdesc; - Tuplestorestate *tupstore; - MemoryContext per_query_ctx; - MemoryContext oldcontext; - - /* check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - - /* Build a tuple descriptor for our result type */ - if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) - elog(ERROR, "return type must be a row type"); - - per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; - oldcontext = MemoryContextSwitchTo(per_query_ctx); - - tupstore = 
tuplestore_begin_heap(true, false, work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - - MemoryContextSwitchTo(oldcontext); - - PutMemoryContextsStatsTupleStore(tupstore, tupdesc, + + SetSingleFuncCall(fcinfo, 0); + PutMemoryContextsStatsTupleStore(rsinfo->setResult, rsinfo->setDesc, TopMemoryContext, NULL, 0); return (Datum) 0; diff --git a/src/backend/utils/adt/misc.c b/src/backend/utils/adt/misc.c index e79eb6b478..4568749d23 100644 --- a/src/backend/utils/adt/misc.c +++ b/src/backend/utils/adt/misc.c @@ -203,39 +203,11 @@ pg_tablespace_databases(PG_FUNCTION_ARGS) { Oid tablespaceOid = PG_GETARG_OID(0); ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - bool randomAccess; - TupleDesc tupdesc; - Tuplestorestate *tupstore; char *location; DIR *dirdesc; struct dirent *de; - MemoryContext oldcontext; - /* check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("materialize mode required, but it is not allowed in this context"))); - - /* The tupdesc and tuplestore must be created in ecxt_per_query_memory */ - oldcontext = MemoryContextSwitchTo(rsinfo->econtext->ecxt_per_query_memory); - - tupdesc = CreateTemplateTupleDesc(1); - TupleDescInitEntry(tupdesc, (AttrNumber) 1, "pg_tablespace_databases", - OIDOID, -1, 0); - - randomAccess = (rsinfo->allowedModes & SFRM_Materialize_Random) != 0; - tupstore = tuplestore_begin_heap(randomAccess, false, work_mem); - - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - - MemoryContextSwitchTo(oldcontext); + SetSingleFuncCall(fcinfo, SRF_SINGLE_USE_EXPECTED); if (tablespaceOid == GLOBALTABLESPACE_OID) { @@ -291,7 +263,8 @@ pg_tablespace_databases(PG_FUNCTION_ARGS) values[0] = ObjectIdGetDatum(datOid); nulls[0] = false; - tuplestore_putvalues(tupstore, tupdesc, values, nulls); + tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, + values, nulls); } FreeDir(dirdesc); diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c index fd993d0d5f..eff45b16f2 100644 --- a/src/backend/utils/adt/pgstatfuncs.c +++ b/src/backend/utils/adt/pgstatfuncs.c @@ -461,25 +461,7 @@ pg_stat_get_progress_info(PG_FUNCTION_ARGS) int curr_backend; char *cmd = text_to_cstring(PG_GETARG_TEXT_PP(0)); ProgressCommandType cmdtype; - TupleDesc tupdesc; - Tuplestorestate *tupstore; ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - MemoryContext per_query_ctx; - MemoryContext oldcontext; - - /* check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - - /* Build a tuple descriptor for our result type */ - if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) - elog(ERROR, "return type must be a row type"); /* Translate command name into command type code. 
*/ if (pg_strcasecmp(cmd, "VACUUM") == 0) @@ -499,14 +481,7 @@ pg_stat_get_progress_info(PG_FUNCTION_ARGS) (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid command name: \"%s\"", cmd))); - per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; - oldcontext = MemoryContextSwitchTo(per_query_ctx); - - tupstore = tuplestore_begin_heap(true, false, work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - MemoryContextSwitchTo(oldcontext); + SetSingleFuncCall(fcinfo, 0); /* 1-based index */ for (curr_backend = 1; curr_backend <= num_backends; curr_backend++) @@ -552,7 +527,7 @@ pg_stat_get_progress_info(PG_FUNCTION_ARGS) nulls[i + 3] = true; } - tuplestore_putvalues(tupstore, tupdesc, values, nulls); + tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls); } return (Datum) 0; @@ -569,34 +544,8 @@ pg_stat_get_activity(PG_FUNCTION_ARGS) int curr_backend; int pid = PG_ARGISNULL(0) ? -1 : PG_GETARG_INT32(0); ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - TupleDesc tupdesc; - Tuplestorestate *tupstore; - MemoryContext per_query_ctx; - MemoryContext oldcontext; - - /* check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - /* Build a tuple descriptor for our result type */ - if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) - elog(ERROR, "return type must be a row type"); - - per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; - oldcontext = MemoryContextSwitchTo(per_query_ctx); - - tupstore = tuplestore_begin_heap(true, false, work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - - MemoryContextSwitchTo(oldcontext); + SetSingleFuncCall(fcinfo, 0); /* 1-based index */ for (curr_backend = 1; curr_backend <= num_backends; curr_backend++) @@ -629,7 +578,7 @@ pg_stat_get_activity(PG_FUNCTION_ARGS) nulls[5] = false; values[5] = CStringGetTextDatum(""); - tuplestore_putvalues(tupstore, tupdesc, values, nulls); + tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls); continue; } @@ -943,7 +892,7 @@ pg_stat_get_activity(PG_FUNCTION_ARGS) nulls[29] = true; } - tuplestore_putvalues(tupstore, tupdesc, values, nulls); + tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls); /* If only a single backend was requested, and we found it, break. 
*/ if (pid != -1) @@ -1866,36 +1815,10 @@ pg_stat_get_slru(PG_FUNCTION_ARGS) { #define PG_STAT_GET_SLRU_COLS 9 ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - TupleDesc tupdesc; - Tuplestorestate *tupstore; - MemoryContext per_query_ctx; - MemoryContext oldcontext; int i; PgStat_SLRUStats *stats; - /* check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - - /* Build a tuple descriptor for our result type */ - if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) - elog(ERROR, "return type must be a row type"); - - per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; - oldcontext = MemoryContextSwitchTo(per_query_ctx); - - tupstore = tuplestore_begin_heap(true, false, work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - - MemoryContextSwitchTo(oldcontext); + SetSingleFuncCall(fcinfo, 0); /* request SLRU stats from the stat collector */ stats = pgstat_fetch_slru(); @@ -1927,7 +1850,7 @@ pg_stat_get_slru(PG_FUNCTION_ARGS) values[7] = Int64GetDatum(stat.truncate); values[8] = TimestampTzGetDatum(stat.stat_reset_timestamp); - tuplestore_putvalues(tupstore, tupdesc, values, nulls); + tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls); } return (Datum) 0; diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c index b2003f5672..22ab5a4329 100644 --- a/src/backend/utils/adt/varlena.c +++ b/src/backend/utils/adt/varlena.c @@ -24,6 +24,7 @@ #include "common/hashfn.h" #include "common/int.h" #include "common/unicode_norm.h" +#include "funcapi.h" #include "lib/hyperloglog.h" #include "libpq/pqformat.h" #include "miscadmin.h" @@ -4832,34 +4833,14 @@ text_to_table(PG_FUNCTION_ARGS) { ReturnSetInfo *rsi = (ReturnSetInfo *) fcinfo->resultinfo; SplitTextOutputData tstate; - MemoryContext old_cxt; - - /* check to see if caller supports us returning a tuplestore */ - if (rsi == NULL || !IsA(rsi, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsi->allowedModes & SFRM_Materialize) || - rsi->expectedDesc == NULL) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - - /* OK, prepare tuplestore in per-query memory */ - old_cxt = MemoryContextSwitchTo(rsi->econtext->ecxt_per_query_memory); tstate.astate = NULL; - tstate.tupdesc = CreateTupleDescCopy(rsi->expectedDesc); - tstate.tupstore = tuplestore_begin_heap(true, false, work_mem); - - MemoryContextSwitchTo(old_cxt); + SetSingleFuncCall(fcinfo, SRF_SINGLE_USE_EXPECTED); + tstate.tupstore = rsi->setResult; + tstate.tupdesc = rsi->setDesc; (void) split_text(fcinfo, &tstate); - rsi->returnMode = SFRM_Materialize; - rsi->setResult = tstate.tupstore; - rsi->setDesc = tstate.tupdesc; - return (Datum) 0; } diff --git a/src/backend/utils/fmgr/README b/src/backend/utils/fmgr/README index 1e4c4b94a9..9d8848106d 100644 --- a/src/backend/utils/fmgr/README +++ b/src/backend/utils/fmgr/README @@ -305,6 +305,10 @@ If available, the expected tuple 
descriptor is passed in ReturnSetInfo; in other contexts the expectedDesc field will be NULL. The function need not pay attention to expectedDesc, but it may be useful in special cases. +SetSingleFuncCall() is a helper function able to setup the function's +ReturnSetInfo for a single call, filling in the Tuplestore and the +TupleDesc with the proper configuration for Materialize mode. + There is no support for functions accepting sets; instead, the function will be called multiple times, once for each element of the input set. diff --git a/src/backend/utils/fmgr/funcapi.c b/src/backend/utils/fmgr/funcapi.c index 5d913ae08d..d269662ad8 100644 --- a/src/backend/utils/fmgr/funcapi.c +++ b/src/backend/utils/fmgr/funcapi.c @@ -19,6 +19,7 @@ #include "catalog/pg_proc.h" #include "catalog/pg_type.h" #include "funcapi.h" +#include "miscadmin.h" #include "nodes/nodeFuncs.h" #include "utils/array.h" #include "utils/builtins.h" @@ -27,6 +28,7 @@ #include "utils/regproc.h" #include "utils/rel.h" #include "utils/syscache.h" +#include "utils/tuplestore.h" #include "utils/typcache.h" @@ -54,6 +56,73 @@ static bool resolve_polymorphic_tupdesc(TupleDesc tupdesc, static TypeFuncClass get_type_func_class(Oid typid, Oid *base_typeid); +/* + * SetSingleFuncCall + * + * Helper function to build the state of a set-returning function used + * in the context of a single call with materialize mode. This code + * includes sanity checks on ReturnSetInfo, creates the Tuplestore and + * the TupleDesc used with the function and stores them into the + * function's ReturnSetInfo. + * + * "flags" can be set to SRF_SINGLE_USE_EXPECTED, to use the tuple + * descriptor coming from expectedDesc, which is the tuple descriptor + * expected by the caller. SRF_SINGLE_BLESS can be set to complete the + * information associated to the tuple descriptor, which is necessary + * in some cases where the tuple descriptor comes from a transient + * RECORD datatype. + */ +void +SetSingleFuncCall(FunctionCallInfo fcinfo, bits32 flags) +{ + bool random_access; + ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; + Tuplestorestate *tupstore; + MemoryContext old_context, + per_query_ctx; + TupleDesc stored_tupdesc; + + /* check to see if caller supports returning a tuplestore */ + if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("set-valued function called in context that cannot accept a set"))); + if (!(rsinfo->allowedModes & SFRM_Materialize) || + ((flags & SRF_SINGLE_USE_EXPECTED) != 0 && rsinfo->expectedDesc == NULL)) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("materialize mode required, but it is not allowed in this context"))); + + /* + * Store the tuplestore and the tuple descriptor in ReturnSetInfo. This + * must be done in the per-query memory context. 
+ */ + per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; + old_context = MemoryContextSwitchTo(per_query_ctx); + + /* build a tuple descriptor for our result type */ + if ((flags & SRF_SINGLE_USE_EXPECTED) != 0) + stored_tupdesc = CreateTupleDescCopy(rsinfo->expectedDesc); + else + { + if (get_call_result_type(fcinfo, NULL, &stored_tupdesc) != TYPEFUNC_COMPOSITE) + elog(ERROR, "return type must be a row type"); + } + + /* If requested, bless the tuple descriptor */ + if ((flags & SRF_SINGLE_BLESS) != 0) + BlessTupleDesc(stored_tupdesc); + + random_access = (rsinfo->allowedModes & SFRM_Materialize_Random) != 0; + + tupstore = tuplestore_begin_heap(random_access, false, work_mem); + rsinfo->returnMode = SFRM_Materialize; + rsinfo->setResult = tupstore; + rsinfo->setDesc = stored_tupdesc; + MemoryContextSwitchTo(old_context); +} + + /* * init_MultiFuncCall * Create an empty FuncCallContext data structure diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index 1e3650184b..6d11f9c71b 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -10157,41 +10157,14 @@ show_all_file_settings(PG_FUNCTION_ARGS) { #define NUM_PG_FILE_SETTINGS_ATTS 7 ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - TupleDesc tupdesc; - Tuplestorestate *tupstore; ConfigVariable *conf; int seqno; - MemoryContext per_query_ctx; - MemoryContext oldcontext; - - /* Check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - - if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) - elog(ERROR, "return type must be a row type"); /* Scan the config files using current context as workspace */ conf = ProcessConfigFileInternal(PGC_SIGHUP, false, DEBUG3); - /* Switch into long-lived context to construct returned data structures */ - per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; - oldcontext = MemoryContextSwitchTo(per_query_ctx); - /* Build a tuplestore to return our results in */ - tupstore = tuplestore_begin_heap(true, false, work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - - /* The rest can be done in short-lived context */ - MemoryContextSwitchTo(oldcontext); + SetSingleFuncCall(fcinfo, 0); /* Process the results and create a tuplestore */ for (seqno = 1; conf != NULL; conf = conf->next, seqno++) @@ -10239,7 +10212,7 @@ show_all_file_settings(PG_FUNCTION_ARGS) nulls[6] = true; /* shove row into tuplestore */ - tuplestore_putvalues(tupstore, tupdesc, values, nulls); + tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls); } return (Datum) 0; diff --git a/src/backend/utils/misc/pg_config.c b/src/backend/utils/misc/pg_config.c index e646a41910..d9e18caf44 100644 --- a/src/backend/utils/misc/pg_config.c +++ b/src/backend/utils/misc/pg_config.c @@ -25,35 +25,12 @@ Datum pg_config(PG_FUNCTION_ARGS) { ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - Tuplestorestate *tupstore; - TupleDesc tupdesc; - MemoryContext oldcontext; ConfigData *configdata; size_t configdata_len; int i = 0; - /* check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || 
!IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - - if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) - elog(ERROR, "return type must be a row type"); - - /* Build tuplestore to hold the result rows */ - oldcontext = MemoryContextSwitchTo(rsinfo->econtext->ecxt_per_query_memory); - - tupstore = tuplestore_begin_heap(true, false, work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - - MemoryContextSwitchTo(oldcontext); + /* initialize our tuplestore */ + SetSingleFuncCall(fcinfo, 0); configdata = get_configdata(my_exec_path, &configdata_len); for (i = 0; i < configdata_len; i++) @@ -67,7 +44,7 @@ pg_config(PG_FUNCTION_ARGS) values[0] = CStringGetTextDatum(configdata[i].name); values[1] = CStringGetTextDatum(configdata[i].setting); - tuplestore_putvalues(tupstore, tupdesc, values, nulls); + tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls); } return (Datum) 0; diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c index afc03682d9..d549f66d4a 100644 --- a/src/backend/utils/mmgr/portalmem.c +++ b/src/backend/utils/mmgr/portalmem.c @@ -1132,43 +1132,14 @@ Datum pg_cursor(PG_FUNCTION_ARGS) { ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - TupleDesc tupdesc; - Tuplestorestate *tupstore; - MemoryContext per_query_ctx; - MemoryContext oldcontext; HASH_SEQ_STATUS hash_seq; PortalHashEnt *hentry; - /* check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - - /* need to build tuplestore in query context */ - per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; - oldcontext = MemoryContextSwitchTo(per_query_ctx); - - if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) - elog(ERROR, "return type must be a row type"); - /* * We put all the tuples into a tuplestore in one scan of the hashtable. * This avoids any issue of the hashtable possibly changing between calls. 
*/ - tupstore = - tuplestore_begin_heap(rsinfo->allowedModes & SFRM_Materialize_Random, - false, work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - - /* generate junk in short-term context */ - MemoryContextSwitchTo(oldcontext); + SetSingleFuncCall(fcinfo, 0); hash_seq_init(&hash_seq, PortalHashTable); while ((hentry = hash_seq_search(&hash_seq)) != NULL) @@ -1190,13 +1161,9 @@ pg_cursor(PG_FUNCTION_ARGS) values[4] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_SCROLL); values[5] = TimestampTzGetDatum(portal->creation_time); - tuplestore_putvalues(tupstore, tupdesc, values, nulls); + tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls); } - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - return (Datum) 0; } diff --git a/src/include/funcapi.h b/src/include/funcapi.h index ba927c2f33..dc3d819a1c 100644 --- a/src/include/funcapi.h +++ b/src/include/funcapi.h @@ -278,14 +278,20 @@ extern Datum HeapTupleHeaderGetDatum(HeapTupleHeader tuple); * memory allocated in multi_call_memory_ctx, but holding file descriptors or * other non-memory resources open across calls is a bug. SRFs that need * such resources should not use these macros, but instead populate a - * tuplestore during a single call, and return that using SFRM_Materialize - * mode (see fmgr/README). Alternatively, set up a callback to release - * resources at query shutdown, using RegisterExprContextCallback(). + * tuplestore during a single call, as set up by SetSingleFuncCall() (see + * fmgr/README). Alternatively, set up a callback to release resources + * at query shutdown, using RegisterExprContextCallback(). + * *---------- */ /* from funcapi.c */ + +/* flag bits for SetSingleFuncCall() */ +#define SRF_SINGLE_USE_EXPECTED 0x01 /* use expectedDesc as tupdesc */ +#define SRF_SINGLE_BLESS 0x02 /* validate tuple for SRF */ +extern void SetSingleFuncCall(FunctionCallInfo fcinfo, bits32 flags); + extern FuncCallContext *init_MultiFuncCall(PG_FUNCTION_ARGS); extern FuncCallContext *per_MultiFuncCall(PG_FUNCTION_ARGS); extern void end_MultiFuncCall(PG_FUNCTION_ARGS, FuncCallContext *funcctx); From 5e0e99a80b2f41c8e9ed0f4071892d9e797a12be Mon Sep 17 00:00:00 2001 From: Amit Kapila Date: Mon, 7 Mar 2022 08:33:58 +0530 Subject: [PATCH 092/108] Make the errcontext message in logical replication worker translation friendly. Previously, the message for logical replication worker errcontext was incrementally built, which was not translation friendly. Instead, we use complete sentences with if-else branches. We also remove the commit timestamp from the context message since it's not important information and it made the message long.
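For illustration only (this sketch is not part of the patch): incremental construction defeats translation because each appended format string is extracted and translated as an independent fragment, so a target language cannot reorder the pieces into a grammatical sentence. A minimal standalone C sketch of the two styles, with printf() standing in for errcontext() and a no-op _() macro standing in for the gettext marker (both functions below are invented for the example):

    #include <stdio.h>

    /* Stand-in for the gettext marker; a real build would translate here. */
    #define _(msgid) (msgid)

    /*
     * Translation-unfriendly: the fragments are translated independently,
     * so a language that places "in transaction %u" elsewhere in the
     * sentence cannot produce correct output.
     */
    static void
    context_fragmented(const char *cmd, unsigned int xid)
    {
        printf(_("processing remote data during \"%s\""), cmd);
        if (xid != 0)
            printf(_(" in transaction %u"), xid);
        putchar('\n');
    }

    /*
     * Translation-friendly: one complete sentence per branch, translated
     * and reordered as a unit, matching the if-else style of this patch.
     */
    static void
    context_whole_sentence(const char *cmd, unsigned int xid)
    {
        if (xid == 0)
            printf(_("processing remote data during \"%s\"\n"), cmd);
        else
            printf(_("processing remote data during \"%s\" in transaction %u\n"),
                   cmd, xid);
    }

    int
    main(void)
    {
        context_fragmented("INSERT", 740);
        context_whole_sentence("INSERT", 740);
        return 0;
    }

The cost is some duplication across branches, which is why the patch enumerates the rel/attnum/xid combinations explicitly instead of appending to a StringInfo.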
Author: Masahiko Sawada Reviewed-by: Takamichi Osumi, and Amit Kapila Discussion: https://postgr.es/m/CAD21AoBarBf2oTF71ig2g_o=3Z_Dt6_sOpMQma1kFgbnA5OZ_w@mail.gmail.com --- src/backend/replication/logical/worker.c | 73 +++++++++++------------- 1 file changed, 33 insertions(+), 40 deletions(-) diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c index 7e267f7960..92aa794706 100644 --- a/src/backend/replication/logical/worker.c +++ b/src/backend/replication/logical/worker.c @@ -226,7 +226,6 @@ typedef struct ApplyErrorCallbackArg /* Remote node information */ int remote_attnum; /* -1 if invalid */ TransactionId remote_xid; - TimestampTz ts; /* commit, rollback, or prepare timestamp */ } ApplyErrorCallbackArg; static ApplyErrorCallbackArg apply_error_callback_arg = @@ -235,7 +234,6 @@ static ApplyErrorCallbackArg apply_error_callback_arg = .rel = NULL, .remote_attnum = -1, .remote_xid = InvalidTransactionId, - .ts = 0, }; static MemoryContext ApplyMessageContext = NULL; @@ -334,7 +332,7 @@ static void apply_spooled_messages(TransactionId xid, XLogRecPtr lsn); /* Functions for apply error callback */ static void apply_error_callback(void *arg); -static inline void set_apply_error_context_xact(TransactionId xid, TimestampTz ts); +static inline void set_apply_error_context_xact(TransactionId xid); static inline void reset_apply_error_context_info(void); /* @@ -787,7 +785,7 @@ apply_handle_begin(StringInfo s) LogicalRepBeginData begin_data; logicalrep_read_begin(s, &begin_data); - set_apply_error_context_xact(begin_data.xid, begin_data.committime); + set_apply_error_context_xact(begin_data.xid); remote_final_lsn = begin_data.final_lsn; @@ -839,7 +837,7 @@ apply_handle_begin_prepare(StringInfo s) errmsg_internal("tablesync worker received a BEGIN PREPARE message"))); logicalrep_read_begin_prepare(s, &begin_data); - set_apply_error_context_xact(begin_data.xid, begin_data.prepare_time); + set_apply_error_context_xact(begin_data.xid); remote_final_lsn = begin_data.prepare_lsn; @@ -938,7 +936,7 @@ apply_handle_commit_prepared(StringInfo s) char gid[GIDSIZE]; logicalrep_read_commit_prepared(s, &prepare_data); - set_apply_error_context_xact(prepare_data.xid, prepare_data.commit_time); + set_apply_error_context_xact(prepare_data.xid); /* Compute GID for two_phase transactions. */ TwoPhaseTransactionGid(MySubscription->oid, prepare_data.xid, @@ -979,7 +977,7 @@ apply_handle_rollback_prepared(StringInfo s) char gid[GIDSIZE]; logicalrep_read_rollback_prepared(s, &rollback_data); - set_apply_error_context_xact(rollback_data.xid, rollback_data.rollback_time); + set_apply_error_context_xact(rollback_data.xid); /* Compute GID for two_phase transactions. */ TwoPhaseTransactionGid(MySubscription->oid, rollback_data.xid, @@ -1044,7 +1042,7 @@ apply_handle_stream_prepare(StringInfo s) errmsg_internal("tablesync worker received a STREAM PREPARE message"))); logicalrep_read_stream_prepare(s, &prepare_data); - set_apply_error_context_xact(prepare_data.xid, prepare_data.prepare_time); + set_apply_error_context_xact(prepare_data.xid); elog(DEBUG1, "received prepare for streamed transaction %u", prepare_data.xid); @@ -1126,7 +1124,7 @@ apply_handle_stream_start(StringInfo s) (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg_internal("invalid transaction ID in streamed replication transaction"))); - set_apply_error_context_xact(stream_xid, 0); + set_apply_error_context_xact(stream_xid); /* * Initialize the worker's stream_fileset if we haven't yet. 
This will be @@ -1215,7 +1213,7 @@ apply_handle_stream_abort(StringInfo s) */ if (xid == subxid) { - set_apply_error_context_xact(xid, 0); + set_apply_error_context_xact(xid); stream_cleanup_files(MyLogicalRepWorker->subid, xid); } else @@ -1241,7 +1239,7 @@ apply_handle_stream_abort(StringInfo s) bool found = false; char path[MAXPGPATH]; - set_apply_error_context_xact(subxid, 0); + set_apply_error_context_xact(subxid); subidx = -1; begin_replication_step(); @@ -1426,7 +1424,7 @@ apply_handle_stream_commit(StringInfo s) errmsg_internal("STREAM COMMIT message without STREAM STOP"))); xid = logicalrep_read_stream_commit(s, &commit_data); - set_apply_error_context_xact(xid, commit_data.committime); + set_apply_error_context_xact(xid); elog(DEBUG1, "received commit for streamed transaction %u", xid); @@ -3648,46 +3646,41 @@ IsLogicalWorker(void) static void apply_error_callback(void *arg) { - StringInfoData buf; ApplyErrorCallbackArg *errarg = &apply_error_callback_arg; if (apply_error_callback_arg.command == 0) return; - initStringInfo(&buf); - appendStringInfo(&buf, _("processing remote data during \"%s\""), - logicalrep_message_type(errarg->command)); - - /* append relation information */ - if (errarg->rel) - { - appendStringInfo(&buf, _(" for replication target relation \"%s.%s\""), - errarg->rel->remoterel.nspname, - errarg->rel->remoterel.relname); - if (errarg->remote_attnum >= 0) - appendStringInfo(&buf, _(" column \"%s\""), - errarg->rel->remoterel.attnames[errarg->remote_attnum]); - } - - /* append transaction information */ - if (TransactionIdIsNormal(errarg->remote_xid)) + if (errarg->rel == NULL) { - appendStringInfo(&buf, _(" in transaction %u"), errarg->remote_xid); - if (errarg->ts != 0) - appendStringInfo(&buf, _(" at %s"), - timestamptz_to_str(errarg->ts)); + if (!TransactionIdIsValid(errarg->remote_xid)) + errcontext("processing remote data during \"%s\"", + logicalrep_message_type(errarg->command)); + else + errcontext("processing remote data during \"%s\" in transaction %u", + logicalrep_message_type(errarg->command), + errarg->remote_xid); } - - errcontext("%s", buf.data); - pfree(buf.data); + else if (errarg->remote_attnum < 0) + errcontext("processing remote data during \"%s\" for replication target relation \"%s.%s\" in transaction %u", + logicalrep_message_type(errarg->command), + errarg->rel->remoterel.nspname, + errarg->rel->remoterel.relname, + errarg->remote_xid); + else + errcontext("processing remote data during \"%s\" for replication target relation \"%s.%s\" column \"%s\" in transaction %u", + logicalrep_message_type(errarg->command), + errarg->rel->remoterel.nspname, + errarg->rel->remoterel.relname, + errarg->rel->remoterel.attnames[errarg->remote_attnum], + errarg->remote_xid); } /* Set transaction information of apply error callback */ static inline void -set_apply_error_context_xact(TransactionId xid, TimestampTz ts) +set_apply_error_context_xact(TransactionId xid) { apply_error_callback_arg.remote_xid = xid; - apply_error_callback_arg.ts = ts; } /* Reset all information of apply error callback */ @@ -3697,5 +3690,5 @@ reset_apply_error_context_info(void) apply_error_callback_arg.command = 0; apply_error_callback_arg.rel = NULL; apply_error_callback_arg.remote_attnum = -1; - set_apply_error_context_xact(InvalidTransactionId, 0); + set_apply_error_context_xact(InvalidTransactionId); } From 25751f54b8e02a8fff62e9dbdbc9f2efbb4e8dc1 Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Fri, 4 Mar 2022 14:49:37 +0100 Subject: [PATCH 093/108] Add 
pg_analyze_and_rewrite_varparams() This new function extracts common code from PrepareQuery() and exec_parse_message(). It is then exactly analogous to the existing pg_analyze_and_rewrite_fixedparams() and pg_analyze_and_rewrite_withcb(). To unify these two code paths, this makes PrepareQuery() now subject to log_parser_stats. Also, both paths now invoke TRACE_POSTGRESQL_QUERY_REWRITE_START(). PrepareQuery() no longer checks whether a utility statement was specified. The grammar doesn't allow that anyway, and exec_parse_message() supports it, so restricting it doesn't seem necessary. This also adds QueryEnvironment support to the *varparams functions, for consistency with its cousins, even though it is not used right now. Reviewed-by: Nathan Bossart Discussion: https://www.postgresql.org/message-id/flat/c67ce276-52b4-0239-dc0e-39875bf81840@enterprisedb.com --- src/backend/commands/prepare.c | 43 ++------------- src/backend/parser/analyze.c | 5 +- src/backend/tcop/postgres.c | 95 +++++++++++++++++++++++----------- src/include/parser/analyze.h | 2 +- src/include/tcop/tcopprot.h | 5 ++ 5 files changed, 78 insertions(+), 72 deletions(-) diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c index d2d8ee120c..80738547ed 100644 --- a/src/backend/commands/prepare.c +++ b/src/backend/commands/prepare.c @@ -63,9 +63,7 @@ PrepareQuery(ParseState *pstate, PrepareStmt *stmt, CachedPlanSource *plansource; Oid *argtypes = NULL; int nargs; - Query *query; List *query_list; - int i; /* * Disallow empty-string statement name (conflicts with protocol-level @@ -97,6 +95,7 @@ PrepareQuery(ParseState *pstate, PrepareStmt *stmt, if (nargs) { + int i; ListCell *l; argtypes = (Oid *) palloc(nargs * sizeof(Oid)); @@ -115,44 +114,10 @@ PrepareQuery(ParseState *pstate, PrepareStmt *stmt, * Analyze the statement using these parameter types (any parameters * passed in from above us will not be visible to it), allowing * information about unknown parameters to be deduced from context. + * Rewrite the query. The result could be 0, 1, or many queries. */ - query = parse_analyze_varparams(rawstmt, pstate->p_sourcetext, - &argtypes, &nargs); - - /* - * Check that all parameter types were determined. - */ - for (i = 0; i < nargs; i++) - { - Oid argtype = argtypes[i]; - - if (argtype == InvalidOid || argtype == UNKNOWNOID) - ereport(ERROR, - (errcode(ERRCODE_INDETERMINATE_DATATYPE), - errmsg("could not determine data type of parameter $%d", - i + 1))); - } - - /* - * grammar only allows PreparableStmt, so this check should be redundant - */ - switch (query->commandType) - { - case CMD_SELECT: - case CMD_INSERT: - case CMD_UPDATE: - case CMD_DELETE: - /* OK */ - break; - default: - ereport(ERROR, - (errcode(ERRCODE_INVALID_PSTATEMENT_DEFINITION), - errmsg("utility statements cannot be prepared"))); - break; - } - - /* Rewrite the query. The result could be 0, 1, or many queries. 
*/ - query_list = QueryRewrite(query); + query_list = pg_analyze_and_rewrite_varparams(rawstmt, pstate->p_sourcetext, + &argtypes, &nargs, NULL); /* Finish filling in the CachedPlanSource */ CompleteCachedPlan(plansource, diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c index 19d97fe731..53c11b3a15 100644 --- a/src/backend/parser/analyze.c +++ b/src/backend/parser/analyze.c @@ -148,7 +148,8 @@ parse_analyze_fixedparams(RawStmt *parseTree, const char *sourceText, */ Query * parse_analyze_varparams(RawStmt *parseTree, const char *sourceText, - Oid **paramTypes, int *numParams) + Oid **paramTypes, int *numParams, + QueryEnvironment *queryEnv) { ParseState *pstate = make_parsestate(NULL); Query *query; @@ -160,6 +161,8 @@ parse_analyze_varparams(RawStmt *parseTree, const char *sourceText, setup_parse_variable_parameters(pstate, paramTypes, numParams); + pstate->p_queryEnv = queryEnv; + query = transformTopLevelStmt(pstate, parseTree); /* make sure all is well with parameter types */ diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c index c087db4445..d7e39aed64 100644 --- a/src/backend/tcop/postgres.c +++ b/src/backend/tcop/postgres.c @@ -637,9 +637,11 @@ pg_parse_query(const char *query_string) * NOTE: for reasons mentioned above, this must be separate from raw parsing. */ List * -pg_analyze_and_rewrite_fixedparams(RawStmt *parsetree, const char *query_string, - const Oid *paramTypes, int numParams, - QueryEnvironment *queryEnv) +pg_analyze_and_rewrite_fixedparams(RawStmt *parsetree, + const char *query_string, + const Oid *paramTypes, + int numParams, + QueryEnvironment *queryEnv) { Query *query; List *querytree_list; @@ -668,6 +670,59 @@ pg_analyze_and_rewrite_fixedparams(RawStmt *parsetree, const char *query_string, return querytree_list; } +/* + * Do parse analysis and rewriting. This is the same as + * pg_analyze_and_rewrite_fixedparams except that it's okay to deduce + * information about $n symbol datatypes from context. + */ +List * +pg_analyze_and_rewrite_varparams(RawStmt *parsetree, + const char *query_string, + Oid **paramTypes, + int *numParams, + QueryEnvironment *queryEnv) +{ + Query *query; + List *querytree_list; + + TRACE_POSTGRESQL_QUERY_REWRITE_START(query_string); + + /* + * (1) Perform parse analysis. + */ + if (log_parser_stats) + ResetUsage(); + + query = parse_analyze_varparams(parsetree, query_string, paramTypes, numParams, + queryEnv); + + /* + * Check all parameter types got determined. + */ + for (int i = 0; i < *numParams; i++) + { + Oid ptype = (*paramTypes)[i]; + + if (ptype == InvalidOid || ptype == UNKNOWNOID) + ereport(ERROR, + (errcode(ERRCODE_INDETERMINATE_DATATYPE), + errmsg("could not determine data type of parameter $%d", + i + 1))); + } + + if (log_parser_stats) + ShowUsage("PARSE ANALYSIS STATISTICS"); + + /* + * (2) Rewrite the queries, as necessary + */ + querytree_list = pg_rewrite_query(query); + + TRACE_POSTGRESQL_QUERY_REWRITE_DONE(query_string); + + return querytree_list; +} + /* * Do parse analysis and rewriting. This is the same as * pg_analyze_and_rewrite_fixedparams except that, instead of a fixed list of @@ -1409,7 +1464,6 @@ exec_parse_message(const char *query_string, /* string to execute */ if (parsetree_list != NIL) { - Query *query; bool snapshot_set = false; raw_parse_tree = linitial_node(RawStmt, parsetree_list); @@ -1449,34 +1503,13 @@ exec_parse_message(const char *query_string, /* string to execute */ /* * Analyze and rewrite the query. 
Note that the originally specified * parameter set is not required to be complete, so we have to use - * parse_analyze_varparams(). - */ - if (log_parser_stats) - ResetUsage(); - - query = parse_analyze_varparams(raw_parse_tree, - query_string, - ¶mTypes, - &numParams); - - /* - * Check all parameter types got determined. + * pg_analyze_and_rewrite_varparams(). */ - for (int i = 0; i < numParams; i++) - { - Oid ptype = paramTypes[i]; - - if (ptype == InvalidOid || ptype == UNKNOWNOID) - ereport(ERROR, - (errcode(ERRCODE_INDETERMINATE_DATATYPE), - errmsg("could not determine data type of parameter $%d", - i + 1))); - } - - if (log_parser_stats) - ShowUsage("PARSE ANALYSIS STATISTICS"); - - querytree_list = pg_rewrite_query(query); + querytree_list = pg_analyze_and_rewrite_varparams(raw_parse_tree, + query_string, + ¶mTypes, + &numParams, + NULL); /* Done with the snapshot used for parsing */ if (snapshot_set) diff --git a/src/include/parser/analyze.h b/src/include/parser/analyze.h index ed989bb141..06b237c39c 100644 --- a/src/include/parser/analyze.h +++ b/src/include/parser/analyze.h @@ -27,7 +27,7 @@ extern PGDLLIMPORT post_parse_analyze_hook_type post_parse_analyze_hook; extern Query *parse_analyze_fixedparams(RawStmt *parseTree, const char *sourceText, const Oid *paramTypes, int numParams, QueryEnvironment *queryEnv); extern Query *parse_analyze_varparams(RawStmt *parseTree, const char *sourceText, - Oid **paramTypes, int *numParams); + Oid **paramTypes, int *numParams, QueryEnvironment *queryEnv); extern Query *parse_sub_analyze(Node *parseTree, ParseState *parentParseState, CommonTableExpr *parentCTE, diff --git a/src/include/tcop/tcopprot.h b/src/include/tcop/tcopprot.h index 00c20966ab..92291a750d 100644 --- a/src/include/tcop/tcopprot.h +++ b/src/include/tcop/tcopprot.h @@ -49,6 +49,11 @@ extern List *pg_analyze_and_rewrite_fixedparams(RawStmt *parsetree, const char *query_string, const Oid *paramTypes, int numParams, QueryEnvironment *queryEnv); +extern List *pg_analyze_and_rewrite_varparams(RawStmt *parsetree, + const char *query_string, + Oid **paramTypes, + int *numParams, + QueryEnvironment *queryEnv); extern List *pg_analyze_and_rewrite_withcb(RawStmt *parsetree, const char *query_string, ParserSetupHook parserSetup, From d5ed9da41d96988d905b49bebb273a9b2d6e2915 Mon Sep 17 00:00:00 2001 From: Tomas Vondra Date: Mon, 7 Mar 2022 20:53:16 +0100 Subject: [PATCH 094/108] Call ReorderBufferProcessXid from sequence_decode Commit 0da92dc530c added sequence_decode() implementing logical decoding of sequences, but it failed to call ReorderBufferProcessXid() as it should. So add the missing call. Reported-by: Amit Kapila Discussion: https://postgr.es/m/CAA4eK1KGn6cQqJEsubOOENwQOANsExiV2sKL52r4U10J8NJEMQ%40mail.gmail.com --- src/backend/replication/logical/decode.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/backend/replication/logical/decode.c b/src/backend/replication/logical/decode.c index 18cf931822..8c00a73cb9 100644 --- a/src/backend/replication/logical/decode.c +++ b/src/backend/replication/logical/decode.c @@ -1321,6 +1321,8 @@ sequence_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) if (info != XLOG_SEQ_LOG) elog(ERROR, "unexpected RM_SEQ_ID record type: %u", info); + ReorderBufferProcessXid(ctx->reorder, XLogRecGetXid(r), buf->origptr); + /* * If we don't have snapshot or we are just fast-forwarding, there is no * point in decoding messages. 
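Before the contrib/ conversions in the next patch, here is a minimal sketch (not part of any patch) of what a materialize-mode SRF looks like when written against the SetSingleFuncCall() helper introduced above. The function name demo_list_items and its two-column result are invented for illustration; the sketch assumes a PGXS build and a matching SQL declaration such as CREATE FUNCTION demo_list_items() RETURNS TABLE (id int, label text) AS 'MODULE_PATHNAME' LANGUAGE C STRICT;

    #include "postgres.h"

    #include "fmgr.h"
    #include "funcapi.h"
    #include "utils/builtins.h"
    #include "utils/tuplestore.h"

    PG_MODULE_MAGIC;

    PG_FUNCTION_INFO_V1(demo_list_items);

    /*
     * Emit three (int, text) rows.  SetSingleFuncCall() performs the
     * ReturnSetInfo sanity checks, builds the TupleDesc from the declared
     * return type, and creates the tuplestore in per-query memory, so the
     * function body only has to fill in rows.
     */
    Datum
    demo_list_items(PG_FUNCTION_ARGS)
    {
        ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;

        SetSingleFuncCall(fcinfo, 0);

        for (int i = 0; i < 3; i++)
        {
            Datum    values[2];
            bool     nulls[2] = {false, false};

            values[0] = Int32GetDatum(i);
            values[1] = CStringGetTextDatum("demo");

            tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc,
                                 values, nulls);
        }

        return (Datum) 0;
    }

Passing SRF_SINGLE_USE_EXPECTED instead of 0 copies the caller-provided expectedDesc, as pg_ls_dir() does above, and SRF_SINGLE_BLESS blesses a transient RECORD descriptor, as in the jsonb each/elements paths.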
From 5b81703787bfc1e6072c8e37125eba0c5598b807 Mon Sep 17 00:00:00 2001 From: Michael Paquier Date: Tue, 8 Mar 2022 10:12:22 +0900 Subject: [PATCH 095/108] Simplify SRFs using materialize mode in contrib/ modules 9e98583 introduced a helper to centralize building the state needed by such SRFs (tuplestore, tuple descriptors, etc.), checking for any errors. This commit updates all places of contrib/ that can be switched to use SetSingleFuncCall() as a drop-in replacement, resulting in the removal of a lot of boilerplate code in all the modules updated by this commit. Per analysis, some places remain as they are: - pg_logdir_ls() in adminpack/ has historically used TYPEFUNC_RECORD as its return type, and I suspect that changing it may cause issues at run-time with some of its past versions, down to 1.0. - dblink/ uses a wrapper function doing exactly the work of SetSingleFuncCall(). Here the switch should be possible, but rather invasive, so it does not seem worth the extra backpatch maintenance cost. - tablefunc/, similarly, uses multiple helper functions with portions of SetSingleFuncCall() spread across the code paths of this module. Author: Melanie Plageman Discussion: https://postgr.es/m/CAAKRu_bvDPJoL9mH6eYwvBpPtTGQwbDzfJbCM-OjkSZDu5yTPg@mail.gmail.com --- contrib/amcheck/verify_heapam.c | 46 ++---------- contrib/dblink/dblink.c | 26 +------ contrib/pageinspect/brinfuncs.c | 31 +------- contrib/pageinspect/gistfuncs.c | 60 ++-------------- .../pg_stat_statements/pg_stat_statements.c | 33 +-------- contrib/pgrowlocks/pgrowlocks.c | 34 ++------- contrib/postgres_fdw/connection.c | 31 +------- contrib/xml2/xpath.c | 72 +++---------------- 8 files changed, 32 insertions(+), 301 deletions(-) diff --git a/contrib/amcheck/verify_heapam.c b/contrib/amcheck/verify_heapam.c index f996f9a572..e5f7355dcb 100644 --- a/contrib/amcheck/verify_heapam.c +++ b/contrib/amcheck/verify_heapam.c @@ -165,7 +165,6 @@ static bool check_tuple_visibility(HeapCheckContext *ctx); static void report_corruption(HeapCheckContext *ctx, char *msg); static void report_toast_corruption(HeapCheckContext *ctx, ToastedAttribute *ta, char *msg); -static TupleDesc verify_heapam_tupdesc(void); static FullTransactionId FullTransactionIdFromXidAndCtx(TransactionId xid, const HeapCheckContext *ctx); static void update_cached_xid_range(HeapCheckContext *ctx); @@ -214,8 +213,6 @@ Datum verify_heapam(PG_FUNCTION_ARGS) { ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - MemoryContext old_context; - bool random_access; HeapCheckContext ctx; Buffer vmbuffer = InvalidBuffer; Oid relid; @@ -227,16 +224,6 @@ verify_heapam(PG_FUNCTION_ARGS) BlockNumber nblocks; const char *skip; - /* Check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("materialize mode required, but it is not allowed in this context"))); - /* Check supplied arguments */ if (PG_ARGISNULL(0)) ereport(ERROR, @@ -290,15 +277,10 @@ verify_heapam(PG_FUNCTION_ARGS) */ ctx.attnum = -1; - /* The tupdesc and tuplestore must be created in ecxt_per_query_memory */ - old_context = MemoryContextSwitchTo(rsinfo->econtext->ecxt_per_query_memory); - random_access = (rsinfo->allowedModes & SFRM_Materialize_Random) != 0; - ctx.tupdesc = verify_heapam_tupdesc(); - ctx.tupstore =
tuplestore_begin_heap(random_access, false, work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = ctx.tupstore; - rsinfo->setDesc = ctx.tupdesc; - MemoryContextSwitchTo(old_context); + /* Construct the tuplestore and tuple descriptor */ + SetSingleFuncCall(fcinfo, 0); + ctx.tupdesc = rsinfo->setDesc; + ctx.tupstore = rsinfo->setResult; /* Open relation, check relkind and access method */ ctx.rel = relation_open(relid, AccessShareLock); @@ -630,26 +612,6 @@ report_toast_corruption(HeapCheckContext *ctx, ToastedAttribute *ta, ctx->is_corrupt = true; } -/* - * Construct the TupleDesc used to report messages about corruptions found - * while scanning the heap. - */ -static TupleDesc -verify_heapam_tupdesc(void) -{ - TupleDesc tupdesc; - AttrNumber a = 0; - - tupdesc = CreateTemplateTupleDesc(HEAPCHECK_RELATION_COLS); - TupleDescInitEntry(tupdesc, ++a, "blkno", INT8OID, -1, 0); - TupleDescInitEntry(tupdesc, ++a, "offnum", INT4OID, -1, 0); - TupleDescInitEntry(tupdesc, ++a, "attnum", INT4OID, -1, 0); - TupleDescInitEntry(tupdesc, ++a, "msg", TEXTOID, -1, 0); - Assert(a == HEAPCHECK_RELATION_COLS); - - return BlessTupleDesc(tupdesc); -} - /* * Check for tuple header corruption. * diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c index efc4c94301..a06d4bd12d 100644 --- a/contrib/dblink/dblink.c +++ b/contrib/dblink/dblink.c @@ -1928,12 +1928,6 @@ dblink_get_notify(PG_FUNCTION_ARGS) PGconn *conn; PGnotify *notify; ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - TupleDesc tupdesc; - Tuplestorestate *tupstore; - MemoryContext per_query_ctx; - MemoryContext oldcontext; - - prepTuplestoreResult(fcinfo); dblink_init(); if (PG_NARGS() == 1) @@ -1941,23 +1935,7 @@ dblink_get_notify(PG_FUNCTION_ARGS) else conn = pconn->conn; - /* create the tuplestore in per-query memory */ - per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; - oldcontext = MemoryContextSwitchTo(per_query_ctx); - - tupdesc = CreateTemplateTupleDesc(DBLINK_NOTIFY_COLS); - TupleDescInitEntry(tupdesc, (AttrNumber) 1, "notify_name", - TEXTOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 2, "be_pid", - INT4OID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 3, "extra", - TEXTOID, -1, 0); - - tupstore = tuplestore_begin_heap(true, false, work_mem); - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - - MemoryContextSwitchTo(oldcontext); + SetSingleFuncCall(fcinfo, 0); PQconsumeInput(conn); while ((notify = PQnotifies(conn)) != NULL) @@ -1980,7 +1958,7 @@ dblink_get_notify(PG_FUNCTION_ARGS) else nulls[2] = true; - tuplestore_putvalues(tupstore, tupdesc, values, nulls); + tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls); PQfreemem(notify); PQconsumeInput(conn); diff --git a/contrib/pageinspect/brinfuncs.c b/contrib/pageinspect/brinfuncs.c index 683749a150..b7c8365218 100644 --- a/contrib/pageinspect/brinfuncs.c +++ b/contrib/pageinspect/brinfuncs.c @@ -126,9 +126,6 @@ brin_page_items(PG_FUNCTION_ARGS) bytea *raw_page = PG_GETARG_BYTEA_P(0); Oid indexRelid = PG_GETARG_OID(1); ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - TupleDesc tupdesc; - MemoryContext oldcontext; - Tuplestorestate *tupstore; Relation indexRel; brin_column_state **columns; BrinDesc *bdesc; @@ -143,29 +140,7 @@ brin_page_items(PG_FUNCTION_ARGS) (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to use raw page functions"))); - /* check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - 
ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - - /* Build a tuple descriptor for our result type */ - if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) - elog(ERROR, "return type must be a row type"); - - /* Build tuplestore to hold the result rows */ - oldcontext = MemoryContextSwitchTo(rsinfo->econtext->ecxt_per_query_memory); - - tupstore = tuplestore_begin_heap(true, false, work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - - MemoryContextSwitchTo(oldcontext); + SetSingleFuncCall(fcinfo, 0); indexRel = index_open(indexRelid, AccessShareLock); bdesc = brin_build_desc(indexRel); @@ -251,7 +226,7 @@ brin_page_items(PG_FUNCTION_ARGS) int att = attno - 1; values[0] = UInt16GetDatum(offset); - switch (TupleDescAttr(tupdesc, 1)->atttypid) + switch (TupleDescAttr(rsinfo->setDesc, 1)->atttypid) { case INT8OID: values[1] = Int64GetDatum((int64) dtup->bt_blkno); @@ -301,7 +276,7 @@ brin_page_items(PG_FUNCTION_ARGS) } } - tuplestore_putvalues(tupstore, tupdesc, values, nulls); + tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls); /* * If the item was unused, jump straight to the next one; otherwise, diff --git a/contrib/pageinspect/gistfuncs.c b/contrib/pageinspect/gistfuncs.c index 96e3cab1cc..10d6dd44d4 100644 --- a/contrib/pageinspect/gistfuncs.c +++ b/contrib/pageinspect/gistfuncs.c @@ -97,10 +97,6 @@ gist_page_items_bytea(PG_FUNCTION_ARGS) { bytea *raw_page = PG_GETARG_BYTEA_P(0); ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - bool randomAccess; - TupleDesc tupdesc; - Tuplestorestate *tupstore; - MemoryContext oldcontext; Page page; OffsetNumber offset; OffsetNumber maxoff = InvalidOffsetNumber; @@ -110,29 +106,7 @@ gist_page_items_bytea(PG_FUNCTION_ARGS) (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to use raw page functions"))); - /* check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("materialize mode required, but it is not allowed in this context"))); - - /* The tupdesc and tuplestore must be created in ecxt_per_query_memory */ - oldcontext = MemoryContextSwitchTo(rsinfo->econtext->ecxt_per_query_memory); - - if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) - elog(ERROR, "return type must be a row type"); - - randomAccess = (rsinfo->allowedModes & SFRM_Materialize_Random) != 0; - tupstore = tuplestore_begin_heap(randomAccess, false, work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - - MemoryContextSwitchTo(oldcontext); + SetSingleFuncCall(fcinfo, 0); page = get_page_from_raw(raw_page); @@ -173,7 +147,7 @@ gist_page_items_bytea(PG_FUNCTION_ARGS) values[3] = BoolGetDatum(ItemIdIsDead(id)); values[4] = PointerGetDatum(tuple_bytea); - tuplestore_putvalues(tupstore, tupdesc, values, nulls); + tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls); } return (Datum) 0; 
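
The hunks in this patch all apply the same mechanical rewrite. As a rough
sketch of the pattern, condensed from the hunks in this patch rather than
copied verbatim from any one module, the old per-function boilerplate
looked like this:

    /* check to see if caller supports us returning a tuplestore */
    if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("set-valued function called in context that cannot accept a set")));
    if (!(rsinfo->allowedModes & SFRM_Materialize))
        ereport(ERROR,
                (errcode(ERRCODE_SYNTAX_ERROR),
                 errmsg("materialize mode required, but it is not allowed in this context")));

    /* The tupdesc and tuplestore must live in ecxt_per_query_memory */
    oldcontext = MemoryContextSwitchTo(rsinfo->econtext->ecxt_per_query_memory);
    if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
        elog(ERROR, "return type must be a row type");
    tupstore = tuplestore_begin_heap((rsinfo->allowedModes & SFRM_Materialize_Random) != 0,
                                     false, work_mem);
    rsinfo->returnMode = SFRM_Materialize;
    rsinfo->setResult = tupstore;
    rsinfo->setDesc = tupdesc;
    MemoryContextSwitchTo(oldcontext);

and each converted function now reduces to:

    /* Construct the tuplestore and tuple descriptor */
    SetSingleFuncCall(fcinfo, 0);

    /* ... then emit rows through the fields the helper filled in */
    tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls);

The helper owns the memory-context switching and the sanity checks, so the
callers only keep the two fields they actually use.
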
@@ -185,11 +159,7 @@ gist_page_items(PG_FUNCTION_ARGS) bytea *raw_page = PG_GETARG_BYTEA_P(0); Oid indexRelid = PG_GETARG_OID(1); ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - bool randomAccess; Relation indexRel; - TupleDesc tupdesc; - Tuplestorestate *tupstore; - MemoryContext oldcontext; Page page; OffsetNumber offset; OffsetNumber maxoff = InvalidOffsetNumber; @@ -199,29 +169,7 @@ gist_page_items(PG_FUNCTION_ARGS) (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to use raw page functions"))); - /* check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("materialize mode required, but it is not allowed in this context"))); - - /* The tupdesc and tuplestore must be created in ecxt_per_query_memory */ - oldcontext = MemoryContextSwitchTo(rsinfo->econtext->ecxt_per_query_memory); - - if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) - elog(ERROR, "return type must be a row type"); - - randomAccess = (rsinfo->allowedModes & SFRM_Materialize_Random) != 0; - tupstore = tuplestore_begin_heap(randomAccess, false, work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - - MemoryContextSwitchTo(oldcontext); + SetSingleFuncCall(fcinfo, 0); /* Open the relation */ indexRel = index_open(indexRelid, AccessShareLock); @@ -272,7 +220,7 @@ gist_page_items(PG_FUNCTION_ARGS) nulls[4] = true; } - tuplestore_putvalues(tupstore, tupdesc, values, nulls); + tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls); } relation_close(indexRel, AccessShareLock); diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c index d803253cea..9e525a6ad3 100644 --- a/contrib/pg_stat_statements/pg_stat_statements.c +++ b/contrib/pg_stat_statements/pg_stat_statements.c @@ -1494,10 +1494,6 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo, bool showtext) { ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - TupleDesc tupdesc; - Tuplestorestate *tupstore; - MemoryContext per_query_ctx; - MemoryContext oldcontext; Oid userid = GetUserId(); bool is_allowed_role = false; char *qbuffer = NULL; @@ -1516,30 +1512,14 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("pg_stat_statements must be loaded via shared_preload_libraries"))); - /* check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - - /* Switch into long-lived context to construct returned data structures */ - per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; - oldcontext = MemoryContextSwitchTo(per_query_ctx); - - /* Build a tuple descriptor for our result type */ - if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) - elog(ERROR, "return type must be a row type"); + SetSingleFuncCall(fcinfo, 0); /* * 
Check we have the expected number of output arguments. Aside from * being a good safety check, we need a kluge here to detect API version * 1.1, which was wedged into the code in an ill-considered way. */ - switch (tupdesc->natts) + switch (rsinfo->setDesc->natts) { case PG_STAT_STATEMENTS_COLS_V1_0: if (api_version != PGSS_V1_0) @@ -1571,13 +1551,6 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo, elog(ERROR, "incorrect number of output arguments"); } - tupstore = tuplestore_begin_heap(true, false, work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - - MemoryContextSwitchTo(oldcontext); - /* * We'd like to load the query text file (if needed) while not holding any * lock on pgss->lock. In the worst case we'll have to do this again @@ -1800,7 +1773,7 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo, api_version == PGSS_V1_9 ? PG_STAT_STATEMENTS_COLS_V1_9 : -1 /* fail if you forget to update this assert */ )); - tuplestore_putvalues(tupstore, tupdesc, values, nulls); + tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls); } LWLockRelease(pgss->lock); diff --git a/contrib/pgrowlocks/pgrowlocks.c b/contrib/pgrowlocks/pgrowlocks.c index d8946dc510..713a165203 100644 --- a/contrib/pgrowlocks/pgrowlocks.c +++ b/contrib/pgrowlocks/pgrowlocks.c @@ -66,42 +66,16 @@ pgrowlocks(PG_FUNCTION_ARGS) { text *relname = PG_GETARG_TEXT_PP(0); ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - bool randomAccess; - TupleDesc tupdesc; - Tuplestorestate *tupstore; AttInMetadata *attinmeta; Relation rel; RangeVar *relrv; TableScanDesc scan; HeapScanDesc hscan; HeapTuple tuple; - MemoryContext oldcontext; AclResult aclresult; char **values; - /* check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("materialize mode required, but it is not allowed in this context"))); - - /* The tupdesc and tuplestore must be created in ecxt_per_query_memory */ - oldcontext = MemoryContextSwitchTo(rsinfo->econtext->ecxt_per_query_memory); - - if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) - elog(ERROR, "return type must be a row type"); - - randomAccess = (rsinfo->allowedModes & SFRM_Materialize_Random) != 0; - tupstore = tuplestore_begin_heap(randomAccess, false, work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - - MemoryContextSwitchTo(oldcontext); + SetSingleFuncCall(fcinfo, 0); /* Access the table */ relrv = makeRangeVarFromNameList(textToQualifiedNameList(relname)); @@ -140,9 +114,9 @@ pgrowlocks(PG_FUNCTION_ARGS) scan = table_beginscan(rel, GetActiveSnapshot(), 0, NULL); hscan = (HeapScanDesc) scan; - attinmeta = TupleDescGetAttInMetadata(tupdesc); + attinmeta = TupleDescGetAttInMetadata(rsinfo->setDesc); - values = (char **) palloc(tupdesc->natts * sizeof(char *)); + values = (char **) palloc(rsinfo->setDesc->natts * sizeof(char *)); while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL) { @@ -288,7 +262,7 @@ pgrowlocks(PG_FUNCTION_ARGS) /* build a tuple */ tuple = BuildTupleFromCStrings(attinmeta, values); - tuplestore_puttuple(tupstore, tuple); + tuplestore_puttuple(rsinfo->setResult, tuple); } else { diff --git 
a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c index 8c64d42dda..74d3e73205 100644 --- a/contrib/postgres_fdw/connection.c +++ b/contrib/postgres_fdw/connection.c @@ -1661,37 +1661,10 @@ postgres_fdw_get_connections(PG_FUNCTION_ARGS) { #define POSTGRES_FDW_GET_CONNECTIONS_COLS 2 ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - TupleDesc tupdesc; - Tuplestorestate *tupstore; - MemoryContext per_query_ctx; - MemoryContext oldcontext; HASH_SEQ_STATUS scan; ConnCacheEntry *entry; - /* check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - - /* Build a tuple descriptor for our result type */ - if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) - elog(ERROR, "return type must be a row type"); - - /* Build tuplestore to hold the result rows */ - per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; - oldcontext = MemoryContextSwitchTo(per_query_ctx); - - tupstore = tuplestore_begin_heap(true, false, work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - - MemoryContextSwitchTo(oldcontext); + SetSingleFuncCall(fcinfo, 0); /* If cache doesn't exist, we return no records */ if (!ConnectionHash) @@ -1757,7 +1730,7 @@ postgres_fdw_get_connections(PG_FUNCTION_ARGS) values[1] = BoolGetDatum(!entry->invalidated); - tuplestore_putvalues(tupstore, tupdesc, values, nulls); + tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls); } diff --git a/contrib/xml2/xpath.c b/contrib/xml2/xpath.c index a2e5fb54e2..b8ee757674 100644 --- a/contrib/xml2/xpath.c +++ b/contrib/xml2/xpath.c @@ -491,15 +491,9 @@ xpath_table(PG_FUNCTION_ARGS) HeapTuple spi_tuple; TupleDesc spi_tupdesc; - /* Output tuple (tuplestore) support */ - Tuplestorestate *tupstore = NULL; - TupleDesc ret_tupdesc; - HeapTuple ret_tuple; ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; AttInMetadata *attinmeta; - MemoryContext per_query_ctx; - MemoryContext oldcontext; char **values; xmlChar **xpaths; @@ -517,48 +511,10 @@ xpath_table(PG_FUNCTION_ARGS) PgXmlErrorContext *xmlerrcxt; volatile xmlDocPtr doctree = NULL; - /* We only have a valid tuple description in table function mode */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (rsinfo->expectedDesc == NULL) - ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("xpath_table must be called as a table function"))); - - /* - * We want to materialise because it means that we don't have to carry - * libxml2 parser state between invocations of this function - */ - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("xpath_table requires Materialize mode, but it is not " - "allowed in this context"))); - - /* - * The tuplestore must exist in a higher context than this function call - * (per_query_ctx is used) - */ - per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; - oldcontext = MemoryContextSwitchTo(per_query_ctx); - - /* - * Create the tuplestore - work_mem 
is the max in-memory size before a - * file is created on disk to hold it. - */ - tupstore = - tuplestore_begin_heap(rsinfo->allowedModes & SFRM_Materialize_Random, - false, work_mem); - - MemoryContextSwitchTo(oldcontext); - - /* get the requested return tuple description */ - ret_tupdesc = CreateTupleDescCopy(rsinfo->expectedDesc); + SetSingleFuncCall(fcinfo, SRF_SINGLE_USE_EXPECTED); /* must have at least one output column (for the pkey) */ - if (ret_tupdesc->natts < 1) + if (rsinfo->setDesc->natts < 1) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("xpath_table must have at least one output column"))); @@ -571,14 +527,10 @@ xpath_table(PG_FUNCTION_ARGS) * representation. */ - attinmeta = TupleDescGetAttInMetadata(ret_tupdesc); - - /* Set return mode and allocate value space. */ - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setDesc = ret_tupdesc; + attinmeta = TupleDescGetAttInMetadata(rsinfo->setDesc); - values = (char **) palloc(ret_tupdesc->natts * sizeof(char *)); - xpaths = (xmlChar **) palloc(ret_tupdesc->natts * sizeof(xmlChar *)); + values = (char **) palloc(rsinfo->setDesc->natts * sizeof(char *)); + xpaths = (xmlChar **) palloc(rsinfo->setDesc->natts * sizeof(xmlChar *)); /* * Split XPaths. xpathset is a writable CString. @@ -587,7 +539,7 @@ xpath_table(PG_FUNCTION_ARGS) */ numpaths = 0; pos = xpathset; - while (numpaths < (ret_tupdesc->natts - 1)) + while (numpaths < (rsinfo->setDesc->natts - 1)) { xpaths[numpaths++] = (xmlChar *) pos; pos = strstr(pos, pathsep); @@ -621,9 +573,6 @@ xpath_table(PG_FUNCTION_ARGS) tuptable = SPI_tuptable; spi_tupdesc = tuptable->tupdesc; - /* Switch out of SPI context */ - MemoryContextSwitchTo(oldcontext); - /* * Check that SPI returned correct result. If you put a comma into one of * the function parameters, this will catch it when the SPI query returns @@ -655,6 +604,7 @@ xpath_table(PG_FUNCTION_ARGS) xmlXPathObjectPtr res; xmlChar *resstr; xmlXPathCompExprPtr comppath; + HeapTuple ret_tuple; /* Extract the row data as C Strings */ spi_tuple = tuptable->vals[i]; @@ -666,7 +616,7 @@ xpath_table(PG_FUNCTION_ARGS) * return NULL in all columns. Note that this also means that * spare columns will be NULL. */ - for (j = 0; j < ret_tupdesc->natts; j++) + for (j = 0; j < rsinfo->setDesc->natts; j++) values[j] = NULL; /* Insert primary key */ @@ -682,7 +632,7 @@ xpath_table(PG_FUNCTION_ARGS) { /* not well-formed, so output all-NULL tuple */ ret_tuple = BuildTupleFromCStrings(attinmeta, values); - tuplestore_puttuple(tupstore, ret_tuple); + tuplestore_puttuple(rsinfo->setResult, ret_tuple); heap_freetuple(ret_tuple); } else @@ -749,7 +699,7 @@ xpath_table(PG_FUNCTION_ARGS) if (had_values) { ret_tuple = BuildTupleFromCStrings(attinmeta, values); - tuplestore_puttuple(tupstore, ret_tuple); + tuplestore_puttuple(rsinfo->setResult, ret_tuple); heap_freetuple(ret_tuple); } @@ -785,8 +735,6 @@ xpath_table(PG_FUNCTION_ARGS) SPI_finish(); - rsinfo->setResult = tupstore; - /* * SFRM_Materialize mode expects us to return a NULL Datum. The actual * tuples are in our tuplestore and passed back through rsinfo->setResult. From 76a29adee749f41e277459cbf2e47a2ff7777f31 Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Mon, 7 Mar 2022 18:19:56 -0800 Subject: [PATCH 096/108] plpython: Remove plpythonu, plpython2u and associated transform extensions. Since 19252e8ec93 we reject Python 2 during build configuration. Now that the dust on the buildfarm has settled, remove extension variants specific to Python 2. 
Reviewed-By: Peter Eisentraut Reviewed-By: Tom Lane Discussion: https://postgr.es/m/20211031184548.g4sxfe47n2kyi55r@alap3.anarazel.de --- contrib/hstore_plpython/Makefile | 4 ++-- .../hstore_plpython2u--1.0.sql | 19 ------------------- .../hstore_plpython/hstore_plpython2u.control | 6 ------ .../hstore_plpython/hstore_plpythonu--1.0.sql | 19 ------------------- .../hstore_plpython/hstore_plpythonu.control | 6 ------ contrib/jsonb_plpython/Makefile | 6 +++--- .../jsonb_plpython/jsonb_plpython2u--1.0.sql | 19 ------------------- .../jsonb_plpython/jsonb_plpython2u.control | 6 ------ .../jsonb_plpython/jsonb_plpythonu--1.0.sql | 19 ------------------- .../jsonb_plpython/jsonb_plpythonu.control | 6 ------ contrib/ltree_plpython/Makefile | 4 ++-- .../ltree_plpython/ltree_plpython2u--1.0.sql | 12 ------------ .../ltree_plpython/ltree_plpython2u.control | 6 ------ .../ltree_plpython/ltree_plpythonu--1.0.sql | 12 ------------ .../ltree_plpython/ltree_plpythonu.control | 6 ------ src/pl/plpython/plpython2u--1.0.sql | 17 ----------------- src/pl/plpython/plpython2u.control | 7 ------- src/pl/plpython/plpythonu--1.0.sql | 17 ----------------- src/pl/plpython/plpythonu.control | 7 ------- 19 files changed, 7 insertions(+), 191 deletions(-) delete mode 100644 contrib/hstore_plpython/hstore_plpython2u--1.0.sql delete mode 100644 contrib/hstore_plpython/hstore_plpython2u.control delete mode 100644 contrib/hstore_plpython/hstore_plpythonu--1.0.sql delete mode 100644 contrib/hstore_plpython/hstore_plpythonu.control delete mode 100644 contrib/jsonb_plpython/jsonb_plpython2u--1.0.sql delete mode 100644 contrib/jsonb_plpython/jsonb_plpython2u.control delete mode 100644 contrib/jsonb_plpython/jsonb_plpythonu--1.0.sql delete mode 100644 contrib/jsonb_plpython/jsonb_plpythonu.control delete mode 100644 contrib/ltree_plpython/ltree_plpython2u--1.0.sql delete mode 100644 contrib/ltree_plpython/ltree_plpython2u.control delete mode 100644 contrib/ltree_plpython/ltree_plpythonu--1.0.sql delete mode 100644 contrib/ltree_plpython/ltree_plpythonu.control delete mode 100644 src/pl/plpython/plpython2u--1.0.sql delete mode 100644 src/pl/plpython/plpython2u.control delete mode 100644 src/pl/plpython/plpythonu--1.0.sql delete mode 100644 src/pl/plpython/plpythonu.control diff --git a/contrib/hstore_plpython/Makefile b/contrib/hstore_plpython/Makefile index 6af097ae68..19d99a8045 100644 --- a/contrib/hstore_plpython/Makefile +++ b/contrib/hstore_plpython/Makefile @@ -6,8 +6,8 @@ OBJS = \ hstore_plpython.o PGFILEDESC = "hstore_plpython - hstore transform for plpython" -EXTENSION = hstore_plpythonu hstore_plpython2u hstore_plpython3u -DATA = hstore_plpythonu--1.0.sql hstore_plpython2u--1.0.sql hstore_plpython3u--1.0.sql +EXTENSION = hstore_plpython3u +DATA = hstore_plpython3u--1.0.sql REGRESS = hstore_plpython REGRESS_PLPYTHON3_MANGLE := $(REGRESS) diff --git a/contrib/hstore_plpython/hstore_plpython2u--1.0.sql b/contrib/hstore_plpython/hstore_plpython2u--1.0.sql deleted file mode 100644 index 800765f3f0..0000000000 --- a/contrib/hstore_plpython/hstore_plpython2u--1.0.sql +++ /dev/null @@ -1,19 +0,0 @@ -/* contrib/hstore_plpython/hstore_plpython2u--1.0.sql */ - --- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION hstore_plpython2u" to load this file. 
\quit - -CREATE FUNCTION hstore_to_plpython2(val internal) RETURNS internal -LANGUAGE C STRICT IMMUTABLE -AS 'MODULE_PATHNAME', 'hstore_to_plpython'; - -CREATE FUNCTION plpython2_to_hstore(val internal) RETURNS hstore -LANGUAGE C STRICT IMMUTABLE -AS 'MODULE_PATHNAME', 'plpython_to_hstore'; - -CREATE TRANSFORM FOR hstore LANGUAGE plpython2u ( - FROM SQL WITH FUNCTION hstore_to_plpython2(internal), - TO SQL WITH FUNCTION plpython2_to_hstore(internal) -); - -COMMENT ON TRANSFORM FOR hstore LANGUAGE plpython2u IS 'transform between hstore and Python dict'; diff --git a/contrib/hstore_plpython/hstore_plpython2u.control b/contrib/hstore_plpython/hstore_plpython2u.control deleted file mode 100644 index ed90567112..0000000000 --- a/contrib/hstore_plpython/hstore_plpython2u.control +++ /dev/null @@ -1,6 +0,0 @@ -# hstore_plpython2u extension -comment = 'transform between hstore and plpython2u' -default_version = '1.0' -module_pathname = '$libdir/hstore_plpython2' -relocatable = true -requires = 'hstore,plpython2u' diff --git a/contrib/hstore_plpython/hstore_plpythonu--1.0.sql b/contrib/hstore_plpython/hstore_plpythonu--1.0.sql deleted file mode 100644 index 52832912ab..0000000000 --- a/contrib/hstore_plpython/hstore_plpythonu--1.0.sql +++ /dev/null @@ -1,19 +0,0 @@ -/* contrib/hstore_plpython/hstore_plpythonu--1.0.sql */ - --- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION hstore_plpythonu" to load this file. \quit - -CREATE FUNCTION hstore_to_plpython(val internal) RETURNS internal -LANGUAGE C STRICT IMMUTABLE -AS 'MODULE_PATHNAME'; - -CREATE FUNCTION plpython_to_hstore(val internal) RETURNS hstore -LANGUAGE C STRICT IMMUTABLE -AS 'MODULE_PATHNAME'; - -CREATE TRANSFORM FOR hstore LANGUAGE plpythonu ( - FROM SQL WITH FUNCTION hstore_to_plpython(internal), - TO SQL WITH FUNCTION plpython_to_hstore(internal) -); - -COMMENT ON TRANSFORM FOR hstore LANGUAGE plpythonu IS 'transform between hstore and Python dict'; diff --git a/contrib/hstore_plpython/hstore_plpythonu.control b/contrib/hstore_plpython/hstore_plpythonu.control deleted file mode 100644 index 8e9b35e43b..0000000000 --- a/contrib/hstore_plpython/hstore_plpythonu.control +++ /dev/null @@ -1,6 +0,0 @@ -# hstore_plpythonu extension -comment = 'transform between hstore and plpythonu' -default_version = '1.0' -module_pathname = '$libdir/hstore_plpython2' -relocatable = true -requires = 'hstore,plpythonu' diff --git a/contrib/jsonb_plpython/Makefile b/contrib/jsonb_plpython/Makefile index ca76741894..eaab5ca260 100644 --- a/contrib/jsonb_plpython/Makefile +++ b/contrib/jsonb_plpython/Makefile @@ -4,12 +4,12 @@ MODULE_big = jsonb_plpython$(python_majorversion) OBJS = \ $(WIN32RES) \ jsonb_plpython.o -PGFILEDESC = "jsonb_plpython - transform between jsonb and plpythonu" +PGFILEDESC = "jsonb_plpython - jsonb transform for plpython" PG_CPPFLAGS = -I$(top_srcdir)/src/pl/plpython $(python_includespec) -DPLPYTHON_LIBNAME='"plpython$(python_majorversion)"' -EXTENSION = jsonb_plpythonu jsonb_plpython2u jsonb_plpython3u -DATA = jsonb_plpythonu--1.0.sql jsonb_plpython2u--1.0.sql jsonb_plpython3u--1.0.sql +EXTENSION = jsonb_plpython3u +DATA = jsonb_plpython3u--1.0.sql REGRESS = jsonb_plpython REGRESS_PLPYTHON3_MANGLE := $(REGRESS) diff --git a/contrib/jsonb_plpython/jsonb_plpython2u--1.0.sql b/contrib/jsonb_plpython/jsonb_plpython2u--1.0.sql deleted file mode 100644 index 2526d14ee1..0000000000 --- a/contrib/jsonb_plpython/jsonb_plpython2u--1.0.sql +++ /dev/null @@ -1,19 +0,0 @@ -/* 
contrib/jsonb_plpython/jsonb_plpython2u--1.0.sql */ - --- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION jsonb_plpython2u" to load this file. \quit - -CREATE FUNCTION jsonb_to_plpython2(val internal) RETURNS internal -LANGUAGE C STRICT IMMUTABLE -AS 'MODULE_PATHNAME', 'jsonb_to_plpython'; - -CREATE FUNCTION plpython2_to_jsonb(val internal) RETURNS jsonb -LANGUAGE C STRICT IMMUTABLE -AS 'MODULE_PATHNAME', 'plpython_to_jsonb'; - -CREATE TRANSFORM FOR jsonb LANGUAGE plpython2u ( - FROM SQL WITH FUNCTION jsonb_to_plpython2(internal), - TO SQL WITH FUNCTION plpython2_to_jsonb(internal) -); - -COMMENT ON TRANSFORM FOR jsonb LANGUAGE plpython2u IS 'transform between jsonb and Python'; diff --git a/contrib/jsonb_plpython/jsonb_plpython2u.control b/contrib/jsonb_plpython/jsonb_plpython2u.control deleted file mode 100644 index d26368316b..0000000000 --- a/contrib/jsonb_plpython/jsonb_plpython2u.control +++ /dev/null @@ -1,6 +0,0 @@ -# jsonb_plpython2u extension -comment = 'transform between jsonb and plpython2u' -default_version = '1.0' -module_pathname = '$libdir/jsonb_plpython2' -relocatable = true -requires = 'plpython2u' diff --git a/contrib/jsonb_plpython/jsonb_plpythonu--1.0.sql b/contrib/jsonb_plpython/jsonb_plpythonu--1.0.sql deleted file mode 100644 index 3fa89885a6..0000000000 --- a/contrib/jsonb_plpython/jsonb_plpythonu--1.0.sql +++ /dev/null @@ -1,19 +0,0 @@ -/* contrib/jsonb_plpython/jsonb_plpythonu--1.0.sql */ - --- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION jsonb_plpythonu" to load this file. \quit - -CREATE FUNCTION jsonb_to_plpython(val internal) RETURNS internal -LANGUAGE C STRICT IMMUTABLE -AS 'MODULE_PATHNAME'; - -CREATE FUNCTION plpython_to_jsonb(val internal) RETURNS jsonb -LANGUAGE C STRICT IMMUTABLE -AS 'MODULE_PATHNAME'; - -CREATE TRANSFORM FOR jsonb LANGUAGE plpythonu ( - FROM SQL WITH FUNCTION jsonb_to_plpython(internal), - TO SQL WITH FUNCTION plpython_to_jsonb(internal) -); - -COMMENT ON TRANSFORM FOR jsonb LANGUAGE plpythonu IS 'transform between jsonb and Python'; diff --git a/contrib/jsonb_plpython/jsonb_plpythonu.control b/contrib/jsonb_plpython/jsonb_plpythonu.control deleted file mode 100644 index 6f8fa4f184..0000000000 --- a/contrib/jsonb_plpython/jsonb_plpythonu.control +++ /dev/null @@ -1,6 +0,0 @@ -# jsonb_plpythonu extension -comment = 'transform between jsonb and plpythonu' -default_version = '1.0' -module_pathname = '$libdir/jsonb_plpython2' -relocatable = true -requires = 'plpythonu' diff --git a/contrib/ltree_plpython/Makefile b/contrib/ltree_plpython/Makefile index 12a0146772..0bccb111e6 100644 --- a/contrib/ltree_plpython/Makefile +++ b/contrib/ltree_plpython/Makefile @@ -6,8 +6,8 @@ OBJS = \ ltree_plpython.o PGFILEDESC = "ltree_plpython - ltree transform for plpython" -EXTENSION = ltree_plpythonu ltree_plpython2u ltree_plpython3u -DATA = ltree_plpythonu--1.0.sql ltree_plpython2u--1.0.sql ltree_plpython3u--1.0.sql +EXTENSION = ltree_plpython3u +DATA = ltree_plpython3u--1.0.sql REGRESS = ltree_plpython REGRESS_PLPYTHON3_MANGLE := $(REGRESS) diff --git a/contrib/ltree_plpython/ltree_plpython2u--1.0.sql b/contrib/ltree_plpython/ltree_plpython2u--1.0.sql deleted file mode 100644 index 5c4a703701..0000000000 --- a/contrib/ltree_plpython/ltree_plpython2u--1.0.sql +++ /dev/null @@ -1,12 +0,0 @@ -/* contrib/ltree_plpython/ltree_plpython2u--1.0.sql */ - --- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE 
EXTENSION ltree_plpython2u" to load this file. \quit - -CREATE FUNCTION ltree_to_plpython2(val internal) RETURNS internal -LANGUAGE C STRICT IMMUTABLE -AS 'MODULE_PATHNAME', 'ltree_to_plpython'; - -CREATE TRANSFORM FOR ltree LANGUAGE plpython2u ( - FROM SQL WITH FUNCTION ltree_to_plpython2(internal) -); diff --git a/contrib/ltree_plpython/ltree_plpython2u.control b/contrib/ltree_plpython/ltree_plpython2u.control deleted file mode 100644 index bedfd0acba..0000000000 --- a/contrib/ltree_plpython/ltree_plpython2u.control +++ /dev/null @@ -1,6 +0,0 @@ -# ltree_plpython2u extension -comment = 'transform between ltree and plpython2u' -default_version = '1.0' -module_pathname = '$libdir/ltree_plpython2' -relocatable = true -requires = 'ltree,plpython2u' diff --git a/contrib/ltree_plpython/ltree_plpythonu--1.0.sql b/contrib/ltree_plpython/ltree_plpythonu--1.0.sql deleted file mode 100644 index ee93edf28b..0000000000 --- a/contrib/ltree_plpython/ltree_plpythonu--1.0.sql +++ /dev/null @@ -1,12 +0,0 @@ -/* contrib/ltree_plpython/ltree_plpythonu--1.0.sql */ - --- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION ltree_plpythonu" to load this file. \quit - -CREATE FUNCTION ltree_to_plpython(val internal) RETURNS internal -LANGUAGE C STRICT IMMUTABLE -AS 'MODULE_PATHNAME'; - -CREATE TRANSFORM FOR ltree LANGUAGE plpythonu ( - FROM SQL WITH FUNCTION ltree_to_plpython(internal) -); diff --git a/contrib/ltree_plpython/ltree_plpythonu.control b/contrib/ltree_plpython/ltree_plpythonu.control deleted file mode 100644 index b03c89a2e6..0000000000 --- a/contrib/ltree_plpython/ltree_plpythonu.control +++ /dev/null @@ -1,6 +0,0 @@ -# ltree_plpythonu extension -comment = 'transform between ltree and plpythonu' -default_version = '1.0' -module_pathname = '$libdir/ltree_plpython2' -relocatable = true -requires = 'ltree,plpythonu' diff --git a/src/pl/plpython/plpython2u--1.0.sql b/src/pl/plpython/plpython2u--1.0.sql deleted file mode 100644 index 69f7477567..0000000000 --- a/src/pl/plpython/plpython2u--1.0.sql +++ /dev/null @@ -1,17 +0,0 @@ -/* src/pl/plpython/plpython2u--1.0.sql */ - -CREATE FUNCTION plpython2_call_handler() RETURNS language_handler - LANGUAGE c AS 'MODULE_PATHNAME'; - -CREATE FUNCTION plpython2_inline_handler(internal) RETURNS void - STRICT LANGUAGE c AS 'MODULE_PATHNAME'; - -CREATE FUNCTION plpython2_validator(oid) RETURNS void - STRICT LANGUAGE c AS 'MODULE_PATHNAME'; - -CREATE LANGUAGE plpython2u - HANDLER plpython2_call_handler - INLINE plpython2_inline_handler - VALIDATOR plpython2_validator; - -COMMENT ON LANGUAGE plpython2u IS 'PL/Python2U untrusted procedural language'; diff --git a/src/pl/plpython/plpython2u.control b/src/pl/plpython/plpython2u.control deleted file mode 100644 index 39c2b791ef..0000000000 --- a/src/pl/plpython/plpython2u.control +++ /dev/null @@ -1,7 +0,0 @@ -# plpython2u extension -comment = 'PL/Python2U untrusted procedural language' -default_version = '1.0' -module_pathname = '$libdir/plpython2' -relocatable = false -schema = pg_catalog -superuser = true diff --git a/src/pl/plpython/plpythonu--1.0.sql b/src/pl/plpython/plpythonu--1.0.sql deleted file mode 100644 index 4c6f7c3f14..0000000000 --- a/src/pl/plpython/plpythonu--1.0.sql +++ /dev/null @@ -1,17 +0,0 @@ -/* src/pl/plpython/plpythonu--1.0.sql */ - -CREATE FUNCTION plpython_call_handler() RETURNS language_handler - LANGUAGE c AS 'MODULE_PATHNAME'; - -CREATE FUNCTION plpython_inline_handler(internal) RETURNS void - STRICT LANGUAGE c AS 'MODULE_PATHNAME'; - -CREATE 
FUNCTION plpython_validator(oid) RETURNS void - STRICT LANGUAGE c AS 'MODULE_PATHNAME'; - -CREATE LANGUAGE plpythonu - HANDLER plpython_call_handler - INLINE plpython_inline_handler - VALIDATOR plpython_validator; - -COMMENT ON LANGUAGE plpythonu IS 'PL/PythonU untrusted procedural language'; diff --git a/src/pl/plpython/plpythonu.control b/src/pl/plpython/plpythonu.control deleted file mode 100644 index ae91b1c255..0000000000 --- a/src/pl/plpython/plpythonu.control +++ /dev/null @@ -1,7 +0,0 @@ -# plpythonu extension -comment = 'PL/PythonU untrusted procedural language' -default_version = '1.0' -module_pathname = '$libdir/plpython2' -relocatable = false -schema = pg_catalog -superuser = true From db23464715f4792298c639153dda7bfd9ad9d602 Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Mon, 7 Mar 2022 18:19:56 -0800 Subject: [PATCH 097/108] plpython: Remove regression test infrastructure for Python 2. Since 19252e8ec93 we reject Python 2 during build configuration. Now that the dust on the buildfarm has settled, remove regression testing infrastructure dealing with differing output between Python 2 / 3. Reviewed-By: Peter Eisentraut Reviewed-By: Tom Lane Discussion: https://postgr.es/m/20211031184548.g4sxfe47n2kyi55r@alap3.anarazel.de --- contrib/hstore_plpython/.gitignore | 2 - contrib/hstore_plpython/Makefile | 6 - .../expected/hstore_plpython.out | 22 +- .../hstore_plpython/sql/hstore_plpython.sql | 18 +- contrib/jsonb_plpython/.gitignore | 2 - contrib/jsonb_plpython/Makefile | 7 - .../expected/jsonb_plpython.out | 32 +- contrib/jsonb_plpython/sql/jsonb_plpython.sql | 30 +- contrib/ltree_plpython/.gitignore | 2 - contrib/ltree_plpython/Makefile | 6 - .../expected/ltree_plpython.out | 10 +- contrib/ltree_plpython/sql/ltree_plpython.sql | 8 +- src/pl/plpython/.gitignore | 2 - src/pl/plpython/Makefile | 14 - src/pl/plpython/expected/plpython_call.out | 12 +- .../plpython/expected/plpython_composite.out | 32 +- src/pl/plpython/expected/plpython_do.out | 8 +- src/pl/plpython/expected/plpython_drop.out | 3 +- src/pl/plpython/expected/plpython_ereport.out | 37 +- src/pl/plpython/expected/plpython_error.out | 52 +- src/pl/plpython/expected/plpython_error_5.out | 447 -------- src/pl/plpython/expected/plpython_global.out | 6 +- src/pl/plpython/expected/plpython_import.out | 8 +- src/pl/plpython/expected/plpython_newline.out | 6 +- src/pl/plpython/expected/plpython_params.out | 8 +- src/pl/plpython/expected/plpython_quote.out | 2 +- src/pl/plpython/expected/plpython_record.out | 18 +- src/pl/plpython/expected/plpython_setof.out | 18 +- src/pl/plpython/expected/plpython_spi.out | 50 +- .../expected/plpython_subtransaction.out | 38 +- src/pl/plpython/expected/plpython_test.out | 12 +- .../expected/plpython_transaction.out | 28 +- src/pl/plpython/expected/plpython_trigger.out | 46 +- src/pl/plpython/expected/plpython_types.out | 230 ++-- src/pl/plpython/expected/plpython_types_3.out | 1009 ----------------- src/pl/plpython/expected/plpython_unicode.out | 16 +- src/pl/plpython/expected/plpython_void.out | 6 +- src/pl/plpython/regress-python3-mangle.mk | 38 - src/pl/plpython/sql/plpython_call.sql | 12 +- src/pl/plpython/sql/plpython_composite.sql | 32 +- src/pl/plpython/sql/plpython_do.sql | 6 +- src/pl/plpython/sql/plpython_drop.sql | 4 +- src/pl/plpython/sql/plpython_ereport.sql | 26 +- src/pl/plpython/sql/plpython_error.sql | 48 +- src/pl/plpython/sql/plpython_global.sql | 6 +- src/pl/plpython/sql/plpython_import.sql | 8 +- src/pl/plpython/sql/plpython_newline.sql | 6 +- 
src/pl/plpython/sql/plpython_params.sql | 8 +- src/pl/plpython/sql/plpython_quote.sql | 2 +- src/pl/plpython/sql/plpython_record.sql | 18 +- src/pl/plpython/sql/plpython_setof.sql | 18 +- src/pl/plpython/sql/plpython_spi.sql | 50 +- .../plpython/sql/plpython_subtransaction.sql | 38 +- src/pl/plpython/sql/plpython_test.sql | 12 +- src/pl/plpython/sql/plpython_transaction.sql | 26 +- src/pl/plpython/sql/plpython_trigger.sql | 46 +- src/pl/plpython/sql/plpython_types.sql | 106 +- src/pl/plpython/sql/plpython_unicode.sql | 16 +- src/pl/plpython/sql/plpython_void.sql | 6 +- src/tools/msvc/vcregress.pl | 76 +- 60 files changed, 625 insertions(+), 2236 deletions(-) delete mode 100644 src/pl/plpython/expected/plpython_error_5.out delete mode 100644 src/pl/plpython/expected/plpython_types_3.out delete mode 100644 src/pl/plpython/regress-python3-mangle.mk diff --git a/contrib/hstore_plpython/.gitignore b/contrib/hstore_plpython/.gitignore index ce6fab94a0..5dcb3ff972 100644 --- a/contrib/hstore_plpython/.gitignore +++ b/contrib/hstore_plpython/.gitignore @@ -1,6 +1,4 @@ # Generated subdirectories -/expected/python3/ /log/ /results/ -/sql/python3/ /tmp_check/ diff --git a/contrib/hstore_plpython/Makefile b/contrib/hstore_plpython/Makefile index 19d99a8045..9d88cda1d0 100644 --- a/contrib/hstore_plpython/Makefile +++ b/contrib/hstore_plpython/Makefile @@ -10,7 +10,6 @@ EXTENSION = hstore_plpython3u DATA = hstore_plpython3u--1.0.sql REGRESS = hstore_plpython -REGRESS_PLPYTHON3_MANGLE := $(REGRESS) PG_CPPFLAGS = $(python_includespec) -DPLPYTHON_LIBNAME='"plpython$(python_majorversion)"' @@ -37,9 +36,4 @@ SHLIB_LINK += $(python_libspec) $(python_additional_libs) endif REGRESS_OPTS += --load-extension=hstore -ifeq ($(python_majorversion),2) -REGRESS_OPTS += --load-extension=plpythonu --load-extension=hstore_plpythonu -endif EXTRA_INSTALL += contrib/hstore - -include $(top_srcdir)/src/pl/plpython/regress-python3-mangle.mk diff --git a/contrib/hstore_plpython/expected/hstore_plpython.out b/contrib/hstore_plpython/expected/hstore_plpython.out index ecf1dd61bc..bf238701fe 100644 --- a/contrib/hstore_plpython/expected/hstore_plpython.out +++ b/contrib/hstore_plpython/expected/hstore_plpython.out @@ -1,8 +1,8 @@ -CREATE EXTENSION hstore_plpython2u CASCADE; -NOTICE: installing required extension "plpython2u" +CREATE EXTENSION hstore_plpython3u CASCADE; +NOTICE: installing required extension "plpython3u" -- test hstore -> python CREATE FUNCTION test1(val hstore) RETURNS int -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE hstore AS $$ assert isinstance(val, dict) @@ -18,7 +18,7 @@ INFO: [('aa', 'bb'), ('cc', None)] -- the same with the versioned language name CREATE FUNCTION test1n(val hstore) RETURNS int -LANGUAGE plpython2u +LANGUAGE plpython3u TRANSFORM FOR TYPE hstore AS $$ assert isinstance(val, dict) @@ -34,7 +34,7 @@ INFO: [('aa', 'bb'), ('cc', None)] -- test hstore[] -> python CREATE FUNCTION test1arr(val hstore[]) RETURNS int -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE hstore AS $$ assert(val == [{'aa': 'bb', 'cc': None}, {'dd': 'ee'}]) @@ -48,7 +48,7 @@ SELECT test1arr(array['aa=>bb, cc=>NULL'::hstore, 'dd=>ee']); -- test python -> hstore CREATE FUNCTION test2(a int, b text) RETURNS hstore -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE hstore AS $$ val = {'a': a, 'b': b, 'c': None} @@ -65,14 +65,14 @@ SELECT test2(1, 'boo'); CREATE OR REPLACE FUNCTION public.test2(a integer, b text) RETURNS hstore TRANSFORM FOR TYPE hstore - LANGUAGE plpythonu + LANGUAGE 
plpython3u AS $function$ val = {'a': a, 'b': b, 'c': None} return val $function$ -- test python -> hstore[] CREATE FUNCTION test2arr() RETURNS hstore[] -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE hstore AS $$ val = [{'a': 1, 'b': 'boo', 'c': None}, {'d': 2}] @@ -87,7 +87,7 @@ SELECT test2arr(); -- test python -> domain over hstore CREATE DOMAIN hstore_foo AS hstore CHECK(VALUE ? 'foo'); CREATE FUNCTION test2dom(fn text) RETURNS hstore_foo -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE hstore AS $$ return {'a': 1, fn: 'boo', 'c': None} @@ -104,7 +104,7 @@ CONTEXT: while creating return value PL/Python function "test2dom" -- test as part of prepare/execute CREATE FUNCTION test3() RETURNS void -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE hstore AS $$ rv = plpy.execute("SELECT 'aa=>bb, cc=>NULL'::hstore AS col1") @@ -131,7 +131,7 @@ SELECT * FROM test1; (1 row) CREATE FUNCTION test4() RETURNS trigger -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE hstore AS $$ assert(TD["new"] == {'a': 1, 'b': {'aa': 'bb', 'cc': None}}) diff --git a/contrib/hstore_plpython/sql/hstore_plpython.sql b/contrib/hstore_plpython/sql/hstore_plpython.sql index b6d98b7dd5..a9cfbbe13e 100644 --- a/contrib/hstore_plpython/sql/hstore_plpython.sql +++ b/contrib/hstore_plpython/sql/hstore_plpython.sql @@ -1,9 +1,9 @@ -CREATE EXTENSION hstore_plpython2u CASCADE; +CREATE EXTENSION hstore_plpython3u CASCADE; -- test hstore -> python CREATE FUNCTION test1(val hstore) RETURNS int -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE hstore AS $$ assert isinstance(val, dict) @@ -16,7 +16,7 @@ SELECT test1('aa=>bb, cc=>NULL'::hstore); -- the same with the versioned language name CREATE FUNCTION test1n(val hstore) RETURNS int -LANGUAGE plpython2u +LANGUAGE plpython3u TRANSFORM FOR TYPE hstore AS $$ assert isinstance(val, dict) @@ -29,7 +29,7 @@ SELECT test1n('aa=>bb, cc=>NULL'::hstore); -- test hstore[] -> python CREATE FUNCTION test1arr(val hstore[]) RETURNS int -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE hstore AS $$ assert(val == [{'aa': 'bb', 'cc': None}, {'dd': 'ee'}]) @@ -41,7 +41,7 @@ SELECT test1arr(array['aa=>bb, cc=>NULL'::hstore, 'dd=>ee']); -- test python -> hstore CREATE FUNCTION test2(a int, b text) RETURNS hstore -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE hstore AS $$ val = {'a': a, 'b': b, 'c': None} @@ -56,7 +56,7 @@ SELECT test2(1, 'boo'); -- test python -> hstore[] CREATE FUNCTION test2arr() RETURNS hstore[] -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE hstore AS $$ val = [{'a': 1, 'b': 'boo', 'c': None}, {'d': 2}] @@ -70,7 +70,7 @@ SELECT test2arr(); CREATE DOMAIN hstore_foo AS hstore CHECK(VALUE ? 
'foo'); CREATE FUNCTION test2dom(fn text) RETURNS hstore_foo -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE hstore AS $$ return {'a': 1, fn: 'boo', 'c': None} @@ -82,7 +82,7 @@ SELECT test2dom('bar'); -- fail -- test as part of prepare/execute CREATE FUNCTION test3() RETURNS void -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE hstore AS $$ rv = plpy.execute("SELECT 'aa=>bb, cc=>NULL'::hstore AS col1") @@ -103,7 +103,7 @@ INSERT INTO test1 VALUES (1, 'aa=>bb, cc=>NULL'); SELECT * FROM test1; CREATE FUNCTION test4() RETURNS trigger -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE hstore AS $$ assert(TD["new"] == {'a': 1, 'b': {'aa': 'bb', 'cc': None}}) diff --git a/contrib/jsonb_plpython/.gitignore b/contrib/jsonb_plpython/.gitignore index ce6fab94a0..5dcb3ff972 100644 --- a/contrib/jsonb_plpython/.gitignore +++ b/contrib/jsonb_plpython/.gitignore @@ -1,6 +1,4 @@ # Generated subdirectories -/expected/python3/ /log/ /results/ -/sql/python3/ /tmp_check/ diff --git a/contrib/jsonb_plpython/Makefile b/contrib/jsonb_plpython/Makefile index eaab5ca260..fea7bdfc00 100644 --- a/contrib/jsonb_plpython/Makefile +++ b/contrib/jsonb_plpython/Makefile @@ -12,7 +12,6 @@ EXTENSION = jsonb_plpython3u DATA = jsonb_plpython3u--1.0.sql REGRESS = jsonb_plpython -REGRESS_PLPYTHON3_MANGLE := $(REGRESS) ifdef USE_PGXS PG_CONFIG = pg_config @@ -33,9 +32,3 @@ else rpathdir = $(python_libdir) SHLIB_LINK += $(python_libspec) $(python_additional_libs) endif - -ifeq ($(python_majorversion),2) -REGRESS_OPTS += --load-extension=plpythonu --load-extension=jsonb_plpythonu -endif - -include $(top_srcdir)/src/pl/plpython/regress-python3-mangle.mk diff --git a/contrib/jsonb_plpython/expected/jsonb_plpython.out b/contrib/jsonb_plpython/expected/jsonb_plpython.out index b491fe9cc6..cac963de69 100644 --- a/contrib/jsonb_plpython/expected/jsonb_plpython.out +++ b/contrib/jsonb_plpython/expected/jsonb_plpython.out @@ -1,8 +1,8 @@ -CREATE EXTENSION jsonb_plpython2u CASCADE; -NOTICE: installing required extension "plpython2u" +CREATE EXTENSION jsonb_plpython3u CASCADE; +NOTICE: installing required extension "plpython3u" -- test jsonb -> python dict CREATE FUNCTION test1(val jsonb) RETURNS int -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE jsonb AS $$ assert isinstance(val, dict) @@ -18,7 +18,7 @@ SELECT test1('{"a": 1, "c": "NULL"}'::jsonb); -- test jsonb -> python dict -- complex dict with dicts as value CREATE FUNCTION test1complex(val jsonb) RETURNS int -LANGUAGE plpython2u +LANGUAGE plpython3u TRANSFORM FOR TYPE jsonb AS $$ assert isinstance(val, dict) @@ -34,7 +34,7 @@ SELECT test1complex('{"d": {"d": 1}}'::jsonb); -- test jsonb[] -> python dict -- dict with array as value CREATE FUNCTION test1arr(val jsonb) RETURNS int -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE jsonb AS $$ assert isinstance(val, dict) @@ -50,7 +50,7 @@ SELECT test1arr('{"d":[12, 1]}'::jsonb); -- test jsonb[] -> python list -- simple list CREATE FUNCTION test2arr(val jsonb) RETURNS int -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE jsonb AS $$ assert isinstance(val, list) @@ -66,7 +66,7 @@ SELECT test2arr('[12, 1]'::jsonb); -- test jsonb[] -> python list -- array of dicts CREATE FUNCTION test3arr(val jsonb) RETURNS int -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE jsonb AS $$ assert isinstance(val, list) @@ -81,7 +81,7 @@ SELECT test3arr('[{"a": 1, "b": 2}, {"c": 3,"d": 4}]'::jsonb); -- test jsonb int -> python int CREATE FUNCTION test1int(val jsonb) RETURNS int 
-LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE jsonb AS $$ assert(val == 1) @@ -95,7 +95,7 @@ SELECT test1int('1'::jsonb); -- test jsonb string -> python string CREATE FUNCTION test1string(val jsonb) RETURNS text -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE jsonb AS $$ assert(val == "a") @@ -109,7 +109,7 @@ SELECT test1string('"a"'::jsonb); -- test jsonb null -> python None CREATE FUNCTION test1null(val jsonb) RETURNS int -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE jsonb AS $$ assert(val == None) @@ -123,7 +123,7 @@ SELECT test1null('null'::jsonb); -- test python -> jsonb CREATE FUNCTION roundtrip(val jsonb) RETURNS jsonb -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE jsonb as $$ return val @@ -238,7 +238,7 @@ SELECT roundtrip('["string", "string2"]'::jsonb); -- complex numbers -> jsonb CREATE FUNCTION testComplexNumbers() RETURNS jsonb -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE jsonb AS $$ x = 1 + 2j @@ -250,7 +250,7 @@ CONTEXT: while creating return value PL/Python function "testcomplexnumbers" -- range -> jsonb CREATE FUNCTION testRange() RETURNS jsonb -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE jsonb AS $$ x = range(3) @@ -264,7 +264,7 @@ SELECT testRange(); -- 0xff -> jsonb CREATE FUNCTION testDecimal() RETURNS jsonb -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE jsonb AS $$ x = 0xff @@ -278,7 +278,7 @@ SELECT testDecimal(); -- tuple -> jsonb CREATE FUNCTION testTuple() RETURNS jsonb -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE jsonb AS $$ x = (1, 'String', None) @@ -292,7 +292,7 @@ SELECT testTuple(); -- interesting dict -> jsonb CREATE FUNCTION test_dict1() RETURNS jsonb -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE jsonb AS $$ x = {"a": 1, None: 2, 33: 3} diff --git a/contrib/jsonb_plpython/sql/jsonb_plpython.sql b/contrib/jsonb_plpython/sql/jsonb_plpython.sql index 2ee1bca0a9..29dc33279a 100644 --- a/contrib/jsonb_plpython/sql/jsonb_plpython.sql +++ b/contrib/jsonb_plpython/sql/jsonb_plpython.sql @@ -1,8 +1,8 @@ -CREATE EXTENSION jsonb_plpython2u CASCADE; +CREATE EXTENSION jsonb_plpython3u CASCADE; -- test jsonb -> python dict CREATE FUNCTION test1(val jsonb) RETURNS int -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE jsonb AS $$ assert isinstance(val, dict) @@ -15,7 +15,7 @@ SELECT test1('{"a": 1, "c": "NULL"}'::jsonb); -- test jsonb -> python dict -- complex dict with dicts as value CREATE FUNCTION test1complex(val jsonb) RETURNS int -LANGUAGE plpython2u +LANGUAGE plpython3u TRANSFORM FOR TYPE jsonb AS $$ assert isinstance(val, dict) @@ -29,7 +29,7 @@ SELECT test1complex('{"d": {"d": 1}}'::jsonb); -- test jsonb[] -> python dict -- dict with array as value CREATE FUNCTION test1arr(val jsonb) RETURNS int -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE jsonb AS $$ assert isinstance(val, dict) @@ -42,7 +42,7 @@ SELECT test1arr('{"d":[12, 1]}'::jsonb); -- test jsonb[] -> python list -- simple list CREATE FUNCTION test2arr(val jsonb) RETURNS int -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE jsonb AS $$ assert isinstance(val, list) @@ -55,7 +55,7 @@ SELECT test2arr('[12, 1]'::jsonb); -- test jsonb[] -> python list -- array of dicts CREATE FUNCTION test3arr(val jsonb) RETURNS int -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE jsonb AS $$ assert isinstance(val, list) @@ -67,7 +67,7 @@ SELECT test3arr('[{"a": 1, "b": 2}, {"c": 3,"d": 4}]'::jsonb); -- test jsonb int -> python int CREATE FUNCTION test1int(val 
jsonb) RETURNS int -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE jsonb AS $$ assert(val == 1) @@ -78,7 +78,7 @@ SELECT test1int('1'::jsonb); -- test jsonb string -> python string CREATE FUNCTION test1string(val jsonb) RETURNS text -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE jsonb AS $$ assert(val == "a") @@ -89,7 +89,7 @@ SELECT test1string('"a"'::jsonb); -- test jsonb null -> python None CREATE FUNCTION test1null(val jsonb) RETURNS int -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE jsonb AS $$ assert(val == None) @@ -100,7 +100,7 @@ SELECT test1null('null'::jsonb); -- test python -> jsonb CREATE FUNCTION roundtrip(val jsonb) RETURNS jsonb -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE jsonb as $$ return val @@ -129,7 +129,7 @@ SELECT roundtrip('["string", "string2"]'::jsonb); -- complex numbers -> jsonb CREATE FUNCTION testComplexNumbers() RETURNS jsonb -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE jsonb AS $$ x = 1 + 2j @@ -140,7 +140,7 @@ SELECT testComplexNumbers(); -- range -> jsonb CREATE FUNCTION testRange() RETURNS jsonb -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE jsonb AS $$ x = range(3) @@ -151,7 +151,7 @@ SELECT testRange(); -- 0xff -> jsonb CREATE FUNCTION testDecimal() RETURNS jsonb -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE jsonb AS $$ x = 0xff @@ -162,7 +162,7 @@ SELECT testDecimal(); -- tuple -> jsonb CREATE FUNCTION testTuple() RETURNS jsonb -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE jsonb AS $$ x = (1, 'String', None) @@ -173,7 +173,7 @@ SELECT testTuple(); -- interesting dict -> jsonb CREATE FUNCTION test_dict1() RETURNS jsonb -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE jsonb AS $$ x = {"a": 1, None: 2, 33: 3} diff --git a/contrib/ltree_plpython/.gitignore b/contrib/ltree_plpython/.gitignore index ce6fab94a0..5dcb3ff972 100644 --- a/contrib/ltree_plpython/.gitignore +++ b/contrib/ltree_plpython/.gitignore @@ -1,6 +1,4 @@ # Generated subdirectories -/expected/python3/ /log/ /results/ -/sql/python3/ /tmp_check/ diff --git a/contrib/ltree_plpython/Makefile b/contrib/ltree_plpython/Makefile index 0bccb111e6..406d2789c9 100644 --- a/contrib/ltree_plpython/Makefile +++ b/contrib/ltree_plpython/Makefile @@ -10,7 +10,6 @@ EXTENSION = ltree_plpython3u DATA = ltree_plpython3u--1.0.sql REGRESS = ltree_plpython -REGRESS_PLPYTHON3_MANGLE := $(REGRESS) PG_CPPFLAGS = $(python_includespec) -DPLPYTHON_LIBNAME='"plpython$(python_majorversion)"' @@ -37,9 +36,4 @@ SHLIB_LINK += $(python_libspec) $(python_additional_libs) endif REGRESS_OPTS += --load-extension=ltree -ifeq ($(python_majorversion),2) -REGRESS_OPTS += --load-extension=plpythonu --load-extension=ltree_plpythonu -endif EXTRA_INSTALL += contrib/ltree - -include $(top_srcdir)/src/pl/plpython/regress-python3-mangle.mk diff --git a/contrib/ltree_plpython/expected/ltree_plpython.out b/contrib/ltree_plpython/expected/ltree_plpython.out index f28897fee4..bd32541fdb 100644 --- a/contrib/ltree_plpython/expected/ltree_plpython.out +++ b/contrib/ltree_plpython/expected/ltree_plpython.out @@ -1,7 +1,7 @@ -CREATE EXTENSION ltree_plpython2u CASCADE; -NOTICE: installing required extension "plpython2u" +CREATE EXTENSION ltree_plpython3u CASCADE; +NOTICE: installing required extension "plpython3u" CREATE FUNCTION test1(val ltree) RETURNS int -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE ltree AS $$ plpy.info(repr(val)) @@ -15,7 +15,7 @@ INFO: ['aa', 'bb', 'cc'] (1 row) CREATE FUNCTION test1n(val 
ltree) RETURNS int -LANGUAGE plpython2u +LANGUAGE plpython3u TRANSFORM FOR TYPE ltree AS $$ plpy.info(repr(val)) @@ -29,7 +29,7 @@ INFO: ['aa', 'bb', 'cc'] (1 row) CREATE FUNCTION test2() RETURNS ltree -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE ltree AS $$ return ['foo', 'bar', 'baz'] diff --git a/contrib/ltree_plpython/sql/ltree_plpython.sql b/contrib/ltree_plpython/sql/ltree_plpython.sql index 210f5428a5..0b8d28399a 100644 --- a/contrib/ltree_plpython/sql/ltree_plpython.sql +++ b/contrib/ltree_plpython/sql/ltree_plpython.sql @@ -1,8 +1,8 @@ -CREATE EXTENSION ltree_plpython2u CASCADE; +CREATE EXTENSION ltree_plpython3u CASCADE; CREATE FUNCTION test1(val ltree) RETURNS int -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE ltree AS $$ plpy.info(repr(val)) @@ -13,7 +13,7 @@ SELECT test1('aa.bb.cc'::ltree); CREATE FUNCTION test1n(val ltree) RETURNS int -LANGUAGE plpython2u +LANGUAGE plpython3u TRANSFORM FOR TYPE ltree AS $$ plpy.info(repr(val)) @@ -24,7 +24,7 @@ SELECT test1n('aa.bb.cc'::ltree); CREATE FUNCTION test2() RETURNS ltree -LANGUAGE plpythonu +LANGUAGE plpython3u TRANSFORM FOR TYPE ltree AS $$ return ['foo', 'bar', 'baz'] diff --git a/src/pl/plpython/.gitignore b/src/pl/plpython/.gitignore index 70c08db323..07bee6a29c 100644 --- a/src/pl/plpython/.gitignore +++ b/src/pl/plpython/.gitignore @@ -1,7 +1,5 @@ /spiexceptions.h # Generated subdirectories -/expected/python3/ /log/ /results/ -/sql/python3/ /tmp_check/ diff --git a/src/pl/plpython/Makefile b/src/pl/plpython/Makefile index a83ae8865c..0d6f74de71 100644 --- a/src/pl/plpython/Makefile +++ b/src/pl/plpython/Makefile @@ -35,9 +35,6 @@ OBJS = \ plpy_util.o DATA = $(NAME)u.control $(NAME)u--1.0.sql -ifeq ($(python_majorversion),2) -DATA += plpythonu.control plpythonu--1.0.sql -endif # header files to install - it's not clear which of these might be needed # so install them all. @@ -77,11 +74,6 @@ endif # win32 SHLIB_LINK = $(python_libspec) $(python_additional_libs) $(filter -lintl,$(LIBS)) REGRESS_OPTS = --dbname=$(PL_TESTDB) -# Only load plpythonu with Python 2. The test files themselves load -# the versioned language plpython(2|3)u. 
-ifeq ($(python_majorversion),2) -REGRESS_OPTS += --load-extension=plpythonu -endif REGRESS = \ plpython_schema \ @@ -108,8 +100,6 @@ REGRESS = \ plpython_transaction \ plpython_drop -REGRESS_PLPYTHON3_MANGLE := $(REGRESS) - include $(top_srcdir)/src/Makefile.shlib all: all-lib @@ -127,7 +117,6 @@ uninstall: uninstall-lib uninstall-data install-data: installdirs $(INSTALL_DATA) $(addprefix $(srcdir)/, $(DATA)) '$(DESTDIR)$(datadir)/extension/' $(INSTALL_DATA) $(addprefix $(srcdir)/, $(INCS)) '$(DESTDIR)$(includedir_server)' - $(INSTALL_DATA) $(srcdir)/regress-python3-mangle.mk '$(DESTDIR)$(pgxsdir)/src/pl/plpython' uninstall-data: rm -f $(addprefix '$(DESTDIR)$(datadir)/extension'/, $(notdir $(DATA))) @@ -136,9 +125,6 @@ uninstall-data: .PHONY: install-data uninstall-data -include $(srcdir)/regress-python3-mangle.mk - - check: submake-pg-regress $(pg_regress_check) $(REGRESS_OPTS) $(REGRESS) diff --git a/src/pl/plpython/expected/plpython_call.out b/src/pl/plpython/expected/plpython_call.out index 55e1027246..4c0690067a 100644 --- a/src/pl/plpython/expected/plpython_call.out +++ b/src/pl/plpython/expected/plpython_call.out @@ -2,14 +2,14 @@ -- Tests for procedures / CALL syntax -- CREATE PROCEDURE test_proc1() -LANGUAGE plpythonu +LANGUAGE plpython3u AS $$ pass $$; CALL test_proc1(); -- error: can't return non-None CREATE PROCEDURE test_proc2() -LANGUAGE plpythonu +LANGUAGE plpython3u AS $$ return 5 $$; @@ -18,7 +18,7 @@ ERROR: PL/Python procedure did not return None CONTEXT: PL/Python procedure "test_proc2" CREATE TABLE test1 (a int); CREATE PROCEDURE test_proc3(x int) -LANGUAGE plpythonu +LANGUAGE plpython3u AS $$ plpy.execute("INSERT INTO test1 VALUES (%s)" % x) $$; @@ -31,7 +31,7 @@ SELECT * FROM test1; -- output arguments CREATE PROCEDURE test_proc5(INOUT a text) -LANGUAGE plpythonu +LANGUAGE plpython3u AS $$ return [a + '+' + a] $$; @@ -42,7 +42,7 @@ CALL test_proc5('abc'); (1 row) CREATE PROCEDURE test_proc6(a int, INOUT b int, INOUT c int) -LANGUAGE plpythonu +LANGUAGE plpython3u AS $$ return (b * a, c * a) $$; @@ -54,7 +54,7 @@ CALL test_proc6(2, 3, 4); -- OUT parameters CREATE PROCEDURE test_proc9(IN a int, OUT b int) -LANGUAGE plpythonu +LANGUAGE plpython3u AS $$ plpy.notice("a: %s" % (a)) return (a * 2,) diff --git a/src/pl/plpython/expected/plpython_composite.out b/src/pl/plpython/expected/plpython_composite.out index af80192334..bb101e07d5 100644 --- a/src/pl/plpython/expected/plpython_composite.out +++ b/src/pl/plpython/expected/plpython_composite.out @@ -1,6 +1,6 @@ CREATE FUNCTION multiout_simple(OUT i integer, OUT j integer) AS $$ return (1, 2) -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT multiout_simple(); multiout_simple ----------------- @@ -27,7 +27,7 @@ SELECT (multiout_simple()).j + 3; CREATE FUNCTION multiout_simple_setof(n integer = 1, OUT integer, OUT integer) RETURNS SETOF record AS $$ return [(1, 2)] * n -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT multiout_simple_setof(); multiout_simple_setof ----------------------- @@ -67,7 +67,7 @@ elif typ == 'obj': return type_record elif typ == 'str': return "('%s',%r)" % (first, second) -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM multiout_record_as('dict', 'foo', 1, 'f'); first | second -------+-------- @@ -237,7 +237,7 @@ for i in range(n): power = 2 ** i length = plpy.execute("select length('%d')" % power)[0]['length'] yield power, length -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM multiout_setof(3); power_of_2 | length ------------+-------- @@ -260,7 +260,7 
@@ CREATE FUNCTION multiout_return_table() RETURNS TABLE (x integer, y text) AS $$ return [{'x': 4, 'y' :'four'}, {'x': 7, 'y' :'seven'}, {'x': 0, 'y' :'zero'}] -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM multiout_return_table(); x | y ---+------- @@ -273,7 +273,7 @@ CREATE FUNCTION multiout_array(OUT integer[], OUT text) RETURNS SETOF record AS yield [[1], 'a'] yield [[1,2], 'b'] yield [[1,2,3], None] -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM multiout_array(); column1 | column2 ---------+--------- @@ -284,11 +284,11 @@ SELECT * FROM multiout_array(); CREATE FUNCTION singleout_composite(OUT type_record) AS $$ return {'first': 1, 'second': 2} -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE FUNCTION multiout_composite(OUT type_record) RETURNS SETOF type_record AS $$ return [{'first': 1, 'second': 2}, {'first': 3, 'second': 4 }] -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM singleout_composite(); first | second -------+-------- @@ -305,7 +305,7 @@ SELECT * FROM multiout_composite(); -- composite OUT parameters in functions returning RECORD not supported yet CREATE FUNCTION multiout_composite(INOUT n integer, OUT type_record) AS $$ return (n, (n * 2, n * 3)) -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE FUNCTION multiout_table_type_setof(typ text, returnnull boolean, INOUT n integer, OUT table_record) RETURNS SETOF record AS $$ if returnnull: d = None @@ -323,7 +323,7 @@ elif typ == 'str': d = "(%r,%r)" % (n * 2, n * 3) for i in range(n): yield (i, d) -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM multiout_composite(2); n | column2 ---+--------- @@ -438,7 +438,7 @@ CREATE TABLE changing ( CREATE FUNCTION changing_test(OUT n integer, OUT changing) RETURNS SETOF record AS $$ return [(1, {'i': 1, 'j': 2}), (1, (3, 4))] -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM changing_test(); n | column2 ---+--------- @@ -474,7 +474,7 @@ yield {'tab': [('first', 1), ('second', 2)], yield {'tab': [('first', 1), ('second', 2)], 'typ': [{'first': 'third', 'second': 3}, {'first': 'fourth', 'second': 4}]} -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM composite_types_table(); tab | typ ----------------------------+---------------------------- @@ -486,7 +486,7 @@ SELECT * FROM composite_types_table(); -- check what happens if the output record descriptor changes CREATE FUNCTION return_record(t text) RETURNS record AS $$ return {'t': t, 'val': 10} -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM return_record('abc') AS r(t text, val integer); t | val -----+----- @@ -525,7 +525,7 @@ SELECT * FROM return_record('999') AS r(val text, t integer); CREATE FUNCTION return_record_2(t text) RETURNS record AS $$ return {'v1':1,'v2':2,t:3} -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM return_record_2('v3') AS (v3 int, v2 int, v1 int); v3 | v2 | v1 ----+----+---- @@ -572,7 +572,7 @@ SELECT * FROM return_record_2('v3') AS (v1 int, v2 int, v3 int); -- multi-dimensional array of composite types. CREATE FUNCTION composite_type_as_list() RETURNS type_record[] AS $$ return [[('first', 1), ('second', 1)], [('first', 2), ('second', 2)], [('first', 3), ('second', 3)]]; -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM composite_type_as_list(); composite_type_as_list ------------------------------------------------------------------------------------ @@ -585,7 +585,7 @@ SELECT * FROM composite_type_as_list(); -- on the issue. 
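The hunks above change only the language name, but they also document the return-value conventions that carry over unchanged to plpython3u: a mapping, sequence, or attribute-bearing object supplies the columns of a composite result, and an iterable or generator supplies the rows of a SETOF result. A compact sketch of both idioms (the pair type and the function names are invented for illustration):

CREATE TYPE pair AS (first text, second integer);

CREATE FUNCTION one_pair() RETURNS pair
LANGUAGE plpython3u
AS $$
# a dict keyed by column name is one accepted composite shape
return {'first': 'a', 'second': 1}
$$;

CREATE FUNCTION some_pairs(n integer) RETURNS SETOF pair
LANGUAGE plpython3u
AS $$
# a generator produces one result row per yield
for i in range(n):
    yield ('x', i)
$$;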
CREATE FUNCTION composite_type_as_list_broken() RETURNS type_record[] AS $$ return [['first', 1]]; -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM composite_type_as_list_broken(); ERROR: malformed record literal: "first" DETAIL: Missing left parenthesis. diff --git a/src/pl/plpython/expected/plpython_do.out b/src/pl/plpython/expected/plpython_do.out index e300530e03..d131a4c0ed 100644 --- a/src/pl/plpython/expected/plpython_do.out +++ b/src/pl/plpython/expected/plpython_do.out @@ -1,8 +1,6 @@ -DO $$ plpy.notice("This is plpythonu.") $$ LANGUAGE plpythonu; -NOTICE: This is plpythonu. -DO $$ plpy.notice("This is plpython2u.") $$ LANGUAGE plpython2u; -NOTICE: This is plpython2u. -DO $$ raise Exception("error test") $$ LANGUAGE plpythonu; +DO $$ plpy.notice("This is plpython3u.") $$ LANGUAGE plpython3u; +NOTICE: This is plpython3u. +DO $$ raise Exception("error test") $$ LANGUAGE plpython3u; ERROR: Exception: error test CONTEXT: Traceback (most recent call last): PL/Python anonymous code block, line 1, in diff --git a/src/pl/plpython/expected/plpython_drop.out b/src/pl/plpython/expected/plpython_drop.out index a0e3b5c4ef..97bb54a55e 100644 --- a/src/pl/plpython/expected/plpython_drop.out +++ b/src/pl/plpython/expected/plpython_drop.out @@ -2,5 +2,4 @@ -- For paranoia's sake, don't leave an untrusted language sitting around -- SET client_min_messages = WARNING; -DROP EXTENSION plpythonu CASCADE; -DROP EXTENSION IF EXISTS plpython2u CASCADE; +DROP EXTENSION plpython3u CASCADE; diff --git a/src/pl/plpython/expected/plpython_ereport.out b/src/pl/plpython/expected/plpython_ereport.out index b73bfff511..74dcc419ce 100644 --- a/src/pl/plpython/expected/plpython_ereport.out +++ b/src/pl/plpython/expected/plpython_ereport.out @@ -17,7 +17,7 @@ plpy.info('This is message text.', plpy.notice('notice', detail='some detail') plpy.warning('warning', detail='some detail') plpy.error('stop on error', detail='some detail', hint='some hint') -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT elog_test(); INFO: info DETAIL: some detail @@ -38,42 +38,42 @@ CONTEXT: Traceback (most recent call last): PL/Python function "elog_test", line 18, in plpy.error('stop on error', detail='some detail', hint='some hint') PL/Python function "elog_test" -DO $$ plpy.info('other types', detail=(10, 20)) $$ LANGUAGE plpythonu; +DO $$ plpy.info('other types', detail=(10, 20)) $$ LANGUAGE plpython3u; INFO: other types DETAIL: (10, 20) DO $$ import time; from datetime import date plpy.info('other types', detail=date(2016, 2, 26)) -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; INFO: other types DETAIL: 2016-02-26 DO $$ basket = ['apple', 'orange', 'apple', 'pear', 'orange', 'banana'] plpy.info('other types', detail=basket) -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; INFO: other types DETAIL: ['apple', 'orange', 'apple', 'pear', 'orange', 'banana'] -- should fail -DO $$ plpy.info('wrong sqlstate', sqlstate='54444A') $$ LANGUAGE plpythonu; +DO $$ plpy.info('wrong sqlstate', sqlstate='54444A') $$ LANGUAGE plpython3u; ERROR: ValueError: invalid SQLSTATE code CONTEXT: Traceback (most recent call last): PL/Python anonymous code block, line 1, in plpy.info('wrong sqlstate', sqlstate='54444A') PL/Python anonymous code block -DO $$ plpy.info('unsupported argument', blabla='fooboo') $$ LANGUAGE plpythonu; +DO $$ plpy.info('unsupported argument', blabla='fooboo') $$ LANGUAGE plpython3u; ERROR: TypeError: 'blabla' is an invalid keyword argument for this function CONTEXT: Traceback (most recent call last): PL/Python 
anonymous code block, line 1, in <module> plpy.info('unsupported argument', blabla='fooboo') PL/Python anonymous code block -DO $$ plpy.info('first message', message='second message') $$ LANGUAGE plpythonu; +DO $$ plpy.info('first message', message='second message') $$ LANGUAGE plpython3u; ERROR: TypeError: argument 'message' given by name and position CONTEXT: Traceback (most recent call last): PL/Python anonymous code block, line 1, in <module> plpy.info('first message', message='second message') PL/Python anonymous code block -DO $$ plpy.info('first message', 'second message', message='third message') $$ LANGUAGE plpythonu; +DO $$ plpy.info('first message', 'second message', message='third message') $$ LANGUAGE plpython3u; ERROR: TypeError: argument 'message' given by name and position CONTEXT: Traceback (most recent call last): PL/Python anonymous code block, line 1, in <module> @@ -96,7 +96,7 @@ kwargs = { } # ignore None values plpy.error(**dict((k, v) for k, v in iter(kwargs.items()) if v)) -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT raise_exception('hello', 'world'); ERROR: plpy.Error: hello DETAIL: world @@ -180,26 +180,35 @@ END; $$; NOTICE: handled exception DETAIL: message:(plpy.Error: message text), detail:(detail text), hint: (hint text), sqlstate: (XX555), schema_name:(schema text), table_name:(table text), column_name:(column text), datatype_name:(datatype text), constraint_name:(constraint text) --- The displayed context is different between Python2 and Python3, --- but that's not important for this test. -\set SHOW_CONTEXT never DO $$ try: plpy.execute("select raise_exception(_message => 'my message', _sqlstate => 'XX987', _hint => 'some hint', _table_name => 'users_tab', _datatype_name => 'user_type')") except Exception as e: plpy.info(e.spidata) raise e -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; INFO: (119577128, None, 'some hint', None, 0, None, 'users_tab', None, 'user_type', None) ERROR: plpy.SPIError: plpy.Error: my message HINT: some hint +CONTEXT: Traceback (most recent call last): + PL/Python anonymous code block, line 6, in <module> + raise e + PL/Python anonymous code block, line 3, in __plpython_inline_block + plpy.execute("select raise_exception(_message => 'my message', _sqlstate => 'XX987', _hint => 'some hint', _table_name => 'users_tab', _datatype_name => 'user_type')") +PL/Python anonymous code block DO $$ try: plpy.error(message = 'my message', sqlstate = 'XX987', hint = 'some hint', table_name = 'users_tab', datatype_name = 'user_type') except Exception as e: plpy.info('sqlstate: %s, hint: %s, table_name: %s, datatype_name: %s' % (e.sqlstate, e.hint, e.table_name, e.datatype_name)) raise e -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; INFO: sqlstate: XX987, hint: some hint, table_name: users_tab, datatype_name: user_type ERROR: plpy.Error: my message HINT: some hint +CONTEXT: Traceback (most recent call last): + PL/Python anonymous code block, line 6, in <module> + raise e + PL/Python anonymous code block, line 3, in __plpython_inline_block + plpy.error(message = 'my message', sqlstate = 'XX987', hint = 'some hint', table_name = 'users_tab', datatype_name = 'user_type') +PL/Python anonymous code block diff --git a/src/pl/plpython/expected/plpython_error.out b/src/pl/plpython/expected/plpython_error.out index b2f8fe83eb..7fe864a1a5 100644 --- a/src/pl/plpython/expected/plpython_error.out +++ b/src/pl/plpython/expected/plpython_error.out @@ -6,7 +6,7 @@ CREATE FUNCTION python_syntax_error() RETURNS text AS '.syntaxerror' - LANGUAGE plpythonu; + LANGUAGE plpython3u; ERROR: could
not compile PL/Python function "python_syntax_error" DETAIL: SyntaxError: invalid syntax (<string>, line 2) /* With check_function_bodies = false the function should get defined * and the error reported when called */ SET check_function_bodies = false; CREATE FUNCTION python_syntax_error() RETURNS text AS '.syntaxerror' - LANGUAGE plpythonu; + LANGUAGE plpython3u; SELECT python_syntax_error(); ERROR: could not compile PL/Python function "python_syntax_error" DETAIL: SyntaxError: invalid syntax (<string>, line 2) @@ -30,7 +30,7 @@ RESET check_function_bodies; CREATE FUNCTION sql_syntax_error() RETURNS text AS 'plpy.execute("syntax error")' - LANGUAGE plpythonu; + LANGUAGE plpython3u; SELECT sql_syntax_error(); ERROR: spiexceptions.SyntaxError: syntax error at or near "syntax" LINE 1: syntax error @@ -45,7 +45,7 @@ PL/Python function "sql_syntax_error" CREATE FUNCTION exception_index_invalid(text) RETURNS text AS 'return args[1]' - LANGUAGE plpythonu; + LANGUAGE plpython3u; SELECT exception_index_invalid('test'); ERROR: IndexError: list index out of range CONTEXT: Traceback (most recent call last): @@ -58,7 +58,7 @@ CREATE FUNCTION exception_index_invalid_nested() RETURNS text AS 'rv = plpy.execute("SELECT test5(''foo'')") return rv[0]' - LANGUAGE plpythonu; + LANGUAGE plpython3u; SELECT exception_index_invalid_nested(); ERROR: spiexceptions.UndefinedFunction: function test5(unknown) does not exist LINE 1: SELECT test5('foo') @@ -81,7 +81,7 @@ if len(rv): return rv[0]["fname"] return None ' - LANGUAGE plpythonu; + LANGUAGE plpython3u; SELECT invalid_type_uncaught('rick'); ERROR: spiexceptions.UndefinedObject: type "test" does not exist CONTEXT: Traceback (most recent call last): @@ -105,7 +105,7 @@ if len(rv): return rv[0]["fname"] return None ' - LANGUAGE plpythonu; + LANGUAGE plpython3u; SELECT invalid_type_caught('rick'); NOTICE: type "test" does not exist invalid_type_caught @@ -129,7 +129,7 @@ if len(rv): return rv[0]["fname"] return None ' - LANGUAGE plpythonu; + LANGUAGE plpython3u; SELECT invalid_type_reraised('rick'); ERROR: plpy.Error: type "test" does not exist CONTEXT: Traceback (most recent call last): @@ -147,7 +147,7 @@ if len(rv): return rv[0]["fname"] return None ' - LANGUAGE plpythonu; + LANGUAGE plpython3u; SELECT valid_type('rick'); valid_type ------------ @@ -170,7 +170,7 @@ def fun3(): fun3() return "not reached" ' - LANGUAGE plpythonu; + LANGUAGE plpython3u; SELECT nested_error(); ERROR: plpy.Error: boom CONTEXT: Traceback (most recent call last): @@ -199,7 +199,7 @@ def fun3(): fun3() return "not reached" ' - LANGUAGE plpythonu; + LANGUAGE plpython3u; SELECT nested_error_raise(); ERROR: plpy.Error: boom CONTEXT: Traceback (most recent call last): @@ -228,7 +228,7 @@ def fun3(): fun3() return "you''ve been warned" ' - LANGUAGE plpythonu; + LANGUAGE plpython3u; SELECT nested_warning(); WARNING: boom nested_warning @@ -241,9 +241,9 @@ WARNING: boom CREATE FUNCTION toplevel_attribute_error() RETURNS void AS $$ plpy.nonexistent -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT toplevel_attribute_error(); -ERROR: AttributeError: 'module' object has no attribute 'nonexistent' +ERROR: AttributeError: module 'plpy' has no attribute 'nonexistent' CONTEXT: Traceback (most recent call last): PL/Python function "toplevel_attribute_error", line 2, in <module> plpy.nonexistent @@ -261,7 +261,7 @@ def third(): plpy.execute("select sql_error()") first() -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE OR REPLACE FUNCTION sql_error() RETURNS void AS $$ begin select 1/0; end $$ LANGUAGE plpgsql;
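invalid_type_caught and valid_type above exercise an idiom that survives the language rename untouched: prepare a plan once, cache it in the per-function SD dictionary, and trap failures as plpy.SPIError. A minimal working sketch against the same users table the tests use (the function name is invented for illustration):

CREATE FUNCTION lookup_fname(lname text) RETURNS text
LANGUAGE plpython3u
AS $$
if "plan" not in SD:
    # prepared once per backend session, then reused on every call
    SD["plan"] = plpy.prepare("SELECT fname FROM users WHERE lname = $1", ["text"])
try:
    rv = plpy.execute(SD["plan"], [lname])
except plpy.SPIError as ex:
    plpy.notice(str(ex))
    return None
return rv[0]["fname"] if rv else None
$$;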
CREATE OR REPLACE FUNCTION sql_from_python_error() RETURNS void AS $$ plpy.execute("select sql_error()") -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT python_traceback(); ERROR: spiexceptions.DivisionByZero: division by zero CONTEXT: Traceback (most recent call last): @@ -325,7 +325,7 @@ except spiexceptions.NotNullViolation as e: plpy.notice("Violated the NOT NULL constraint, sqlstate %s" % e.sqlstate) except spiexceptions.UniqueViolation as e: plpy.notice("Violated the UNIQUE constraint, sqlstate %s" % e.sqlstate) -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT specific_exception(2); specific_exception -------------------- @@ -351,7 +351,7 @@ NOTICE: Violated the UNIQUE constraint, sqlstate 23505 CREATE FUNCTION python_unique_violation() RETURNS void AS $$ plpy.execute("insert into specific values (1)") plpy.execute("insert into specific values (1)") -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE FUNCTION catch_python_unique_violation() RETURNS text AS $$ begin begin @@ -374,7 +374,7 @@ CREATE FUNCTION manual_subxact() RETURNS void AS $$ plpy.execute("savepoint save") plpy.execute("create table foo(x integer)") plpy.execute("rollback to save") -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT manual_subxact(); ERROR: plpy.SPIError: SPI_execute failed: SPI_ERROR_TRANSACTION CONTEXT: Traceback (most recent call last): @@ -389,7 +389,7 @@ rollback = plpy.prepare("rollback to save") plpy.execute(save) plpy.execute("create table foo(x integer)") plpy.execute(rollback) -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT manual_subxact_prepared(); ERROR: plpy.SPIError: SPI_execute_plan failed: SPI_ERROR_TRANSACTION CONTEXT: Traceback (most recent call last): @@ -400,7 +400,7 @@ PL/Python function "manual_subxact_prepared" */ CREATE FUNCTION plpy_raise_spiexception() RETURNS void AS $$ raise plpy.spiexceptions.DivisionByZero() -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; DO $$ BEGIN SELECT plpy_raise_spiexception(); @@ -414,7 +414,7 @@ CREATE FUNCTION plpy_raise_spiexception_override() RETURNS void AS $$ exc = plpy.spiexceptions.DivisionByZero() exc.sqlstate = 'SILLY' raise exc -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; DO $$ BEGIN SELECT plpy_raise_spiexception_override(); @@ -425,18 +425,18 @@ $$ LANGUAGE plpgsql; /* test the context stack trace for nested execution levels */ CREATE FUNCTION notice_innerfunc() RETURNS int AS $$ -plpy.execute("DO LANGUAGE plpythonu $x$ plpy.notice('inside DO') $x$") +plpy.execute("DO LANGUAGE plpython3u $x$ plpy.notice('inside DO') $x$") return 1 -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE FUNCTION notice_outerfunc() RETURNS int AS $$ plpy.execute("SELECT notice_innerfunc()") return 1 -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; \set SHOW_CONTEXT always SELECT notice_outerfunc(); NOTICE: inside DO CONTEXT: PL/Python anonymous code block -SQL statement "DO LANGUAGE plpythonu $x$ plpy.notice('inside DO') $x$" +SQL statement "DO LANGUAGE plpython3u $x$ plpy.notice('inside DO') $x$" PL/Python function "notice_innerfunc" SQL statement "SELECT notice_innerfunc()" PL/Python function "notice_outerfunc" diff --git a/src/pl/plpython/expected/plpython_error_5.out b/src/pl/plpython/expected/plpython_error_5.out deleted file mode 100644 index bc66ab5534..0000000000 --- a/src/pl/plpython/expected/plpython_error_5.out +++ /dev/null @@ -1,447 +0,0 @@ --- test error handling, i forgot to restore Warn_restart in --- the trigger handler once. the errors and subsequent core dump were --- interesting. 
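specific_exception above demonstrates the class-per-SQLSTATE mapping in plpy.spiexceptions, which behaves identically under plpython3u. A short sketch of the same trap against the tests' specific table (the function name is invented for illustration):

CREATE FUNCTION insert_once(i integer) RETURNS text
LANGUAGE plpython3u
AS $$
from plpy import spiexceptions
try:
    plpy.execute("INSERT INTO specific VALUES (%d)" % i)
except spiexceptions.UniqueViolation as e:
    # each SQLSTATE gets its own exception class; here e.sqlstate is '23505'
    return "duplicate, sqlstate %s" % e.sqlstate
return "inserted"
$$;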
-/* Flat out Python syntax error - */ -CREATE FUNCTION python_syntax_error() RETURNS text - AS -'.syntaxerror' - LANGUAGE plpython3u; -ERROR: could not compile PL/Python function "python_syntax_error" -DETAIL: SyntaxError: invalid syntax (, line 2) -/* With check_function_bodies = false the function should get defined - * and the error reported when called - */ -SET check_function_bodies = false; -CREATE FUNCTION python_syntax_error() RETURNS text - AS -'.syntaxerror' - LANGUAGE plpython3u; -SELECT python_syntax_error(); -ERROR: could not compile PL/Python function "python_syntax_error" -DETAIL: SyntaxError: invalid syntax (, line 2) -/* Run the function twice to check if the hashtable entry gets cleaned up */ -SELECT python_syntax_error(); -ERROR: could not compile PL/Python function "python_syntax_error" -DETAIL: SyntaxError: invalid syntax (, line 2) -RESET check_function_bodies; -/* Flat out syntax error - */ -CREATE FUNCTION sql_syntax_error() RETURNS text - AS -'plpy.execute("syntax error")' - LANGUAGE plpython3u; -SELECT sql_syntax_error(); -ERROR: spiexceptions.SyntaxError: syntax error at or near "syntax" -LINE 1: syntax error - ^ -QUERY: syntax error -CONTEXT: Traceback (most recent call last): - PL/Python function "sql_syntax_error", line 1, in - plpy.execute("syntax error") -PL/Python function "sql_syntax_error" -/* check the handling of uncaught python exceptions - */ -CREATE FUNCTION exception_index_invalid(text) RETURNS text - AS -'return args[1]' - LANGUAGE plpython3u; -SELECT exception_index_invalid('test'); -ERROR: IndexError: list index out of range -CONTEXT: Traceback (most recent call last): - PL/Python function "exception_index_invalid", line 1, in - return args[1] -PL/Python function "exception_index_invalid" -/* check handling of nested exceptions - */ -CREATE FUNCTION exception_index_invalid_nested() RETURNS text - AS -'rv = plpy.execute("SELECT test5(''foo'')") -return rv[0]' - LANGUAGE plpython3u; -SELECT exception_index_invalid_nested(); -ERROR: spiexceptions.UndefinedFunction: function test5(unknown) does not exist -LINE 1: SELECT test5('foo') - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
-QUERY: SELECT test5('foo') -CONTEXT: Traceback (most recent call last): - PL/Python function "exception_index_invalid_nested", line 1, in - rv = plpy.execute("SELECT test5('foo')") -PL/Python function "exception_index_invalid_nested" -/* a typo - */ -CREATE FUNCTION invalid_type_uncaught(a text) RETURNS text - AS -'if "plan" not in SD: - q = "SELECT fname FROM users WHERE lname = $1" - SD["plan"] = plpy.prepare(q, [ "test" ]) -rv = plpy.execute(SD["plan"], [ a ]) -if len(rv): - return rv[0]["fname"] -return None -' - LANGUAGE plpython3u; -SELECT invalid_type_uncaught('rick'); -ERROR: spiexceptions.UndefinedObject: type "test" does not exist -CONTEXT: Traceback (most recent call last): - PL/Python function "invalid_type_uncaught", line 3, in - SD["plan"] = plpy.prepare(q, [ "test" ]) -PL/Python function "invalid_type_uncaught" -/* for what it's worth catch the exception generated by - * the typo, and return None - */ -CREATE FUNCTION invalid_type_caught(a text) RETURNS text - AS -'if "plan" not in SD: - q = "SELECT fname FROM users WHERE lname = $1" - try: - SD["plan"] = plpy.prepare(q, [ "test" ]) - except plpy.SPIError as ex: - plpy.notice(str(ex)) - return None -rv = plpy.execute(SD["plan"], [ a ]) -if len(rv): - return rv[0]["fname"] -return None -' - LANGUAGE plpython3u; -SELECT invalid_type_caught('rick'); -NOTICE: type "test" does not exist - invalid_type_caught ---------------------- - -(1 row) - -/* for what it's worth catch the exception generated by - * the typo, and reraise it as a plain error - */ -CREATE FUNCTION invalid_type_reraised(a text) RETURNS text - AS -'if "plan" not in SD: - q = "SELECT fname FROM users WHERE lname = $1" - try: - SD["plan"] = plpy.prepare(q, [ "test" ]) - except plpy.SPIError as ex: - plpy.error(str(ex)) -rv = plpy.execute(SD["plan"], [ a ]) -if len(rv): - return rv[0]["fname"] -return None -' - LANGUAGE plpython3u; -SELECT invalid_type_reraised('rick'); -ERROR: plpy.Error: type "test" does not exist -CONTEXT: Traceback (most recent call last): - PL/Python function "invalid_type_reraised", line 6, in - plpy.error(str(ex)) -PL/Python function "invalid_type_reraised" -/* no typo no messing about - */ -CREATE FUNCTION valid_type(a text) RETURNS text - AS -'if "plan" not in SD: - SD["plan"] = plpy.prepare("SELECT fname FROM users WHERE lname = $1", [ "text" ]) -rv = plpy.execute(SD["plan"], [ a ]) -if len(rv): - return rv[0]["fname"] -return None -' - LANGUAGE plpython3u; -SELECT valid_type('rick'); - valid_type ------------- - -(1 row) - -/* error in nested functions to get a traceback -*/ -CREATE FUNCTION nested_error() RETURNS text - AS -'def fun1(): - plpy.error("boom") - -def fun2(): - fun1() - -def fun3(): - fun2() - -fun3() -return "not reached" -' - LANGUAGE plpython3u; -SELECT nested_error(); -ERROR: plpy.Error: boom -CONTEXT: Traceback (most recent call last): - PL/Python function "nested_error", line 10, in - fun3() - PL/Python function "nested_error", line 8, in fun3 - fun2() - PL/Python function "nested_error", line 5, in fun2 - fun1() - PL/Python function "nested_error", line 2, in fun1 - plpy.error("boom") -PL/Python function "nested_error" -/* raising plpy.Error is just like calling plpy.error -*/ -CREATE FUNCTION nested_error_raise() RETURNS text - AS -'def fun1(): - raise plpy.Error("boom") - -def fun2(): - fun1() - -def fun3(): - fun2() - -fun3() -return "not reached" -' - LANGUAGE plpython3u; -SELECT nested_error_raise(); -ERROR: plpy.Error: boom -CONTEXT: Traceback (most recent call last): - PL/Python function "nested_error_raise", 
line 10, in - fun3() - PL/Python function "nested_error_raise", line 8, in fun3 - fun2() - PL/Python function "nested_error_raise", line 5, in fun2 - fun1() - PL/Python function "nested_error_raise", line 2, in fun1 - raise plpy.Error("boom") -PL/Python function "nested_error_raise" -/* using plpy.warning should not produce a traceback -*/ -CREATE FUNCTION nested_warning() RETURNS text - AS -'def fun1(): - plpy.warning("boom") - -def fun2(): - fun1() - -def fun3(): - fun2() - -fun3() -return "you''ve been warned" -' - LANGUAGE plpython3u; -SELECT nested_warning(); -WARNING: boom - nested_warning --------------------- - you've been warned -(1 row) - -/* AttributeError at toplevel used to give segfaults with the traceback -*/ -CREATE FUNCTION toplevel_attribute_error() RETURNS void AS -$$ -plpy.nonexistent -$$ LANGUAGE plpython3u; -SELECT toplevel_attribute_error(); -ERROR: AttributeError: module 'plpy' has no attribute 'nonexistent' -CONTEXT: Traceback (most recent call last): - PL/Python function "toplevel_attribute_error", line 2, in - plpy.nonexistent -PL/Python function "toplevel_attribute_error" -/* Calling PL/Python functions from SQL and vice versa should not lose context. - */ -CREATE OR REPLACE FUNCTION python_traceback() RETURNS void AS $$ -def first(): - second() - -def second(): - third() - -def third(): - plpy.execute("select sql_error()") - -first() -$$ LANGUAGE plpython3u; -CREATE OR REPLACE FUNCTION sql_error() RETURNS void AS $$ -begin - select 1/0; -end -$$ LANGUAGE plpgsql; -CREATE OR REPLACE FUNCTION python_from_sql_error() RETURNS void AS $$ -begin - select python_traceback(); -end -$$ LANGUAGE plpgsql; -CREATE OR REPLACE FUNCTION sql_from_python_error() RETURNS void AS $$ -plpy.execute("select sql_error()") -$$ LANGUAGE plpython3u; -SELECT python_traceback(); -ERROR: spiexceptions.DivisionByZero: division by zero -CONTEXT: Traceback (most recent call last): - PL/Python function "python_traceback", line 11, in - first() - PL/Python function "python_traceback", line 3, in first - second() - PL/Python function "python_traceback", line 6, in second - third() - PL/Python function "python_traceback", line 9, in third - plpy.execute("select sql_error()") -PL/Python function "python_traceback" -SELECT sql_error(); -ERROR: division by zero -CONTEXT: SQL statement "select 1/0" -PL/pgSQL function sql_error() line 3 at SQL statement -SELECT python_from_sql_error(); -ERROR: spiexceptions.DivisionByZero: division by zero -CONTEXT: Traceback (most recent call last): - PL/Python function "python_traceback", line 11, in - first() - PL/Python function "python_traceback", line 3, in first - second() - PL/Python function "python_traceback", line 6, in second - third() - PL/Python function "python_traceback", line 9, in third - plpy.execute("select sql_error()") -PL/Python function "python_traceback" -SQL statement "select python_traceback()" -PL/pgSQL function python_from_sql_error() line 3 at SQL statement -SELECT sql_from_python_error(); -ERROR: spiexceptions.DivisionByZero: division by zero -CONTEXT: Traceback (most recent call last): - PL/Python function "sql_from_python_error", line 2, in - plpy.execute("select sql_error()") -PL/Python function "sql_from_python_error" -/* check catching specific types of exceptions - */ -CREATE TABLE specific ( - i integer PRIMARY KEY -); -CREATE FUNCTION specific_exception(i integer) RETURNS void AS -$$ -from plpy import spiexceptions -try: - plpy.execute("insert into specific values (%s)" % (i or "NULL")); -except spiexceptions.NotNullViolation as 
e: - plpy.notice("Violated the NOT NULL constraint, sqlstate %s" % e.sqlstate) -except spiexceptions.UniqueViolation as e: - plpy.notice("Violated the UNIQUE constraint, sqlstate %s" % e.sqlstate) -$$ LANGUAGE plpython3u; -SELECT specific_exception(2); - specific_exception --------------------- - -(1 row) - -SELECT specific_exception(NULL); -NOTICE: Violated the NOT NULL constraint, sqlstate 23502 - specific_exception --------------------- - -(1 row) - -SELECT specific_exception(2); -NOTICE: Violated the UNIQUE constraint, sqlstate 23505 - specific_exception --------------------- - -(1 row) - -/* SPI errors in PL/Python functions should preserve the SQLSTATE value - */ -CREATE FUNCTION python_unique_violation() RETURNS void AS $$ -plpy.execute("insert into specific values (1)") -plpy.execute("insert into specific values (1)") -$$ LANGUAGE plpython3u; -CREATE FUNCTION catch_python_unique_violation() RETURNS text AS $$ -begin - begin - perform python_unique_violation(); - exception when unique_violation then - return 'ok'; - end; - return 'not reached'; -end; -$$ language plpgsql; -SELECT catch_python_unique_violation(); - catch_python_unique_violation -------------------------------- - ok -(1 row) - -/* manually starting subtransactions - a bad idea - */ -CREATE FUNCTION manual_subxact() RETURNS void AS $$ -plpy.execute("savepoint save") -plpy.execute("create table foo(x integer)") -plpy.execute("rollback to save") -$$ LANGUAGE plpython3u; -SELECT manual_subxact(); -ERROR: plpy.SPIError: SPI_execute failed: SPI_ERROR_TRANSACTION -CONTEXT: Traceback (most recent call last): - PL/Python function "manual_subxact", line 2, in - plpy.execute("savepoint save") -PL/Python function "manual_subxact" -/* same for prepared plans - */ -CREATE FUNCTION manual_subxact_prepared() RETURNS void AS $$ -save = plpy.prepare("savepoint save") -rollback = plpy.prepare("rollback to save") -plpy.execute(save) -plpy.execute("create table foo(x integer)") -plpy.execute(rollback) -$$ LANGUAGE plpython3u; -SELECT manual_subxact_prepared(); -ERROR: plpy.SPIError: SPI_execute_plan failed: SPI_ERROR_TRANSACTION -CONTEXT: Traceback (most recent call last): - PL/Python function "manual_subxact_prepared", line 4, in - plpy.execute(save) -PL/Python function "manual_subxact_prepared" -/* raising plpy.spiexception.* from python code should preserve sqlstate - */ -CREATE FUNCTION plpy_raise_spiexception() RETURNS void AS $$ -raise plpy.spiexceptions.DivisionByZero() -$$ LANGUAGE plpython3u; -DO $$ -BEGIN - SELECT plpy_raise_spiexception(); -EXCEPTION WHEN division_by_zero THEN - -- NOOP -END -$$ LANGUAGE plpgsql; -/* setting a custom sqlstate should be handled - */ -CREATE FUNCTION plpy_raise_spiexception_override() RETURNS void AS $$ -exc = plpy.spiexceptions.DivisionByZero() -exc.sqlstate = 'SILLY' -raise exc -$$ LANGUAGE plpython3u; -DO $$ -BEGIN - SELECT plpy_raise_spiexception_override(); -EXCEPTION WHEN SQLSTATE 'SILLY' THEN - -- NOOP -END -$$ LANGUAGE plpgsql; -/* test the context stack trace for nested execution levels - */ -CREATE FUNCTION notice_innerfunc() RETURNS int AS $$ -plpy.execute("DO LANGUAGE plpythonu $x$ plpy.notice('inside DO') $x$") -return 1 -$$ LANGUAGE plpythonu; -CREATE FUNCTION notice_outerfunc() RETURNS int AS $$ -plpy.execute("SELECT notice_innerfunc()") -return 1 -$$ LANGUAGE plpythonu; -\set SHOW_CONTEXT always -SELECT notice_outerfunc(); -NOTICE: inside DO -CONTEXT: PL/Python anonymous code block -SQL statement "DO LANGUAGE plpythonu $x$ plpy.notice('inside DO') $x$" -PL/Python function 
"notice_innerfunc" -SQL statement "SELECT notice_innerfunc()" -PL/Python function "notice_outerfunc" - notice_outerfunc ------------------- - 1 -(1 row) - diff --git a/src/pl/plpython/expected/plpython_global.out b/src/pl/plpython/expected/plpython_global.out index 192e3e48a7..a4cfb1483f 100644 --- a/src/pl/plpython/expected/plpython_global.out +++ b/src/pl/plpython/expected/plpython_global.out @@ -8,7 +8,7 @@ CREATE FUNCTION global_test_one() returns text if "global_test" not in GD: GD["global_test"] = "set by global_test_one" return "SD: " + SD["global_test"] + ", GD: " + GD["global_test"]' - LANGUAGE plpythonu; + LANGUAGE plpython3u; CREATE FUNCTION global_test_two() returns text AS 'if "global_test" not in SD: @@ -16,7 +16,7 @@ CREATE FUNCTION global_test_two() returns text if "global_test" not in GD: GD["global_test"] = "set by global_test_two" return "SD: " + SD["global_test"] + ", GD: " + GD["global_test"]' - LANGUAGE plpythonu; + LANGUAGE plpython3u; CREATE FUNCTION static_test() returns int4 AS 'if "call" in SD: @@ -25,7 +25,7 @@ else: SD["call"] = 1 return SD["call"] ' - LANGUAGE plpythonu; + LANGUAGE plpython3u; SELECT static_test(); static_test ------------- diff --git a/src/pl/plpython/expected/plpython_import.out b/src/pl/plpython/expected/plpython_import.out index b59e1821a7..854e989eaf 100644 --- a/src/pl/plpython/expected/plpython_import.out +++ b/src/pl/plpython/expected/plpython_import.out @@ -6,7 +6,7 @@ CREATE FUNCTION import_fail() returns text except ImportError: return "failed as expected" return "succeeded, that wasn''t supposed to happen"' - LANGUAGE plpythonu; + LANGUAGE plpython3u; CREATE FUNCTION import_succeed() returns text AS 'try: @@ -25,7 +25,7 @@ except Exception as ex: plpy.notice("import failed -- %s" % str(ex)) return "failed, that wasn''t supposed to happen" return "succeeded, as expected"' - LANGUAGE plpythonu; + LANGUAGE plpython3u; CREATE FUNCTION import_test_one(p text) RETURNS text AS 'try: @@ -35,7 +35,7 @@ except ImportError: import sha digest = sha.new(p) return digest.hexdigest()' - LANGUAGE plpythonu; + LANGUAGE plpython3u; CREATE FUNCTION import_test_two(u users) RETURNS text AS 'plain = u["fname"] + u["lname"] @@ -46,7 +46,7 @@ except ImportError: import sha digest = sha.new(plain); return "sha hash of " + plain + " is " + digest.hexdigest()' - LANGUAGE plpythonu; + LANGUAGE plpython3u; -- import python modules -- SELECT import_fail(); diff --git a/src/pl/plpython/expected/plpython_newline.out b/src/pl/plpython/expected/plpython_newline.out index 27dc2f8ab0..2bc149257e 100644 --- a/src/pl/plpython/expected/plpython_newline.out +++ b/src/pl/plpython/expected/plpython_newline.out @@ -3,13 +3,13 @@ -- CREATE OR REPLACE FUNCTION newline_lf() RETURNS integer AS E'x = 100\ny = 23\nreturn x + y\n' -LANGUAGE plpythonu; +LANGUAGE plpython3u; CREATE OR REPLACE FUNCTION newline_cr() RETURNS integer AS E'x = 100\ry = 23\rreturn x + y\r' -LANGUAGE plpythonu; +LANGUAGE plpython3u; CREATE OR REPLACE FUNCTION newline_crlf() RETURNS integer AS E'x = 100\r\ny = 23\r\nreturn x + y\r\n' -LANGUAGE plpythonu; +LANGUAGE plpython3u; SELECT newline_lf(); newline_lf ------------ diff --git a/src/pl/plpython/expected/plpython_params.out b/src/pl/plpython/expected/plpython_params.out index 46ea7dfb90..d1a36f3623 100644 --- a/src/pl/plpython/expected/plpython_params.out +++ b/src/pl/plpython/expected/plpython_params.out @@ -3,12 +3,12 @@ -- CREATE FUNCTION test_param_names0(integer, integer) RETURNS int AS $$ return args[0] + args[1] -$$ LANGUAGE plpythonu; +$$ 
LANGUAGE plpython3u; CREATE FUNCTION test_param_names1(a0 integer, a1 text) RETURNS boolean AS $$ assert a0 == args[0] assert a1 == args[1] return True -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE FUNCTION test_param_names2(u users) RETURNS text AS $$ assert u == args[0] if isinstance(u, dict): @@ -19,7 +19,7 @@ if isinstance(u, dict): else: s = str(u) return s -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; -- use deliberately wrong parameter names CREATE FUNCTION test_param_names3(a0 integer) RETURNS boolean AS $$ try: @@ -28,7 +28,7 @@ try: except NameError as e: assert e.args[0].find("a1") > -1 return True -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT test_param_names0(2,7); test_param_names0 ------------------- diff --git a/src/pl/plpython/expected/plpython_quote.out b/src/pl/plpython/expected/plpython_quote.out index eed72923ae..1fbe93d535 100644 --- a/src/pl/plpython/expected/plpython_quote.out +++ b/src/pl/plpython/expected/plpython_quote.out @@ -8,7 +8,7 @@ CREATE FUNCTION quote(t text, how text) RETURNS text AS $$ return plpy.quote_ident(t) else: raise plpy.Error("unrecognized quote type %s" % how) -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT quote(t, 'literal') FROM (VALUES ('abc'), ('a''bc'), diff --git a/src/pl/plpython/expected/plpython_record.out b/src/pl/plpython/expected/plpython_record.out index 458330713a..31de198582 100644 --- a/src/pl/plpython/expected/plpython_record.out +++ b/src/pl/plpython/expected/plpython_record.out @@ -23,7 +23,7 @@ elif typ == 'obj': type_record.first = first type_record.second = second return type_record -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE FUNCTION test_type_record_as(typ text, first text, second integer, retnull boolean) RETURNS type_record AS $$ if retnull: return None @@ -40,17 +40,17 @@ elif typ == 'obj': return type_record elif typ == 'str': return "('%s',%r)" % (first, second) -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE FUNCTION test_in_out_params(first in text, second out text) AS $$ return first + '_in_to_out'; -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE FUNCTION test_in_out_params_multi(first in text, second out text, third out text) AS $$ return (first + '_record_in_to_out_1', first + '_record_in_to_out_2'); -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE FUNCTION test_inout_params(first inout text) AS $$ return first + '_inout'; -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; -- Test tuple returning functions SELECT * FROM test_table_record_as('dict', null, null, false); first | second @@ -340,7 +340,7 @@ SELECT * FROM test_type_record_as('obj', 'one', 1, false); -- errors cases CREATE FUNCTION test_type_record_error1() RETURNS type_record AS $$ return { 'first': 'first' } -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_record_error1(); ERROR: key "second" not found in mapping HINT: To return null in a column, add the value None to the mapping with the key named after the column. 
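Besides the language renames, the plpython_setof.out hunk a little further down records one genuine source change in these expected files: under Python 3 an iterator's advance method is spelled __next__, not next. A sketch of the converted protocol as a set-returning function (the function name is invented; the producer class mirrors the test):

CREATE FUNCTION count_down(c integer) RETURNS SETOF integer
LANGUAGE plpython3u
AS $$
class producer:
    def __init__(self, c):
        self.c = c
    def __iter__(self):
        return self
    def __next__(self):        # Python 2 spelled this method 'next'
        if self.c == 0:
            raise StopIteration
        self.c -= 1
        return self.c
return producer(c)
$$;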
@@ -348,7 +348,7 @@ CONTEXT: while creating return value PL/Python function "test_type_record_error1" CREATE FUNCTION test_type_record_error2() RETURNS type_record AS $$ return [ 'first' ] -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_record_error2(); ERROR: length of returned sequence did not match number of columns in row CONTEXT: while creating return value @@ -357,7 +357,7 @@ CREATE FUNCTION test_type_record_error3() RETURNS type_record AS $$ class type_record: pass type_record.first = 'first' return type_record -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_record_error3(); ERROR: attribute "second" does not exist in Python object HINT: To return null in a column, let the returned object have an attribute named after column with value None. @@ -365,7 +365,7 @@ CONTEXT: while creating return value PL/Python function "test_type_record_error3" CREATE FUNCTION test_type_record_error4() RETURNS type_record AS $$ return 'foo' -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_record_error4(); ERROR: malformed record literal: "foo" DETAIL: Missing left parenthesis. diff --git a/src/pl/plpython/expected/plpython_setof.out b/src/pl/plpython/expected/plpython_setof.out index 170dbc394d..3940940029 100644 --- a/src/pl/plpython/expected/plpython_setof.out +++ b/src/pl/plpython/expected/plpython_setof.out @@ -3,20 +3,20 @@ -- CREATE FUNCTION test_setof_error() RETURNS SETOF text AS $$ return 37 -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT test_setof_error(); ERROR: returned object cannot be iterated DETAIL: PL/Python set-returning functions must return an iterable object. CONTEXT: PL/Python function "test_setof_error" CREATE FUNCTION test_setof_as_list(count integer, content text) RETURNS SETOF text AS $$ return [ content ]*count -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE FUNCTION test_setof_as_tuple(count integer, content text) RETURNS SETOF text AS $$ t = () for i in range(count): t += ( content, ) return t -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE FUNCTION test_setof_as_iterator(count integer, content text) RETURNS SETOF text AS $$ class producer: def __init__ (self, icount, icontent): @@ -24,13 +24,13 @@ class producer: self.icount = icount def __iter__ (self): return self - def next (self): + def __next__ (self): if self.icount == 0: raise StopIteration self.icount -= 1 return self.icontent return producer(count, content) -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE FUNCTION test_setof_spi_in_iterator() RETURNS SETOF text AS $$ for s in ('Hello', 'Brave', 'New', 'World'): @@ -38,7 +38,7 @@ $$ yield s plpy.execute('select 2') $$ -LANGUAGE plpythonu; +LANGUAGE plpython3u; -- Test set returning functions SELECT test_setof_as_list(0, 'list'); test_setof_as_list @@ -130,7 +130,7 @@ global x while x <= lim: yield x x = x + 1 -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT ugly(1, 5); ugly ------ @@ -155,7 +155,7 @@ CREATE OR REPLACE FUNCTION get_user_records() RETURNS SETOF users AS $$ return plpy.execute("SELECT * FROM users ORDER BY username") -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT get_user_records(); get_user_records ---------------------- @@ -179,7 +179,7 @@ CREATE OR REPLACE FUNCTION get_user_records2() RETURNS TABLE(fname text, lname text, username text, userid int) AS $$ return plpy.execute("SELECT * FROM users ORDER BY username") -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT get_user_records2(); get_user_records2 
---------------------- diff --git a/src/pl/plpython/expected/plpython_spi.out b/src/pl/plpython/expected/plpython_spi.out index a09df68c7d..8853e2540d 100644 --- a/src/pl/plpython/expected/plpython_spi.out +++ b/src/pl/plpython/expected/plpython_spi.out @@ -6,17 +6,17 @@ CREATE FUNCTION nested_call_one(a text) RETURNS text 'q = "SELECT nested_call_two(''%s'')" % a r = plpy.execute(q) return r[0]' - LANGUAGE plpythonu ; + LANGUAGE plpython3u ; CREATE FUNCTION nested_call_two(a text) RETURNS text AS 'q = "SELECT nested_call_three(''%s'')" % a r = plpy.execute(q) return r[0]' - LANGUAGE plpythonu ; + LANGUAGE plpython3u ; CREATE FUNCTION nested_call_three(a text) RETURNS text AS 'return a' - LANGUAGE plpythonu ; + LANGUAGE plpython3u ; -- some spi stuff CREATE FUNCTION spi_prepared_plan_test_one(a text) RETURNS text AS @@ -30,7 +30,7 @@ except Exception as ex: plpy.error(str(ex)) return None ' - LANGUAGE plpythonu; + LANGUAGE plpython3u; CREATE FUNCTION spi_prepared_plan_test_two(a text) RETURNS text AS 'if "myplan" not in SD: @@ -43,7 +43,7 @@ except Exception as ex: plpy.error(str(ex)) return None ' - LANGUAGE plpythonu; + LANGUAGE plpython3u; CREATE FUNCTION spi_prepared_plan_test_nested(a text) RETURNS text AS 'if "myplan" not in SD: @@ -57,7 +57,7 @@ except Exception as ex: plpy.error(str(ex)) return None ' - LANGUAGE plpythonu; + LANGUAGE plpython3u; CREATE FUNCTION join_sequences(s sequences) RETURNS text AS 'if not s["multipart"]: @@ -69,7 +69,7 @@ for r in rv: seq = seq + r["sequence"] return seq ' - LANGUAGE plpythonu; + LANGUAGE plpython3u; CREATE FUNCTION spi_recursive_sum(a int) RETURNS int AS 'r = 0 @@ -77,7 +77,7 @@ if a > 1: r = plpy.execute("SELECT spi_recursive_sum(%d) as a" % (a-1))[0]["a"] return a + r ' - LANGUAGE plpythonu; + LANGUAGE plpython3u; -- -- spi and nested calls -- @@ -155,7 +155,7 @@ if result.status() > 0: return result.nrows() else: return None -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT result_metadata_test($$SELECT 1 AS foo, '11'::text AS bar UNION SELECT 2, '22'$$); INFO: True INFO: ['foo', 'bar'] @@ -177,7 +177,7 @@ CREATE FUNCTION result_nrows_test(cmd text) RETURNS int AS $$ result = plpy.execute(cmd) return result.nrows() -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT result_nrows_test($$SELECT 1$$); result_nrows_test ------------------- @@ -206,7 +206,7 @@ CREATE FUNCTION result_len_test(cmd text) RETURNS int AS $$ result = plpy.execute(cmd) return len(result) -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT result_len_test($$SELECT 1$$); result_len_test ----------------- @@ -246,7 +246,7 @@ result[-1] = {'c': 1000} result[:2] = [{'c': 10}, {'c': 100}] plpy.info([item['c'] for item in result[:]]) -# raises TypeError, but the message differs on Python 2.6, so silence it +# raises TypeError, catch so further tests could be added try: plpy.info(result['foo']) except TypeError: @@ -254,7 +254,7 @@ except TypeError: else: assert False, "TypeError not raised" -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT result_subscript_test(); INFO: 2 INFO: 4 @@ -272,7 +272,7 @@ result = plpy.execute("select 1 where false") plpy.info(result[:]) -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT result_empty_test(); INFO: [] result_empty_test @@ -285,7 +285,7 @@ AS $$ plan = plpy.prepare(cmd) result = plpy.execute(plan) return str(result) -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT result_str_test($$SELECT 1 AS foo UNION SELECT 2$$); result_str_test ------------------------------------------------------------ @@ 
-306,12 +306,12 @@ for row in res: if row['lname'] == 'doe': does += 1 return does -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE FUNCTION double_cursor_close() RETURNS int AS $$ res = plpy.cursor("select fname, lname from users") res.close() res.close() -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE FUNCTION cursor_fetch() RETURNS int AS $$ res = plpy.cursor("select fname, lname from users") assert len(res.fetch(3)) == 3 @@ -329,7 +329,7 @@ except StopIteration: pass else: assert False, "StopIteration not raised" -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE FUNCTION cursor_mix_next_and_fetch() RETURNS int AS $$ res = plpy.cursor("select fname, lname from users order by fname") assert len(res.fetch(2)) == 2 @@ -342,7 +342,7 @@ except AttributeError: assert item['fname'] == 'rick' assert len(res.fetch(2)) == 1 -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE FUNCTION fetch_after_close() RETURNS int AS $$ res = plpy.cursor("select fname, lname from users") res.close() @@ -352,7 +352,7 @@ except ValueError: pass else: assert False, "ValueError not raised" -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE FUNCTION next_after_close() RETURNS int AS $$ res = plpy.cursor("select fname, lname from users") res.close() @@ -365,7 +365,7 @@ except ValueError: pass else: assert False, "ValueError not raised" -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE FUNCTION cursor_fetch_next_empty() RETURNS int AS $$ res = plpy.cursor("select fname, lname from users where false") assert len(res.fetch(1)) == 0 @@ -378,7 +378,7 @@ except StopIteration: pass else: assert False, "StopIteration not raised" -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE FUNCTION cursor_plan() RETURNS SETOF text AS $$ plan = plpy.prepare( "select fname, lname from users where fname like $1 || '%' order by fname", @@ -387,12 +387,12 @@ for row in plpy.cursor(plan, ["w"]): yield row['fname'] for row in plan.cursor(["j"]): yield row['fname'] -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE FUNCTION cursor_plan_wrong_args() RETURNS SETOF text AS $$ plan = plpy.prepare("select fname, lname from users where fname like $1 || '%'", ["text"]) c = plpy.cursor(plan, ["a", "b"]) -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE TYPE test_composite_type AS ( a1 int, a2 varchar @@ -401,7 +401,7 @@ CREATE OR REPLACE FUNCTION plan_composite_args() RETURNS test_composite_type AS plan = plpy.prepare("select $1 as c1", ["test_composite_type"]) res = plpy.execute(plan, [{"a1": 3, "a2": "label"}]) return res[0]["c1"] -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT simple_cursor_test(); simple_cursor_test -------------------- diff --git a/src/pl/plpython/expected/plpython_subtransaction.out b/src/pl/plpython/expected/plpython_subtransaction.out index 2a56541917..43d9277a33 100644 --- a/src/pl/plpython/expected/plpython_subtransaction.out +++ b/src/pl/plpython/expected/plpython_subtransaction.out @@ -14,7 +14,7 @@ with plpy.subtransaction(): plpy.execute("INSERT INTO subtransaction_tbl VALUES ('oops')") elif what_error == "Python": raise Exception("Python exception") -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT subtransaction_ctx_test(); subtransaction_ctx_test ------------------------- @@ -71,7 +71,7 @@ with plpy.subtransaction(): raise plpy.notice("Swallowed %s(%r)" % (e.__class__.__name__, e.args[0])) return "ok" -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT subtransaction_nested_test(); ERROR: spiexceptions.SyntaxError: syntax error at or near "error" 
LINE 1: error @@ -111,7 +111,7 @@ with plpy.subtransaction(): plpy.execute("INSERT INTO subtransaction_tbl VALUES (2)") plpy.execute("SELECT subtransaction_nested_test('t')") return "ok" -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT subtransaction_deeply_nested_test(); NOTICE: Swallowed SyntaxError('syntax error at or near "error"') subtransaction_deeply_nested_test @@ -133,42 +133,42 @@ TRUNCATE subtransaction_tbl; CREATE FUNCTION subtransaction_exit_without_enter() RETURNS void AS $$ plpy.subtransaction().__exit__(None, None, None) -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE FUNCTION subtransaction_enter_without_exit() RETURNS void AS $$ plpy.subtransaction().__enter__() -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE FUNCTION subtransaction_exit_twice() RETURNS void AS $$ plpy.subtransaction().__enter__() plpy.subtransaction().__exit__(None, None, None) plpy.subtransaction().__exit__(None, None, None) -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE FUNCTION subtransaction_enter_twice() RETURNS void AS $$ plpy.subtransaction().__enter__() plpy.subtransaction().__enter__() -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE FUNCTION subtransaction_exit_same_subtransaction_twice() RETURNS void AS $$ s = plpy.subtransaction() s.__enter__() s.__exit__(None, None, None) s.__exit__(None, None, None) -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE FUNCTION subtransaction_enter_same_subtransaction_twice() RETURNS void AS $$ s = plpy.subtransaction() s.__enter__() s.__enter__() s.__exit__(None, None, None) -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; -- No warnings here, as the subtransaction gets indeed closed CREATE FUNCTION subtransaction_enter_subtransaction_in_with() RETURNS void AS $$ with plpy.subtransaction() as s: s.__enter__() -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE FUNCTION subtransaction_exit_subtransaction_in_with() RETURNS void AS $$ try: @@ -176,7 +176,7 @@ try: s.__exit__(None, None, None) except ValueError as e: raise ValueError(e) -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT subtransaction_exit_without_enter(); ERROR: ValueError: this subtransaction has not been entered CONTEXT: Traceback (most recent call last): @@ -255,7 +255,7 @@ try: plpy.execute(p, ["wrong"]) except plpy.SPIError: plpy.warning("Caught a SPI error") -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT subtransaction_mix_explicit_and_implicit(); WARNING: Caught a SPI error from an explicit subtransaction WARNING: Caught a SPI error @@ -278,7 +278,7 @@ AS $$ s = plpy.subtransaction() s.enter() s.exit(None, None, None) -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT subtransaction_alternative_names(); subtransaction_alternative_names ---------------------------------- @@ -294,7 +294,7 @@ with plpy.subtransaction(): plpy.execute("INSERT INTO subtransaction_tbl VALUES ('a')") except plpy.SPIError: plpy.notice("caught") -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT try_catch_inside_subtransaction(); NOTICE: caught try_catch_inside_subtransaction @@ -318,7 +318,7 @@ with plpy.subtransaction(): plpy.execute("INSERT INTO subtransaction_tbl VALUES (1)") except plpy.SPIError: plpy.notice("caught") -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT pk_violation_inside_subtransaction(); NOTICE: caught pk_violation_inside_subtransaction @@ -340,7 +340,7 @@ with plpy.subtransaction(): cur.fetch(10) fetched = cur.fetch(10); return int(fetched[5]["i"]) -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE FUNCTION 
cursor_aborted_subxact() RETURNS int AS $$ try: with plpy.subtransaction(): @@ -351,7 +351,7 @@ except plpy.SPIError: fetched = cur.fetch(10) return int(fetched[5]["i"]) return 0 # not reached -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE FUNCTION cursor_plan_aborted_subxact() RETURNS int AS $$ try: with plpy.subtransaction(): @@ -364,7 +364,7 @@ except plpy.SPIError: fetched = cur.fetch(5) return fetched[2]["i"] return 0 # not reached -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE FUNCTION cursor_close_aborted_subxact() RETURNS boolean AS $$ try: with plpy.subtransaction(): @@ -374,7 +374,7 @@ except plpy.SPIError: cur.close() return True return False # not reached -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT cursor_in_subxact(); cursor_in_subxact ------------------- diff --git a/src/pl/plpython/expected/plpython_test.out b/src/pl/plpython/expected/plpython_test.out index 39b994f446..13c14119c0 100644 --- a/src/pl/plpython/expected/plpython_test.out +++ b/src/pl/plpython/expected/plpython_test.out @@ -1,7 +1,7 @@ -- first some tests of basic functionality -CREATE EXTENSION plpython2u; +CREATE EXTENSION plpython3u; -- really stupid function just to get the module loaded -CREATE FUNCTION stupid() RETURNS text AS 'return "zarkon"' LANGUAGE plpythonu; +CREATE FUNCTION stupid() RETURNS text AS 'return "zarkon"' LANGUAGE plpython3u; select stupid(); stupid -------- @@ -9,7 +9,7 @@ select stupid(); (1 row) -- check 2/3 versioning -CREATE FUNCTION stupidn() RETURNS text AS 'return "zarkon"' LANGUAGE plpython2u; +CREATE FUNCTION stupidn() RETURNS text AS 'return "zarkon"' LANGUAGE plpython3u; select stupidn(); stupidn --------- @@ -26,7 +26,7 @@ for key in keys: out.append("%s: %s" % (key, u[key])) words = a1 + " " + a2 + " => {" + ", ".join(out) + "}" return words' - LANGUAGE plpythonu; + LANGUAGE plpython3u; select "Argument test #1"(users, fname, lname) from users where lname = 'doe' order by 1; Argument test #1 ----------------------------------------------------------------------- @@ -41,7 +41,7 @@ $$ contents = list(filter(lambda x: not x.startswith("__"), dir(plpy))) contents.sort() return contents -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; select module_contents(); module_contents ----------------- @@ -78,7 +78,7 @@ plpy.info('info', 37, [1, 2, 3]) plpy.notice('notice') plpy.warning('warning') plpy.error('error') -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT elog_test_basic(); INFO: info INFO: 37 diff --git a/src/pl/plpython/expected/plpython_transaction.out b/src/pl/plpython/expected/plpython_transaction.out index 72d1e45a76..659ccefc79 100644 --- a/src/pl/plpython/expected/plpython_transaction.out +++ b/src/pl/plpython/expected/plpython_transaction.out @@ -1,6 +1,6 @@ CREATE TABLE test1 (a int, b text); CREATE PROCEDURE transaction_test1() -LANGUAGE plpythonu +LANGUAGE plpython3u AS $$ for i in range(0, 10): plpy.execute("INSERT INTO test1 (a) VALUES (%d)" % i) @@ -22,7 +22,7 @@ SELECT * FROM test1; TRUNCATE test1; DO -LANGUAGE plpythonu +LANGUAGE plpython3u $$ for i in range(0, 10): plpy.execute("INSERT INTO test1 (a) VALUES (%d)" % i) @@ -44,7 +44,7 @@ SELECT * FROM test1; TRUNCATE test1; -- not allowed in a function CREATE FUNCTION transaction_test2() RETURNS int -LANGUAGE plpythonu +LANGUAGE plpython3u AS $$ for i in range(0, 10): plpy.execute("INSERT INTO test1 (a) VALUES (%d)" % i) @@ -67,7 +67,7 @@ SELECT * FROM test1; -- also not allowed if procedure is called from a function CREATE FUNCTION transaction_test3() RETURNS int -LANGUAGE 
plpythonu +LANGUAGE plpython3u AS $$ plpy.execute("CALL transaction_test1()") return 1 @@ -85,19 +85,19 @@ SELECT * FROM test1; -- DO block inside function CREATE FUNCTION transaction_test4() RETURNS int -LANGUAGE plpythonu +LANGUAGE plpython3u AS $$ -plpy.execute("DO LANGUAGE plpythonu $x$ plpy.commit() $x$") +plpy.execute("DO LANGUAGE plpython3u $x$ plpy.commit() $x$") return 1 $$; SELECT transaction_test4(); ERROR: spiexceptions.InvalidTransactionTermination: spiexceptions.InvalidTransactionTermination: invalid transaction termination CONTEXT: Traceback (most recent call last): PL/Python function "transaction_test4", line 2, in - plpy.execute("DO LANGUAGE plpythonu $x$ plpy.commit() $x$") + plpy.execute("DO LANGUAGE plpython3u $x$ plpy.commit() $x$") PL/Python function "transaction_test4" -- commit inside subtransaction (prohibited) -DO LANGUAGE plpythonu $$ +DO LANGUAGE plpython3u $$ s = plpy.subtransaction() s.enter() plpy.commit() @@ -112,7 +112,7 @@ PL/Python anonymous code block CREATE TABLE test2 (x int); INSERT INTO test2 VALUES (0), (1), (2), (3), (4); TRUNCATE test1; -DO LANGUAGE plpythonu $$ +DO LANGUAGE plpython3u $$ for row in plpy.cursor("SELECT * FROM test2 ORDER BY x"): plpy.execute("INSERT INTO test1 (a) VALUES (%s)" % row['x']) plpy.commit() @@ -135,7 +135,7 @@ SELECT * FROM pg_cursors; -- error in cursor loop with commit TRUNCATE test1; -DO LANGUAGE plpythonu $$ +DO LANGUAGE plpython3u $$ for row in plpy.cursor("SELECT * FROM test2 ORDER BY x"): plpy.execute("INSERT INTO test1 (a) VALUES (12/(%s-2))" % row['x']) plpy.commit() @@ -159,7 +159,7 @@ SELECT * FROM pg_cursors; -- rollback inside cursor loop TRUNCATE test1; -DO LANGUAGE plpythonu $$ +DO LANGUAGE plpython3u $$ for row in plpy.cursor("SELECT * FROM test2 ORDER BY x"): plpy.execute("INSERT INTO test1 (a) VALUES (%s)" % row['x']) plpy.rollback() @@ -176,7 +176,7 @@ SELECT * FROM pg_cursors; -- first commit then rollback inside cursor loop TRUNCATE test1; -DO LANGUAGE plpythonu $$ +DO LANGUAGE plpython3u $$ for row in plpy.cursor("SELECT * FROM test2 ORDER BY x"): plpy.execute("INSERT INTO test1 (a) VALUES (%s)" % row['x']) if row['x'] % 2 == 0: @@ -200,7 +200,7 @@ SELECT * FROM pg_cursors; -- check handling of an error during COMMIT CREATE TABLE testpk (id int PRIMARY KEY); CREATE TABLE testfk(f1 int REFERENCES testpk DEFERRABLE INITIALLY DEFERRED); -DO LANGUAGE plpythonu $$ +DO LANGUAGE plpython3u $$ # this insert will fail during commit: plpy.execute("INSERT INTO testfk VALUES (0)") plpy.commit() @@ -222,7 +222,7 @@ SELECT * FROM testfk; ---- (0 rows) -DO LANGUAGE plpythonu $$ +DO LANGUAGE plpython3u $$ # this insert will fail during commit: plpy.execute("INSERT INTO testfk VALUES (0)") try: diff --git a/src/pl/plpython/expected/plpython_trigger.out b/src/pl/plpython/expected/plpython_trigger.out index 742988a5b5..dd1ca32fa4 100644 --- a/src/pl/plpython/expected/plpython_trigger.out +++ b/src/pl/plpython/expected/plpython_trigger.out @@ -15,20 +15,20 @@ if TD["new"]["fname"] == "william": TD["new"]["fname"] = TD["args"][0] rv = "MODIFY" return rv' - LANGUAGE plpythonu; + LANGUAGE plpython3u; CREATE FUNCTION users_update() returns trigger AS 'if TD["event"] == "UPDATE": if TD["old"]["fname"] != TD["new"]["fname"] and TD["old"]["fname"] == TD["args"][0]: return "SKIP" return None' - LANGUAGE plpythonu; + LANGUAGE plpython3u; CREATE FUNCTION users_delete() RETURNS trigger AS 'if TD["old"]["fname"] == TD["args"][0]: return "SKIP" return None' - LANGUAGE plpythonu; + LANGUAGE plpython3u; CREATE TRIGGER 
users_insert_trig BEFORE INSERT ON users FOR EACH ROW EXECUTE PROCEDURE users_insert ('willem'); CREATE TRIGGER users_update_trig BEFORE UPDATE ON users FOR EACH ROW @@ -71,7 +71,7 @@ CREATE TABLE trigger_test_generated ( i int, j int GENERATED ALWAYS AS (i * 2) STORED ); -CREATE FUNCTION trigger_data() RETURNS trigger LANGUAGE plpythonu AS $$ +CREATE FUNCTION trigger_data() RETURNS trigger LANGUAGE plpython3u AS $$ if 'relid' in TD: TD['relid'] = "bogus:12345" @@ -328,7 +328,7 @@ INSERT INTO trigger_test VALUES (0, 'zero'); CREATE FUNCTION stupid1() RETURNS trigger AS $$ return 37 -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE TRIGGER stupid_trigger1 BEFORE INSERT ON trigger_test FOR EACH ROW EXECUTE PROCEDURE stupid1(); @@ -341,7 +341,7 @@ DROP TRIGGER stupid_trigger1 ON trigger_test; CREATE FUNCTION stupid2() RETURNS trigger AS $$ return "MODIFY" -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE TRIGGER stupid_trigger2 BEFORE DELETE ON trigger_test FOR EACH ROW EXECUTE PROCEDURE stupid2(); @@ -353,7 +353,7 @@ INSERT INTO trigger_test VALUES (0, 'zero'); CREATE FUNCTION stupid3() RETURNS trigger AS $$ return "foo" -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE TRIGGER stupid_trigger3 BEFORE UPDATE ON trigger_test FOR EACH ROW EXECUTE PROCEDURE stupid3(); @@ -365,8 +365,8 @@ DROP TRIGGER stupid_trigger3 ON trigger_test; -- Unicode variant CREATE FUNCTION stupid3u() RETURNS trigger AS $$ - return u"foo" -$$ LANGUAGE plpythonu; + return "foo" +$$ LANGUAGE plpython3u; CREATE TRIGGER stupid_trigger3 BEFORE UPDATE ON trigger_test FOR EACH ROW EXECUTE PROCEDURE stupid3u(); @@ -380,7 +380,7 @@ CREATE FUNCTION stupid4() RETURNS trigger AS $$ del TD["new"] return "MODIFY"; -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE TRIGGER stupid_trigger4 BEFORE UPDATE ON trigger_test FOR EACH ROW EXECUTE PROCEDURE stupid4(); @@ -394,7 +394,7 @@ CREATE FUNCTION stupid5() RETURNS trigger AS $$ TD["new"] = ['foo', 'bar'] return "MODIFY"; -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE TRIGGER stupid_trigger5 BEFORE UPDATE ON trigger_test FOR EACH ROW EXECUTE PROCEDURE stupid5(); @@ -408,7 +408,7 @@ CREATE FUNCTION stupid6() RETURNS trigger AS $$ TD["new"] = {1: 'foo', 2: 'bar'} return "MODIFY"; -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE TRIGGER stupid_trigger6 BEFORE UPDATE ON trigger_test FOR EACH ROW EXECUTE PROCEDURE stupid6(); @@ -422,7 +422,7 @@ CREATE FUNCTION stupid7() RETURNS trigger AS $$ TD["new"] = {'v': 'foo', 'a': 'bar'} return "MODIFY"; -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE TRIGGER stupid_trigger7 BEFORE UPDATE ON trigger_test FOR EACH ROW EXECUTE PROCEDURE stupid7(); @@ -434,9 +434,9 @@ DROP TRIGGER stupid_trigger7 ON trigger_test; -- Unicode variant CREATE FUNCTION stupid7u() RETURNS trigger AS $$ - TD["new"] = {u'v': 'foo', u'a': 'bar'} + TD["new"] = {'v': 'foo', 'a': 'bar'} return "MODIFY" -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE TRIGGER stupid_trigger7 BEFORE UPDATE ON trigger_test FOR EACH ROW EXECUTE PROCEDURE stupid7u(); @@ -461,7 +461,7 @@ CREATE FUNCTION test_null() RETURNS trigger AS $$ TD["new"]['v'] = None return "MODIFY" -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE TRIGGER test_null_trigger BEFORE UPDATE ON trigger_test FOR EACH ROW EXECUTE PROCEDURE test_null(); @@ -481,7 +481,7 @@ SET DateStyle = 'ISO'; CREATE FUNCTION set_modif_time() RETURNS trigger AS $$ TD['new']['modif_time'] = '2010-10-13 21:57:28.930486' return 'MODIFY' -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE 
TABLE pb (a TEXT, modif_time TIMESTAMP(0) WITHOUT TIME ZONE); CREATE TRIGGER set_modif_time BEFORE UPDATE ON pb FOR EACH ROW EXECUTE PROCEDURE set_modif_time(); @@ -507,7 +507,7 @@ CREATE FUNCTION composite_trigger_f() RETURNS trigger AS $$ TD['new']['f1'] = (3, False) TD['new']['f2'] = {'k': 7, 'l': 'yes', 'ignored': 10} return 'MODIFY' -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE TRIGGER composite_trigger BEFORE INSERT ON composite_trigger_test FOR EACH ROW EXECUTE PROCEDURE composite_trigger_f(); INSERT INTO composite_trigger_test VALUES (NULL, NULL); @@ -521,7 +521,7 @@ SELECT * FROM composite_trigger_test; CREATE TABLE composite_trigger_noop_test (f1 comp1, f2 comp2); CREATE FUNCTION composite_trigger_noop_f() RETURNS trigger AS $$ return 'MODIFY' -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE TRIGGER composite_trigger_noop BEFORE INSERT ON composite_trigger_noop_test FOR EACH ROW EXECUTE PROCEDURE composite_trigger_noop_f(); INSERT INTO composite_trigger_noop_test VALUES (NULL, NULL); @@ -540,7 +540,7 @@ CREATE TYPE comp3 AS (c1 comp1, c2 comp2, m integer); CREATE TABLE composite_trigger_nested_test(c comp3); CREATE FUNCTION composite_trigger_nested_f() RETURNS trigger AS $$ return 'MODIFY' -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE TRIGGER composite_trigger_nested BEFORE INSERT ON composite_trigger_nested_test FOR EACH ROW EXECUTE PROCEDURE composite_trigger_nested_f(); INSERT INTO composite_trigger_nested_test VALUES (NULL); @@ -555,7 +555,7 @@ SELECT * FROM composite_trigger_nested_test; (3 rows) -- check that using a function as a trigger over two tables works correctly -CREATE FUNCTION trig1234() RETURNS trigger LANGUAGE plpythonu AS $$ +CREATE FUNCTION trig1234() RETURNS trigger LANGUAGE plpython3u AS $$ TD["new"]["data"] = '1234' return 'MODIFY' $$; @@ -581,7 +581,7 @@ SELECT * FROM b; -- check that SQL run in trigger code can see transition tables CREATE TABLE transition_table_test (id int, name text); INSERT INTO transition_table_test VALUES (1, 'a'); -CREATE FUNCTION transition_table_test_f() RETURNS trigger LANGUAGE plpythonu AS +CREATE FUNCTION transition_table_test_f() RETURNS trigger LANGUAGE plpython3u AS $$ rv = plpy.execute("SELECT * FROM old_table") assert(rv.nrows() == 1) @@ -601,7 +601,7 @@ DROP TABLE transition_table_test; DROP FUNCTION transition_table_test_f(); -- dealing with generated columns CREATE FUNCTION generated_test_func1() RETURNS trigger -LANGUAGE plpythonu +LANGUAGE plpython3u AS $$ TD['new']['j'] = 5 # not allowed return 'MODIFY' diff --git a/src/pl/plpython/expected/plpython_types.out b/src/pl/plpython/expected/plpython_types.out index 0a2659fe29..a470911c2e 100644 --- a/src/pl/plpython/expected/plpython_types.out +++ b/src/pl/plpython/expected/plpython_types.out @@ -7,23 +7,23 @@ CREATE FUNCTION test_type_conversion_bool(x bool) RETURNS bool AS $$ plpy.info(x, type(x)) return x -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_bool(true); -INFO: (True, ) +INFO: (True, ) test_type_conversion_bool --------------------------- t (1 row) SELECT * FROM test_type_conversion_bool(false); -INFO: (False, ) +INFO: (False, ) test_type_conversion_bool --------------------------- f (1 row) SELECT * FROM test_type_conversion_bool(null); -INFO: (None, ) +INFO: (None, ) test_type_conversion_bool --------------------------- @@ -48,7 +48,7 @@ elif n == 5: ret = [0] plpy.info(ret, not not ret) return ret -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM 
test_type_conversion_bool_other(0);
 INFO: (0, False)
  test_type_conversion_bool_other
@@ -94,16 +94,16 @@ INFO: ([0], True)
 CREATE FUNCTION test_type_conversion_char(x char) RETURNS char AS $$
 plpy.info(x, type(x))
 return x
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT * FROM test_type_conversion_char('a');
-INFO: ('a', <type 'str'>)
+INFO: ('a', <class 'str'>)
  test_type_conversion_char
 ---------------------------
  a
 (1 row)
 SELECT * FROM test_type_conversion_char(null);
-INFO: (None, <type 'NoneType'>)
+INFO: (None, <class 'NoneType'>)
  test_type_conversion_char
 ---------------------------
@@ -112,23 +112,23 @@ INFO: (None, <type 'NoneType'>)
 CREATE FUNCTION test_type_conversion_int2(x int2) RETURNS int2 AS $$
 plpy.info(x, type(x))
 return x
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT * FROM test_type_conversion_int2(100::int2);
-INFO: (100, <type 'int'>)
+INFO: (100, <class 'int'>)
  test_type_conversion_int2
 ---------------------------
  100
 (1 row)
 SELECT * FROM test_type_conversion_int2(-100::int2);
-INFO: (-100, <type 'int'>)
+INFO: (-100, <class 'int'>)
  test_type_conversion_int2
 ---------------------------
  -100
 (1 row)
 SELECT * FROM test_type_conversion_int2(null);
-INFO: (None, <type 'NoneType'>)
+INFO: (None, <class 'NoneType'>)
  test_type_conversion_int2
 ---------------------------
@@ -137,23 +137,23 @@ INFO: (None, <type 'NoneType'>)
 CREATE FUNCTION test_type_conversion_int4(x int4) RETURNS int4 AS $$
 plpy.info(x, type(x))
 return x
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT * FROM test_type_conversion_int4(100);
-INFO: (100, <type 'int'>)
+INFO: (100, <class 'int'>)
  test_type_conversion_int4
 ---------------------------
  100
 (1 row)
 SELECT * FROM test_type_conversion_int4(-100);
-INFO: (-100, <type 'int'>)
+INFO: (-100, <class 'int'>)
  test_type_conversion_int4
 ---------------------------
  -100
 (1 row)
 SELECT * FROM test_type_conversion_int4(null);
-INFO: (None, <type 'NoneType'>)
+INFO: (None, <class 'NoneType'>)
  test_type_conversion_int4
 ---------------------------
@@ -162,30 +162,30 @@ INFO: (None, <type 'NoneType'>)
 CREATE FUNCTION test_type_conversion_int8(x int8) RETURNS int8 AS $$
 plpy.info(x, type(x))
 return x
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT * FROM test_type_conversion_int8(100);
-INFO: (100L, <type 'long'>)
+INFO: (100, <class 'int'>)
  test_type_conversion_int8
 ---------------------------
  100
 (1 row)
 SELECT * FROM test_type_conversion_int8(-100);
-INFO: (-100L, <type 'long'>)
+INFO: (-100, <class 'int'>)
  test_type_conversion_int8
 ---------------------------
  -100
 (1 row)
 SELECT * FROM test_type_conversion_int8(5000000000);
-INFO: (5000000000L, <type 'long'>)
+INFO: (5000000000, <class 'int'>)
  test_type_conversion_int8
 ---------------------------
  5000000000
 (1 row)
 SELECT * FROM test_type_conversion_int8(null);
-INFO: (None, <type 'NoneType'>)
+INFO: (None, <class 'NoneType'>)
  test_type_conversion_int8
 ---------------------------
@@ -196,7 +196,7 @@ CREATE FUNCTION test_type_conversion_numeric(x numeric) RETURNS numeric AS $$
 # between decimal and cdecimal
 plpy.info(str(x), x.__class__.__name__)
 return x
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT * FROM test_type_conversion_numeric(100);
 INFO: ('100', 'Decimal')
  test_type_conversion_numeric
@@ -256,30 +256,30 @@ INFO: ('None', 'NoneType')
 CREATE FUNCTION test_type_conversion_float4(x float4) RETURNS float4 AS $$
 plpy.info(x, type(x))
 return x
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT * FROM test_type_conversion_float4(100);
-INFO: (100.0, <type 'float'>)
+INFO: (100.0, <class 'float'>)
  test_type_conversion_float4
 -----------------------------
  100
 (1 row)
 SELECT * FROM test_type_conversion_float4(-100);
-INFO: (-100.0, <type 'float'>)
+INFO: (-100.0, <class 'float'>)
  test_type_conversion_float4
 -----------------------------
  -100
 (1 row)
 SELECT * FROM test_type_conversion_float4(5000.5);
-INFO: (5000.5, <type 'float'>)
+INFO: (5000.5, <class 'float'>)
  test_type_conversion_float4
 -----------------------------
  5000.5
 (1 row)
 SELECT * FROM test_type_conversion_float4(null);
-INFO: (None, <type 'NoneType'>)
+INFO: (None, <class 'NoneType'>)
  test_type_conversion_float4
 -----------------------------
@@ -288,37 +288,37 @@ INFO: (None, <type 'NoneType'>)
 CREATE FUNCTION test_type_conversion_float8(x float8) RETURNS float8 AS $$
 plpy.info(x, type(x))
 return x
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT * FROM test_type_conversion_float8(100);
-INFO: (100.0, <type 'float'>)
+INFO: (100.0, <class 'float'>)
  test_type_conversion_float8
 -----------------------------
  100
 (1 row)
 SELECT * FROM test_type_conversion_float8(-100);
-INFO: (-100.0, <type 'float'>)
+INFO: (-100.0, <class 'float'>)
  test_type_conversion_float8
 -----------------------------
  -100
 (1 row)
 SELECT * FROM test_type_conversion_float8(5000000000.5);
-INFO: (5000000000.5, <type 'float'>)
+INFO: (5000000000.5, <class 'float'>)
  test_type_conversion_float8
 -----------------------------
  5000000000.5
 (1 row)
 SELECT * FROM test_type_conversion_float8(null);
-INFO: (None, <type 'NoneType'>)
+INFO: (None, <class 'NoneType'>)
  test_type_conversion_float8
 -----------------------------
 (1 row)
 SELECT * FROM test_type_conversion_float8(100100100.654321);
-INFO: (100100100.654321, <type 'float'>)
+INFO: (100100100.654321, <class 'float'>)
  test_type_conversion_float8
 -----------------------------
  100100100.654321
@@ -327,23 +327,23 @@ INFO: (100100100.654321, <type 'float'>)
 CREATE FUNCTION test_type_conversion_oid(x oid) RETURNS oid AS $$
 plpy.info(x, type(x))
 return x
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT * FROM test_type_conversion_oid(100);
-INFO: (100L, <type 'long'>)
+INFO: (100, <class 'int'>)
  test_type_conversion_oid
 --------------------------
  100
 (1 row)
 SELECT * FROM test_type_conversion_oid(2147483649);
-INFO: (2147483649L, <type 'long'>)
+INFO: (2147483649, <class 'int'>)
  test_type_conversion_oid
 --------------------------
  2147483649
 (1 row)
 SELECT * FROM test_type_conversion_oid(null);
-INFO: (None, <type 'NoneType'>)
+INFO: (None, <class 'NoneType'>)
  test_type_conversion_oid
 --------------------------
@@ -352,16 +352,16 @@ INFO: (None, <type 'NoneType'>)
 CREATE FUNCTION test_type_conversion_text(x text) RETURNS text AS $$
 plpy.info(x, type(x))
 return x
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT * FROM test_type_conversion_text('hello world');
-INFO: ('hello world', <type 'str'>)
+INFO: ('hello world', <class 'str'>)
  test_type_conversion_text
 ---------------------------
  hello world
 (1 row)
 SELECT * FROM test_type_conversion_text(null);
-INFO: (None, <type 'NoneType'>)
+INFO: (None, <class 'NoneType'>)
  test_type_conversion_text
 ---------------------------
@@ -370,23 +370,23 @@ INFO: (None, <type 'NoneType'>)
 CREATE FUNCTION test_type_conversion_bytea(x bytea) RETURNS bytea AS $$
 plpy.info(x, type(x))
 return x
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT * FROM test_type_conversion_bytea('hello world');
-INFO: ('hello world', <type 'str'>)
+INFO: (b'hello world', <class 'bytes'>)
  test_type_conversion_bytea
 ----------------------------
  \x68656c6c6f20776f726c64
 (1 row)
 SELECT * FROM test_type_conversion_bytea(E'null\\000byte');
-INFO: ('null\x00byte', <type 'str'>)
+INFO: (b'null\x00byte', <class 'bytes'>)
  test_type_conversion_bytea
 ----------------------------
  \x6e756c6c0062797465
 (1 row)
 SELECT * FROM test_type_conversion_bytea(null);
-INFO: (None, <type 'NoneType'>)
+INFO: (None, <class 'NoneType'>)
  test_type_conversion_bytea
 ----------------------------
@@ -395,14 +395,14 @@ INFO: (None, <type 'NoneType'>)
 CREATE FUNCTION test_type_marshal() RETURNS bytea AS $$
 import marshal
 return marshal.dumps('hello world')
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 CREATE FUNCTION test_type_unmarshal(x bytea) RETURNS text AS $$
 import marshal
 try:
     return marshal.loads(x)
 except ValueError as e:
     return 'FAILED: ' + str(e)
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT test_type_unmarshal(x) FROM test_type_marshal() x;
  test_type_unmarshal
 ---------------------
@@ -415,7 +415,7 @@ SELECT test_type_unmarshal(x) FROM test_type_marshal() x;
 CREATE DOMAIN booltrue AS bool CHECK (VALUE IS TRUE OR VALUE IS NULL);
 CREATE FUNCTION test_type_conversion_booltrue(x booltrue, y bool) RETURNS booltrue AS $$
 return y
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT * FROM test_type_conversion_booltrue(true, true);
  test_type_conversion_booltrue
 -------------------------------
@@ -432,21 +432,21 @@ CREATE DOMAIN uint2 AS int2 CHECK (VALUE >= 0);
 CREATE FUNCTION test_type_conversion_uint2(x uint2, y int) RETURNS uint2 AS $$
 plpy.info(x, type(x))
 return y
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT * FROM test_type_conversion_uint2(100::uint2, 50);
-INFO: (100, <type 'int'>)
+INFO: (100, <class 'int'>)
  test_type_conversion_uint2
 ----------------------------
  50
 (1 row)
 SELECT * FROM test_type_conversion_uint2(100::uint2, -50);
-INFO: (100, <type 'int'>)
+INFO: (100, <class 'int'>)
 ERROR: value for domain uint2 violates check constraint "uint2_check"
 CONTEXT: while creating return value
 PL/Python function "test_type_conversion_uint2"
 SELECT * FROM test_type_conversion_uint2(null, 1);
-INFO: (None, <type 'NoneType'>)
+INFO: (None, <class 'NoneType'>)
  test_type_conversion_uint2
 ----------------------------
  1
@@ -455,7 +455,7 @@ INFO: (None, <type 'NoneType'>)
 CREATE DOMAIN nnint AS int CHECK (VALUE IS NOT NULL);
 CREATE FUNCTION test_type_conversion_nnint(x nnint, y int) RETURNS nnint AS $$
 return y
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT * FROM test_type_conversion_nnint(10, 20);
  test_type_conversion_nnint
 ----------------------------
@@ -472,9 +472,9 @@ CREATE DOMAIN bytea10 AS bytea CHECK (octet_length(VALUE) = 10 AND VALUE IS NOT
 CREATE FUNCTION test_type_conversion_bytea10(x bytea10, y bytea) RETURNS bytea10 AS $$
 plpy.info(x, type(x))
 return y
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT * FROM test_type_conversion_bytea10('hello wold', 'hello wold');
-INFO: ('hello wold', <type 'str'>)
+INFO: (b'hello wold', <class 'bytes'>)
  test_type_conversion_bytea10
 ------------------------------
  \x68656c6c6f20776f6c64
 (1 row)
 SELECT * FROM test_type_conversion_bytea10('hello world', 'hello wold');
 ERROR: value for domain bytea10 violates check constraint "bytea10_check"
 SELECT * FROM test_type_conversion_bytea10('hello word', 'hello world');
-INFO: ('hello word', <type 'str'>)
+INFO: (b'hello word', <class 'bytes'>)
 ERROR: value for domain bytea10 violates check constraint "bytea10_check"
 CONTEXT: while creating return value
 PL/Python function "test_type_conversion_bytea10"
 SELECT * FROM test_type_conversion_bytea10(null, 'hello word');
 ERROR: value for domain bytea10 violates check constraint "bytea10_check"
 SELECT * FROM test_type_conversion_bytea10('hello word', null);
-INFO: ('hello word', <type 'str'>)
+INFO: (b'hello word', <class 'bytes'>)
 ERROR: value for domain bytea10 violates check constraint "bytea10_check"
 CONTEXT: while creating return value
 PL/Python function "test_type_conversion_bytea10"
@@ -500,58 +500,58 @@ PL/Python function "test_type_conversion_bytea10"
 --
 -- Arrays
 --
 CREATE FUNCTION test_type_conversion_array_int4(x int4[]) RETURNS int4[] AS $$
 plpy.info(x, type(x))
 return x
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT * FROM test_type_conversion_array_int4(ARRAY[0, 100]);
-INFO: ([0, 100], <type 'list'>)
+INFO: ([0, 100], <class 'list'>)
  test_type_conversion_array_int4
 ---------------------------------
  {0,100}
 (1 row)
 SELECT * FROM test_type_conversion_array_int4(ARRAY[0,-100,55]);
-INFO: ([0, -100, 55], <type 'list'>)
+INFO: ([0, -100, 55], <class 'list'>)
  test_type_conversion_array_int4
 ---------------------------------
  {0,-100,55}
 (1 row)
 SELECT * FROM test_type_conversion_array_int4(ARRAY[NULL,1]);
-INFO: ([None, 1], <type 'list'>)
+INFO: ([None, 1], <class 'list'>)
  test_type_conversion_array_int4
 ---------------------------------
  {NULL,1}
 (1 row)
 SELECT * FROM test_type_conversion_array_int4(ARRAY[]::integer[]);
-INFO: ([], <type 'list'>)
+INFO: ([], <class 'list'>)
  test_type_conversion_array_int4
 ---------------------------------
  {}
 (1 row)
 SELECT * FROM test_type_conversion_array_int4(NULL);
-INFO: (None, <type 'NoneType'>)
+INFO: (None, <class 'NoneType'>)
  test_type_conversion_array_int4
 ---------------------------------
 (1 row)
 SELECT * FROM test_type_conversion_array_int4(ARRAY[[1,2,3],[4,5,6]]);
-INFO: ([[1, 2, 3], [4, 5, 6]], <type 'list'>)
+INFO: ([[1, 2, 3], [4, 5, 6]], <class 'list'>)
  test_type_conversion_array_int4
 ---------------------------------
  {{1,2,3},{4,5,6}}
 (1 row)
 SELECT * FROM test_type_conversion_array_int4(ARRAY[[[1,2,NULL],[NULL,5,6]],[[NULL,8,9],[10,11,12]]]);
-INFO: ([[[1, 2, None], [None, 5, 6]], [[None, 8, 9], [10, 11, 12]]], <type 'list'>)
+INFO: ([[[1, 2, None], [None, 5, 6]], [[None, 8, 9], [10, 11, 12]]], <class 'list'>)
  test_type_conversion_array_int4
 ---------------------------------------------------
  {{{1,2,NULL},{NULL,5,6}},{{NULL,8,9},{10,11,12}}}
 (1 row)
 SELECT * FROM test_type_conversion_array_int4('[2:4]={1,2,3}');
-INFO: ([1, 2, 3], <type 'list'>)
+INFO: ([1, 2, 3], <class 'list'>)
  test_type_conversion_array_int4
 ---------------------------------
  {1,2,3}
@@ -560,9 +560,9 @@ INFO: ([1, 2, 3], <type 'list'>)
 CREATE FUNCTION test_type_conversion_array_int8(x int8[]) RETURNS int8[] AS $$
 plpy.info(x, type(x))
 return x
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT * FROM test_type_conversion_array_int8(ARRAY[[[1,2,NULL],[NULL,5,6]],[[NULL,8,9],[10,11,12]]]::int8[]);
-INFO: ([[[1L, 2L, None], [None, 5L, 6L]], [[None, 8L, 9L], [10L, 11L, 12L]]], <type 'list'>)
+INFO: ([[[1, 2, None], [None, 5, 6]], [[None, 8, 9], [10, 11, 12]]], <class 'list'>)
  test_type_conversion_array_int8
 ---------------------------------------------------
  {{{1,2,NULL},{NULL,5,6}},{{NULL,8,9},{10,11,12}}}
 (1 row)
@@ -571,10 +571,10 @@ INFO: ([[[1L, 2L, None], [None, 5L, 6L]], [[None, 8L, 9L], [10L, 11L, 12L]]], <
 CREATE FUNCTION test_type_conversion_array_date(x date[]) RETURNS date[] AS $$
 plpy.info(x, type(x))
 return x
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT * FROM test_type_conversion_array_date(ARRAY[[['2016-09-21','2016-09-22',NULL],[NULL,'2016-10-21','2016-10-22']],
  [[NULL,'2016-11-21','2016-10-21'],['2015-09-21','2015-09-22','2014-09-21']]]::date[]);
-INFO: ([[['09-21-2016', '09-22-2016', None], [None, '10-21-2016', '10-22-2016']], [[None, '11-21-2016', '10-21-2016'], ['09-21-2015', '09-22-2015', '09-21-2014']]], <type 'list'>)
+INFO: ([[['09-21-2016', '09-22-2016', None], [None, '10-21-2016', '10-22-2016']], [[None, '11-21-2016', '10-21-2016'], ['09-21-2015', '09-22-2015', '09-21-2014']]], <class 'list'>)
  test_type_conversion_array_date
 ---------------------------------------------------------------------------------------------------------------------------------
  {{{09-21-2016,09-22-2016,NULL},{NULL,10-21-2016,10-22-2016}},{{NULL,11-21-2016,10-21-2016},{09-21-2015,09-22-2015,09-21-2014}}}
 (1 row)
 CREATE FUNCTION test_type_conversion_array_timestamp(x timestamp[]) RETURNS timestamp[] AS $$
 plpy.info(x, type(x))
 return x
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT * FROM test_type_conversion_array_timestamp(ARRAY[[['2016-09-21 15:34:24.078792-04','2016-10-22 11:34:24.078795-04',NULL],
  [NULL,'2016-10-21 11:34:25.078792-04','2016-10-21 11:34:24.098792-04']],
  [[NULL,'2016-01-21 11:34:24.078792-04','2016-11-21 11:34:24.108792-04'],
 ['2015-09-21 11:34:24.079792-04','2014-09-21 11:34:24.078792-04','2013-09-21
11:34:24.078792-04']]]::timestamp[]); -INFO: ([[['Wed Sep 21 15:34:24.078792 2016', 'Sat Oct 22 11:34:24.078795 2016', None], [None, 'Fri Oct 21 11:34:25.078792 2016', 'Fri Oct 21 11:34:24.098792 2016']], [[None, 'Thu Jan 21 11:34:24.078792 2016', 'Mon Nov 21 11:34:24.108792 2016'], ['Mon Sep 21 11:34:24.079792 2015', 'Sun Sep 21 11:34:24.078792 2014', 'Sat Sep 21 11:34:24.078792 2013']]], ) +INFO: ([[['Wed Sep 21 15:34:24.078792 2016', 'Sat Oct 22 11:34:24.078795 2016', None], [None, 'Fri Oct 21 11:34:25.078792 2016', 'Fri Oct 21 11:34:24.098792 2016']], [[None, 'Thu Jan 21 11:34:24.078792 2016', 'Mon Nov 21 11:34:24.108792 2016'], ['Mon Sep 21 11:34:24.079792 2015', 'Sun Sep 21 11:34:24.078792 2014', 'Sat Sep 21 11:34:24.078792 2013']]], ) test_type_conversion_array_timestamp ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ {{{"Wed Sep 21 15:34:24.078792 2016","Sat Oct 22 11:34:24.078795 2016",NULL},{NULL,"Fri Oct 21 11:34:25.078792 2016","Fri Oct 21 11:34:24.098792 2016"}},{{NULL,"Thu Jan 21 11:34:24.078792 2016","Mon Nov 21 11:34:24.108792 2016"},{"Mon Sep 21 11:34:24.079792 2015","Sun Sep 21 11:34:24.078792 2014","Sat Sep 21 11:34:24.078792 2013"}}} @@ -598,9 +598,9 @@ CREATE OR REPLACE FUNCTION pyreturnmultidemint4(h int4, i int4, j int4, k int4 ) m = [[[[x for x in range(h)] for y in range(i)] for z in range(j)] for w in range(k)] plpy.info(m, type(m)) return m -$BODY$ LANGUAGE plpythonu; +$BODY$ LANGUAGE plpython3u; select pyreturnmultidemint4(8,5,3,2); -INFO: ([[[[0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7]], [[0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7]], [[0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7]]], [[[0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7]], [[0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7]], [[0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7]]]], ) +INFO: ([[[[0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7]], [[0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7]], [[0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7]]], [[[0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7]], [[0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7]], [[0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7]]]], ) pyreturnmultidemint4 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- {{{{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7}},{{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7}},{{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7}}},{{{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7}},{{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7}},{{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7}}}} @@ -610,9 +610,9 @@ CREATE OR REPLACE FUNCTION pyreturnmultidemint8(h int4, i int4, j int4, k int4 ) m = [[[[x for x in range(h)] for y in range(i)] for z in range(j)] for w in range(k)] plpy.info(m, type(m)) return m -$BODY$ LANGUAGE plpythonu; +$BODY$ LANGUAGE plpython3u; select pyreturnmultidemint8(5,5,3,2); -INFO: ([[[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]], [[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]], [[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]], [[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]], [[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]], [[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]]], ) +INFO: ([[[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]], [[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]], [[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]], [[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]], [[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]], [[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]]], ) pyreturnmultidemint8 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- {{{{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4}},{{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4}},{{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4}}},{{{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4}},{{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4}},{{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4}}}} @@ -622,9 +622,9 @@ CREATE OR REPLACE FUNCTION pyreturnmultidemfloat4(h int4, i int4, j int4, k int4 m = [[[[x for x in range(h)] for y in range(i)] for z in range(j)] for w in range(k)] plpy.info(m, type(m)) return m -$BODY$ LANGUAGE plpythonu; +$BODY$ LANGUAGE 
plpython3u; select pyreturnmultidemfloat4(6,5,3,2); -INFO: ([[[[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]], [[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]], [[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]]], [[[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]], [[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]], [[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]]]], ) +INFO: ([[[[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]], [[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]], [[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]]], [[[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]], [[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]], [[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]]]], ) pyreturnmultidemfloat4 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- {{{{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5}},{{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5}},{{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5}}},{{{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5}},{{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5}},{{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5}}}} @@ -634,9 +634,9 @@ CREATE OR REPLACE FUNCTION pyreturnmultidemfloat8(h int4, i int4, j int4, k int4 m = [[[[x for x in range(h)] for y in range(i)] for z in range(j)] for w in range(k)] plpy.info(m, type(m)) return m -$BODY$ LANGUAGE plpythonu; +$BODY$ LANGUAGE plpython3u; select pyreturnmultidemfloat8(7,5,3,2); -INFO: ([[[[0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6]], [[0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6]], [[0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6]]], [[[0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6]], [[0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6]], [[0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6]]]], ) +INFO: ([[[[0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6]], [[0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6]], [[0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 
6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6]]], [[[0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6]], [[0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6]], [[0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6]]]], ) pyreturnmultidemfloat8 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- {{{{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6}},{{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6}},{{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6}}},{{{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6}},{{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6}},{{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6}}}} @@ -645,16 +645,16 @@ INFO: ([[[[0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], CREATE FUNCTION test_type_conversion_array_text(x text[]) RETURNS text[] AS $$ plpy.info(x, type(x)) return x -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_array_text(ARRAY['foo', 'bar']); -INFO: (['foo', 'bar'], ) +INFO: (['foo', 'bar'], ) test_type_conversion_array_text --------------------------------- {foo,bar} (1 row) SELECT * FROM test_type_conversion_array_text(ARRAY[['foo', 'bar'],['foo2', 'bar2']]); -INFO: ([['foo', 'bar'], ['foo2', 'bar2']], ) +INFO: ([['foo', 'bar'], ['foo2', 'bar2']], ) test_type_conversion_array_text --------------------------------- {{foo,bar},{foo2,bar2}} @@ -663,9 +663,9 @@ INFO: ([['foo', 'bar'], ['foo2', 'bar2']], ) CREATE FUNCTION test_type_conversion_array_bytea(x bytea[]) RETURNS bytea[] AS $$ plpy.info(x, type(x)) return x -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_array_bytea(ARRAY[E'\\xdeadbeef'::bytea, NULL]); -INFO: (['\xde\xad\xbe\xef', None], ) +INFO: ([b'\xde\xad\xbe\xef', None], ) test_type_conversion_array_bytea ---------------------------------- {"\\xdeadbeef",NULL} @@ -673,7 +673,7 @@ INFO: (['\xde\xad\xbe\xef', None], ) CREATE FUNCTION test_type_conversion_array_mixed1() RETURNS text[] AS $$ return [123, 'abc'] -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_array_mixed1(); test_type_conversion_array_mixed1 ----------------------------------- @@ -682,14 +682,14 @@ SELECT * FROM test_type_conversion_array_mixed1(); CREATE FUNCTION test_type_conversion_array_mixed2() RETURNS int[] AS $$ return [123, 'abc'] -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_array_mixed2(); ERROR: invalid input syntax for type integer: "abc" CONTEXT: while creating return value PL/Python function "test_type_conversion_array_mixed2" CREATE FUNCTION test_type_conversion_mdarray_malformed() RETURNS int[] AS $$ return [[1,2,3],[4,5]] -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM 
test_type_conversion_mdarray_malformed();
 ERROR: wrong length of inner sequence: has length 2, but 3 was expected
 DETAIL: To construct a multidimensional array, the inner sequences must all have the same length.
@@ -697,14 +697,14 @@ CONTEXT: while creating return value
 PL/Python function "test_type_conversion_mdarray_malformed"
 CREATE FUNCTION test_type_conversion_mdarray_toodeep() RETURNS int[] AS $$
 return [[[[[[[1]]]]]]]
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT * FROM test_type_conversion_mdarray_toodeep();
 ERROR: number of array dimensions exceeds the maximum allowed (6)
 CONTEXT: while creating return value
 PL/Python function "test_type_conversion_mdarray_toodeep"
 CREATE FUNCTION test_type_conversion_array_record() RETURNS type_record[] AS $$
 return [{'first': 'one', 'second': 42}, {'first': 'two', 'second': 11}]
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT * FROM test_type_conversion_array_record();
  test_type_conversion_array_record
 -----------------------------------
@@ -713,7 +713,7 @@ SELECT * FROM test_type_conversion_array_record();
 CREATE FUNCTION test_type_conversion_array_string() RETURNS text[] AS $$
 return 'abc'
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT * FROM test_type_conversion_array_string();
  test_type_conversion_array_string
 -----------------------------------
@@ -722,7 +722,7 @@ SELECT * FROM test_type_conversion_array_string();
 CREATE FUNCTION test_type_conversion_array_tuple() RETURNS text[] AS $$
 return ('abc', 'def')
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT * FROM test_type_conversion_array_tuple();
  test_type_conversion_array_tuple
 ----------------------------------
@@ -731,7 +731,7 @@ SELECT * FROM test_type_conversion_array_tuple();
 CREATE FUNCTION test_type_conversion_array_error() RETURNS int[] AS $$
 return 5
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT * FROM test_type_conversion_array_error();
 ERROR: return value of function with array return type is not a Python sequence
 CONTEXT: while creating return value
@@ -743,16 +743,16 @@ CREATE DOMAIN ordered_pair_domain AS integer[] CHECK (array_length(VALUE,1)=2 AN
 CREATE FUNCTION test_type_conversion_array_domain(x ordered_pair_domain) RETURNS ordered_pair_domain AS $$
 plpy.info(x, type(x))
 return x
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT * FROM test_type_conversion_array_domain(ARRAY[0, 100]::ordered_pair_domain);
-INFO: ([0, 100], <type 'list'>)
+INFO: ([0, 100], <class 'list'>)
  test_type_conversion_array_domain
 -----------------------------------
 {0,100}
 (1 row)
 SELECT * FROM test_type_conversion_array_domain(NULL::ordered_pair_domain);
-INFO: (None, <type 'NoneType'>)
+INFO: (None, <class 'NoneType'>)
  test_type_conversion_array_domain
 -----------------------------------
@@ -760,7 +760,7 @@ INFO: (None, <type 'NoneType'>)
 CREATE FUNCTION test_type_conversion_array_domain_check_violation() RETURNS ordered_pair_domain AS $$
 return [2,1]
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT * FROM test_type_conversion_array_domain_check_violation();
 ERROR: value for domain ordered_pair_domain violates check constraint "ordered_pair_domain_check"
 CONTEXT: while creating return value
@@ -771,9 +771,9 @@ PL/Python function "test_type_conversion_array_domain_check_violation"
 CREATE FUNCTION test_read_uint2_array(x uint2[]) RETURNS uint2 AS $$
 plpy.info(x, type(x))
 return x[0]
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 select test_read_uint2_array(array[1::uint2]);
-INFO: ([1], <type 'list'>)
+INFO: ([1], <class 'list'>)
  test_read_uint2_array
 -----------------------
  1
@@ -781,7 +781,7 @@ INFO: ([1], <type 'list'>)
 CREATE FUNCTION test_build_uint2_array(x int2) RETURNS uint2[] AS $$
 return [x, x]
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 select test_build_uint2_array(1::int2);
  test_build_uint2_array
 ------------------------
@@ -800,7 +800,7 @@ PL/Python function "test_build_uint2_array"
 CREATE FUNCTION test_type_conversion_domain_array(x integer[]) RETURNS ordered_pair_domain[] AS $$
 return [x, x]
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 select test_type_conversion_domain_array(array[2,4]);
 ERROR: return value of function with array return type is not a Python sequence
 CONTEXT: while creating return value
@@ -813,9 +813,9 @@ CREATE FUNCTION test_type_conversion_domain_array2(x ordered_pair_domain)
 RETURNS integer AS $$
 plpy.info(x, type(x))
 return x[1]
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 select test_type_conversion_domain_array2(array[2,4]);
-INFO: ([2, 4], <type 'list'>)
+INFO: ([2, 4], <class 'list'>)
  test_type_conversion_domain_array2
 ------------------------------------
  4
@@ -827,9 +827,9 @@ CREATE FUNCTION test_type_conversion_array_domain_array(x ordered_pair_domain[])
 RETURNS ordered_pair_domain AS $$
 plpy.info(x, type(x))
 return x[0]
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 select test_type_conversion_array_domain_array(array[array[2,4]::ordered_pair_domain]);
-INFO: ([[2, 4]], <type 'list'>)
+INFO: ([[2, 4]], <class 'list'>)
  test_type_conversion_array_domain_array
 -----------------------------------------
  {2,4}
 (1 row)
@@ -846,7 +846,7 @@ CREATE TABLE employee (
 INSERT INTO employee VALUES ('John', 100, 10), ('Mary', 200, 10);
 CREATE OR REPLACE FUNCTION test_composite_table_input(e employee) RETURNS integer AS $$
 return e['basesalary'] + e['bonus']
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT name, test_composite_table_input(employee.*) FROM employee;
  name | test_composite_table_input
 ------+----------------------------
@@ -876,7 +876,7 @@ CREATE TYPE named_pair AS (
 );
 CREATE OR REPLACE FUNCTION test_composite_type_input(p named_pair) RETURNS integer AS $$
 return sum(p.values())
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT test_composite_type_input(row(1, 2));
  test_composite_type_input
 ---------------------------
@@ -896,7 +896,7 @@ SELECT test_composite_type_input(row(1, 2));
 CREATE TYPE nnint_container AS (f1 int, f2 nnint);
 CREATE FUNCTION nnint_test(x int, y int) RETURNS nnint_container AS $$
 return {'f1': x, 'f2': y}
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT nnint_test(null, 3);
  nnint_test
 ------------
@@ -913,7 +913,7 @@ PL/Python function "nnint_test"
 CREATE DOMAIN ordered_named_pair AS named_pair_2 CHECK((VALUE).i <= (VALUE).j);
 CREATE FUNCTION read_ordered_named_pair(p ordered_named_pair) RETURNS integer AS $$
 return p['i'] + p['j']
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT read_ordered_named_pair(row(1, 2));
  read_ordered_named_pair
 -------------------------
@@ -924,7 +924,7 @@ SELECT read_ordered_named_pair(row(2, 1)); -- fail
 ERROR: value for domain ordered_named_pair violates check constraint "ordered_named_pair_check"
 CREATE FUNCTION build_ordered_named_pair(i int, j int) RETURNS ordered_named_pair AS $$
 return {'i': i, 'j': j}
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT build_ordered_named_pair(1,2);
  build_ordered_named_pair
 --------------------------
@@ -937,7 +937,7 @@ CONTEXT: while creating return value
 PL/Python function "build_ordered_named_pair"
 CREATE FUNCTION build_ordered_named_pairs(i int, j int) RETURNS ordered_named_pair[] AS $$
 return [{'i': i, 'j': j}, {'i': i, 'j': j+1}]
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT
build_ordered_named_pairs(1,2); build_ordered_named_pairs --------------------------- @@ -952,7 +952,7 @@ PL/Python function "build_ordered_named_pairs" -- Prepared statements -- CREATE OR REPLACE FUNCTION test_prep_bool_input() RETURNS int -LANGUAGE plpythonu +LANGUAGE plpython3u AS $$ plan = plpy.prepare("SELECT CASE WHEN $1 THEN 1 ELSE 0 END AS val", ['boolean']) rv = plpy.execute(plan, ['fa'], 5) # 'fa' is true in Python @@ -965,7 +965,7 @@ SELECT test_prep_bool_input(); -- 1 (1 row) CREATE OR REPLACE FUNCTION test_prep_bool_output() RETURNS bool -LANGUAGE plpythonu +LANGUAGE plpython3u AS $$ plan = plpy.prepare("SELECT $1 = 1 AS val", ['int']) rv = plpy.execute(plan, [0], 5) @@ -980,7 +980,7 @@ INFO: {'val': False} (1 row) CREATE OR REPLACE FUNCTION test_prep_bytea_input(bb bytea) RETURNS int -LANGUAGE plpythonu +LANGUAGE plpython3u AS $$ plan = plpy.prepare("SELECT octet_length($1) AS val", ['bytea']) rv = plpy.execute(plan, [bb], 5) @@ -993,7 +993,7 @@ SELECT test_prep_bytea_input(E'a\\000b'); -- 3 (embedded null formerly truncated (1 row) CREATE OR REPLACE FUNCTION test_prep_bytea_output() RETURNS bytea -LANGUAGE plpythonu +LANGUAGE plpython3u AS $$ plan = plpy.prepare("SELECT decode('aa00bb', 'hex') AS val") rv = plpy.execute(plan, [], 5) @@ -1001,7 +1001,7 @@ plpy.info(rv[0]) return rv[0]['val'] $$; SELECT test_prep_bytea_output(); -INFO: {'val': '\xaa\x00\xbb'} +INFO: {'val': b'\xaa\x00\xbb'} test_prep_bytea_output ------------------------ \xaa00bb diff --git a/src/pl/plpython/expected/plpython_types_3.out b/src/pl/plpython/expected/plpython_types_3.out deleted file mode 100644 index a6ec10d5e1..0000000000 --- a/src/pl/plpython/expected/plpython_types_3.out +++ /dev/null @@ -1,1009 +0,0 @@ --- --- Test data type behavior --- --- --- Base/common types --- -CREATE FUNCTION test_type_conversion_bool(x bool) RETURNS bool AS $$ -plpy.info(x, type(x)) -return x -$$ LANGUAGE plpython3u; -SELECT * FROM test_type_conversion_bool(true); -INFO: (True, ) - test_type_conversion_bool ---------------------------- - t -(1 row) - -SELECT * FROM test_type_conversion_bool(false); -INFO: (False, ) - test_type_conversion_bool ---------------------------- - f -(1 row) - -SELECT * FROM test_type_conversion_bool(null); -INFO: (None, ) - test_type_conversion_bool ---------------------------- - -(1 row) - --- test various other ways to express Booleans in Python -CREATE FUNCTION test_type_conversion_bool_other(n int) RETURNS bool AS $$ -# numbers -if n == 0: - ret = 0 -elif n == 1: - ret = 5 -# strings -elif n == 2: - ret = '' -elif n == 3: - ret = 'fa' # true in Python, false in PostgreSQL -# containers -elif n == 4: - ret = [] -elif n == 5: - ret = [0] -plpy.info(ret, not not ret) -return ret -$$ LANGUAGE plpython3u; -SELECT * FROM test_type_conversion_bool_other(0); -INFO: (0, False) - test_type_conversion_bool_other ---------------------------------- - f -(1 row) - -SELECT * FROM test_type_conversion_bool_other(1); -INFO: (5, True) - test_type_conversion_bool_other ---------------------------------- - t -(1 row) - -SELECT * FROM test_type_conversion_bool_other(2); -INFO: ('', False) - test_type_conversion_bool_other ---------------------------------- - f -(1 row) - -SELECT * FROM test_type_conversion_bool_other(3); -INFO: ('fa', True) - test_type_conversion_bool_other ---------------------------------- - t -(1 row) - -SELECT * FROM test_type_conversion_bool_other(4); -INFO: ([], False) - test_type_conversion_bool_other ---------------------------------- - f -(1 row) - -SELECT * FROM 
test_type_conversion_bool_other(5); -INFO: ([0], True) - test_type_conversion_bool_other ---------------------------------- - t -(1 row) - -CREATE FUNCTION test_type_conversion_char(x char) RETURNS char AS $$ -plpy.info(x, type(x)) -return x -$$ LANGUAGE plpython3u; -SELECT * FROM test_type_conversion_char('a'); -INFO: ('a', ) - test_type_conversion_char ---------------------------- - a -(1 row) - -SELECT * FROM test_type_conversion_char(null); -INFO: (None, ) - test_type_conversion_char ---------------------------- - -(1 row) - -CREATE FUNCTION test_type_conversion_int2(x int2) RETURNS int2 AS $$ -plpy.info(x, type(x)) -return x -$$ LANGUAGE plpython3u; -SELECT * FROM test_type_conversion_int2(100::int2); -INFO: (100, ) - test_type_conversion_int2 ---------------------------- - 100 -(1 row) - -SELECT * FROM test_type_conversion_int2(-100::int2); -INFO: (-100, ) - test_type_conversion_int2 ---------------------------- - -100 -(1 row) - -SELECT * FROM test_type_conversion_int2(null); -INFO: (None, ) - test_type_conversion_int2 ---------------------------- - -(1 row) - -CREATE FUNCTION test_type_conversion_int4(x int4) RETURNS int4 AS $$ -plpy.info(x, type(x)) -return x -$$ LANGUAGE plpython3u; -SELECT * FROM test_type_conversion_int4(100); -INFO: (100, ) - test_type_conversion_int4 ---------------------------- - 100 -(1 row) - -SELECT * FROM test_type_conversion_int4(-100); -INFO: (-100, ) - test_type_conversion_int4 ---------------------------- - -100 -(1 row) - -SELECT * FROM test_type_conversion_int4(null); -INFO: (None, ) - test_type_conversion_int4 ---------------------------- - -(1 row) - -CREATE FUNCTION test_type_conversion_int8(x int8) RETURNS int8 AS $$ -plpy.info(x, type(x)) -return x -$$ LANGUAGE plpython3u; -SELECT * FROM test_type_conversion_int8(100); -INFO: (100, ) - test_type_conversion_int8 ---------------------------- - 100 -(1 row) - -SELECT * FROM test_type_conversion_int8(-100); -INFO: (-100, ) - test_type_conversion_int8 ---------------------------- - -100 -(1 row) - -SELECT * FROM test_type_conversion_int8(5000000000); -INFO: (5000000000, ) - test_type_conversion_int8 ---------------------------- - 5000000000 -(1 row) - -SELECT * FROM test_type_conversion_int8(null); -INFO: (None, ) - test_type_conversion_int8 ---------------------------- - -(1 row) - -CREATE FUNCTION test_type_conversion_numeric(x numeric) RETURNS numeric AS $$ -# print just the class name, not the type, to avoid differences -# between decimal and cdecimal -plpy.info(str(x), x.__class__.__name__) -return x -$$ LANGUAGE plpython3u; -SELECT * FROM test_type_conversion_numeric(100); -INFO: ('100', 'Decimal') - test_type_conversion_numeric ------------------------------- - 100 -(1 row) - -SELECT * FROM test_type_conversion_numeric(-100); -INFO: ('-100', 'Decimal') - test_type_conversion_numeric ------------------------------- - -100 -(1 row) - -SELECT * FROM test_type_conversion_numeric(100.0); -INFO: ('100.0', 'Decimal') - test_type_conversion_numeric ------------------------------- - 100.0 -(1 row) - -SELECT * FROM test_type_conversion_numeric(100.00); -INFO: ('100.00', 'Decimal') - test_type_conversion_numeric ------------------------------- - 100.00 -(1 row) - -SELECT * FROM test_type_conversion_numeric(5000000000.5); -INFO: ('5000000000.5', 'Decimal') - test_type_conversion_numeric ------------------------------- - 5000000000.5 -(1 row) - -SELECT * FROM test_type_conversion_numeric(1234567890.0987654321); -INFO: ('1234567890.0987654321', 'Decimal') - test_type_conversion_numeric 
------------------------------- - 1234567890.0987654321 -(1 row) - -SELECT * FROM test_type_conversion_numeric(-1234567890.0987654321); -INFO: ('-1234567890.0987654321', 'Decimal') - test_type_conversion_numeric ------------------------------- - -1234567890.0987654321 -(1 row) - -SELECT * FROM test_type_conversion_numeric(null); -INFO: ('None', 'NoneType') - test_type_conversion_numeric ------------------------------- - -(1 row) - -CREATE FUNCTION test_type_conversion_float4(x float4) RETURNS float4 AS $$ -plpy.info(x, type(x)) -return x -$$ LANGUAGE plpython3u; -SELECT * FROM test_type_conversion_float4(100); -INFO: (100.0, ) - test_type_conversion_float4 ------------------------------ - 100 -(1 row) - -SELECT * FROM test_type_conversion_float4(-100); -INFO: (-100.0, ) - test_type_conversion_float4 ------------------------------ - -100 -(1 row) - -SELECT * FROM test_type_conversion_float4(5000.5); -INFO: (5000.5, ) - test_type_conversion_float4 ------------------------------ - 5000.5 -(1 row) - -SELECT * FROM test_type_conversion_float4(null); -INFO: (None, ) - test_type_conversion_float4 ------------------------------ - -(1 row) - -CREATE FUNCTION test_type_conversion_float8(x float8) RETURNS float8 AS $$ -plpy.info(x, type(x)) -return x -$$ LANGUAGE plpython3u; -SELECT * FROM test_type_conversion_float8(100); -INFO: (100.0, ) - test_type_conversion_float8 ------------------------------ - 100 -(1 row) - -SELECT * FROM test_type_conversion_float8(-100); -INFO: (-100.0, ) - test_type_conversion_float8 ------------------------------ - -100 -(1 row) - -SELECT * FROM test_type_conversion_float8(5000000000.5); -INFO: (5000000000.5, ) - test_type_conversion_float8 ------------------------------ - 5000000000.5 -(1 row) - -SELECT * FROM test_type_conversion_float8(null); -INFO: (None, ) - test_type_conversion_float8 ------------------------------ - -(1 row) - -SELECT * FROM test_type_conversion_float8(100100100.654321); -INFO: (100100100.654321, ) - test_type_conversion_float8 ------------------------------ - 100100100.654321 -(1 row) - -CREATE FUNCTION test_type_conversion_oid(x oid) RETURNS oid AS $$ -plpy.info(x, type(x)) -return x -$$ LANGUAGE plpython3u; -SELECT * FROM test_type_conversion_oid(100); -INFO: (100, ) - test_type_conversion_oid --------------------------- - 100 -(1 row) - -SELECT * FROM test_type_conversion_oid(2147483649); -INFO: (2147483649, ) - test_type_conversion_oid --------------------------- - 2147483649 -(1 row) - -SELECT * FROM test_type_conversion_oid(null); -INFO: (None, ) - test_type_conversion_oid --------------------------- - -(1 row) - -CREATE FUNCTION test_type_conversion_text(x text) RETURNS text AS $$ -plpy.info(x, type(x)) -return x -$$ LANGUAGE plpython3u; -SELECT * FROM test_type_conversion_text('hello world'); -INFO: ('hello world', ) - test_type_conversion_text ---------------------------- - hello world -(1 row) - -SELECT * FROM test_type_conversion_text(null); -INFO: (None, ) - test_type_conversion_text ---------------------------- - -(1 row) - -CREATE FUNCTION test_type_conversion_bytea(x bytea) RETURNS bytea AS $$ -plpy.info(x, type(x)) -return x -$$ LANGUAGE plpython3u; -SELECT * FROM test_type_conversion_bytea('hello world'); -INFO: (b'hello world', ) - test_type_conversion_bytea ----------------------------- - \x68656c6c6f20776f726c64 -(1 row) - -SELECT * FROM test_type_conversion_bytea(E'null\\000byte'); -INFO: (b'null\x00byte', ) - test_type_conversion_bytea ----------------------------- - \x6e756c6c0062797465 -(1 row) - -SELECT * FROM 
test_type_conversion_bytea(null); -INFO: (None, ) - test_type_conversion_bytea ----------------------------- - -(1 row) - -CREATE FUNCTION test_type_marshal() RETURNS bytea AS $$ -import marshal -return marshal.dumps('hello world') -$$ LANGUAGE plpython3u; -CREATE FUNCTION test_type_unmarshal(x bytea) RETURNS text AS $$ -import marshal -try: - return marshal.loads(x) -except ValueError as e: - return 'FAILED: ' + str(e) -$$ LANGUAGE plpython3u; -SELECT test_type_unmarshal(x) FROM test_type_marshal() x; - test_type_unmarshal ---------------------- - hello world -(1 row) - --- --- Domains --- -CREATE DOMAIN booltrue AS bool CHECK (VALUE IS TRUE OR VALUE IS NULL); -CREATE FUNCTION test_type_conversion_booltrue(x booltrue, y bool) RETURNS booltrue AS $$ -return y -$$ LANGUAGE plpython3u; -SELECT * FROM test_type_conversion_booltrue(true, true); - test_type_conversion_booltrue -------------------------------- - t -(1 row) - -SELECT * FROM test_type_conversion_booltrue(false, true); -ERROR: value for domain booltrue violates check constraint "booltrue_check" -SELECT * FROM test_type_conversion_booltrue(true, false); -ERROR: value for domain booltrue violates check constraint "booltrue_check" -CONTEXT: while creating return value -PL/Python function "test_type_conversion_booltrue" -CREATE DOMAIN uint2 AS int2 CHECK (VALUE >= 0); -CREATE FUNCTION test_type_conversion_uint2(x uint2, y int) RETURNS uint2 AS $$ -plpy.info(x, type(x)) -return y -$$ LANGUAGE plpython3u; -SELECT * FROM test_type_conversion_uint2(100::uint2, 50); -INFO: (100, ) - test_type_conversion_uint2 ----------------------------- - 50 -(1 row) - -SELECT * FROM test_type_conversion_uint2(100::uint2, -50); -INFO: (100, ) -ERROR: value for domain uint2 violates check constraint "uint2_check" -CONTEXT: while creating return value -PL/Python function "test_type_conversion_uint2" -SELECT * FROM test_type_conversion_uint2(null, 1); -INFO: (None, ) - test_type_conversion_uint2 ----------------------------- - 1 -(1 row) - -CREATE DOMAIN nnint AS int CHECK (VALUE IS NOT NULL); -CREATE FUNCTION test_type_conversion_nnint(x nnint, y int) RETURNS nnint AS $$ -return y -$$ LANGUAGE plpython3u; -SELECT * FROM test_type_conversion_nnint(10, 20); - test_type_conversion_nnint ----------------------------- - 20 -(1 row) - -SELECT * FROM test_type_conversion_nnint(null, 20); -ERROR: value for domain nnint violates check constraint "nnint_check" -SELECT * FROM test_type_conversion_nnint(10, null); -ERROR: value for domain nnint violates check constraint "nnint_check" -CONTEXT: while creating return value -PL/Python function "test_type_conversion_nnint" -CREATE DOMAIN bytea10 AS bytea CHECK (octet_length(VALUE) = 10 AND VALUE IS NOT NULL); -CREATE FUNCTION test_type_conversion_bytea10(x bytea10, y bytea) RETURNS bytea10 AS $$ -plpy.info(x, type(x)) -return y -$$ LANGUAGE plpython3u; -SELECT * FROM test_type_conversion_bytea10('hello wold', 'hello wold'); -INFO: (b'hello wold', ) - test_type_conversion_bytea10 ------------------------------- - \x68656c6c6f20776f6c64 -(1 row) - -SELECT * FROM test_type_conversion_bytea10('hello world', 'hello wold'); -ERROR: value for domain bytea10 violates check constraint "bytea10_check" -SELECT * FROM test_type_conversion_bytea10('hello word', 'hello world'); -INFO: (b'hello word', ) -ERROR: value for domain bytea10 violates check constraint "bytea10_check" -CONTEXT: while creating return value -PL/Python function "test_type_conversion_bytea10" -SELECT * FROM test_type_conversion_bytea10(null, 'hello word'); -ERROR: 
value for domain bytea10 violates check constraint "bytea10_check" -SELECT * FROM test_type_conversion_bytea10('hello word', null); -INFO: (b'hello word', ) -ERROR: value for domain bytea10 violates check constraint "bytea10_check" -CONTEXT: while creating return value -PL/Python function "test_type_conversion_bytea10" --- --- Arrays --- -CREATE FUNCTION test_type_conversion_array_int4(x int4[]) RETURNS int4[] AS $$ -plpy.info(x, type(x)) -return x -$$ LANGUAGE plpython3u; -SELECT * FROM test_type_conversion_array_int4(ARRAY[0, 100]); -INFO: ([0, 100], ) - test_type_conversion_array_int4 ---------------------------------- - {0,100} -(1 row) - -SELECT * FROM test_type_conversion_array_int4(ARRAY[0,-100,55]); -INFO: ([0, -100, 55], ) - test_type_conversion_array_int4 ---------------------------------- - {0,-100,55} -(1 row) - -SELECT * FROM test_type_conversion_array_int4(ARRAY[NULL,1]); -INFO: ([None, 1], ) - test_type_conversion_array_int4 ---------------------------------- - {NULL,1} -(1 row) - -SELECT * FROM test_type_conversion_array_int4(ARRAY[]::integer[]); -INFO: ([], ) - test_type_conversion_array_int4 ---------------------------------- - {} -(1 row) - -SELECT * FROM test_type_conversion_array_int4(NULL); -INFO: (None, ) - test_type_conversion_array_int4 ---------------------------------- - -(1 row) - -SELECT * FROM test_type_conversion_array_int4(ARRAY[[1,2,3],[4,5,6]]); -INFO: ([[1, 2, 3], [4, 5, 6]], ) - test_type_conversion_array_int4 ---------------------------------- - {{1,2,3},{4,5,6}} -(1 row) - -SELECT * FROM test_type_conversion_array_int4(ARRAY[[[1,2,NULL],[NULL,5,6]],[[NULL,8,9],[10,11,12]]]); -INFO: ([[[1, 2, None], [None, 5, 6]], [[None, 8, 9], [10, 11, 12]]], ) - test_type_conversion_array_int4 ---------------------------------------------------- - {{{1,2,NULL},{NULL,5,6}},{{NULL,8,9},{10,11,12}}} -(1 row) - -SELECT * FROM test_type_conversion_array_int4('[2:4]={1,2,3}'); -INFO: ([1, 2, 3], ) - test_type_conversion_array_int4 ---------------------------------- - {1,2,3} -(1 row) - -CREATE FUNCTION test_type_conversion_array_int8(x int8[]) RETURNS int8[] AS $$ -plpy.info(x, type(x)) -return x -$$ LANGUAGE plpython3u; -SELECT * FROM test_type_conversion_array_int8(ARRAY[[[1,2,NULL],[NULL,5,6]],[[NULL,8,9],[10,11,12]]]::int8[]); -INFO: ([[[1, 2, None], [None, 5, 6]], [[None, 8, 9], [10, 11, 12]]], ) - test_type_conversion_array_int8 ---------------------------------------------------- - {{{1,2,NULL},{NULL,5,6}},{{NULL,8,9},{10,11,12}}} -(1 row) - -CREATE FUNCTION test_type_conversion_array_date(x date[]) RETURNS date[] AS $$ -plpy.info(x, type(x)) -return x -$$ LANGUAGE plpython3u; -SELECT * FROM test_type_conversion_array_date(ARRAY[[['2016-09-21','2016-09-22',NULL],[NULL,'2016-10-21','2016-10-22']], - [[NULL,'2016-11-21','2016-10-21'],['2015-09-21','2015-09-22','2014-09-21']]]::date[]); -INFO: ([[['09-21-2016', '09-22-2016', None], [None, '10-21-2016', '10-22-2016']], [[None, '11-21-2016', '10-21-2016'], ['09-21-2015', '09-22-2015', '09-21-2014']]], ) - test_type_conversion_array_date ---------------------------------------------------------------------------------------------------------------------------------- - {{{09-21-2016,09-22-2016,NULL},{NULL,10-21-2016,10-22-2016}},{{NULL,11-21-2016,10-21-2016},{09-21-2015,09-22-2015,09-21-2014}}} -(1 row) - -CREATE FUNCTION test_type_conversion_array_timestamp(x timestamp[]) RETURNS timestamp[] AS $$ -plpy.info(x, type(x)) -return x -$$ LANGUAGE plpython3u; -SELECT * FROM 
-SELECT * FROM test_type_conversion_array_timestamp(ARRAY[[['2016-09-21 15:34:24.078792-04','2016-10-22 11:34:24.078795-04',NULL],
-                                                   [NULL,'2016-10-21 11:34:25.078792-04','2016-10-21 11:34:24.098792-04']],
-                                                  [[NULL,'2016-01-21 11:34:24.078792-04','2016-11-21 11:34:24.108792-04'],
-                                                   ['2015-09-21 11:34:24.079792-04','2014-09-21 11:34:24.078792-04','2013-09-21 11:34:24.078792-04']]]::timestamp[]);
-INFO: ([[['Wed Sep 21 15:34:24.078792 2016', 'Sat Oct 22 11:34:24.078795 2016', None], [None, 'Fri Oct 21 11:34:25.078792 2016', 'Fri Oct 21 11:34:24.098792 2016']], [[None, 'Thu Jan 21 11:34:24.078792 2016', 'Mon Nov 21 11:34:24.108792 2016'], ['Mon Sep 21 11:34:24.079792 2015', 'Sun Sep 21 11:34:24.078792 2014', 'Sat Sep 21 11:34:24.078792 2013']]], <class 'list'>)
- test_type_conversion_array_timestamp
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- {{{"Wed Sep 21 15:34:24.078792 2016","Sat Oct 22 11:34:24.078795 2016",NULL},{NULL,"Fri Oct 21 11:34:25.078792 2016","Fri Oct 21 11:34:24.098792 2016"}},{{NULL,"Thu Jan 21 11:34:24.078792 2016","Mon Nov 21 11:34:24.108792 2016"},{"Mon Sep 21 11:34:24.079792 2015","Sun Sep 21 11:34:24.078792 2014","Sat Sep 21 11:34:24.078792 2013"}}}
-(1 row)
-
-CREATE OR REPLACE FUNCTION pyreturnmultidemint4(h int4, i int4, j int4, k int4 ) RETURNS int4[] AS $BODY$
-m = [[[[x for x in range(h)] for y in range(i)] for z in range(j)] for w in range(k)]
-plpy.info(m, type(m))
-return m
-$BODY$ LANGUAGE plpython3u;
-select pyreturnmultidemint4(8,5,3,2);
-INFO: ([[[[0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7]], [[0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7]], [[0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7]]], [[[0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7]], [[0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7]], [[0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7]]]], <class 'list'>)
- pyreturnmultidemint4
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- {{{{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7}},{{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7}},{{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7}}},{{{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7}},{{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7}},{{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7},{0,1,2,3,4,5,6,7}}}}
-(1 row)
-
-CREATE OR REPLACE FUNCTION pyreturnmultidemint8(h int4, i int4, j int4, k int4 ) RETURNS int8[] AS $BODY$
-m = [[[[x for x in range(h)] for y in range(i)] for z in range(j)] for w in range(k)]
-plpy.info(m, type(m))
-return m
-$BODY$ LANGUAGE plpython3u;
-select pyreturnmultidemint8(5,5,3,2);
-INFO: ([[[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]], [[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]], [[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]], [[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]], [[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]], [[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]]], <class 'list'>)
- pyreturnmultidemint8
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- {{{{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4}},{{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4}},{{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4}}},{{{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4}},{{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4}},{{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4},{0,1,2,3,4}}}}
-(1 row)
-
-CREATE OR REPLACE FUNCTION pyreturnmultidemfloat4(h int4, i int4, j int4, k int4 ) RETURNS float4[] AS $BODY$
-m = [[[[x for x in range(h)] for y in range(i)] for z in range(j)] for w in range(k)]
-plpy.info(m, type(m))
-return m
-$BODY$ LANGUAGE plpython3u;
-select pyreturnmultidemfloat4(6,5,3,2);
-INFO: ([[[[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]], [[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]], [[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]]], [[[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]], [[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]], [[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]]]], <class 'list'>)
- pyreturnmultidemfloat4
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- {{{{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5}},{{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5}},{{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5}}},{{{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5}},{{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5}},{{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5},{0,1,2,3,4,5}}}}
-(1 row)
-
-CREATE OR REPLACE FUNCTION pyreturnmultidemfloat8(h int4, i int4, j int4, k int4 ) RETURNS float8[] AS $BODY$
-m = [[[[x for x in range(h)] for y in range(i)] for z in range(j)] for w in range(k)]
-plpy.info(m, type(m))
-return m
-$BODY$ LANGUAGE plpython3u;
-select pyreturnmultidemfloat8(7,5,3,2);
-INFO: ([[[[0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6]], [[0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6]], [[0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6]]], [[[0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6]], [[0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6]], [[0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6]]]], <class 'list'>)
- pyreturnmultidemfloat8
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- {{{{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6}},{{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6}},{{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6}}},{{{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6}},{{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6}},{{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6},{0,1,2,3,4,5,6}}}}
-(1 row)
-
-CREATE FUNCTION test_type_conversion_array_text(x text[]) RETURNS text[] AS $$
-plpy.info(x, type(x))
-return x
-$$ LANGUAGE plpython3u;
-SELECT * FROM test_type_conversion_array_text(ARRAY['foo', 'bar']);
-INFO: (['foo', 'bar'], <class 'list'>)
- test_type_conversion_array_text
----------------------------------
- {foo,bar}
-(1 row)
-
-SELECT * FROM test_type_conversion_array_text(ARRAY[['foo', 'bar'],['foo2', 'bar2']]);
-INFO: ([['foo', 'bar'], ['foo2', 'bar2']], <class 'list'>)
- test_type_conversion_array_text
----------------------------------
- {{foo,bar},{foo2,bar2}}
-(1 row)
-
-CREATE FUNCTION test_type_conversion_array_bytea(x bytea[]) RETURNS bytea[] AS $$
-plpy.info(x, type(x))
-return x
-$$ LANGUAGE plpython3u;
-SELECT * FROM test_type_conversion_array_bytea(ARRAY[E'\\xdeadbeef'::bytea, NULL]);
-INFO: ([b'\xde\xad\xbe\xef', None], <class 'list'>)
- test_type_conversion_array_bytea
----------------------------------- 
- {"\\xdeadbeef",NULL}
-(1 row)
-
-CREATE FUNCTION test_type_conversion_array_mixed1() RETURNS text[] AS $$
-return [123, 'abc']
-$$ LANGUAGE plpython3u;
-SELECT * FROM test_type_conversion_array_mixed1();
- test_type_conversion_array_mixed1
------------------------------------
- {123,abc}
-(1 row)
-
-CREATE FUNCTION test_type_conversion_array_mixed2() RETURNS int[] AS $$
-return [123, 'abc']
-$$ LANGUAGE plpython3u;
-SELECT * FROM test_type_conversion_array_mixed2();
-ERROR: invalid input syntax for type integer: "abc"
-CONTEXT: while creating return value
-PL/Python function "test_type_conversion_array_mixed2"
-CREATE FUNCTION test_type_conversion_mdarray_malformed() RETURNS int[] AS $$
-return [[1,2,3],[4,5]]
-$$ LANGUAGE plpython3u;
-SELECT * FROM test_type_conversion_mdarray_malformed();
-ERROR: wrong length of inner sequence: has length 2, but 3 was expected
-DETAIL: To construct a multidimensional array, the inner sequences must all have the same length.
-CONTEXT: while creating return value
-PL/Python function "test_type_conversion_mdarray_malformed"
-CREATE FUNCTION test_type_conversion_mdarray_toodeep() RETURNS int[] AS $$
-return [[[[[[[1]]]]]]]
-$$ LANGUAGE plpython3u;
-SELECT * FROM test_type_conversion_mdarray_toodeep();
-ERROR: number of array dimensions exceeds the maximum allowed (6)
-CONTEXT: while creating return value
-PL/Python function "test_type_conversion_mdarray_toodeep"
-CREATE FUNCTION test_type_conversion_array_record() RETURNS type_record[] AS $$
-return [{'first': 'one', 'second': 42}, {'first': 'two', 'second': 11}]
-$$ LANGUAGE plpython3u;
-SELECT * FROM test_type_conversion_array_record();
- test_type_conversion_array_record
------------------------------------
- {"(one,42)","(two,11)"}
-(1 row)
-
-CREATE FUNCTION test_type_conversion_array_string() RETURNS text[] AS $$
-return 'abc'
-$$ LANGUAGE plpython3u;
-SELECT * FROM test_type_conversion_array_string();
- test_type_conversion_array_string
------------------------------------
- {a,b,c}
-(1 row)
-
-CREATE FUNCTION test_type_conversion_array_tuple() RETURNS text[] AS $$
-return ('abc', 'def')
-$$ LANGUAGE plpython3u;
-SELECT * FROM test_type_conversion_array_tuple();
- test_type_conversion_array_tuple
----------------------------------- 
- {abc,def}
-(1 row)
-
-CREATE FUNCTION test_type_conversion_array_error() RETURNS int[] AS $$
-return 5
-$$ LANGUAGE plpython3u;
-SELECT * FROM test_type_conversion_array_error();
-ERROR: return value of function with array return type is not a Python sequence
-CONTEXT: while creating return value
-PL/Python function "test_type_conversion_array_error"
---
--- Domains over arrays
---
-CREATE DOMAIN ordered_pair_domain AS integer[] CHECK (array_length(VALUE,1)=2 AND VALUE[1] < VALUE[2]);
-CREATE FUNCTION test_type_conversion_array_domain(x ordered_pair_domain) RETURNS ordered_pair_domain AS $$
-plpy.info(x, type(x))
-return x
-$$ LANGUAGE plpython3u;
-SELECT * FROM test_type_conversion_array_domain(ARRAY[0, 100]::ordered_pair_domain);
-INFO: ([0, 100], <class 'list'>)
- test_type_conversion_array_domain
------------------------------------
- {0,100}
-(1 row)
-
-SELECT * FROM test_type_conversion_array_domain(NULL::ordered_pair_domain);
-INFO: (None, <class 'NoneType'>)
- test_type_conversion_array_domain
------------------------------------
- 
-(1 row)
-
-CREATE FUNCTION test_type_conversion_array_domain_check_violation() RETURNS ordered_pair_domain AS $$
-return [2,1]
-$$ LANGUAGE plpython3u;
-SELECT * FROM test_type_conversion_array_domain_check_violation();
-ERROR: value for domain ordered_pair_domain violates check constraint "ordered_pair_domain_check"
-CONTEXT: while creating return value
-PL/Python function "test_type_conversion_array_domain_check_violation"
---
--- Arrays of domains
---
-CREATE FUNCTION test_read_uint2_array(x uint2[]) RETURNS uint2 AS $$
-plpy.info(x, type(x))
-return x[0]
-$$ LANGUAGE plpythonu;
-select test_read_uint2_array(array[1::uint2]);
-INFO: ([1], <class 'list'>)
- test_read_uint2_array
-----------------------
- 1
-(1 row)
-
-CREATE FUNCTION test_build_uint2_array(x int2) RETURNS uint2[] AS $$
-return [x, x]
-$$ LANGUAGE plpythonu;
-select test_build_uint2_array(1::int2);
- test_build_uint2_array
------------------------- 
- {1,1}
-(1 row)
-
-select test_build_uint2_array(-1::int2); -- fail
-ERROR: value for domain uint2 violates check constraint "uint2_check"
-CONTEXT: while creating return value
-PL/Python function "test_build_uint2_array"
---
--- ideally this would work, but for now it doesn't, because the return value
--- is [[2,4], [2,4]] which our conversion code thinks should become a 2-D
--- integer array, not an array of arrays.
---
-CREATE FUNCTION test_type_conversion_domain_array(x integer[])
-  RETURNS ordered_pair_domain[] AS $$
-return [x, x]
-$$ LANGUAGE plpythonu;
-select test_type_conversion_domain_array(array[2,4]);
-ERROR: return value of function with array return type is not a Python sequence
-CONTEXT: while creating return value
-PL/Python function "test_type_conversion_domain_array"
-select test_type_conversion_domain_array(array[4,2]); -- fail
-ERROR: return value of function with array return type is not a Python sequence
-CONTEXT: while creating return value
-PL/Python function "test_type_conversion_domain_array"
-CREATE FUNCTION test_type_conversion_domain_array2(x ordered_pair_domain)
-  RETURNS integer AS $$
-plpy.info(x, type(x))
-return x[1]
-$$ LANGUAGE plpythonu;
-select test_type_conversion_domain_array2(array[2,4]);
-INFO: ([2, 4], <class 'list'>)
- test_type_conversion_domain_array2
------------------------------------- 
- 4
-(1 row)
-
-select test_type_conversion_domain_array2(array[4,2]); -- fail
-ERROR: value for domain ordered_pair_domain violates check constraint "ordered_pair_domain_check"
-CREATE FUNCTION test_type_conversion_array_domain_array(x ordered_pair_domain[])
-  RETURNS ordered_pair_domain AS $$
-plpy.info(x, type(x))
-return x[0]
-$$ LANGUAGE plpythonu;
-select test_type_conversion_array_domain_array(array[array[2,4]::ordered_pair_domain]);
-INFO: ([[2, 4]], <class 'list'>)
- test_type_conversion_array_domain_array
------------------------------------------
- {2,4}
-(1 row)
-
----
---- Composite types
----
-CREATE TABLE employee (
-    name text,
-    basesalary integer,
-    bonus integer
-);
-INSERT INTO employee VALUES ('John', 100, 10), ('Mary', 200, 10);
-CREATE OR REPLACE FUNCTION test_composite_table_input(e employee) RETURNS integer AS $$
-return e['basesalary'] + e['bonus']
-$$ LANGUAGE plpython3u;
-SELECT name, test_composite_table_input(employee.*) FROM employee;
- name | test_composite_table_input
-------+----------------------------
- John |                        110
- Mary |                        210
-(2 rows)
-
-ALTER TABLE employee DROP bonus;
-SELECT name, test_composite_table_input(employee.*) FROM employee;
-ERROR: KeyError: 'bonus'
-CONTEXT: Traceback (most recent call last):
-  PL/Python function "test_composite_table_input", line 2, in <module>
-    return e['basesalary'] + e['bonus']
-PL/Python function "test_composite_table_input"
-ALTER TABLE employee ADD bonus integer;
-UPDATE employee SET bonus = 10;
-SELECT name, test_composite_table_input(employee.*) FROM employee;
- name | test_composite_table_input
-------+----------------------------
- John |                        110
- Mary |                        210
-(2 rows)
-
-CREATE TYPE named_pair AS (
-    i integer,
-    j integer
-);
-CREATE OR REPLACE FUNCTION test_composite_type_input(p named_pair) RETURNS integer AS $$
-return sum(p.values())
-$$ LANGUAGE plpython3u;
-SELECT test_composite_type_input(row(1, 2));
- test_composite_type_input
---------------------------- 
-                         3
-(1 row)
-
-ALTER TYPE named_pair RENAME TO named_pair_2;
-SELECT test_composite_type_input(row(1, 2));
- test_composite_type_input
---------------------------- 
-                         3
-(1 row)
-
---
--- Domains within composite
---
-CREATE TYPE nnint_container AS (f1 int, f2 nnint);
-CREATE FUNCTION nnint_test(x int, y int) RETURNS nnint_container AS $$
-return {'f1': x, 'f2': y}
-$$ LANGUAGE plpythonu;
-SELECT nnint_test(null, 3);
- nnint_test
------------- 
- (,3)
-(1 row)
-
-SELECT nnint_test(3, null); -- fail
-ERROR: value for domain nnint violates check constraint "nnint_check"
-CONTEXT: while creating return value
-PL/Python function "nnint_test"
---
--- Domains of composite
---
-CREATE DOMAIN ordered_named_pair AS named_pair_2 CHECK((VALUE).i <= (VALUE).j);
-CREATE FUNCTION read_ordered_named_pair(p ordered_named_pair) RETURNS integer AS $$
-return p['i'] + p['j']
-$$ LANGUAGE plpythonu;
-SELECT read_ordered_named_pair(row(1, 2));
- read_ordered_named_pair
------------------------- -
-                       3
-(1 row)
-
-SELECT read_ordered_named_pair(row(2, 1)); -- fail
-ERROR: value for domain ordered_named_pair violates check constraint "ordered_named_pair_check"
-CREATE FUNCTION build_ordered_named_pair(i int, j int) RETURNS ordered_named_pair AS $$
-return {'i': i, 'j': j}
-$$ LANGUAGE plpythonu;
-SELECT build_ordered_named_pair(1,2);
- build_ordered_named_pair
--------------------------- 
- (1,2)
-(1 row)
-
-SELECT build_ordered_named_pair(2,1); -- fail
-ERROR: value for domain ordered_named_pair violates check constraint "ordered_named_pair_check"
-CONTEXT: while creating return value
-PL/Python function "build_ordered_named_pair"
-CREATE FUNCTION build_ordered_named_pairs(i int, j int) RETURNS ordered_named_pair[] AS $$
-return [{'i': i, 'j': j}, {'i': i, 'j': j+1}]
-$$ LANGUAGE plpythonu;
-SELECT build_ordered_named_pairs(1,2);
- build_ordered_named_pairs
---------------------------- 
- {"(1,2)","(1,3)"}
-(1 row)
-
-SELECT build_ordered_named_pairs(2,1); -- fail
-ERROR: value for domain ordered_named_pair violates check constraint "ordered_named_pair_check"
-CONTEXT: while creating return value
-PL/Python function "build_ordered_named_pairs"
---
--- Prepared statements
---
-CREATE OR REPLACE FUNCTION test_prep_bool_input() RETURNS int
-LANGUAGE plpython3u
-AS $$
-plan = plpy.prepare("SELECT CASE WHEN $1 THEN 1 ELSE 0 END AS val", ['boolean'])
-rv = plpy.execute(plan, ['fa'], 5) # 'fa' is true in Python
-return rv[0]['val']
-$$;
-SELECT test_prep_bool_input(); -- 1
- test_prep_bool_input
---------------------- -
-                    1
-(1 row)
-
-CREATE OR REPLACE FUNCTION test_prep_bool_output() RETURNS bool
-LANGUAGE plpython3u
-AS $$
-plan = plpy.prepare("SELECT $1 = 1 AS val", ['int'])
-rv = plpy.execute(plan, [0], 5)
-plpy.info(rv[0])
-return rv[0]['val']
-$$;
-SELECT test_prep_bool_output(); -- false
-INFO: {'val': False}
- test_prep_bool_output
------------------------ 
- f
-(1 row)
-
-CREATE OR REPLACE FUNCTION test_prep_bytea_input(bb bytea) RETURNS int
-LANGUAGE plpython3u
-AS $$
-plan = plpy.prepare("SELECT octet_length($1) AS val", ['bytea'])
-rv = plpy.execute(plan, [bb], 5)
-return rv[0]['val']
-$$;
-SELECT test_prep_bytea_input(E'a\\000b'); -- 3 (embedded null formerly truncated value)
- test_prep_bytea_input
----------------------- -
-                     3
-(1 row)
-
-CREATE OR REPLACE FUNCTION test_prep_bytea_output() RETURNS bytea
-LANGUAGE plpython3u
-AS $$
-plan = plpy.prepare("SELECT decode('aa00bb', 'hex') AS val")
-rv = plpy.execute(plan, [], 5)
-plpy.info(rv[0])
-return rv[0]['val']
-$$;
-SELECT test_prep_bytea_output();
-INFO: {'val': b'\xaa\x00\xbb'}
- test_prep_bytea_output
------------------------- 
- \xaa00bb
-(1 row)
-
diff --git a/src/pl/plpython/expected/plpython_unicode.out b/src/pl/plpython/expected/plpython_unicode.out
index c7546dd458..fd54b0b88e 100644
--- a/src/pl/plpython/expected/plpython_unicode.out
+++ b/src/pl/plpython/expected/plpython_unicode.out
@@ -11,24 +11,24 @@ CREATE TABLE unicode_test (
 	testvalue text NOT NULL
 );
 CREATE FUNCTION unicode_return() RETURNS text AS E'
-return u"\\xA0"
-' LANGUAGE plpythonu;
+return "\\xA0"
+' LANGUAGE plpython3u;
 CREATE FUNCTION unicode_trigger() RETURNS trigger AS E'
-TD["new"]["testvalue"] = u"\\xA0"
+TD["new"]["testvalue"] = "\\xA0"
 return "MODIFY"
-' LANGUAGE plpythonu;
+' LANGUAGE plpython3u;
 CREATE TRIGGER unicode_test_bi BEFORE INSERT ON unicode_test
   FOR EACH ROW EXECUTE PROCEDURE unicode_trigger();
 CREATE FUNCTION unicode_plan1() RETURNS text AS E'
 plan = plpy.prepare("SELECT $1 AS testvalue", ["text"])
-rv = plpy.execute(plan, [u"\\xA0"], 1)
+rv = plpy.execute(plan, ["\\xA0"], 1)
 return rv[0]["testvalue"]
-' LANGUAGE plpythonu;
+' LANGUAGE plpython3u;
 CREATE FUNCTION unicode_plan2() RETURNS text AS E'
-plan = plpy.prepare("SELECT $1 || $2 AS testvalue", ["text", u"text"])
+plan = plpy.prepare("SELECT $1 || $2 AS testvalue", ["text", "text"])
 rv = plpy.execute(plan, ["foo", "bar"], 1)
 return rv[0]["testvalue"]
-' LANGUAGE plpythonu;
+' LANGUAGE plpython3u;
 SELECT unicode_return();
  unicode_return
 ----------------
diff --git a/src/pl/plpython/expected/plpython_void.out b/src/pl/plpython/expected/plpython_void.out
index 1080d12d6b..07d0760783 100644
--- a/src/pl/plpython/expected/plpython_void.out
+++ b/src/pl/plpython/expected/plpython_void.out
@@ -3,14 +3,14 @@
 --
 CREATE FUNCTION test_void_func1() RETURNS void AS $$
 x = 10
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 -- illegal: can't return non-None value in void-returning func
 CREATE FUNCTION test_void_func2() RETURNS void AS $$
 return 10
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 CREATE FUNCTION test_return_none() RETURNS int AS $$
 None
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 -- Tests for functions returning void
 SELECT test_void_func1(), test_void_func1() IS NULL AS "is null";
  test_void_func1 | is null
diff --git a/src/pl/plpython/regress-python3-mangle.mk b/src/pl/plpython/regress-python3-mangle.mk
deleted file mode 100644
index a785818a17..0000000000
--- a/src/pl/plpython/regress-python3-mangle.mk
+++ /dev/null
@@ -1,38 +0,0 @@
-ifeq ($(python_majorversion),3)
-# Adjust regression tests for Python 3 compatibility
-#
-# Mention those regression test files that need to be mangled in the
-# variable REGRESS_PLPYTHON3_MANGLE. They will be copied to a
-# subdirectory python3/ and have their Python syntax and other bits
-# adjusted to work with Python 3.
-
-# Note that the order of the tests needs to be preserved in this
-# expression.
-REGRESS := $(foreach test,$(REGRESS),$(if $(filter $(test),$(REGRESS_PLPYTHON3_MANGLE)),python3/$(test),$(test)))
-
-.PHONY: pgregress-python3-mangle
-pgregress-python3-mangle:
-	$(MKDIR_P) sql/python3 expected/python3 results/python3
-	for file in $(patsubst %,$(srcdir)/sql/%.sql,$(REGRESS_PLPYTHON3_MANGLE)) $(patsubst %,$(srcdir)/expected/%*.out,$(REGRESS_PLPYTHON3_MANGLE)); do \
-	  sed \
-		-e "s/<type 'exceptions\.\([[:alpha:]]*\)'>/<class '\1'>/g" \
-		-e "s/<type 'long'>/<class 'int'>/g" \
-		-e "s/\([0-9][0-9]*\)L/\1/g" \
-		-e 's/\([ [{]\)u"/\1"/g' \
-		-e "s/\([ [{]\)u'/\1'/g" \
-		-e "s/def next/def __next__/g" \
-		-e "s/LANGUAGE plpythonu/LANGUAGE plpython3u/g" \
-		-e "s/LANGUAGE plpython2u/LANGUAGE plpython3u/g" \
-		-e "s/EXTENSION plpythonu/EXTENSION plpython3u/g" \
-		-e "s/EXTENSION plpython2u/EXTENSION plpython3u/g" \
-		-e "s/EXTENSION \([^ ]*\)_plpythonu/EXTENSION \1_plpython3u/g" \
-		-e "s/EXTENSION \([^ ]*\)_plpython2u/EXTENSION \1_plpython3u/g" \
-		-e 's/installing required extension "plpython2u"/installing required extension "plpython3u"/g' \
-	  $$file >`echo $$file | sed 's,^.*/\([^/][^/]*/\)\([^/][^/]*\)$$,\1python3/\2,'` || exit; \
-	done
-
-check installcheck: pgregress-python3-mangle
-
-pg_regress_clean_files += sql/python3/ expected/python3/ results/python3/
-
-endif # Python 3
diff --git a/src/pl/plpython/sql/plpython_call.sql b/src/pl/plpython/sql/plpython_call.sql
index b0b3705ae3..daa4bc377d 100644
--- a/src/pl/plpython/sql/plpython_call.sql
+++ b/src/pl/plpython/sql/plpython_call.sql
@@ -3,7 +3,7 @@
 --
 CREATE PROCEDURE test_proc1()
-LANGUAGE plpythonu
+LANGUAGE plpython3u
 AS $$
 pass
 $$;
@@ -13,7 +13,7 @@ CALL test_proc1();
 
 -- error: can't return non-None
 CREATE PROCEDURE test_proc2()
-LANGUAGE plpythonu
+LANGUAGE plpython3u
 AS $$
 return 5
 $$;
@@ -24,7 +24,7 @@ CALL test_proc2();
 
 CREATE TABLE test1 (a int);
 CREATE PROCEDURE test_proc3(x int)
-LANGUAGE plpythonu
+LANGUAGE plpython3u
 AS $$
 plpy.execute("INSERT INTO test1 VALUES (%s)" % x)
 $$;
@@ -37,7 +37,7 @@ SELECT * FROM test1;
 
 -- output arguments
 CREATE PROCEDURE test_proc5(INOUT a text)
-LANGUAGE plpythonu
+LANGUAGE plpython3u
 AS $$
 return [a + '+' + a]
 $$;
@@ -46,7 +46,7 @@ CALL test_proc5('abc');
 
 CREATE PROCEDURE test_proc6(a int, INOUT b int, INOUT c int)
-LANGUAGE plpythonu
+LANGUAGE plpython3u
 AS $$
 return (b * a, c * a)
 $$;
@@ -57,7 +57,7 @@ CALL test_proc6(2, 3, 4);
 
 -- OUT parameters
 CREATE PROCEDURE test_proc9(IN a int, OUT b int)
-LANGUAGE plpythonu
+LANGUAGE plpython3u
 AS $$
 plpy.notice("a: %s" % (a))
 return (a * 2,)
diff --git a/src/pl/plpython/sql/plpython_composite.sql b/src/pl/plpython/sql/plpython_composite.sql
index 0fd2f5d5e3..21757701cc 100644
--- a/src/pl/plpython/sql/plpython_composite.sql
+++ b/src/pl/plpython/sql/plpython_composite.sql
@@ -1,6 +1,6 @@
 CREATE FUNCTION multiout_simple(OUT i integer, OUT j integer) AS $$
 return (1, 2)
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT multiout_simple();
 SELECT * FROM multiout_simple();
@@ -9,7 +9,7 @@ SELECT (multiout_simple()).j + 3;
 
 CREATE FUNCTION multiout_simple_setof(n integer = 1, OUT integer, OUT integer) RETURNS SETOF record AS $$
 return [(1, 2)] * n
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT multiout_simple_setof();
 SELECT * FROM multiout_simple_setof();
@@ -34,7 +34,7 @@ elif typ == 'obj':
 	return type_record
 elif typ == 'str':
 	return "('%s',%r)" % (first, second)
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT * FROM multiout_record_as('dict', 'foo', 1, 'f');
 SELECT multiout_record_as('dict', 'foo', 1, 'f');
@@ -77,7 +77,7 @@ for i in range(n):
 	power = 2 ** i
 	length = plpy.execute("select length('%d')" % power)[0]['length']
 	yield power, length
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT * FROM multiout_setof(3);
 SELECT multiout_setof(5);
@@ -86,7 +86,7 @@ CREATE FUNCTION multiout_return_table() RETURNS TABLE (x integer, y text) AS $$
 return [{'x': 4, 'y' :'four'},
         {'x': 7, 'y' :'seven'},
         {'x': 0, 'y' :'zero'}]
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT * FROM multiout_return_table();
 
@@ -94,18 +94,18 @@ CREATE FUNCTION multiout_array(OUT integer[], OUT text) RETURNS SETOF record AS
 yield [[1], 'a']
 yield [[1,2], 'b']
 yield [[1,2,3], None]
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT * FROM multiout_array();
 
 CREATE FUNCTION singleout_composite(OUT type_record) AS $$
 return {'first': 1, 'second': 2}
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE FUNCTION multiout_composite(OUT type_record) RETURNS SETOF type_record AS $$
 return [{'first': 1, 'second': 2},
        {'first': 3, 'second': 4 }]
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT * FROM singleout_composite();
 SELECT * FROM multiout_composite();
@@ -113,7 +113,7 @@ SELECT * FROM multiout_composite();
 -- composite OUT parameters in functions returning RECORD not supported yet
 CREATE FUNCTION multiout_composite(INOUT n integer, OUT type_record) AS $$
 return (n, (n * 2, n * 3))
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE FUNCTION multiout_table_type_setof(typ text, returnnull boolean, INOUT n integer, OUT table_record) RETURNS SETOF record AS $$
 if returnnull:
@@ -132,7 +132,7 @@ elif typ == 'str':
 	d = "(%r,%r)" % (n * 2, n * 3)
 for i in range(n):
 	yield (i, d)
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT * FROM multiout_composite(2);
 SELECT * FROM multiout_table_type_setof('dict', 'f', 3);
@@ -157,7 +157,7 @@ CREATE TABLE changing (
 
 CREATE FUNCTION changing_test(OUT n integer, OUT changing) RETURNS SETOF record AS $$
 return [(1, {'i': 1, 'j': 2}),
        (1, (3, 4))]
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT * FROM changing_test();
 ALTER TABLE changing DROP COLUMN j;
@@ -178,14 +178,14 @@ yield {'tab': [('first', 1), ('second', 2)],
 yield {'tab': [('first', 1), ('second', 2)],
        'typ': [{'first': 'third', 'second': 3},
                {'first': 'fourth', 'second': 4}]}
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT * FROM composite_types_table();
 
 -- check what happens if the output record descriptor changes
 CREATE FUNCTION return_record(t text) RETURNS record AS $$
 return {'t': t, 'val': 10}
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT * FROM return_record('abc') AS r(t text, val integer);
 SELECT * FROM return_record('abc') AS r(t text, val bigint);
@@ -196,7 +196,7 @@ SELECT * FROM return_record('999') AS r(val text, t integer);
 
 CREATE FUNCTION return_record_2(t text) RETURNS record AS $$
 return {'v1':1,'v2':2,t:3}
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT * FROM return_record_2('v3') AS (v3 int, v2 int, v1 int);
 SELECT * FROM return_record_2('v3') AS (v2 int, v3 int, v1 int);
@@ -211,7 +211,7 @@ SELECT * FROM return_record_2('v3') AS (v1 int, v2 int, v3 int);
 -- multi-dimensional array of composite types.
 CREATE FUNCTION composite_type_as_list() RETURNS type_record[] AS $$
 return [[('first', 1), ('second', 1)], [('first', 2), ('second', 2)], [('first', 3), ('second', 3)]];
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT * FROM composite_type_as_list();
 
 -- Starting with PostgreSQL 10, a composite type in an array cannot be
@@ -220,5 +220,5 @@ SELECT * FROM composite_type_as_list();
 -- on the issue.
 CREATE FUNCTION composite_type_as_list_broken() RETURNS type_record[] AS $$
 return [['first', 1]];
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 SELECT * FROM composite_type_as_list_broken();
diff --git a/src/pl/plpython/sql/plpython_do.sql b/src/pl/plpython/sql/plpython_do.sql
index 0e281a08ee..d49413268e 100644
--- a/src/pl/plpython/sql/plpython_do.sql
+++ b/src/pl/plpython/sql/plpython_do.sql
@@ -1,5 +1,3 @@
-DO $$ plpy.notice("This is plpythonu.") $$ LANGUAGE plpythonu;
+DO $$ plpy.notice("This is plpython3u.") $$ LANGUAGE plpython3u;
 
-DO $$ plpy.notice("This is plpython2u.") $$ LANGUAGE plpython2u;
-
-DO $$ raise Exception("error test") $$ LANGUAGE plpythonu;
+DO $$ raise Exception("error test") $$ LANGUAGE plpython3u;
diff --git a/src/pl/plpython/sql/plpython_drop.sql b/src/pl/plpython/sql/plpython_drop.sql
index 72d5d657ec..e4f373b2bc 100644
--- a/src/pl/plpython/sql/plpython_drop.sql
+++ b/src/pl/plpython/sql/plpython_drop.sql
@@ -3,6 +3,4 @@
 --
 SET client_min_messages = WARNING;
-DROP EXTENSION plpythonu CASCADE;
-
-DROP EXTENSION IF EXISTS plpython2u CASCADE;
+DROP EXTENSION plpython3u CASCADE;
diff --git a/src/pl/plpython/sql/plpython_ereport.sql b/src/pl/plpython/sql/plpython_ereport.sql
index 58df2057ef..d4f6223e59 100644
--- a/src/pl/plpython/sql/plpython_ereport.sql
+++ b/src/pl/plpython/sql/plpython_ereport.sql
@@ -17,28 +17,28 @@ plpy.info('This is message text.',
 plpy.notice('notice', detail='some detail')
 plpy.warning('warning', detail='some detail')
 plpy.error('stop on error', detail='some detail', hint='some hint')
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT elog_test();
 
-DO $$ plpy.info('other types', detail=(10, 20)) $$ LANGUAGE plpythonu;
+DO $$ plpy.info('other types', detail=(10, 20)) $$ LANGUAGE plpython3u;
 
 DO $$
 import time;
 from datetime import date
 plpy.info('other types', detail=date(2016, 2, 26))
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 DO $$
 basket = ['apple', 'orange', 'apple', 'pear', 'orange', 'banana']
 plpy.info('other types', detail=basket)
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 -- should fail
-DO $$ plpy.info('wrong sqlstate', sqlstate='54444A') $$ LANGUAGE plpythonu;
-DO $$ plpy.info('unsupported argument', blabla='fooboo') $$ LANGUAGE plpythonu;
-DO $$ plpy.info('first message', message='second message') $$ LANGUAGE plpythonu;
-DO $$ plpy.info('first message', 'second message', message='third message') $$ LANGUAGE plpythonu;
+DO $$ plpy.info('wrong sqlstate', sqlstate='54444A') $$ LANGUAGE plpython3u;
+DO $$ plpy.info('unsupported argument', blabla='fooboo') $$ LANGUAGE plpython3u;
+DO $$ plpy.info('first message', message='second message') $$ LANGUAGE plpython3u;
+DO $$ plpy.info('first message', 'second message', message='third message') $$ LANGUAGE plpython3u;
 
 -- raise exception in python, handle exception in plgsql
 CREATE OR REPLACE FUNCTION raise_exception(_message text, _detail text DEFAULT NULL, _hint text DEFAULT NULL,
@@ -57,7 +57,7 @@ kwargs = {
 }
 # ignore None values
 plpy.error(**dict((k, v) for k, v in iter(kwargs.items()) if v))
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT raise_exception('hello', 'world');
 SELECT raise_exception('message text', 'detail text', _sqlstate => 'YY333');
@@ -118,17 +118,13 @@ BEGIN
 END;
 $$;
 
--- The displayed context is different between Python2 and Python3,
--- but that's not important for this test.
-\set SHOW_CONTEXT never
-
 DO $$
 try:
     plpy.execute("select raise_exception(_message => 'my message', _sqlstate => 'XX987', _hint => 'some hint', _table_name => 'users_tab', _datatype_name => 'user_type')")
 except Exception as e:
     plpy.info(e.spidata)
     raise e
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 DO $$
 try:
@@ -136,4 +132,4 @@ try:
 except Exception as e:
     plpy.info('sqlstate: %s, hint: %s, table_name: %s, datatype_name: %s' % (e.sqlstate, e.hint, e.table_name, e.datatype_name))
     raise e
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
diff --git a/src/pl/plpython/sql/plpython_error.sql b/src/pl/plpython/sql/plpython_error.sql
index 88d6936fd0..11f14ec5a7 100644
--- a/src/pl/plpython/sql/plpython_error.sql
+++ b/src/pl/plpython/sql/plpython_error.sql
@@ -7,7 +7,7 @@
 CREATE FUNCTION python_syntax_error() RETURNS text
         AS
 '.syntaxerror'
-        LANGUAGE plpythonu;
+        LANGUAGE plpython3u;
 
 /* With check_function_bodies = false the function should get defined
  * and the error reported when called
@@ -17,7 +17,7 @@ SET check_function_bodies = false;
 CREATE FUNCTION python_syntax_error() RETURNS text
         AS
 '.syntaxerror'
-        LANGUAGE plpythonu;
+        LANGUAGE plpython3u;
 
 SELECT python_syntax_error();
 /* Run the function twice to check if the hashtable entry gets cleaned up */
@@ -30,7 +30,7 @@ RESET check_function_bodies;
 CREATE FUNCTION sql_syntax_error() RETURNS text
         AS
 'plpy.execute("syntax error")'
-        LANGUAGE plpythonu;
+        LANGUAGE plpython3u;
 
 SELECT sql_syntax_error();
 
@@ -40,7 +40,7 @@ SELECT sql_syntax_error();
 CREATE FUNCTION exception_index_invalid(text) RETURNS text
 	AS
 'return args[1]'
-	LANGUAGE plpythonu;
+	LANGUAGE plpython3u;
 
 SELECT exception_index_invalid('test');
 
@@ -51,7 +51,7 @@ CREATE FUNCTION exception_index_invalid_nested() RETURNS text
 	AS
 'rv = plpy.execute("SELECT test5(''foo'')")
 return rv[0]'
-	LANGUAGE plpythonu;
+	LANGUAGE plpython3u;
 
 SELECT exception_index_invalid_nested();
 
@@ -68,7 +68,7 @@ if len(rv):
 	return rv[0]["fname"]
 return None
 '
-	LANGUAGE plpythonu;
+	LANGUAGE plpython3u;
 
 SELECT invalid_type_uncaught('rick');
 
@@ -90,7 +90,7 @@ if len(rv):
 	return rv[0]["fname"]
 return None
 '
-	LANGUAGE plpythonu;
+	LANGUAGE plpython3u;
 
 SELECT invalid_type_caught('rick');
 
@@ -111,7 +111,7 @@ if len(rv):
 	return rv[0]["fname"]
 return None
 '
-	LANGUAGE plpythonu;
+	LANGUAGE plpython3u;
 
 SELECT invalid_type_reraised('rick');
 
@@ -127,7 +127,7 @@ if len(rv):
 	return rv[0]["fname"]
 return None
 '
-	LANGUAGE plpythonu;
+	LANGUAGE plpython3u;
 
 SELECT valid_type('rick');
 
@@ -147,7 +147,7 @@ def fun3():
 fun3()
 return "not reached"
 '
-	LANGUAGE plpythonu;
+	LANGUAGE plpython3u;
 
 SELECT nested_error();
 
@@ -167,7 +167,7 @@ def fun3():
 fun3()
 return "not reached"
 '
-	LANGUAGE plpythonu;
+	LANGUAGE plpython3u;
 
 SELECT nested_error_raise();
 
@@ -187,7 +187,7 @@ def fun3():
 fun3()
 return "you''ve been warned"
 '
-	LANGUAGE plpythonu;
+	LANGUAGE plpython3u;
 
 SELECT nested_warning();
 
@@ -196,7 +196,7 @@ SELECT nested_warning();
 CREATE FUNCTION toplevel_attribute_error() RETURNS void AS
 $$
 plpy.nonexistent
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT toplevel_attribute_error();
 
@@ -213,7 +213,7 @@ def third():
     plpy.execute("select sql_error()")
 
 first()
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE OR REPLACE FUNCTION sql_error() RETURNS void
 AS $$
 begin
@@ -229,7 +229,7 @@ $$ LANGUAGE plpgsql;
 CREATE OR REPLACE FUNCTION sql_from_python_error() RETURNS void
 AS $$
 plpy.execute("select sql_error()")
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT python_traceback();
 SELECT sql_error();
@@ -251,7 +251,7 @@ except spiexceptions.NotNullViolation as e:
     plpy.notice("Violated the NOT NULL constraint, sqlstate %s" % e.sqlstate)
 except spiexceptions.UniqueViolation as e:
     plpy.notice("Violated the UNIQUE constraint, sqlstate %s" % e.sqlstate)
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT specific_exception(2);
 SELECT specific_exception(NULL);
@@ -262,7 +262,7 @@ SELECT specific_exception(2);
 CREATE FUNCTION python_unique_violation() RETURNS void AS $$
 plpy.execute("insert into specific values (1)")
 plpy.execute("insert into specific values (1)")
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE FUNCTION catch_python_unique_violation() RETURNS text AS $$
 begin
@@ -283,7 +283,7 @@ CREATE FUNCTION manual_subxact() RETURNS void AS $$
 plpy.execute("savepoint save")
 plpy.execute("create table foo(x integer)")
 plpy.execute("rollback to save")
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT manual_subxact();
 
@@ -295,7 +295,7 @@ rollback = plpy.prepare("rollback to save")
 plpy.execute(save)
 plpy.execute("create table foo(x integer)")
 plpy.execute(rollback)
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT manual_subxact_prepared();
 
@@ -303,7 +303,7 @@ SELECT manual_subxact_prepared();
  */
 CREATE FUNCTION plpy_raise_spiexception() RETURNS void AS $$
 raise plpy.spiexceptions.DivisionByZero()
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 DO $$
 BEGIN
@@ -319,7 +319,7 @@ CREATE FUNCTION plpy_raise_spiexception_override() RETURNS void AS $$
 exc = plpy.spiexceptions.DivisionByZero()
 exc.sqlstate = 'SILLY'
 raise exc
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 DO $$
 BEGIN
@@ -332,14 +332,14 @@ $$ LANGUAGE plpgsql;
 
 /* test the context stack trace for nested execution levels
  */
 CREATE FUNCTION notice_innerfunc() RETURNS int AS $$
-plpy.execute("DO LANGUAGE plpythonu $x$ plpy.notice('inside DO') $x$")
+plpy.execute("DO LANGUAGE plpython3u $x$ plpy.notice('inside DO') $x$")
 return 1
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE FUNCTION notice_outerfunc() RETURNS int AS $$
 plpy.execute("SELECT notice_innerfunc()")
 return 1
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 \set SHOW_CONTEXT always
 
diff --git a/src/pl/plpython/sql/plpython_global.sql b/src/pl/plpython/sql/plpython_global.sql
index 32502b41ee..96d2049286 100644
--- a/src/pl/plpython/sql/plpython_global.sql
+++ b/src/pl/plpython/sql/plpython_global.sql
@@ -9,7 +9,7 @@ CREATE FUNCTION global_test_one() returns text
 if "global_test" not in GD:
 	GD["global_test"] = "set by global_test_one"
 return "SD: " + SD["global_test"] + ", GD: " + GD["global_test"]'
-    LANGUAGE plpythonu;
+    LANGUAGE plpython3u;
 
 CREATE FUNCTION global_test_two() returns text
     AS
@@ -18,7 +18,7 @@ CREATE FUNCTION global_test_two() returns text
 if "global_test" not in GD:
 	GD["global_test"] = "set by global_test_two"
 return "SD: " + SD["global_test"] + ", GD: " + GD["global_test"]'
-    LANGUAGE plpythonu;
+    LANGUAGE plpython3u;
 
 CREATE FUNCTION static_test() returns int4
     AS
@@ -29,7 +29,7 @@ else:
 	SD["call"] = 1
 return SD["call"]
 '
-    LANGUAGE plpythonu;
+    LANGUAGE plpython3u;
 
 SELECT static_test();
 
diff --git a/src/pl/plpython/sql/plpython_import.sql b/src/pl/plpython/sql/plpython_import.sql
index ec887677e1..3031eef2e6 100644
--- a/src/pl/plpython/sql/plpython_import.sql
+++ b/src/pl/plpython/sql/plpython_import.sql
@@ -7,7 +7,7 @@ CREATE FUNCTION import_fail() returns text
 except ImportError:
 	return "failed as expected"
 return "succeeded, that wasn''t supposed to happen"'
-    LANGUAGE plpythonu;
+    LANGUAGE plpython3u;
 
 CREATE FUNCTION import_succeed() returns text
 	AS
@@ -28,7 +28,7 @@ except Exception as ex:
 	plpy.notice("import failed -- %s" % str(ex))
 	return "failed, that wasn''t supposed to happen"
 return "succeeded, as expected"'
-    LANGUAGE plpythonu;
+    LANGUAGE plpython3u;
 
 CREATE FUNCTION import_test_one(p text) RETURNS text
 	AS
@@ -39,7 +39,7 @@ except ImportError:
 	import sha
 	digest = sha.new(p)
 return digest.hexdigest()'
-	LANGUAGE plpythonu;
+	LANGUAGE plpython3u;
 
 CREATE FUNCTION import_test_two(u users) RETURNS text
 	AS
@@ -51,7 +51,7 @@ except ImportError:
 	import sha
 	digest = sha.new(plain);
 return "sha hash of " + plain + " is " + digest.hexdigest()'
-	LANGUAGE plpythonu;
+	LANGUAGE plpython3u;
 
 -- import python modules
 
diff --git a/src/pl/plpython/sql/plpython_newline.sql b/src/pl/plpython/sql/plpython_newline.sql
index f9cee9491b..cb22ba923f 100644
--- a/src/pl/plpython/sql/plpython_newline.sql
+++ b/src/pl/plpython/sql/plpython_newline.sql
@@ -4,15 +4,15 @@
 CREATE OR REPLACE FUNCTION newline_lf() RETURNS integer AS
 E'x = 100\ny = 23\nreturn x + y\n'
-LANGUAGE plpythonu;
+LANGUAGE plpython3u;
 
 CREATE OR REPLACE FUNCTION newline_cr() RETURNS integer AS
 E'x = 100\ry = 23\rreturn x + y\r'
-LANGUAGE plpythonu;
+LANGUAGE plpython3u;
 
 CREATE OR REPLACE FUNCTION newline_crlf() RETURNS integer AS
 E'x = 100\r\ny = 23\r\nreturn x + y\r\n'
-LANGUAGE plpythonu;
+LANGUAGE plpython3u;
 
 SELECT newline_lf();
 
diff --git a/src/pl/plpython/sql/plpython_params.sql b/src/pl/plpython/sql/plpython_params.sql
index ee75c4dc41..8bab488859 100644
--- a/src/pl/plpython/sql/plpython_params.sql
+++ b/src/pl/plpython/sql/plpython_params.sql
@@ -4,13 +4,13 @@
 CREATE FUNCTION test_param_names0(integer, integer) RETURNS int AS $$
 return args[0] + args[1]
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE FUNCTION test_param_names1(a0 integer, a1 text) RETURNS boolean AS $$
 assert a0 == args[0]
 assert a1 == args[1]
 return True
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE FUNCTION test_param_names2(u users) RETURNS text AS $$
 assert u == args[0]
@@ -22,7 +22,7 @@ if isinstance(u, dict):
 else:
     s = str(u)
 return s
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 -- use deliberately wrong parameter names
 CREATE FUNCTION test_param_names3(a0 integer) RETURNS boolean AS $$
@@ -32,7 +32,7 @@ try:
 except NameError as e:
 	assert e.args[0].find("a1") > -1
 return True
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT test_param_names0(2,7);
 
diff --git a/src/pl/plpython/sql/plpython_quote.sql b/src/pl/plpython/sql/plpython_quote.sql
index 346b5485da..a1133e7e26 100644
--- a/src/pl/plpython/sql/plpython_quote.sql
+++ b/src/pl/plpython/sql/plpython_quote.sql
@@ -9,7 +9,7 @@ CREATE FUNCTION quote(t text, how text) RETURNS text AS $$
         return plpy.quote_ident(t)
     else:
         raise plpy.Error("unrecognized quote type %s" % how)
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT quote(t, 'literal') FROM (VALUES
        ('abc'),
diff --git a/src/pl/plpython/sql/plpython_record.sql b/src/pl/plpython/sql/plpython_record.sql
index 9bab4c9e82..52bad8bcce 100644
--- a/src/pl/plpython/sql/plpython_record.sql
+++ b/src/pl/plpython/sql/plpython_record.sql
@@ -27,7 +27,7 @@ elif typ == 'obj':
 	type_record.first = first
 	type_record.second = second
 	return type_record
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE FUNCTION test_type_record_as(typ text, first text, second integer, retnull boolean) RETURNS type_record AS $$
 if retnull:
@@ -45,20 +45,20 @@ elif typ == 'obj':
 	return type_record
 elif typ == 'str':
 	return "('%s',%r)" % (first, second)
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE FUNCTION test_in_out_params(first in text, second out text) AS $$
 return first + '_in_to_out';
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE FUNCTION test_in_out_params_multi(first in text,
                                          second out text, third out text) AS $$
 return (first + '_record_in_to_out_1', first + '_record_in_to_out_2');
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE FUNCTION test_inout_params(first inout text) AS $$
 return first + '_inout';
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 -- Test tuple returning functions
 
@@ -136,14 +136,14 @@ SELECT * FROM test_type_record_as('obj', 'one', 1, false);
 
 CREATE FUNCTION test_type_record_error1() RETURNS type_record AS $$
 return { 'first': 'first' }
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT * FROM test_type_record_error1();
 
 CREATE FUNCTION test_type_record_error2() RETURNS type_record AS $$
 return [ 'first' ]
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT * FROM test_type_record_error2();
 
@@ -152,12 +152,12 @@ CREATE FUNCTION test_type_record_error3() RETURNS type_record AS $$
 class type_record: pass
 type_record.first = 'first'
 return type_record
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT * FROM test_type_record_error3();
 
 CREATE FUNCTION test_type_record_error4() RETURNS type_record AS $$
 return 'foo'
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT * FROM test_type_record_error4();
 
diff --git a/src/pl/plpython/sql/plpython_setof.sql b/src/pl/plpython/sql/plpython_setof.sql
index 16c2eef0ad..4cfb10192c 100644
--- a/src/pl/plpython/sql/plpython_setof.sql
+++ b/src/pl/plpython/sql/plpython_setof.sql
@@ -4,21 +4,21 @@
 CREATE FUNCTION test_setof_error() RETURNS SETOF text AS $$
 return 37
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT test_setof_error();
 
 CREATE FUNCTION test_setof_as_list(count integer, content text) RETURNS SETOF text AS $$
 return [ content ]*count
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE FUNCTION test_setof_as_tuple(count integer, content text) RETURNS SETOF text AS $$
 t = ()
 for i in range(count):
 	t += ( content, )
 return t
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE FUNCTION test_setof_as_iterator(count integer, content text) RETURNS SETOF text AS $$
 class producer:
@@ -27,13 +27,13 @@ class producer:
 		self.icount = icount
 	def __iter__ (self):
 		return self
-	def next (self):
+	def __next__ (self):
 		if self.icount == 0:
 			raise StopIteration
 		self.icount -= 1
 		return self.icontent
 return producer(count, content)
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE FUNCTION test_setof_spi_in_iterator() RETURNS SETOF text AS
 $$
@@ -42,7 +42,7 @@ $$
     yield s
     plpy.execute('select 2')
 $$
-LANGUAGE plpythonu;
+LANGUAGE plpython3u;
 
 -- Test set returning functions
 
@@ -69,7 +69,7 @@ global x
 while x <= lim:
     yield x
     x = x + 1
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT ugly(1, 5);
 
@@ -81,7 +81,7 @@ CREATE OR REPLACE FUNCTION get_user_records()
 RETURNS SETOF users
 AS $$
     return plpy.execute("SELECT * FROM users ORDER BY username")
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT get_user_records();
 SELECT * FROM get_user_records();
@@ -91,7 +91,7 @@ CREATE OR REPLACE FUNCTION get_user_records2()
 RETURNS TABLE(fname text, lname text, username text, userid int)
 AS $$
     return plpy.execute("SELECT * FROM users ORDER BY username")
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT get_user_records2();
 SELECT * FROM get_user_records2();
 
diff --git a/src/pl/plpython/sql/plpython_spi.sql b/src/pl/plpython/sql/plpython_spi.sql
index dd77833ed5..fcd113acaa 100644
--- a/src/pl/plpython/sql/plpython_spi.sql
+++ b/src/pl/plpython/sql/plpython_spi.sql
@@ -7,19 +7,19 @@ CREATE FUNCTION nested_call_one(a text) RETURNS text
 'q = "SELECT nested_call_two(''%s'')" % a
 r = plpy.execute(q)
 return r[0]'
-	LANGUAGE plpythonu ;
+	LANGUAGE plpython3u ;
 
 CREATE FUNCTION nested_call_two(a text) RETURNS text
 	AS
 'q = "SELECT nested_call_three(''%s'')" % a
 r = plpy.execute(q)
 return r[0]'
-	LANGUAGE plpythonu ;
+	LANGUAGE plpython3u ;
 
 CREATE FUNCTION nested_call_three(a text) RETURNS text
 	AS
 'return a'
-	LANGUAGE plpythonu ;
+	LANGUAGE plpython3u ;
 
 -- some spi stuff
 
@@ -35,7 +35,7 @@ except Exception as ex:
 	plpy.error(str(ex))
 return None
 '
-	LANGUAGE plpythonu;
+	LANGUAGE plpython3u;
 
 CREATE FUNCTION spi_prepared_plan_test_two(a text) RETURNS text
 	AS
@@ -49,7 +49,7 @@ except Exception as ex:
 	plpy.error(str(ex))
 return None
 '
-	LANGUAGE plpythonu;
+	LANGUAGE plpython3u;
 
 CREATE FUNCTION spi_prepared_plan_test_nested(a text) RETURNS text
 	AS
@@ -64,7 +64,7 @@ except Exception as ex:
 	plpy.error(str(ex))
 return None
 '
-	LANGUAGE plpythonu;
+	LANGUAGE plpython3u;
 
 CREATE FUNCTION join_sequences(s sequences) RETURNS text
 	AS
@@ -77,7 +77,7 @@ for r in rv:
 	seq = seq + r["sequence"]
 return seq
 '
-	LANGUAGE plpythonu;
+	LANGUAGE plpython3u;
 
 CREATE FUNCTION spi_recursive_sum(a int) RETURNS int
 	AS
@@ -86,7 +86,7 @@ if a > 1:
     r = plpy.execute("SELECT spi_recursive_sum(%d) as a" % (a-1))[0]["a"]
 return a + r
 '
-	LANGUAGE plpythonu;
+	LANGUAGE plpython3u;
 
 --
 -- spi and nested calls
@@ -120,7 +120,7 @@ if result.status() > 0:
     return result.nrows()
 else:
     return None
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT result_metadata_test($$SELECT 1 AS foo, '11'::text AS bar UNION SELECT 2, '22'$$);
 SELECT result_metadata_test($$CREATE TEMPORARY TABLE foo1 (a int, b text)$$);
@@ -129,7 +129,7 @@ CREATE FUNCTION result_nrows_test(cmd text) RETURNS int
 AS $$
 result = plpy.execute(cmd)
 return result.nrows()
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT result_nrows_test($$SELECT 1$$);
 SELECT result_nrows_test($$CREATE TEMPORARY TABLE foo2 (a int, b text)$$);
@@ -140,7 +140,7 @@ CREATE FUNCTION result_len_test(cmd text) RETURNS int
 AS $$
 result = plpy.execute(cmd)
 return len(result)
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT result_len_test($$SELECT 1$$);
 SELECT result_len_test($$CREATE TEMPORARY TABLE foo3 (a int, b text)$$);
@@ -162,7 +162,7 @@ result[-1] = {'c': 1000}
 result[:2] = [{'c': 10}, {'c': 100}]
 plpy.info([item['c'] for item in result[:]])
 
-# raises TypeError, but the message differs on Python 2.6, so silence it
+# raises TypeError, catch so further tests could be added
 try:
     plpy.info(result['foo'])
 except TypeError:
@@ -170,7 +170,7 @@ except TypeError:
 else:
     assert False, "TypeError not raised"
 
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT result_subscript_test();
 
@@ -180,7 +180,7 @@ result = plpy.execute("select 1 where false")
 
 plpy.info(result[:])
 
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT result_empty_test();
 
@@ -189,7 +189,7 @@ AS $$
 plan = plpy.prepare(cmd)
 result = plpy.execute(plan)
 return str(result)
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT result_str_test($$SELECT 1 AS foo UNION SELECT 2$$);
 SELECT result_str_test($$CREATE TEMPORARY TABLE foo1 (a int, b text)$$);
@@ -203,13 +203,13 @@ for row in res:
     if row['lname'] == 'doe':
         does += 1
 return does
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE FUNCTION double_cursor_close() RETURNS int AS $$
 res = plpy.cursor("select fname, lname from users")
 res.close()
 res.close()
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE FUNCTION cursor_fetch() RETURNS int AS $$
 res = plpy.cursor("select fname, lname from users")
@@ -228,7 +228,7 @@ except StopIteration:
     pass
 else:
     assert False, "StopIteration not raised"
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE FUNCTION cursor_mix_next_and_fetch() RETURNS int AS $$
 res = plpy.cursor("select fname, lname from users order by fname")
@@ -242,7 +242,7 @@ except AttributeError:
 assert item['fname'] == 'rick'
 
 assert len(res.fetch(2)) == 1
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE FUNCTION fetch_after_close() RETURNS int AS $$
 res = plpy.cursor("select fname, lname from users")
@@ -253,7 +253,7 @@ except ValueError:
     pass
 else:
     assert False, "ValueError not raised"
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE FUNCTION next_after_close() RETURNS int AS $$
 res = plpy.cursor("select fname, lname from users")
@@ -267,7 +267,7 @@ except ValueError:
     pass
 else:
     assert False, "ValueError not raised"
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE FUNCTION cursor_fetch_next_empty() RETURNS int AS $$
 res = plpy.cursor("select fname, lname from users where false")
@@ -281,7 +281,7 @@ except StopIteration:
     pass
 else:
     assert False, "StopIteration not raised"
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE FUNCTION cursor_plan() RETURNS SETOF text AS $$
 plan = plpy.prepare(
@@ -291,13 +291,13 @@ for row in plpy.cursor(plan, ["w"]):
     yield row['fname']
 for row in plan.cursor(["j"]):
     yield row['fname']
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE FUNCTION cursor_plan_wrong_args() RETURNS SETOF text AS $$
 plan = plpy.prepare("select fname, lname from users where fname like $1 || '%'",
                     ["text"])
 c = plpy.cursor(plan, ["a", "b"])
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE TYPE test_composite_type AS (
   a1 int,
@@ -308,7 +308,7 @@ CREATE OR REPLACE FUNCTION plan_composite_args() RETURNS test_composite_type AS
 plan = plpy.prepare("select $1 as c1", ["test_composite_type"])
 res = plpy.execute(plan, [{"a1": 3, "a2": "label"}])
 return res[0]["c1"]
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT simple_cursor_test();
 SELECT double_cursor_close();
diff --git a/src/pl/plpython/sql/plpython_subtransaction.sql b/src/pl/plpython/sql/plpython_subtransaction.sql
index cc4b1ae102..c65c380f40 100644
--- a/src/pl/plpython/sql/plpython_subtransaction.sql
+++ b/src/pl/plpython/sql/plpython_subtransaction.sql
@@ -17,7 +17,7 @@ with plpy.subtransaction():
         plpy.execute("INSERT INTO subtransaction_tbl VALUES ('oops')")
     elif what_error == "Python":
         raise Exception("Python exception")
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT subtransaction_ctx_test();
 SELECT * FROM subtransaction_tbl;
@@ -45,7 +45,7 @@ with plpy.subtransaction():
         raise
     plpy.notice("Swallowed %s(%r)" % (e.__class__.__name__, e.args[0]))
 return "ok"
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT subtransaction_nested_test();
 SELECT * FROM subtransaction_tbl;
@@ -65,7 +65,7 @@ with plpy.subtransaction():
     plpy.execute("INSERT INTO subtransaction_tbl VALUES (2)")
     plpy.execute("SELECT subtransaction_nested_test('t')")
 return "ok"
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT subtransaction_deeply_nested_test();
 SELECT * FROM subtransaction_tbl;
@@ -76,25 +76,25 @@ TRUNCATE subtransaction_tbl;
 
 CREATE FUNCTION subtransaction_exit_without_enter() RETURNS void
 AS $$
 plpy.subtransaction().__exit__(None, None, None)
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE FUNCTION subtransaction_enter_without_exit() RETURNS void
 AS $$
 plpy.subtransaction().__enter__()
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE FUNCTION subtransaction_exit_twice() RETURNS void
 AS $$
 plpy.subtransaction().__enter__()
 plpy.subtransaction().__exit__(None, None, None)
 plpy.subtransaction().__exit__(None, None, None)
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE FUNCTION subtransaction_enter_twice() RETURNS void
 AS $$
 plpy.subtransaction().__enter__()
 plpy.subtransaction().__enter__()
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE FUNCTION subtransaction_exit_same_subtransaction_twice() RETURNS void
 AS $$
@@ -102,7 +102,7 @@ s = plpy.subtransaction()
 s.__enter__()
 s.__exit__(None, None, None)
 s.__exit__(None, None, None)
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE FUNCTION subtransaction_enter_same_subtransaction_twice() RETURNS void
 AS $$
@@ -110,14 +110,14 @@ s = plpy.subtransaction()
 s.__enter__()
 s.__enter__()
 s.__exit__(None, None, None)
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 -- No warnings here, as the subtransaction gets indeed closed
 CREATE FUNCTION subtransaction_enter_subtransaction_in_with() RETURNS void
 AS $$
 with plpy.subtransaction() as s:
     s.__enter__()
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE FUNCTION subtransaction_exit_subtransaction_in_with() RETURNS void
 AS $$
@@ -126,7 +126,7 @@ try:
         s.__exit__(None, None, None)
 except ValueError as e:
     raise ValueError(e)
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT subtransaction_exit_without_enter();
 SELECT subtransaction_enter_without_exit();
@@ -159,7 +159,7 @@ try:
     plpy.execute(p, ["wrong"])
 except plpy.SPIError:
     plpy.warning("Caught a SPI error")
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT subtransaction_mix_explicit_and_implicit();
 SELECT * FROM subtransaction_tbl;
@@ -172,7 +172,7 @@ AS $$
 s = plpy.subtransaction()
 s.enter()
 s.exit(None, None, None)
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT subtransaction_alternative_names();
 
@@ -186,7 +186,7 @@ with plpy.subtransaction():
         plpy.execute("INSERT INTO subtransaction_tbl VALUES ('a')")
     except plpy.SPIError:
         plpy.notice("caught")
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT try_catch_inside_subtransaction();
 SELECT * FROM subtransaction_tbl;
@@ -202,7 +202,7 @@ with plpy.subtransaction():
         plpy.execute("INSERT INTO subtransaction_tbl VALUES (1)")
     except plpy.SPIError:
         plpy.notice("caught")
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 SELECT pk_violation_inside_subtransaction();
 SELECT * FROM subtransaction_tbl;
@@ -217,7 +217,7 @@ with plpy.subtransaction():
     cur.fetch(10)
 fetched = cur.fetch(10);
 return int(fetched[5]["i"])
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE FUNCTION cursor_aborted_subxact() RETURNS int AS $$
 try:
@@ -229,7 +229,7 @@ except plpy.SPIError:
     fetched = cur.fetch(10)
     return int(fetched[5]["i"])
 return 0 # not reached
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE FUNCTION cursor_plan_aborted_subxact() RETURNS int AS $$
 try:
@@ -243,7 +243,7 @@ except plpy.SPIError:
     fetched = cur.fetch(5)
     return fetched[2]["i"]
 return 0 # not reached
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE FUNCTION cursor_close_aborted_subxact() RETURNS boolean
cursor_close_aborted_subxact() RETURNS boolean AS $$ try: @@ -254,7 +254,7 @@ except plpy.SPIError: cur.close() return True return False # not reached -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT cursor_in_subxact(); SELECT cursor_aborted_subxact(); diff --git a/src/pl/plpython/sql/plpython_test.sql b/src/pl/plpython/sql/plpython_test.sql index 5f1be9c94a..aa22a27415 100644 --- a/src/pl/plpython/sql/plpython_test.sql +++ b/src/pl/plpython/sql/plpython_test.sql @@ -1,13 +1,13 @@ -- first some tests of basic functionality -CREATE EXTENSION plpython2u; +CREATE EXTENSION plpython3u; -- really stupid function just to get the module loaded -CREATE FUNCTION stupid() RETURNS text AS 'return "zarkon"' LANGUAGE plpythonu; +CREATE FUNCTION stupid() RETURNS text AS 'return "zarkon"' LANGUAGE plpython3u; select stupid(); -- check 2/3 versioning -CREATE FUNCTION stupidn() RETURNS text AS 'return "zarkon"' LANGUAGE plpython2u; +CREATE FUNCTION stupidn() RETURNS text AS 'return "zarkon"' LANGUAGE plpython3u; select stupidn(); @@ -21,7 +21,7 @@ for key in keys: out.append("%s: %s" % (key, u[key])) words = a1 + " " + a2 + " => {" + ", ".join(out) + "}" return words' - LANGUAGE plpythonu; + LANGUAGE plpython3u; select "Argument test #1"(users, fname, lname) from users where lname = 'doe' order by 1; @@ -32,7 +32,7 @@ $$ contents = list(filter(lambda x: not x.startswith("__"), dir(plpy))) contents.sort() return contents -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; select module_contents(); @@ -47,6 +47,6 @@ plpy.info('info', 37, [1, 2, 3]) plpy.notice('notice') plpy.warning('warning') plpy.error('error') -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT elog_test_basic(); diff --git a/src/pl/plpython/sql/plpython_transaction.sql b/src/pl/plpython/sql/plpython_transaction.sql index 68588d9fb0..c939ba76d4 100644 --- a/src/pl/plpython/sql/plpython_transaction.sql +++ b/src/pl/plpython/sql/plpython_transaction.sql @@ -2,7 +2,7 @@ CREATE TABLE test1 (a int, b text); CREATE PROCEDURE transaction_test1() -LANGUAGE plpythonu +LANGUAGE plpython3u AS $$ for i in range(0, 10): plpy.execute("INSERT INTO test1 (a) VALUES (%d)" % i) @@ -20,7 +20,7 @@ SELECT * FROM test1; TRUNCATE test1; DO -LANGUAGE plpythonu +LANGUAGE plpython3u $$ for i in range(0, 10): plpy.execute("INSERT INTO test1 (a) VALUES (%d)" % i) @@ -37,7 +37,7 @@ TRUNCATE test1; -- not allowed in a function CREATE FUNCTION transaction_test2() RETURNS int -LANGUAGE plpythonu +LANGUAGE plpython3u AS $$ for i in range(0, 10): plpy.execute("INSERT INTO test1 (a) VALUES (%d)" % i) @@ -55,7 +55,7 @@ SELECT * FROM test1; -- also not allowed if procedure is called from a function CREATE FUNCTION transaction_test3() RETURNS int -LANGUAGE plpythonu +LANGUAGE plpython3u AS $$ plpy.execute("CALL transaction_test1()") return 1 @@ -68,9 +68,9 @@ SELECT * FROM test1; -- DO block inside function CREATE FUNCTION transaction_test4() RETURNS int -LANGUAGE plpythonu +LANGUAGE plpython3u AS $$ -plpy.execute("DO LANGUAGE plpythonu $x$ plpy.commit() $x$") +plpy.execute("DO LANGUAGE plpython3u $x$ plpy.commit() $x$") return 1 $$; @@ -78,7 +78,7 @@ SELECT transaction_test4(); -- commit inside subtransaction (prohibited) -DO LANGUAGE plpythonu $$ +DO LANGUAGE plpython3u $$ s = plpy.subtransaction() s.enter() plpy.commit() @@ -91,7 +91,7 @@ INSERT INTO test2 VALUES (0), (1), (2), (3), (4); TRUNCATE test1; -DO LANGUAGE plpythonu $$ +DO LANGUAGE plpython3u $$ for row in plpy.cursor("SELECT * FROM test2 ORDER BY x"): plpy.execute("INSERT INTO test1 (a) VALUES (%s)" % 
row['x']) plpy.commit() @@ -106,7 +106,7 @@ SELECT * FROM pg_cursors; -- error in cursor loop with commit TRUNCATE test1; -DO LANGUAGE plpythonu $$ +DO LANGUAGE plpython3u $$ for row in plpy.cursor("SELECT * FROM test2 ORDER BY x"): plpy.execute("INSERT INTO test1 (a) VALUES (12/(%s-2))" % row['x']) plpy.commit() @@ -120,7 +120,7 @@ SELECT * FROM pg_cursors; -- rollback inside cursor loop TRUNCATE test1; -DO LANGUAGE plpythonu $$ +DO LANGUAGE plpython3u $$ for row in plpy.cursor("SELECT * FROM test2 ORDER BY x"): plpy.execute("INSERT INTO test1 (a) VALUES (%s)" % row['x']) plpy.rollback() @@ -134,7 +134,7 @@ SELECT * FROM pg_cursors; -- first commit then rollback inside cursor loop TRUNCATE test1; -DO LANGUAGE plpythonu $$ +DO LANGUAGE plpython3u $$ for row in plpy.cursor("SELECT * FROM test2 ORDER BY x"): plpy.execute("INSERT INTO test1 (a) VALUES (%s)" % row['x']) if row['x'] % 2 == 0: @@ -152,7 +152,7 @@ SELECT * FROM pg_cursors; CREATE TABLE testpk (id int PRIMARY KEY); CREATE TABLE testfk(f1 int REFERENCES testpk DEFERRABLE INITIALLY DEFERRED); -DO LANGUAGE plpythonu $$ +DO LANGUAGE plpython3u $$ # this insert will fail during commit: plpy.execute("INSERT INTO testfk VALUES (0)") plpy.commit() @@ -162,7 +162,7 @@ $$; SELECT * FROM testpk; SELECT * FROM testfk; -DO LANGUAGE plpythonu $$ +DO LANGUAGE plpython3u $$ # this insert will fail during commit: plpy.execute("INSERT INTO testfk VALUES (0)") try: diff --git a/src/pl/plpython/sql/plpython_trigger.sql b/src/pl/plpython/sql/plpython_trigger.sql index 19852dc585..e5504b9ab1 100644 --- a/src/pl/plpython/sql/plpython_trigger.sql +++ b/src/pl/plpython/sql/plpython_trigger.sql @@ -16,7 +16,7 @@ if TD["new"]["fname"] == "william": TD["new"]["fname"] = TD["args"][0] rv = "MODIFY" return rv' - LANGUAGE plpythonu; + LANGUAGE plpython3u; CREATE FUNCTION users_update() returns trigger @@ -25,7 +25,7 @@ CREATE FUNCTION users_update() returns trigger if TD["old"]["fname"] != TD["new"]["fname"] and TD["old"]["fname"] == TD["args"][0]: return "SKIP" return None' - LANGUAGE plpythonu; + LANGUAGE plpython3u; CREATE FUNCTION users_delete() RETURNS trigger @@ -33,7 +33,7 @@ CREATE FUNCTION users_delete() RETURNS trigger 'if TD["old"]["fname"] == TD["args"][0]: return "SKIP" return None' - LANGUAGE plpythonu; + LANGUAGE plpython3u; CREATE TRIGGER users_insert_trig BEFORE INSERT ON users FOR EACH ROW @@ -72,7 +72,7 @@ CREATE TABLE trigger_test_generated ( j int GENERATED ALWAYS AS (i * 2) STORED ); -CREATE FUNCTION trigger_data() RETURNS trigger LANGUAGE plpythonu AS $$ +CREATE FUNCTION trigger_data() RETURNS trigger LANGUAGE plpython3u AS $$ if 'relid' in TD: TD['relid'] = "bogus:12345" @@ -157,7 +157,7 @@ INSERT INTO trigger_test VALUES (0, 'zero'); CREATE FUNCTION stupid1() RETURNS trigger AS $$ return 37 -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE TRIGGER stupid_trigger1 BEFORE INSERT ON trigger_test @@ -173,7 +173,7 @@ DROP TRIGGER stupid_trigger1 ON trigger_test; CREATE FUNCTION stupid2() RETURNS trigger AS $$ return "MODIFY" -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE TRIGGER stupid_trigger2 BEFORE DELETE ON trigger_test @@ -191,7 +191,7 @@ INSERT INTO trigger_test VALUES (0, 'zero'); CREATE FUNCTION stupid3() RETURNS trigger AS $$ return "foo" -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE TRIGGER stupid_trigger3 BEFORE UPDATE ON trigger_test @@ -206,8 +206,8 @@ DROP TRIGGER stupid_trigger3 ON trigger_test; CREATE FUNCTION stupid3u() RETURNS trigger AS $$ - return u"foo" -$$ LANGUAGE plpythonu; + return "foo" +$$ 
LANGUAGE plpython3u; CREATE TRIGGER stupid_trigger3 BEFORE UPDATE ON trigger_test @@ -224,7 +224,7 @@ CREATE FUNCTION stupid4() RETURNS trigger AS $$ del TD["new"] return "MODIFY"; -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE TRIGGER stupid_trigger4 BEFORE UPDATE ON trigger_test @@ -241,7 +241,7 @@ CREATE FUNCTION stupid5() RETURNS trigger AS $$ TD["new"] = ['foo', 'bar'] return "MODIFY"; -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE TRIGGER stupid_trigger5 BEFORE UPDATE ON trigger_test @@ -258,7 +258,7 @@ CREATE FUNCTION stupid6() RETURNS trigger AS $$ TD["new"] = {1: 'foo', 2: 'bar'} return "MODIFY"; -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE TRIGGER stupid_trigger6 BEFORE UPDATE ON trigger_test @@ -275,7 +275,7 @@ CREATE FUNCTION stupid7() RETURNS trigger AS $$ TD["new"] = {'v': 'foo', 'a': 'bar'} return "MODIFY"; -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE TRIGGER stupid_trigger7 BEFORE UPDATE ON trigger_test @@ -290,9 +290,9 @@ DROP TRIGGER stupid_trigger7 ON trigger_test; CREATE FUNCTION stupid7u() RETURNS trigger AS $$ - TD["new"] = {u'v': 'foo', u'a': 'bar'} + TD["new"] = {'v': 'foo', 'a': 'bar'} return "MODIFY" -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE TRIGGER stupid_trigger7 BEFORE UPDATE ON trigger_test @@ -318,7 +318,7 @@ CREATE FUNCTION test_null() RETURNS trigger AS $$ TD["new"]['v'] = None return "MODIFY" -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE TRIGGER test_null_trigger BEFORE UPDATE ON trigger_test @@ -341,7 +341,7 @@ SET DateStyle = 'ISO'; CREATE FUNCTION set_modif_time() RETURNS trigger AS $$ TD['new']['modif_time'] = '2010-10-13 21:57:28.930486' return 'MODIFY' -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE TABLE pb (a TEXT, modif_time TIMESTAMP(0) WITHOUT TIME ZONE); @@ -365,7 +365,7 @@ CREATE FUNCTION composite_trigger_f() RETURNS trigger AS $$ TD['new']['f1'] = (3, False) TD['new']['f2'] = {'k': 7, 'l': 'yes', 'ignored': 10} return 'MODIFY' -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE TRIGGER composite_trigger BEFORE INSERT ON composite_trigger_test FOR EACH ROW EXECUTE PROCEDURE composite_trigger_f(); @@ -380,7 +380,7 @@ CREATE TABLE composite_trigger_noop_test (f1 comp1, f2 comp2); CREATE FUNCTION composite_trigger_noop_f() RETURNS trigger AS $$ return 'MODIFY' -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE TRIGGER composite_trigger_noop BEFORE INSERT ON composite_trigger_noop_test FOR EACH ROW EXECUTE PROCEDURE composite_trigger_noop_f(); @@ -399,7 +399,7 @@ CREATE TABLE composite_trigger_nested_test(c comp3); CREATE FUNCTION composite_trigger_nested_f() RETURNS trigger AS $$ return 'MODIFY' -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE TRIGGER composite_trigger_nested BEFORE INSERT ON composite_trigger_nested_test FOR EACH ROW EXECUTE PROCEDURE composite_trigger_nested_f(); @@ -410,7 +410,7 @@ INSERT INTO composite_trigger_nested_test VALUES (ROW(ROW(NULL, 't'), ROW(1, 'f' SELECT * FROM composite_trigger_nested_test; -- check that using a function as a trigger over two tables works correctly -CREATE FUNCTION trig1234() RETURNS trigger LANGUAGE plpythonu AS $$ +CREATE FUNCTION trig1234() RETURNS trigger LANGUAGE plpython3u AS $$ TD["new"]["data"] = '1234' return 'MODIFY' $$; @@ -432,7 +432,7 @@ SELECT * FROM b; CREATE TABLE transition_table_test (id int, name text); INSERT INTO transition_table_test VALUES (1, 'a'); -CREATE FUNCTION transition_table_test_f() RETURNS trigger LANGUAGE plpythonu AS +CREATE FUNCTION transition_table_test_f() RETURNS 
trigger LANGUAGE plpython3u AS $$ rv = plpy.execute("SELECT * FROM old_table") assert(rv.nrows() == 1) @@ -455,7 +455,7 @@ DROP FUNCTION transition_table_test_f(); -- dealing with generated columns CREATE FUNCTION generated_test_func1() RETURNS trigger -LANGUAGE plpythonu +LANGUAGE plpython3u AS $$ TD['new']['j'] = 5 # not allowed return 'MODIFY' diff --git a/src/pl/plpython/sql/plpython_types.sql b/src/pl/plpython/sql/plpython_types.sql index 0d207d9c01..40f4f79d99 100644 --- a/src/pl/plpython/sql/plpython_types.sql +++ b/src/pl/plpython/sql/plpython_types.sql @@ -9,7 +9,7 @@ CREATE FUNCTION test_type_conversion_bool(x bool) RETURNS bool AS $$ plpy.info(x, type(x)) return x -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_bool(true); SELECT * FROM test_type_conversion_bool(false); @@ -35,7 +35,7 @@ elif n == 5: ret = [0] plpy.info(ret, not not ret) return ret -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_bool_other(0); SELECT * FROM test_type_conversion_bool_other(1); @@ -48,7 +48,7 @@ SELECT * FROM test_type_conversion_bool_other(5); CREATE FUNCTION test_type_conversion_char(x char) RETURNS char AS $$ plpy.info(x, type(x)) return x -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_char('a'); SELECT * FROM test_type_conversion_char(null); @@ -57,7 +57,7 @@ SELECT * FROM test_type_conversion_char(null); CREATE FUNCTION test_type_conversion_int2(x int2) RETURNS int2 AS $$ plpy.info(x, type(x)) return x -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_int2(100::int2); SELECT * FROM test_type_conversion_int2(-100::int2); @@ -67,7 +67,7 @@ SELECT * FROM test_type_conversion_int2(null); CREATE FUNCTION test_type_conversion_int4(x int4) RETURNS int4 AS $$ plpy.info(x, type(x)) return x -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_int4(100); SELECT * FROM test_type_conversion_int4(-100); @@ -77,7 +77,7 @@ SELECT * FROM test_type_conversion_int4(null); CREATE FUNCTION test_type_conversion_int8(x int8) RETURNS int8 AS $$ plpy.info(x, type(x)) return x -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_int8(100); SELECT * FROM test_type_conversion_int8(-100); @@ -90,7 +90,7 @@ CREATE FUNCTION test_type_conversion_numeric(x numeric) RETURNS numeric AS $$ # between decimal and cdecimal plpy.info(str(x), x.__class__.__name__) return x -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_numeric(100); SELECT * FROM test_type_conversion_numeric(-100); @@ -105,7 +105,7 @@ SELECT * FROM test_type_conversion_numeric(null); CREATE FUNCTION test_type_conversion_float4(x float4) RETURNS float4 AS $$ plpy.info(x, type(x)) return x -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_float4(100); SELECT * FROM test_type_conversion_float4(-100); @@ -116,7 +116,7 @@ SELECT * FROM test_type_conversion_float4(null); CREATE FUNCTION test_type_conversion_float8(x float8) RETURNS float8 AS $$ plpy.info(x, type(x)) return x -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_float8(100); SELECT * FROM test_type_conversion_float8(-100); @@ -128,7 +128,7 @@ SELECT * FROM test_type_conversion_float8(100100100.654321); CREATE FUNCTION test_type_conversion_oid(x oid) RETURNS oid AS $$ plpy.info(x, type(x)) return x -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_oid(100); SELECT * FROM 
test_type_conversion_oid(2147483649); @@ -138,7 +138,7 @@ SELECT * FROM test_type_conversion_oid(null); CREATE FUNCTION test_type_conversion_text(x text) RETURNS text AS $$ plpy.info(x, type(x)) return x -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_text('hello world'); SELECT * FROM test_type_conversion_text(null); @@ -147,7 +147,7 @@ SELECT * FROM test_type_conversion_text(null); CREATE FUNCTION test_type_conversion_bytea(x bytea) RETURNS bytea AS $$ plpy.info(x, type(x)) return x -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_bytea('hello world'); SELECT * FROM test_type_conversion_bytea(E'null\\000byte'); @@ -157,7 +157,7 @@ SELECT * FROM test_type_conversion_bytea(null); CREATE FUNCTION test_type_marshal() RETURNS bytea AS $$ import marshal return marshal.dumps('hello world') -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE FUNCTION test_type_unmarshal(x bytea) RETURNS text AS $$ import marshal @@ -165,7 +165,7 @@ try: return marshal.loads(x) except ValueError as e: return 'FAILED: ' + str(e) -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT test_type_unmarshal(x) FROM test_type_marshal() x; @@ -178,7 +178,7 @@ CREATE DOMAIN booltrue AS bool CHECK (VALUE IS TRUE OR VALUE IS NULL); CREATE FUNCTION test_type_conversion_booltrue(x booltrue, y bool) RETURNS booltrue AS $$ return y -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_booltrue(true, true); SELECT * FROM test_type_conversion_booltrue(false, true); @@ -190,7 +190,7 @@ CREATE DOMAIN uint2 AS int2 CHECK (VALUE >= 0); CREATE FUNCTION test_type_conversion_uint2(x uint2, y int) RETURNS uint2 AS $$ plpy.info(x, type(x)) return y -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_uint2(100::uint2, 50); SELECT * FROM test_type_conversion_uint2(100::uint2, -50); @@ -201,7 +201,7 @@ CREATE DOMAIN nnint AS int CHECK (VALUE IS NOT NULL); CREATE FUNCTION test_type_conversion_nnint(x nnint, y int) RETURNS nnint AS $$ return y -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_nnint(10, 20); SELECT * FROM test_type_conversion_nnint(null, 20); @@ -213,7 +213,7 @@ CREATE DOMAIN bytea10 AS bytea CHECK (octet_length(VALUE) = 10 AND VALUE IS NOT CREATE FUNCTION test_type_conversion_bytea10(x bytea10, y bytea) RETURNS bytea10 AS $$ plpy.info(x, type(x)) return y -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_bytea10('hello wold', 'hello wold'); SELECT * FROM test_type_conversion_bytea10('hello world', 'hello wold'); @@ -229,7 +229,7 @@ SELECT * FROM test_type_conversion_bytea10('hello word', null); CREATE FUNCTION test_type_conversion_array_int4(x int4[]) RETURNS int4[] AS $$ plpy.info(x, type(x)) return x -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_array_int4(ARRAY[0, 100]); SELECT * FROM test_type_conversion_array_int4(ARRAY[0,-100,55]); @@ -243,14 +243,14 @@ SELECT * FROM test_type_conversion_array_int4('[2:4]={1,2,3}'); CREATE FUNCTION test_type_conversion_array_int8(x int8[]) RETURNS int8[] AS $$ plpy.info(x, type(x)) return x -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_array_int8(ARRAY[[[1,2,NULL],[NULL,5,6]],[[NULL,8,9],[10,11,12]]]::int8[]); CREATE FUNCTION test_type_conversion_array_date(x date[]) RETURNS date[] AS $$ plpy.info(x, type(x)) return x -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM 
test_type_conversion_array_date(ARRAY[[['2016-09-21','2016-09-22',NULL],[NULL,'2016-10-21','2016-10-22']], [[NULL,'2016-11-21','2016-10-21'],['2015-09-21','2015-09-22','2014-09-21']]]::date[]); @@ -258,7 +258,7 @@ SELECT * FROM test_type_conversion_array_date(ARRAY[[['2016-09-21','2016-09-22', CREATE FUNCTION test_type_conversion_array_timestamp(x timestamp[]) RETURNS timestamp[] AS $$ plpy.info(x, type(x)) return x -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_array_timestamp(ARRAY[[['2016-09-21 15:34:24.078792-04','2016-10-22 11:34:24.078795-04',NULL], [NULL,'2016-10-21 11:34:25.078792-04','2016-10-21 11:34:24.098792-04']], @@ -270,7 +270,7 @@ CREATE OR REPLACE FUNCTION pyreturnmultidemint4(h int4, i int4, j int4, k int4 ) m = [[[[x for x in range(h)] for y in range(i)] for z in range(j)] for w in range(k)] plpy.info(m, type(m)) return m -$BODY$ LANGUAGE plpythonu; +$BODY$ LANGUAGE plpython3u; select pyreturnmultidemint4(8,5,3,2); @@ -278,7 +278,7 @@ CREATE OR REPLACE FUNCTION pyreturnmultidemint8(h int4, i int4, j int4, k int4 ) m = [[[[x for x in range(h)] for y in range(i)] for z in range(j)] for w in range(k)] plpy.info(m, type(m)) return m -$BODY$ LANGUAGE plpythonu; +$BODY$ LANGUAGE plpython3u; select pyreturnmultidemint8(5,5,3,2); @@ -286,7 +286,7 @@ CREATE OR REPLACE FUNCTION pyreturnmultidemfloat4(h int4, i int4, j int4, k int4 m = [[[[x for x in range(h)] for y in range(i)] for z in range(j)] for w in range(k)] plpy.info(m, type(m)) return m -$BODY$ LANGUAGE plpythonu; +$BODY$ LANGUAGE plpython3u; select pyreturnmultidemfloat4(6,5,3,2); @@ -294,14 +294,14 @@ CREATE OR REPLACE FUNCTION pyreturnmultidemfloat8(h int4, i int4, j int4, k int4 m = [[[[x for x in range(h)] for y in range(i)] for z in range(j)] for w in range(k)] plpy.info(m, type(m)) return m -$BODY$ LANGUAGE plpythonu; +$BODY$ LANGUAGE plpython3u; select pyreturnmultidemfloat8(7,5,3,2); CREATE FUNCTION test_type_conversion_array_text(x text[]) RETURNS text[] AS $$ plpy.info(x, type(x)) return x -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_array_text(ARRAY['foo', 'bar']); SELECT * FROM test_type_conversion_array_text(ARRAY[['foo', 'bar'],['foo2', 'bar2']]); @@ -310,59 +310,59 @@ SELECT * FROM test_type_conversion_array_text(ARRAY[['foo', 'bar'],['foo2', 'bar CREATE FUNCTION test_type_conversion_array_bytea(x bytea[]) RETURNS bytea[] AS $$ plpy.info(x, type(x)) return x -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_array_bytea(ARRAY[E'\\xdeadbeef'::bytea, NULL]); CREATE FUNCTION test_type_conversion_array_mixed1() RETURNS text[] AS $$ return [123, 'abc'] -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_array_mixed1(); CREATE FUNCTION test_type_conversion_array_mixed2() RETURNS int[] AS $$ return [123, 'abc'] -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_array_mixed2(); CREATE FUNCTION test_type_conversion_mdarray_malformed() RETURNS int[] AS $$ return [[1,2,3],[4,5]] -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_mdarray_malformed(); CREATE FUNCTION test_type_conversion_mdarray_toodeep() RETURNS int[] AS $$ return [[[[[[[1]]]]]]] -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_mdarray_toodeep(); CREATE FUNCTION test_type_conversion_array_record() RETURNS type_record[] AS $$ return [{'first': 'one', 'second': 42}, {'first': 'two', 'second': 11}] -$$ LANGUAGE 
plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_array_record(); CREATE FUNCTION test_type_conversion_array_string() RETURNS text[] AS $$ return 'abc' -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_array_string(); CREATE FUNCTION test_type_conversion_array_tuple() RETURNS text[] AS $$ return ('abc', 'def') -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_array_tuple(); CREATE FUNCTION test_type_conversion_array_error() RETURNS int[] AS $$ return 5 -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_array_error(); @@ -376,14 +376,14 @@ CREATE DOMAIN ordered_pair_domain AS integer[] CHECK (array_length(VALUE,1)=2 AN CREATE FUNCTION test_type_conversion_array_domain(x ordered_pair_domain) RETURNS ordered_pair_domain AS $$ plpy.info(x, type(x)) return x -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_array_domain(ARRAY[0, 100]::ordered_pair_domain); SELECT * FROM test_type_conversion_array_domain(NULL::ordered_pair_domain); CREATE FUNCTION test_type_conversion_array_domain_check_violation() RETURNS ordered_pair_domain AS $$ return [2,1] -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_array_domain_check_violation(); @@ -394,13 +394,13 @@ SELECT * FROM test_type_conversion_array_domain_check_violation(); CREATE FUNCTION test_read_uint2_array(x uint2[]) RETURNS uint2 AS $$ plpy.info(x, type(x)) return x[0] -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; select test_read_uint2_array(array[1::uint2]); CREATE FUNCTION test_build_uint2_array(x int2) RETURNS uint2[] AS $$ return [x, x] -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; select test_build_uint2_array(1::int2); select test_build_uint2_array(-1::int2); -- fail @@ -413,7 +413,7 @@ select test_build_uint2_array(-1::int2); -- fail CREATE FUNCTION test_type_conversion_domain_array(x integer[]) RETURNS ordered_pair_domain[] AS $$ return [x, x] -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; select test_type_conversion_domain_array(array[2,4]); select test_type_conversion_domain_array(array[4,2]); -- fail @@ -422,7 +422,7 @@ CREATE FUNCTION test_type_conversion_domain_array2(x ordered_pair_domain) RETURNS integer AS $$ plpy.info(x, type(x)) return x[1] -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; select test_type_conversion_domain_array2(array[2,4]); select test_type_conversion_domain_array2(array[4,2]); -- fail @@ -431,7 +431,7 @@ CREATE FUNCTION test_type_conversion_array_domain_array(x ordered_pair_domain[]) RETURNS ordered_pair_domain AS $$ plpy.info(x, type(x)) return x[0] -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; select test_type_conversion_array_domain_array(array[array[2,4]::ordered_pair_domain]); @@ -450,7 +450,7 @@ INSERT INTO employee VALUES ('John', 100, 10), ('Mary', 200, 10); CREATE OR REPLACE FUNCTION test_composite_table_input(e employee) RETURNS integer AS $$ return e['basesalary'] + e['bonus'] -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT name, test_composite_table_input(employee.*) FROM employee; @@ -470,7 +470,7 @@ CREATE TYPE named_pair AS ( CREATE OR REPLACE FUNCTION test_composite_type_input(p named_pair) RETURNS integer AS $$ return sum(p.values()) -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT test_composite_type_input(row(1, 2)); @@ -487,7 +487,7 @@ CREATE TYPE nnint_container AS (f1 int, f2 nnint); CREATE FUNCTION nnint_test(x int, y int) RETURNS nnint_container AS $$ return {'f1': x, 'f2': y} 
-$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT nnint_test(null, 3); SELECT nnint_test(3, null); -- fail @@ -501,21 +501,21 @@ CREATE DOMAIN ordered_named_pair AS named_pair_2 CHECK((VALUE).i <= (VALUE).j); CREATE FUNCTION read_ordered_named_pair(p ordered_named_pair) RETURNS integer AS $$ return p['i'] + p['j'] -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT read_ordered_named_pair(row(1, 2)); SELECT read_ordered_named_pair(row(2, 1)); -- fail CREATE FUNCTION build_ordered_named_pair(i int, j int) RETURNS ordered_named_pair AS $$ return {'i': i, 'j': j} -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT build_ordered_named_pair(1,2); SELECT build_ordered_named_pair(2,1); -- fail CREATE FUNCTION build_ordered_named_pairs(i int, j int) RETURNS ordered_named_pair[] AS $$ return [{'i': i, 'j': j}, {'i': i, 'j': j+1}] -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT build_ordered_named_pairs(1,2); SELECT build_ordered_named_pairs(2,1); -- fail @@ -526,7 +526,7 @@ SELECT build_ordered_named_pairs(2,1); -- fail -- CREATE OR REPLACE FUNCTION test_prep_bool_input() RETURNS int -LANGUAGE plpythonu +LANGUAGE plpython3u AS $$ plan = plpy.prepare("SELECT CASE WHEN $1 THEN 1 ELSE 0 END AS val", ['boolean']) rv = plpy.execute(plan, ['fa'], 5) # 'fa' is true in Python @@ -537,7 +537,7 @@ SELECT test_prep_bool_input(); -- 1 CREATE OR REPLACE FUNCTION test_prep_bool_output() RETURNS bool -LANGUAGE plpythonu +LANGUAGE plpython3u AS $$ plan = plpy.prepare("SELECT $1 = 1 AS val", ['int']) rv = plpy.execute(plan, [0], 5) @@ -549,7 +549,7 @@ SELECT test_prep_bool_output(); -- false CREATE OR REPLACE FUNCTION test_prep_bytea_input(bb bytea) RETURNS int -LANGUAGE plpythonu +LANGUAGE plpython3u AS $$ plan = plpy.prepare("SELECT octet_length($1) AS val", ['bytea']) rv = plpy.execute(plan, [bb], 5) @@ -560,7 +560,7 @@ SELECT test_prep_bytea_input(E'a\\000b'); -- 3 (embedded null formerly truncated CREATE OR REPLACE FUNCTION test_prep_bytea_output() RETURNS bytea -LANGUAGE plpythonu +LANGUAGE plpython3u AS $$ plan = plpy.prepare("SELECT decode('aa00bb', 'hex') AS val") rv = plpy.execute(plan, [], 5) diff --git a/src/pl/plpython/sql/plpython_unicode.sql b/src/pl/plpython/sql/plpython_unicode.sql index a11e5eeaa2..14f7b4e005 100644 --- a/src/pl/plpython/sql/plpython_unicode.sql +++ b/src/pl/plpython/sql/plpython_unicode.sql @@ -14,28 +14,28 @@ CREATE TABLE unicode_test ( ); CREATE FUNCTION unicode_return() RETURNS text AS E' -return u"\\xA0" -' LANGUAGE plpythonu; +return "\\xA0" +' LANGUAGE plpython3u; CREATE FUNCTION unicode_trigger() RETURNS trigger AS E' -TD["new"]["testvalue"] = u"\\xA0" +TD["new"]["testvalue"] = "\\xA0" return "MODIFY" -' LANGUAGE plpythonu; +' LANGUAGE plpython3u; CREATE TRIGGER unicode_test_bi BEFORE INSERT ON unicode_test FOR EACH ROW EXECUTE PROCEDURE unicode_trigger(); CREATE FUNCTION unicode_plan1() RETURNS text AS E' plan = plpy.prepare("SELECT $1 AS testvalue", ["text"]) -rv = plpy.execute(plan, [u"\\xA0"], 1) +rv = plpy.execute(plan, ["\\xA0"], 1) return rv[0]["testvalue"] -' LANGUAGE plpythonu; +' LANGUAGE plpython3u; CREATE FUNCTION unicode_plan2() RETURNS text AS E' -plan = plpy.prepare("SELECT $1 || $2 AS testvalue", ["text", u"text"]) +plan = plpy.prepare("SELECT $1 || $2 AS testvalue", ["text", "text"]) rv = plpy.execute(plan, ["foo", "bar"], 1) return rv[0]["testvalue"] -' LANGUAGE plpythonu; +' LANGUAGE plpython3u; SELECT unicode_return(); diff --git a/src/pl/plpython/sql/plpython_void.sql b/src/pl/plpython/sql/plpython_void.sql index 
77d7f59e4c..5a1a6711fb 100644
--- a/src/pl/plpython/sql/plpython_void.sql
+++ b/src/pl/plpython/sql/plpython_void.sql
@@ -4,16 +4,16 @@
 CREATE FUNCTION test_void_func1() RETURNS void AS $$
 x = 10
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 -- illegal: can't return non-None value in void-returning func
 CREATE FUNCTION test_void_func2() RETURNS void AS $$
 return 10
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE FUNCTION test_return_none() RETURNS int AS $$
 None
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 
 -- Tests for functions returning void
 
diff --git a/src/tools/msvc/vcregress.pl b/src/tools/msvc/vcregress.pl
index e2b0db0879..625f6fb88a 100644
--- a/src/tools/msvc/vcregress.pl
+++ b/src/tools/msvc/vcregress.pl
@@ -314,55 +314,6 @@ sub taptest
 	return;
 }
 
-sub mangle_plpython3
-{
-	my $tests = shift;
-	mkdir "results" unless -d "results";
-	mkdir "sql/python3";
-	mkdir "results/python3";
-	mkdir "expected/python3";
-
-	foreach my $test (@$tests)
-	{
-		local $/ = undef;
-		foreach my $dir ('sql', 'expected')
-		{
-			my $extension = ($dir eq 'sql' ? 'sql' : 'out');
-
-			my @files =
-			  glob("$dir/$test.$extension $dir/${test}_[0-9].$extension");
-			foreach my $file (@files)
-			{
-				open(my $handle, '<', $file)
-				  || die "test file $file not found";
-				my $contents = <$handle>;
-				close($handle);
-				do
-				{
-					s/<type 'exceptions\.([[:alpha:]]*)'>/<class '$1'>/g;
-					s/<type 'long'>/<class 'int'>/g;
-					s/([0-9][0-9]*)L/$1/g;
-					s/([ [{])u"/$1"/g;
-					s/([ [{])u'/$1'/g;
-					s/def next/def __next__/g;
-					s/LANGUAGE plpython2?u/LANGUAGE plpython3u/g;
-					s/EXTENSION (\S*?)plpython2?u/EXTENSION $1plpython3u/g;
-					s/installing required extension "plpython2u"/installing required extension "plpython3u"/g;
-				}
-				  for ($contents);
-				my $base = basename $file;
-				open($handle, '>', "$dir/python3/$base")
-				  || die "opening python 3 file for $file";
-				print $handle $contents;
-				close($handle);
-			}
-		}
-	}
-	do { s!^!python3/!; }
-	  foreach (@$tests);
-	return @$tests;
-}
-
 sub plcheck
 {
 	chdir "$topdir/src/pl";
@@ -386,8 +337,7 @@ sub plcheck
 		if ($lang eq 'plpython')
 		{
 			next
-			  unless -d "$topdir/$Config/plpython2"
-			  || -d "$topdir/$Config/plpython3";
+			  unless -d "$topdir/$Config/plpython3";
 			$lang = 'plpythonu';
 		}
 		else
@@ -397,8 +347,6 @@ sub plcheck
 		my @lang_args = ("--load-extension=$lang");
 		chdir $dir;
 		my @tests = fetchTests();
-		@tests = mangle_plpython3(\@tests)
-		  if $lang eq 'plpythonu' && -d "$topdir/$Config/plpython3";
 
 		if ($lang eq 'plperl')
 		{
@@ -462,28 +410,6 @@ sub subdircheck
 
 	my @opts = fetchRegressOpts();
 
-	# Special processing for python transform modules, see their respective
-	# Makefiles for more details regarding Python-version specific
-	# dependencies.
-	if ($module =~ /_plpython$/)
-	{
-		die "Python not enabled in configuration"
-		  if !defined($config->{python});
-
-		@opts = grep { $_ !~ /plpythonu/ } @opts;
-
-		if (-d "$topdir/$Config/plpython2")
-		{
-			push @opts, "--load-extension=plpythonu";
-			push @opts, '--load-extension=' . $module . 'u';
-		}
-		else
-		{
-			# must be python 3
-			@tests = mangle_plpython3(\@tests);
-		}
-	}
-
 	print "============================================================\n";
 	print "Checking $module\n";
 	my @args = (

From 9b7e24a2cb37fb52af13219f625cd719e364a346 Mon Sep 17 00:00:00 2001
From: Andres Freund
Date: Mon, 7 Mar 2022 18:30:28 -0800
Subject: [PATCH 098/108] plpython: Code cleanup related to removal of Python 2 support.

Since 19252e8ec93 we reject Python 2 during build configuration. Now that
the dust on the buildfarm has settled, remove Python 2 specific code,
including the "Python 2/3 porting layer".
The code to detect conflicts between plpython using Python 2 and 3 is not removed, in case somebody creates an out-of-tree version adding back support for Python 2. Reviewed-By: Peter Eisentraut Reviewed-By: Tom Lane Discussion: https://postgr.es/m/20211031184548.g4sxfe47n2kyi55r@alap3.anarazel.de --- contrib/hstore_plpython/hstore_plpython.c | 12 ++--- contrib/jsonb_plpython/jsonb_plpython.c | 27 +++++------ contrib/ltree_plpython/ltree_plpython.c | 6 +-- src/pl/plpython/plpy_cursorobject.c | 8 +-- src/pl/plpython/plpy_elog.c | 26 +++++----- src/pl/plpython/plpy_exec.c | 44 ++++++++--------- src/pl/plpython/plpy_main.c | 59 ++++------------------- src/pl/plpython/plpy_planobject.c | 2 +- src/pl/plpython/plpy_plpymodule.c | 32 +++--------- src/pl/plpython/plpy_plpymodule.h | 2 - src/pl/plpython/plpy_resultobject.c | 16 ++---- src/pl/plpython/plpy_spi.c | 10 ++-- src/pl/plpython/plpy_typeio.c | 40 ++++++--------- src/pl/plpython/plpy_util.c | 9 ---- src/pl/plpython/plpy_util.h | 2 - src/pl/plpython/plpython.h | 34 +------------ 16 files changed, 95 insertions(+), 234 deletions(-) diff --git a/contrib/hstore_plpython/hstore_plpython.c b/contrib/hstore_plpython/hstore_plpython.c index 39bad55802..889ece315d 100644 --- a/contrib/hstore_plpython/hstore_plpython.c +++ b/contrib/hstore_plpython/hstore_plpython.c @@ -12,10 +12,8 @@ extern void _PG_init(void); /* Linkage to functions in plpython module */ typedef char *(*PLyObject_AsString_t) (PyObject *plrv); static PLyObject_AsString_t PLyObject_AsString_p; -#if PY_MAJOR_VERSION >= 3 typedef PyObject *(*PLyUnicode_FromStringAndSize_t) (const char *s, Py_ssize_t size); static PLyUnicode_FromStringAndSize_t PLyUnicode_FromStringAndSize_p; -#endif /* Linkage to functions in hstore module */ typedef HStore *(*hstoreUpgrade_t) (Datum orig); @@ -41,12 +39,10 @@ _PG_init(void) PLyObject_AsString_p = (PLyObject_AsString_t) load_external_function("$libdir/" PLPYTHON_LIBNAME, "PLyObject_AsString", true, NULL); -#if PY_MAJOR_VERSION >= 3 AssertVariableIsOfType(&PLyUnicode_FromStringAndSize, PLyUnicode_FromStringAndSize_t); PLyUnicode_FromStringAndSize_p = (PLyUnicode_FromStringAndSize_t) load_external_function("$libdir/" PLPYTHON_LIBNAME, "PLyUnicode_FromStringAndSize", true, NULL); -#endif AssertVariableIsOfType(&hstoreUpgrade, hstoreUpgrade_t); hstoreUpgrade_p = (hstoreUpgrade_t) load_external_function("$libdir/hstore", "hstoreUpgrade", @@ -102,16 +98,16 @@ hstore_to_plpython(PG_FUNCTION_ARGS) { PyObject *key; - key = PyString_FromStringAndSize(HSTORE_KEY(entries, base, i), - HSTORE_KEYLEN(entries, i)); + key = PLyUnicode_FromStringAndSize(HSTORE_KEY(entries, base, i), + HSTORE_KEYLEN(entries, i)); if (HSTORE_VALISNULL(entries, i)) PyDict_SetItem(dict, key, Py_None); else { PyObject *value; - value = PyString_FromStringAndSize(HSTORE_VAL(entries, base, i), - HSTORE_VALLEN(entries, i)); + value = PLyUnicode_FromStringAndSize(HSTORE_VAL(entries, base, i), + HSTORE_VALLEN(entries, i)); PyDict_SetItem(dict, key, value); Py_XDECREF(value); } diff --git a/contrib/jsonb_plpython/jsonb_plpython.c b/contrib/jsonb_plpython/jsonb_plpython.c index 836c178770..03bbfa87d9 100644 --- a/contrib/jsonb_plpython/jsonb_plpython.c +++ b/contrib/jsonb_plpython/jsonb_plpython.c @@ -28,11 +28,9 @@ static PyObject *PLyObject_FromJsonbContainer(JsonbContainer *jsonb); static JsonbValue *PLyObject_ToJsonbValue(PyObject *obj, JsonbParseState **jsonb_state, bool is_elem); -#if PY_MAJOR_VERSION >= 3 typedef PyObject *(*PLyUnicode_FromStringAndSize_t) (const char *s, Py_ssize_t size); 
static PLyUnicode_FromStringAndSize_t PLyUnicode_FromStringAndSize_p; -#endif /* * Module initialize function: fetch function pointers for cross-module calls. @@ -45,13 +43,10 @@ _PG_init(void) PLyObject_AsString_p = (PLyObject_AsString_t) load_external_function("$libdir/" PLPYTHON_LIBNAME, "PLyObject_AsString", true, NULL); -#if PY_MAJOR_VERSION >= 3 AssertVariableIsOfType(&PLyUnicode_FromStringAndSize, PLyUnicode_FromStringAndSize_t); PLyUnicode_FromStringAndSize_p = (PLyUnicode_FromStringAndSize_t) load_external_function("$libdir/" PLPYTHON_LIBNAME, "PLyUnicode_FromStringAndSize", true, NULL); -#endif - AssertVariableIsOfType(&PLy_elog_impl, PLy_elog_impl_t); PLy_elog_impl_p = (PLy_elog_impl_t) load_external_function("$libdir/" PLPYTHON_LIBNAME, "PLy_elog_impl", @@ -65,25 +60,25 @@ _PG_init(void) #define PLy_elog (PLy_elog_impl_p) /* - * PLyString_FromJsonbValue + * PLyUnicode_FromJsonbValue * * Transform string JsonbValue to Python string. */ static PyObject * -PLyString_FromJsonbValue(JsonbValue *jbv) +PLyUnicode_FromJsonbValue(JsonbValue *jbv) { Assert(jbv->type == jbvString); - return PyString_FromStringAndSize(jbv->val.string.val, jbv->val.string.len); + return PLyUnicode_FromStringAndSize(jbv->val.string.val, jbv->val.string.len); } /* - * PLyString_ToJsonbValue + * PLyUnicode_ToJsonbValue * * Transform Python string to JsonbValue. */ static void -PLyString_ToJsonbValue(PyObject *obj, JsonbValue *jbvElem) +PLyUnicode_ToJsonbValue(PyObject *obj, JsonbValue *jbvElem) { jbvElem->type = jbvString; jbvElem->val.string.val = PLyObject_AsString(obj); @@ -118,7 +113,7 @@ PLyObject_FromJsonbValue(JsonbValue *jsonbValue) } case jbvString: - return PLyString_FromJsonbValue(jsonbValue); + return PLyUnicode_FromJsonbValue(jsonbValue); case jbvBool: if (jsonbValue->val.boolean) @@ -210,7 +205,7 @@ PLyObject_FromJsonbContainer(JsonbContainer *jsonb) if (r != WJB_KEY) continue; - key = PLyString_FromJsonbValue(&v); + key = PLyUnicode_FromJsonbValue(&v); if (!key) { Py_XDECREF(result_v); @@ -298,7 +293,7 @@ PLyMapping_ToJsonbValue(PyObject *obj, JsonbParseState **jsonb_state) else { /* All others types of keys we serialize to string */ - PLyString_ToJsonbValue(key, &jbvKey); + PLyUnicode_ToJsonbValue(key, &jbvKey); } (void) pushJsonbValue(jsonb_state, WJB_KEY, &jbvKey); @@ -415,7 +410,7 @@ PLyObject_ToJsonbValue(PyObject *obj, JsonbParseState **jsonb_state, bool is_ele { JsonbValue *out; - if (!(PyString_Check(obj) || PyUnicode_Check(obj))) + if (!PyUnicode_Check(obj)) { if (PySequence_Check(obj)) return PLySequence_ToJsonbValue(obj, jsonb_state); @@ -427,8 +422,8 @@ PLyObject_ToJsonbValue(PyObject *obj, JsonbParseState **jsonb_state, bool is_ele if (obj == Py_None) out->type = jbvNull; - else if (PyString_Check(obj) || PyUnicode_Check(obj)) - PLyString_ToJsonbValue(obj, out); + else if (PyUnicode_Check(obj)) + PLyUnicode_ToJsonbValue(obj, out); /* * PyNumber_Check() returns true for booleans, so boolean check should diff --git a/contrib/ltree_plpython/ltree_plpython.c b/contrib/ltree_plpython/ltree_plpython.c index 1570e77dd9..7431a1150a 100644 --- a/contrib/ltree_plpython/ltree_plpython.c +++ b/contrib/ltree_plpython/ltree_plpython.c @@ -9,10 +9,8 @@ PG_MODULE_MAGIC; extern void _PG_init(void); /* Linkage to functions in plpython module */ -#if PY_MAJOR_VERSION >= 3 typedef PyObject *(*PLyUnicode_FromStringAndSize_t) (const char *s, Py_ssize_t size); static PLyUnicode_FromStringAndSize_t PLyUnicode_FromStringAndSize_p; -#endif /* @@ -22,12 +20,10 @@ void _PG_init(void) { /* Asserts verify that 
typedefs above match original declarations */ -#if PY_MAJOR_VERSION >= 3 AssertVariableIsOfType(&PLyUnicode_FromStringAndSize, PLyUnicode_FromStringAndSize_t); PLyUnicode_FromStringAndSize_p = (PLyUnicode_FromStringAndSize_t) load_external_function("$libdir/" PLPYTHON_LIBNAME, "PLyUnicode_FromStringAndSize", true, NULL); -#endif } @@ -54,7 +50,7 @@ ltree_to_plpython(PG_FUNCTION_ARGS) curlevel = LTREE_FIRST(in); for (i = 0; i < in->numlevel; i++) { - PyList_SetItem(list, i, PyString_FromStringAndSize(curlevel->name, curlevel->len)); + PyList_SetItem(list, i, PLyUnicode_FromStringAndSize(curlevel->name, curlevel->len)); curlevel = LEVEL_NEXT(curlevel); } diff --git a/src/pl/plpython/plpy_cursorobject.c b/src/pl/plpython/plpy_cursorobject.c index 08d8b607e3..6b6e743345 100644 --- a/src/pl/plpython/plpy_cursorobject.c +++ b/src/pl/plpython/plpy_cursorobject.c @@ -40,7 +40,7 @@ static PyTypeObject PLy_CursorType = { .tp_name = "PLyCursor", .tp_basicsize = sizeof(PLyCursorObject), .tp_dealloc = PLy_cursor_dealloc, - .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_ITER, + .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, .tp_doc = PLy_cursor_doc, .tp_iter = PyObject_SelfIter, .tp_iternext = PLy_cursor_iternext, @@ -150,7 +150,7 @@ PLy_cursor_plan(PyObject *ob, PyObject *args) if (args) { - if (!PySequence_Check(args) || PyString_Check(args) || PyUnicode_Check(args)) + if (!PySequence_Check(args) || PyUnicode_Check(args)) { PLy_exception_set(PyExc_TypeError, "plpy.cursor takes a sequence as its second argument"); return NULL; @@ -169,7 +169,7 @@ PLy_cursor_plan(PyObject *ob, PyObject *args) if (!so) PLy_elog(ERROR, "could not execute plan"); - sv = PyString_AsString(so); + sv = PLyUnicode_AsString(so); PLy_exception_set_plural(PyExc_TypeError, "Expected sequence of %d argument, got %d: %s", "Expected sequence of %d arguments, got %d: %s", @@ -410,7 +410,7 @@ PLy_cursor_fetch(PyObject *self, PyObject *args) SPI_cursor_fetch(portal, true, count); Py_DECREF(ret->status); - ret->status = PyInt_FromLong(SPI_OK_FETCH); + ret->status = PyLong_FromLong(SPI_OK_FETCH); Py_DECREF(ret->nrows); ret->nrows = PyLong_FromUnsignedLongLong(SPI_processed); diff --git a/src/pl/plpython/plpy_elog.c b/src/pl/plpython/plpy_elog.c index 224b8836fb..7c627eacfb 100644 --- a/src/pl/plpython/plpy_elog.c +++ b/src/pl/plpython/plpy_elog.c @@ -193,24 +193,20 @@ PLy_traceback(PyObject *e, PyObject *v, PyObject *tb, e_type_o = PyObject_GetAttrString(e, "__name__"); e_module_o = PyObject_GetAttrString(e, "__module__"); if (e_type_o) - e_type_s = PyString_AsString(e_type_o); + e_type_s = PLyUnicode_AsString(e_type_o); if (e_type_s) - e_module_s = PyString_AsString(e_module_o); + e_module_s = PLyUnicode_AsString(e_module_o); if (v && ((vob = PyObject_Str(v)) != NULL)) - vstr = PyString_AsString(vob); + vstr = PLyUnicode_AsString(vob); else vstr = "unknown"; initStringInfo(&xstr); if (!e_type_s || !e_module_s) { - if (PyString_Check(e)) - /* deprecated string exceptions */ - appendStringInfoString(&xstr, PyString_AsString(e)); - else - /* shouldn't happen */ - appendStringInfoString(&xstr, "unrecognized exception"); + /* shouldn't happen */ + appendStringInfoString(&xstr, "unrecognized exception"); } /* mimics behavior of traceback.format_exception_only */ else if (strcmp(e_module_s, "builtins") == 0 @@ -290,11 +286,11 @@ PLy_traceback(PyObject *e, PyObject *v, PyObject *tb, if (*tb_depth == 1) fname = ""; else - fname = PyString_AsString(name); + fname = PLyUnicode_AsString(name); proname = 
PLy_procedure_name(exec_ctx->curr_proc); - plain_filename = PyString_AsString(filename); - plain_lineno = PyInt_AsLong(lineno); + plain_filename = PLyUnicode_AsString(filename); + plain_lineno = PyLong_AsLong(lineno); if (proname == NULL) appendStringInfo(&tbstr, "\n PL/Python anonymous code block, line %ld, in %s", @@ -365,7 +361,7 @@ PLy_get_sqlerrcode(PyObject *exc, int *sqlerrcode) if (sqlstate == NULL) return; - buffer = PyString_AsString(sqlstate); + buffer = PLyUnicode_AsString(sqlstate); if (strlen(buffer) == 5 && strspn(buffer, "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ") == 5) { @@ -573,7 +569,7 @@ get_string_attr(PyObject *obj, char *attrname, char **str) val = PyObject_GetAttrString(obj, attrname); if (val != NULL && val != Py_None) { - *str = pstrdup(PyString_AsString(val)); + *str = pstrdup(PLyUnicode_AsString(val)); } Py_XDECREF(val); } @@ -589,7 +585,7 @@ set_string_attr(PyObject *obj, char *attrname, char *str) if (str != NULL) { - val = PyString_FromString(str); + val = PLyUnicode_FromString(str); if (!val) return false; } diff --git a/src/pl/plpython/plpy_exec.c b/src/pl/plpython/plpy_exec.c index c6f6a6fbcc..150b3a5977 100644 --- a/src/pl/plpython/plpy_exec.c +++ b/src/pl/plpython/plpy_exec.c @@ -294,7 +294,7 @@ PLy_exec_function(FunctionCallInfo fcinfo, PLyProcedure *proc) /* trigger subhandler * * the python function is expected to return Py_None if the tuple is - * acceptable and unmodified. Otherwise it should return a PyString + * acceptable and unmodified. Otherwise it should return a PyUnicode * object who's value is SKIP, or MODIFY. SKIP means don't perform * this action. MODIFY means the tuple has been modified, so update * tuple and perform action. SKIP and MODIFY assume the trigger fires @@ -360,9 +360,7 @@ PLy_exec_trigger(FunctionCallInfo fcinfo, PLyProcedure *proc) { char *srv; - if (PyString_Check(plrv)) - srv = PyString_AsString(plrv); - else if (PyUnicode_Check(plrv)) + if (PyUnicode_Check(plrv)) srv = PLyUnicode_AsString(plrv); else { @@ -700,35 +698,35 @@ PLy_trigger_build_args(FunctionCallInfo fcinfo, PLyProcedure *proc, HeapTuple *r if (!pltdata) return NULL; - pltname = PyString_FromString(tdata->tg_trigger->tgname); + pltname = PLyUnicode_FromString(tdata->tg_trigger->tgname); PyDict_SetItemString(pltdata, "name", pltname); Py_DECREF(pltname); stroid = DatumGetCString(DirectFunctionCall1(oidout, ObjectIdGetDatum(tdata->tg_relation->rd_id))); - pltrelid = PyString_FromString(stroid); + pltrelid = PLyUnicode_FromString(stroid); PyDict_SetItemString(pltdata, "relid", pltrelid); Py_DECREF(pltrelid); pfree(stroid); stroid = SPI_getrelname(tdata->tg_relation); - plttablename = PyString_FromString(stroid); + plttablename = PLyUnicode_FromString(stroid); PyDict_SetItemString(pltdata, "table_name", plttablename); Py_DECREF(plttablename); pfree(stroid); stroid = SPI_getnspname(tdata->tg_relation); - plttableschema = PyString_FromString(stroid); + plttableschema = PLyUnicode_FromString(stroid); PyDict_SetItemString(pltdata, "table_schema", plttableschema); Py_DECREF(plttableschema); pfree(stroid); if (TRIGGER_FIRED_BEFORE(tdata->tg_event)) - pltwhen = PyString_FromString("BEFORE"); + pltwhen = PLyUnicode_FromString("BEFORE"); else if (TRIGGER_FIRED_AFTER(tdata->tg_event)) - pltwhen = PyString_FromString("AFTER"); + pltwhen = PLyUnicode_FromString("AFTER"); else if (TRIGGER_FIRED_INSTEAD(tdata->tg_event)) - pltwhen = PyString_FromString("INSTEAD OF"); + pltwhen = PLyUnicode_FromString("INSTEAD OF"); else { elog(ERROR, "unrecognized WHEN tg_event: %u", 
tdata->tg_event); @@ -739,7 +737,7 @@ PLy_trigger_build_args(FunctionCallInfo fcinfo, PLyProcedure *proc, HeapTuple *r if (TRIGGER_FIRED_FOR_ROW(tdata->tg_event)) { - pltlevel = PyString_FromString("ROW"); + pltlevel = PLyUnicode_FromString("ROW"); PyDict_SetItemString(pltdata, "level", pltlevel); Py_DECREF(pltlevel); @@ -750,7 +748,7 @@ PLy_trigger_build_args(FunctionCallInfo fcinfo, PLyProcedure *proc, HeapTuple *r if (TRIGGER_FIRED_BY_INSERT(tdata->tg_event)) { - pltevent = PyString_FromString("INSERT"); + pltevent = PLyUnicode_FromString("INSERT"); PyDict_SetItemString(pltdata, "old", Py_None); pytnew = PLy_input_from_tuple(&proc->result_in, @@ -763,7 +761,7 @@ PLy_trigger_build_args(FunctionCallInfo fcinfo, PLyProcedure *proc, HeapTuple *r } else if (TRIGGER_FIRED_BY_DELETE(tdata->tg_event)) { - pltevent = PyString_FromString("DELETE"); + pltevent = PLyUnicode_FromString("DELETE"); PyDict_SetItemString(pltdata, "new", Py_None); pytold = PLy_input_from_tuple(&proc->result_in, @@ -776,7 +774,7 @@ PLy_trigger_build_args(FunctionCallInfo fcinfo, PLyProcedure *proc, HeapTuple *r } else if (TRIGGER_FIRED_BY_UPDATE(tdata->tg_event)) { - pltevent = PyString_FromString("UPDATE"); + pltevent = PLyUnicode_FromString("UPDATE"); pytnew = PLy_input_from_tuple(&proc->result_in, tdata->tg_newtuple, @@ -803,7 +801,7 @@ PLy_trigger_build_args(FunctionCallInfo fcinfo, PLyProcedure *proc, HeapTuple *r } else if (TRIGGER_FIRED_FOR_STATEMENT(tdata->tg_event)) { - pltlevel = PyString_FromString("STATEMENT"); + pltlevel = PLyUnicode_FromString("STATEMENT"); PyDict_SetItemString(pltdata, "level", pltlevel); Py_DECREF(pltlevel); @@ -812,13 +810,13 @@ PLy_trigger_build_args(FunctionCallInfo fcinfo, PLyProcedure *proc, HeapTuple *r *rv = NULL; if (TRIGGER_FIRED_BY_INSERT(tdata->tg_event)) - pltevent = PyString_FromString("INSERT"); + pltevent = PLyUnicode_FromString("INSERT"); else if (TRIGGER_FIRED_BY_DELETE(tdata->tg_event)) - pltevent = PyString_FromString("DELETE"); + pltevent = PLyUnicode_FromString("DELETE"); else if (TRIGGER_FIRED_BY_UPDATE(tdata->tg_event)) - pltevent = PyString_FromString("UPDATE"); + pltevent = PLyUnicode_FromString("UPDATE"); else if (TRIGGER_FIRED_BY_TRUNCATE(tdata->tg_event)) - pltevent = PyString_FromString("TRUNCATE"); + pltevent = PLyUnicode_FromString("TRUNCATE"); else { elog(ERROR, "unrecognized OP tg_event: %u", tdata->tg_event); @@ -847,7 +845,7 @@ PLy_trigger_build_args(FunctionCallInfo fcinfo, PLyProcedure *proc, HeapTuple *r } for (i = 0; i < tdata->tg_trigger->tgnargs; i++) { - pltarg = PyString_FromString(tdata->tg_trigger->tgargs[i]); + pltarg = PLyUnicode_FromString(tdata->tg_trigger->tgargs[i]); /* * stolen, don't Py_DECREF @@ -931,9 +929,7 @@ PLy_modify_tuple(PLyProcedure *proc, PyObject *pltd, TriggerData *tdata, PLyObToDatum *att; platt = PyList_GetItem(plkeys, i); - if (PyString_Check(platt)) - plattstr = PyString_AsString(platt); - else if (PyUnicode_Check(platt)) + if (PyUnicode_Check(platt)) plattstr = PLyUnicode_AsString(platt); else { diff --git a/src/pl/plpython/plpy_main.c b/src/pl/plpython/plpy_main.c index 3eedaa80da..0bce106495 100644 --- a/src/pl/plpython/plpy_main.c +++ b/src/pl/plpython/plpy_main.c @@ -28,27 +28,13 @@ * exported functions */ -#if PY_MAJOR_VERSION >= 3 -/* Use separate names to reduce confusion */ -#define plpython_validator plpython3_validator -#define plpython_call_handler plpython3_call_handler -#define plpython_inline_handler plpython3_inline_handler -#endif - extern void _PG_init(void); PG_MODULE_MAGIC; 
-PG_FUNCTION_INFO_V1(plpython_validator); -PG_FUNCTION_INFO_V1(plpython_call_handler); -PG_FUNCTION_INFO_V1(plpython_inline_handler); - -#if PY_MAJOR_VERSION < 3 -/* Define aliases plpython2_call_handler etc */ -PG_FUNCTION_INFO_V1(plpython2_validator); -PG_FUNCTION_INFO_V1(plpython2_call_handler); -PG_FUNCTION_INFO_V1(plpython2_inline_handler); -#endif +PG_FUNCTION_INFO_V1(plpython3_validator); +PG_FUNCTION_INFO_V1(plpython3_call_handler); +PG_FUNCTION_INFO_V1(plpython3_inline_handler); static bool PLy_procedure_is_trigger(Form_pg_proc procStruct); @@ -82,6 +68,10 @@ _PG_init(void) * the actual failure for later, so that operations like pg_restore can * load more than one plpython library so long as they don't try to do * anything much with the language. + * + * While we only support Python 3 these days, somebody might create an + * out-of-tree version adding back support for Python 2. Conflicts with + * such an extension should be detected. */ bitmask_ptr = (int **) find_rendezvous_variable("plpython_version_bitmask"); if (!(*bitmask_ptr)) /* am I the first? */ @@ -125,13 +115,9 @@ PLy_initialize(void) if (inited) return; -#if PY_MAJOR_VERSION >= 3 PyImport_AppendInittab("plpy", PyInit_plpy); -#endif Py_Initialize(); -#if PY_MAJOR_VERSION >= 3 PyImport_ImportModule("plpy"); -#endif PLy_init_interp(); PLy_init_plpy(); if (PyErr_Occurred()) @@ -171,7 +157,7 @@ PLy_init_interp(void) } Datum -plpython_validator(PG_FUNCTION_ARGS) +plpython3_validator(PG_FUNCTION_ARGS) { Oid funcoid = PG_GETARG_OID(0); HeapTuple tuple; @@ -203,17 +189,8 @@ plpython_validator(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } -#if PY_MAJOR_VERSION < 3 -Datum -plpython2_validator(PG_FUNCTION_ARGS) -{ - /* call plpython validator with our fcinfo so it gets our oid */ - return plpython_validator(fcinfo); -} -#endif /* PY_MAJOR_VERSION < 3 */ - Datum -plpython_call_handler(PG_FUNCTION_ARGS) +plpython3_call_handler(PG_FUNCTION_ARGS) { bool nonatomic; Datum retval; @@ -284,16 +261,8 @@ plpython_call_handler(PG_FUNCTION_ARGS) return retval; } -#if PY_MAJOR_VERSION < 3 Datum -plpython2_call_handler(PG_FUNCTION_ARGS) -{ - return plpython_call_handler(fcinfo); -} -#endif /* PY_MAJOR_VERSION < 3 */ - -Datum -plpython_inline_handler(PG_FUNCTION_ARGS) +plpython3_inline_handler(PG_FUNCTION_ARGS) { LOCAL_FCINFO(fake_fcinfo, 0); InlineCodeBlock *codeblock = (InlineCodeBlock *) DatumGetPointer(PG_GETARG_DATUM(0)); @@ -368,14 +337,6 @@ plpython_inline_handler(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } -#if PY_MAJOR_VERSION < 3 -Datum -plpython2_inline_handler(PG_FUNCTION_ARGS) -{ - return plpython_inline_handler(fcinfo); -} -#endif /* PY_MAJOR_VERSION < 3 */ - static bool PLy_procedure_is_trigger(Form_pg_proc procStruct) { diff --git a/src/pl/plpython/plpy_planobject.c b/src/pl/plpython/plpy_planobject.c index 5951d2a6ff..ec2439c6a1 100644 --- a/src/pl/plpython/plpy_planobject.c +++ b/src/pl/plpython/plpy_planobject.c @@ -119,7 +119,7 @@ PLy_plan_status(PyObject *self, PyObject *args) { Py_INCREF(Py_True); return Py_True; - /* return PyInt_FromLong(self->status); */ + /* return PyLong_FromLong(self->status); */ } return NULL; } diff --git a/src/pl/plpython/plpy_plpymodule.c b/src/pl/plpython/plpy_plpymodule.c index 907f89d153..fa08f0dbfb 100644 --- a/src/pl/plpython/plpy_plpymodule.c +++ b/src/pl/plpython/plpy_plpymodule.c @@ -107,7 +107,6 @@ static PyMethodDef PLy_exc_methods[] = { {NULL, NULL, 0, NULL} }; -#if PY_MAJOR_VERSION >= 3 static PyModuleDef PLy_module = { PyModuleDef_HEAD_INIT, .m_name = "plpy", @@ -139,7 +138,6 @@ PyInit_plpy(void) 
return m; } -#endif /* PY_MAJOR_VERSION >= 3 */ void PLy_init_plpy(void) @@ -148,10 +146,6 @@ PLy_init_plpy(void) *main_dict, *plpy_mod; -#if PY_MAJOR_VERSION < 3 - PyObject *plpy; -#endif - /* * initialize plpy module */ @@ -160,13 +154,7 @@ PLy_init_plpy(void) PLy_subtransaction_init_type(); PLy_cursor_init_type(); -#if PY_MAJOR_VERSION >= 3 PyModule_Create(&PLy_module); - /* for Python 3 we initialized the exceptions in PyInit_plpy */ -#else - plpy = Py_InitModule("plpy", PLy_methods); - PLy_add_exceptions(plpy); -#endif /* PyDict_SetItemString(plpy, "PlanType", (PyObject *) &PLy_PlanType); */ @@ -189,11 +177,7 @@ PLy_add_exceptions(PyObject *plpy) PyObject *excmod; HASHCTL hash_ctl; -#if PY_MAJOR_VERSION < 3 - excmod = Py_InitModule("spiexceptions", PLy_exc_methods); -#else excmod = PyModule_Create(&PLy_exc_module); -#endif if (excmod == NULL) PLy_elog(ERROR, "could not create the spiexceptions module"); @@ -268,7 +252,7 @@ PLy_generate_spi_exceptions(PyObject *mod, PyObject *base) if (dict == NULL) PLy_elog(ERROR, NULL); - sqlstate = PyString_FromString(unpack_sql_state(exception_map[i].sqlstate)); + sqlstate = PLyUnicode_FromString(unpack_sql_state(exception_map[i].sqlstate)); if (sqlstate == NULL) PLy_elog(ERROR, "could not generate SPI exceptions"); @@ -346,7 +330,7 @@ PLy_quote_literal(PyObject *self, PyObject *args) return NULL; quoted = quote_literal_cstr(str); - ret = PyString_FromString(quoted); + ret = PLyUnicode_FromString(quoted); pfree(quoted); return ret; @@ -363,10 +347,10 @@ PLy_quote_nullable(PyObject *self, PyObject *args) return NULL; if (str == NULL) - return PyString_FromString("NULL"); + return PLyUnicode_FromString("NULL"); quoted = quote_literal_cstr(str); - ret = PyString_FromString(quoted); + ret = PLyUnicode_FromString(quoted); pfree(quoted); return ret; @@ -383,7 +367,7 @@ PLy_quote_ident(PyObject *self, PyObject *args) return NULL; quoted = quote_identifier(str); - ret = PyString_FromString(quoted); + ret = PLyUnicode_FromString(quoted); return ret; } @@ -400,7 +384,7 @@ object_to_string(PyObject *obj) { char *str; - str = pstrdup(PyString_AsString(so)); + str = pstrdup(PLyUnicode_AsString(so)); Py_DECREF(so); return str; @@ -444,7 +428,7 @@ PLy_output(volatile int level, PyObject *self, PyObject *args, PyObject *kw) else so = PyObject_Str(args); - if (so == NULL || ((message = PyString_AsString(so)) == NULL)) + if (so == NULL || ((message = PLyUnicode_AsString(so)) == NULL)) { level = ERROR; message = dgettext(TEXTDOMAIN, "could not parse error message in plpy.elog"); @@ -457,7 +441,7 @@ PLy_output(volatile int level, PyObject *self, PyObject *args, PyObject *kw) { while (PyDict_Next(kw, &pos, &key, &value)) { - char *keyword = PyString_AsString(key); + char *keyword = PLyUnicode_AsString(key); if (strcmp(keyword, "message") == 0) { diff --git a/src/pl/plpython/plpy_plpymodule.h b/src/pl/plpython/plpy_plpymodule.h index 54d78101ce..ad6436aca7 100644 --- a/src/pl/plpython/plpy_plpymodule.h +++ b/src/pl/plpython/plpy_plpymodule.h @@ -11,9 +11,7 @@ extern HTAB *PLy_spi_exceptions; -#if PY_MAJOR_VERSION >= 3 PyMODINIT_FUNC PyInit_plpy(void); -#endif extern void PLy_init_plpy(void); #endif /* PLPY_PLPYMODULE_H */ diff --git a/src/pl/plpython/plpy_resultobject.c b/src/pl/plpython/plpy_resultobject.c index 54f39419c8..a8516b2db3 100644 --- a/src/pl/plpython/plpy_resultobject.c +++ b/src/pl/plpython/plpy_resultobject.c @@ -76,7 +76,7 @@ PLy_result_new(void) Py_INCREF(Py_None); ob->status = Py_None; - ob->nrows = PyInt_FromLong(-1); + ob->nrows = 
PyLong_FromLong(-1); ob->rows = PyList_New(0); ob->tupdesc = NULL; if (!ob->rows) @@ -125,7 +125,7 @@ PLy_result_colnames(PyObject *self, PyObject *unused) { Form_pg_attribute attr = TupleDescAttr(ob->tupdesc, i); - PyList_SET_ITEM(list, i, PyString_FromString(NameStr(attr->attname))); + PyList_SET_ITEM(list, i, PLyUnicode_FromString(NameStr(attr->attname))); } return list; @@ -151,7 +151,7 @@ PLy_result_coltypes(PyObject *self, PyObject *unused) { Form_pg_attribute attr = TupleDescAttr(ob->tupdesc, i); - PyList_SET_ITEM(list, i, PyInt_FromLong(attr->atttypid)); + PyList_SET_ITEM(list, i, PyLong_FromLong(attr->atttypid)); } return list; @@ -177,7 +177,7 @@ PLy_result_coltypmods(PyObject *self, PyObject *unused) { Form_pg_attribute attr = TupleDescAttr(ob->tupdesc, i); - PyList_SET_ITEM(list, i, PyInt_FromLong(attr->atttypmod)); + PyList_SET_ITEM(list, i, PyLong_FromLong(attr->atttypmod)); } return list; @@ -226,19 +226,11 @@ PLy_result_str(PyObject *arg) { PLyResultObject *ob = (PLyResultObject *) arg; -#if PY_MAJOR_VERSION >= 3 return PyUnicode_FromFormat("<%s status=%S nrows=%S rows=%S>", Py_TYPE(ob)->tp_name, ob->status, ob->nrows, ob->rows); -#else - return PyString_FromFormat("<%s status=%ld nrows=%ld rows=%s>", - ob->ob_type->tp_name, - PyInt_AsLong(ob->status), - PyInt_AsLong(ob->nrows), - PyString_AsString(PyObject_Str(ob->rows))); -#endif } static PyObject * diff --git a/src/pl/plpython/plpy_spi.c b/src/pl/plpython/plpy_spi.c index 86d70470a7..9a71a42c15 100644 --- a/src/pl/plpython/plpy_spi.c +++ b/src/pl/plpython/plpy_spi.c @@ -90,9 +90,7 @@ PLy_spi_prepare(PyObject *self, PyObject *args) int32 typmod; optr = PySequence_GetItem(list, i); - if (PyString_Check(optr)) - sptr = PyString_AsString(optr); - else if (PyUnicode_Check(optr)) + if (PyUnicode_Check(optr)) sptr = PLyUnicode_AsString(optr); else { @@ -186,7 +184,7 @@ PLy_spi_execute_plan(PyObject *ob, PyObject *list, long limit) if (list != NULL) { - if (!PySequence_Check(list) || PyString_Check(list) || PyUnicode_Check(list)) + if (!PySequence_Check(list) || PyUnicode_Check(list)) { PLy_exception_set(PyExc_TypeError, "plpy.execute takes a sequence as its second argument"); return NULL; @@ -205,7 +203,7 @@ PLy_spi_execute_plan(PyObject *ob, PyObject *list, long limit) if (!so) PLy_elog(ERROR, "could not execute plan"); - sv = PyString_AsString(so); + sv = PLyUnicode_AsString(so); PLy_exception_set_plural(PyExc_TypeError, "Expected sequence of %d argument, got %d: %s", "Expected sequence of %d arguments, got %d: %s", @@ -360,7 +358,7 @@ PLy_spi_execute_fetch_result(SPITupleTable *tuptable, uint64 rows, int status) return NULL; } Py_DECREF(result->status); - result->status = PyInt_FromLong(status); + result->status = PyLong_FromLong(status); if (status > 0 && tuptable == NULL) { diff --git a/src/pl/plpython/plpy_typeio.c b/src/pl/plpython/plpy_typeio.c index 5e807b139f..7018c9d404 100644 --- a/src/pl/plpython/plpy_typeio.c +++ b/src/pl/plpython/plpy_typeio.c @@ -26,12 +26,12 @@ static PyObject *PLyBool_FromBool(PLyDatumToOb *arg, Datum d); static PyObject *PLyFloat_FromFloat4(PLyDatumToOb *arg, Datum d); static PyObject *PLyFloat_FromFloat8(PLyDatumToOb *arg, Datum d); static PyObject *PLyDecimal_FromNumeric(PLyDatumToOb *arg, Datum d); -static PyObject *PLyInt_FromInt16(PLyDatumToOb *arg, Datum d); -static PyObject *PLyInt_FromInt32(PLyDatumToOb *arg, Datum d); +static PyObject *PLyLong_FromInt16(PLyDatumToOb *arg, Datum d); +static PyObject *PLyLong_FromInt32(PLyDatumToOb *arg, Datum d); static PyObject 
*PLyLong_FromInt64(PLyDatumToOb *arg, Datum d); static PyObject *PLyLong_FromOid(PLyDatumToOb *arg, Datum d); static PyObject *PLyBytes_FromBytea(PLyDatumToOb *arg, Datum d); -static PyObject *PLyString_FromScalar(PLyDatumToOb *arg, Datum d); +static PyObject *PLyUnicode_FromScalar(PLyDatumToOb *arg, Datum d); static PyObject *PLyObject_FromTransform(PLyDatumToOb *arg, Datum d); static PyObject *PLyList_FromArray(PLyDatumToOb *arg, Datum d); static PyObject *PLyList_FromArray_recurse(PLyDatumToOb *elm, int *dims, int ndim, int dim, @@ -59,7 +59,7 @@ static void PLySequence_ToArray_recurse(PLyObToDatum *elm, PyObject *list, Datum *elems, bool *nulls, int *currelem); /* conversion from Python objects to composite Datums */ -static Datum PLyString_ToComposite(PLyObToDatum *arg, PyObject *string, bool inarray); +static Datum PLyUnicode_ToComposite(PLyObToDatum *arg, PyObject *string, bool inarray); static Datum PLyMapping_ToComposite(PLyObToDatum *arg, TupleDesc desc, PyObject *mapping); static Datum PLySequence_ToComposite(PLyObToDatum *arg, TupleDesc desc, PyObject *sequence); static Datum PLyGenericObject_ToComposite(PLyObToDatum *arg, TupleDesc desc, PyObject *object, bool inarray); @@ -517,10 +517,10 @@ PLy_input_setup_func(PLyDatumToOb *arg, MemoryContext arg_mcxt, arg->func = PLyDecimal_FromNumeric; break; case INT2OID: - arg->func = PLyInt_FromInt16; + arg->func = PLyLong_FromInt16; break; case INT4OID: - arg->func = PLyInt_FromInt32; + arg->func = PLyLong_FromInt32; break; case INT8OID: arg->func = PLyLong_FromInt64; @@ -532,7 +532,7 @@ PLy_input_setup_func(PLyDatumToOb *arg, MemoryContext arg_mcxt, arg->func = PLyBytes_FromBytea; break; default: - arg->func = PLyString_FromScalar; + arg->func = PLyUnicode_FromScalar; getTypeOutputInfo(typeOid, &typoutput, &typisvarlena); fmgr_info_cxt(typoutput, &arg->u.scalar.typfunc, arg_mcxt); break; @@ -600,15 +600,15 @@ PLyDecimal_FromNumeric(PLyDatumToOb *arg, Datum d) } static PyObject * -PLyInt_FromInt16(PLyDatumToOb *arg, Datum d) +PLyLong_FromInt16(PLyDatumToOb *arg, Datum d) { - return PyInt_FromLong(DatumGetInt16(d)); + return PyLong_FromLong(DatumGetInt16(d)); } static PyObject * -PLyInt_FromInt32(PLyDatumToOb *arg, Datum d) +PLyLong_FromInt32(PLyDatumToOb *arg, Datum d) { - return PyInt_FromLong(DatumGetInt32(d)); + return PyLong_FromLong(DatumGetInt32(d)); } static PyObject * @@ -638,10 +638,10 @@ PLyBytes_FromBytea(PLyDatumToOb *arg, Datum d) * Generic input conversion using a SQL type's output function. */ static PyObject * -PLyString_FromScalar(PLyDatumToOb *arg, Datum d) +PLyUnicode_FromScalar(PLyDatumToOb *arg, Datum d) { char *x = OutputFunctionCall(&arg->u.scalar.typfunc, d); - PyObject *r = PyString_FromString(x); + PyObject *r = PLyUnicode_FromString(x); pfree(x); return r; @@ -954,8 +954,8 @@ PLyObject_ToComposite(PLyObToDatum *arg, PyObject *plrv, * The string conversion case doesn't require a tupdesc, nor per-field * conversion data, so just go for it if that's the case to use. 
*/ - if (PyString_Check(plrv) || PyUnicode_Check(plrv)) - return PLyString_ToComposite(arg, plrv, inarray); + if (PyUnicode_Check(plrv)) + return PLyUnicode_ToComposite(arg, plrv, inarray); /* * If we're dealing with a named composite type, we must look up the @@ -1032,25 +1032,17 @@ PLyObject_AsString(PyObject *plrv) else if (PyFloat_Check(plrv)) { /* use repr() for floats, str() is lossy */ -#if PY_MAJOR_VERSION >= 3 PyObject *s = PyObject_Repr(plrv); plrv_bo = PLyUnicode_Bytes(s); Py_XDECREF(s); -#else - plrv_bo = PyObject_Repr(plrv); -#endif } else { -#if PY_MAJOR_VERSION >= 3 PyObject *s = PyObject_Str(plrv); plrv_bo = PLyUnicode_Bytes(s); Py_XDECREF(s); -#else - plrv_bo = PyObject_Str(plrv); -#endif } if (!plrv_bo) PLy_elog(ERROR, "could not create string representation of Python object"); @@ -1299,7 +1291,7 @@ PLySequence_ToArray_recurse(PLyObToDatum *elm, PyObject *list, * Convert a Python string to composite, using record_in. */ static Datum -PLyString_ToComposite(PLyObToDatum *arg, PyObject *string, bool inarray) +PLyUnicode_ToComposite(PLyObToDatum *arg, PyObject *string, bool inarray) { char *str; diff --git a/src/pl/plpython/plpy_util.c b/src/pl/plpython/plpy_util.c index 4a7d7264d7..22e2a599ad 100644 --- a/src/pl/plpython/plpy_util.c +++ b/src/pl/plpython/plpy_util.c @@ -78,12 +78,6 @@ PLyUnicode_Bytes(PyObject *unicode) * Convert a Python unicode object to a C string in PostgreSQL server * encoding. No Python object reference is passed out of this * function. The result is palloc'ed. - * - * Note that this function is disguised as PyString_AsString() when - * using Python 3. That function returns a pointer into the internal - * memory of the argument, which isn't exactly the interface of this - * function. But in either case you get a rather short-lived - * reference that you ought to better leave alone. */ char * PLyUnicode_AsString(PyObject *unicode) @@ -95,7 +89,6 @@ PLyUnicode_AsString(PyObject *unicode) return rv; } -#if PY_MAJOR_VERSION >= 3 /* * Convert a C string in the PostgreSQL server encoding to a Python * unicode object. Reference ownership is passed to the caller. @@ -126,5 +119,3 @@ PLyUnicode_FromString(const char *s) { return PLyUnicode_FromStringAndSize(s, strlen(s)); } - -#endif /* PY_MAJOR_VERSION >= 3 */ diff --git a/src/pl/plpython/plpy_util.h b/src/pl/plpython/plpy_util.h index c9ba7edc0e..7c6577925e 100644 --- a/src/pl/plpython/plpy_util.h +++ b/src/pl/plpython/plpy_util.h @@ -11,9 +11,7 @@ extern PyObject *PLyUnicode_Bytes(PyObject *unicode); extern char *PLyUnicode_AsString(PyObject *unicode); -#if PY_MAJOR_VERSION >= 3 extern PyObject *PLyUnicode_FromString(const char *s); extern PyObject *PLyUnicode_FromStringAndSize(const char *s, Py_ssize_t size); -#endif #endif /* PLPY_UTIL_H */ diff --git a/src/pl/plpython/plpython.h b/src/pl/plpython/plpython.h index 05e4362dab..2a0c9bf036 100644 --- a/src/pl/plpython/plpython.h +++ b/src/pl/plpython/plpython.h @@ -59,37 +59,6 @@ #include #endif -/* - * Python 2/3 strings/unicode/bytes handling. Python 2 has strings - * and unicode, Python 3 has strings, which are unicode on the C - * level, and bytes. The porting convention, which is similarly used - * in Python 2.6, is that "Unicode" is always unicode, and "Bytes" are - * bytes in Python 3 and strings in Python 2. 
Since we keep - * supporting Python 2 and its usual strings, we provide a - * compatibility layer for Python 3 that when asked to convert a C - * string to a Python string it converts the C string from the - * PostgreSQL server encoding to a Python Unicode object. - */ -#if PY_MAJOR_VERSION >= 3 -#define PyString_Check(x) 0 -#define PyString_AsString(x) PLyUnicode_AsString(x) -#define PyString_FromString(x) PLyUnicode_FromString(x) -#define PyString_FromStringAndSize(x, size) PLyUnicode_FromStringAndSize(x, size) -#endif - -/* - * Python 3 only has long. - */ -#if PY_MAJOR_VERSION >= 3 -#define PyInt_FromLong(x) PyLong_FromLong(x) -#define PyInt_AsLong(x) PyLong_AsLong(x) -#endif - -/* Python 3 removed the Py_TPFLAGS_HAVE_ITER flag */ -#if PY_MAJOR_VERSION >= 3 -#define Py_TPFLAGS_HAVE_ITER 0 -#endif - /* define our text domain for translations */ #undef TEXTDOMAIN #define TEXTDOMAIN PG_TEXTDOMAIN("plpython") @@ -130,8 +99,7 @@ #define printf(...) pg_printf(__VA_ARGS__) /* - * Used throughout, and also by the Python 2/3 porting layer, so it's easier to - * just include it everywhere. + * Used throughout, so it's easier to just include it everywhere. */ #include "plpy_util.h" From 4228cabb72bb57e1df4c9d92613f1fcd4baadd5a Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Mon, 7 Mar 2022 18:30:57 -0800 Subject: [PATCH 099/108] plpython: Adjust docs after removal of Python 2 support. Reviewed-By: Tom Lane Discussion: https://postgr.es/m/20211031184548.g4sxfe47n2kyi55r@alap3.anarazel.de --- doc/src/sgml/hstore.sgml | 8 +- doc/src/sgml/json.sgml | 9 +- doc/src/sgml/ltree.sgml | 10 +- doc/src/sgml/plpython.sgml | 289 +++++-------------------- doc/src/sgml/ref/comment.sgml | 2 +- doc/src/sgml/ref/create_transform.sgml | 6 +- doc/src/sgml/ref/drop_transform.sgml | 4 +- 7 files changed, 71 insertions(+), 257 deletions(-) diff --git a/doc/src/sgml/hstore.sgml b/doc/src/sgml/hstore.sgml index 870063c288..679878b3af 100644 --- a/doc/src/sgml/hstore.sgml +++ b/doc/src/sgml/hstore.sgml @@ -943,12 +943,8 @@ ALTER TABLE tablename ALTER hstorecol TYPE hstore USING hstorecol || ''; and hstore_plperlu, for trusted and untrusted PL/Perl. If you install these transforms and specify them when creating a function, hstore values are mapped to Perl hashes. The - extensions for PL/Python are - called hstore_plpythonu, hstore_plpython2u, - and hstore_plpython3u - (see for the PL/Python naming - convention). If you use them, hstore values are mapped to - Python dictionaries. + extension for PL/Python is called hstore_plpython3u. + If you use it, hstore values are mapped to Python dictionaries. diff --git a/doc/src/sgml/json.sgml b/doc/src/sgml/json.sgml index 673c70c3bb..c4223fafb6 100644 --- a/doc/src/sgml/json.sgml +++ b/doc/src/sgml/json.sgml @@ -716,12 +716,9 @@ UPDATE table_name SET jsonb_field[1]['a'] = '1'; - The extensions for PL/Python are called jsonb_plpythonu, - jsonb_plpython2u, and - jsonb_plpython3u (see for the PL/Python naming convention). If you - use them, jsonb values are mapped to Python dictionaries, - lists, and scalars, as appropriate. + The extension for PL/Python is called jsonb_plpython3u. + If you use it, jsonb values are mapped to Python + dictionaries, lists, and scalars, as appropriate. diff --git a/doc/src/sgml/ltree.sgml b/doc/src/sgml/ltree.sgml index 436be76bfa..508f404ae8 100644 --- a/doc/src/sgml/ltree.sgml +++ b/doc/src/sgml/ltree.sgml @@ -826,19 +826,15 @@ ltreetest=> SELECT ins_label(path,2,'Space') FROM test WHERE path <@ 'Top. 
Transforms - Additional extensions are available that implement transforms for - the ltree type for PL/Python. The extensions are - called ltree_plpythonu, ltree_plpython2u, - and ltree_plpython3u - (see for the PL/Python naming - convention). If you install these transforms and specify them when + The ltree_plpython3u extension implements transforms for + the ltree type for PL/Python. If installed and specified when creating a function, ltree values are mapped to Python lists. (The reverse is currently not supported, however.) - It is strongly recommended that the transform extensions be installed in + It is strongly recommended that the transform extension be installed in the same schema as ltree. Otherwise there are installation-time security hazards if a transform extension's schema contains objects defined by a hostile user. diff --git a/doc/src/sgml/plpython.sgml b/doc/src/sgml/plpython.sgml index b67f8f4aae..54355effd7 100644 --- a/doc/src/sgml/plpython.sgml +++ b/doc/src/sgml/plpython.sgml @@ -14,8 +14,7 @@ To install PL/Python in a particular database, use - CREATE EXTENSION plpythonu (but - see also ). + CREATE EXTENSION plpython3u. @@ -28,14 +27,14 @@ PL/Python is only available as an untrusted language, meaning it does not offer any way of restricting what users can do in it and - is therefore named plpythonu. A trusted + is therefore named plpython3u. A trusted variant plpython might become available in the future if a secure execution mechanism is developed in Python. The writer of a function in untrusted PL/Python must take care that the function cannot be used to do anything unwanted, since it will be able to do anything that could be done by a user logged in as the database administrator. Only superusers can create functions in - untrusted languages such as plpythonu. + untrusted languages such as plpython3u. @@ -47,140 +46,6 @@ - - Python 2 vs. Python 3 - - - PL/Python supports both the Python 2 and Python 3 language - variants. (The PostgreSQL installation instructions might contain - more precise information about the exact supported minor versions - of Python.) Because the Python 2 and Python 3 language variants - are incompatible in some important aspects, the following naming - and transitioning scheme is used by PL/Python to avoid mixing them: - - - - - The PostgreSQL language named plpython2u - implements PL/Python based on the Python 2 language variant. - - - - - - The PostgreSQL language named plpython3u - implements PL/Python based on the Python 3 language variant. - - - - - - The language named plpythonu implements - PL/Python based on the default Python language variant, which is - currently Python 2. (This default is independent of what any - local Python installations might consider to be - their default, for example, - what /usr/bin/python might be.) The - default will probably be changed to Python 3 in a distant future - release of PostgreSQL, depending on the progress of the - migration to Python 3 in the Python community. - - - - - This scheme is analogous to the recommendations in PEP 394 regarding the - naming and transitioning of the python command. - - - - It depends on the build configuration or the installed packages - whether PL/Python for Python 2 or Python 3 or both are available. - - - - - The built variant depends on which Python version was found during - the installation or which version was explicitly set using - the PYTHON environment variable; - see . 
To make both variants of - PL/Python available in one installation, the source tree has to be - configured and built twice. - - - - - This results in the following usage and migration strategy: - - - - - Existing users and users who are currently not interested in - Python 3 use the language name plpythonu and - don't have to change anything for the foreseeable future. It is - recommended to gradually future-proof the code - via migration to Python 2.6/2.7 to simplify the eventual - migration to Python 3. - - - - In practice, many PL/Python functions will migrate to Python 3 - with few or no changes. - - - - - - Users who know that they have heavily Python 2 dependent code - and don't plan to ever change it can make use of - the plpython2u language name. This will - continue to work into the very distant future, until Python 2 - support might be completely dropped by PostgreSQL. - - - - - - Users who want to dive into Python 3 can use - the plpython3u language name, which will keep - working forever by today's standards. In the distant future, - when Python 3 might become the default, they might like to - remove the 3 for aesthetic reasons. - - - - - - Daredevils, who want to build a Python-3-only operating system - environment, can change the contents of - plpythonu's extension control and script files - to make plpythonu be equivalent - to plpython3u, keeping in mind that this - would make their installation incompatible with most of the rest - of the world. - - - - - - - See also the - document What's - New In Python 3.0 for more information about porting to - Python 3. - - - - It is not allowed to use PL/Python based on Python 2 and PL/Python - based on Python 3 in the same session, because the symbols in the - dynamic modules would clash, which could result in crashes of the - PostgreSQL server process. There is a check that prevents mixing - Python major versions in a session, which will abort the session if - a mismatch is detected. It is possible, however, to use both - PL/Python variants in the same database, from separate sessions. - - - PL/Python Functions @@ -193,7 +58,7 @@ CREATE FUNCTION funcname (argument-list< RETURNS return-type AS $$ # PL/Python function body -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; @@ -225,7 +90,7 @@ AS $$ if a > b: return a return b -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; The Python code that is given as the body of the function definition @@ -255,7 +120,7 @@ CREATE FUNCTION pystrip(x text) AS $$ x = x.strip() # error return x -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; because assigning to x makes x a local variable for the entire block, @@ -271,7 +136,7 @@ AS $$ global x x = x.strip() # ok now return x -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; But it is advisable not to rely on this implementation detail of PL/Python. It is better to treat the function parameters as @@ -303,11 +168,8 @@ $$ LANGUAGE plpythonu; - PostgreSQL smallint and int are - converted to Python int. - PostgreSQL bigint and oid are converted - to long in Python 2 and to int in - Python 3. + PostgreSQL smallint, int, bigint + and oid are converted to Python int. @@ -335,19 +197,15 @@ $$ LANGUAGE plpythonu; - PostgreSQL bytea is converted to - Python str in Python 2 and to bytes - in Python 3. In Python 2, the string should be treated as a - byte sequence without any character encoding. + PostgreSQL bytea is converted to Python bytes. - All other data types, including the PostgreSQL character string - types, are converted to a Python str. 
In Python - 2, this string will be in the PostgreSQL server encoding; in - Python 3, it will be a Unicode string like all strings. + All other data types, including the PostgreSQL character string types, + are converted to a Python str (in Unicode like all Python + strings). @@ -375,10 +233,10 @@ $$ LANGUAGE plpythonu; - When the PostgreSQL return type is bytea, the - return value will be converted to a string (Python 2) or bytes - (Python 3) using the respective Python built-ins, with the - result being converted to bytea. + When the PostgreSQL return type is bytea, the return value + will be converted to Python bytes using the respective + Python built-ins, with the result being converted to + bytea. @@ -393,14 +251,8 @@ $$ LANGUAGE plpythonu; - Strings in Python 2 are required to be in the PostgreSQL server - encoding when they are passed to PostgreSQL. Strings that are - not valid in the current server encoding will raise an error, - but not all encoding mismatches can be detected, so garbage - data can still result when this is not done correctly. Unicode - strings are converted to the correct encoding automatically, so - it can be safer and more convenient to use those. In Python 3, - all strings are Unicode strings. + Strings are automatically converted to the PostgreSQL server encoding + when they are passed to PostgreSQL. @@ -440,7 +292,7 @@ AS $$ if a > b: return a return b -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; As shown above, to return an SQL null value from a PL/Python @@ -461,10 +313,10 @@ CREATE FUNCTION return_arr() RETURNS int[] AS $$ return [1, 2, 3, 4, 5] -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT return_arr(); - return_arr + return_arr ------------- {1,2,3,4,5} (1 row) @@ -479,11 +331,11 @@ SELECT return_arr(); CREATE FUNCTION test_type_conversion_array_int4(x int4[]) RETURNS int4[] AS $$ plpy.info(x, type(x)) return x -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_array_int4(ARRAY[[1,2,3],[4,5,6]]); INFO: ([[1, 2, 3], [4, 5, 6]], <type 'list'>) - test_type_conversion_array_int4 + test_type_conversion_array_int4 --------------------------------- {{1,2,3},{4,5,6}} (1 row) @@ -506,7 +358,7 @@ CREATE FUNCTION return_str_arr() RETURNS varchar[] AS $$ return "hello" -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT return_str_arr(); return_str_arr @@ -540,7 +392,7 @@ AS $$ if (e["age"] < 30) and (e["salary"] > 100000): return True return False -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; @@ -574,7 +426,7 @@ CREATE FUNCTION make_pair (name text, value integer) AS $$ return ( name, value ) # or alternatively, as list: return [ name, value ] -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; To return an SQL null for any column, insert None at @@ -600,7 +452,7 @@ CREATE FUNCTION make_pair (name text, value integer) RETURNS named_value AS $$ return { "name": name, "value": value } -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; Any extra dictionary key/value pairs are ignored. 
Missing keys are @@ -633,7 +485,7 @@ AS $$ nv.name = name nv.value = value return nv -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; @@ -646,7 +498,7 @@ $$ LANGUAGE plpythonu; CREATE FUNCTION multiout_simple(OUT i integer, OUT j integer) AS $$ return (1, 2) -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM multiout_simple(); @@ -657,7 +509,7 @@ SELECT * FROM multiout_simple(); CREATE PROCEDURE python_triple(INOUT a integer, INOUT b integer) AS $$ return (a * 3, b * 3) -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CALL python_triple(5, 10); @@ -693,7 +545,7 @@ AS $$ # return tuple containing lists as composite types # all other combinations work also return ( [ how, "World" ], [ how, "PostgreSQL" ], [ how, "PL/Python" ] ) -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; @@ -724,7 +576,7 @@ AS $$ return ( self.how, self.who[self.ndx] ) return producer(how, [ "World", "PostgreSQL", "PL/Python" ]) -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; @@ -740,7 +592,7 @@ CREATE FUNCTION greet (how text) AS $$ for who in [ "World", "PostgreSQL", "PL/Python" ]: yield ( how, who ) -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; @@ -756,7 +608,7 @@ $$ LANGUAGE plpythonu; CREATE FUNCTION multiout_simple_setof(n integer, OUT integer, OUT integer) RETURNS SETOF record AS $$ return [(1, 2)] * n -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; SELECT * FROM multiout_simple_setof(3); @@ -794,7 +646,7 @@ SELECT * FROM multiout_simple_setof(3); DO $$ # PL/Python code -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; An anonymous code block receives no arguments, and whatever value it @@ -1089,7 +941,7 @@ CREATE FUNCTION usesavedplan() RETURNS trigger AS $$ plan = plpy.prepare("SELECT 1") SD["plan"] = plan # rest of function -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; @@ -1132,7 +984,7 @@ for row in plpy.cursor("select num from largetable"): if row['num'] % 2: odd += 1 return odd -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE FUNCTION count_odd_fetch(batch_size integer) RETURNS integer AS $$ odd = 0 @@ -1145,7 +997,7 @@ while True: if row['num'] % 2: odd += 1 return odd -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; CREATE FUNCTION count_odd_prepared() RETURNS integer AS $$ odd = 0 @@ -1153,7 +1005,7 @@ plan = plpy.prepare("select num from largetable where num % $1 <> 0", ["in rows = list(plpy.cursor(plan, [2])) # or: = list(plan.cursor([2])) return len(rows) -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; @@ -1198,7 +1050,7 @@ CREATE FUNCTION try_adding_joe() RETURNS text AS $$ return "something went wrong" else: return "Joe added" -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; @@ -1231,7 +1083,7 @@ except plpy.SPIError as e: return "other error, SQLSTATE %s" % e.sqlstate else: return "fraction inserted" -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; Note that because all exceptions from the plpy.spiexceptions module inherit @@ -1280,7 +1132,7 @@ else: result = "funds transferred correctly" plan = plpy.prepare("INSERT INTO operations (result) VALUES ($1)", ["text"]) plpy.execute(plan, [result]) -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; If the second UPDATE statement results in an exception being raised, this function will report the error, but @@ -1312,7 +1164,7 @@ else: result = "funds transferred correctly" plan = plpy.prepare("INSERT INTO operations (result) VALUES ($1)", ["text"]) plpy.execute(plan, [result]) -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; Note that the use of try/catch is still required. 
Otherwise the exception would propagate to the top of @@ -1329,44 +1181,6 @@ $$ LANGUAGE plpythonu; to be rolled back. - - - Older Python Versions - - - Context managers syntax using the with keyword - is available by default in Python 2.6. For compatibility with - older Python versions, you can call the - subtransaction manager's __enter__ and - __exit__ functions using the - enter and exit convenience - aliases. The example function that transfers funds could be - written as: - -CREATE FUNCTION transfer_funds_old() RETURNS void AS $$ -try: - subxact = plpy.subtransaction() - subxact.enter() - try: - plpy.execute("UPDATE accounts SET balance = balance - 100 WHERE account_name = 'joe'") - plpy.execute("UPDATE accounts SET balance = balance + 100 WHERE account_name = 'mary'") - except: - import sys - subxact.exit(*sys.exc_info()) - raise - else: - subxact.exit(None, None, None) -except plpy.SPIError as e: - result = "error transferring funds: %s" % e.args -else: - result = "funds transferred correctly" - -plan = plpy.prepare("INSERT INTO operations (result) VALUES ($1)", ["text"]) -plpy.execute(plan, [result]) -$$ LANGUAGE plpythonu; - - - @@ -1389,7 +1203,7 @@ $$ LANGUAGE plpythonu; Here is an example: CREATE PROCEDURE transaction_test1() -LANGUAGE plpythonu +LANGUAGE plpython3u AS $$ for i in range(0, 10): plpy.execute("INSERT INTO test1 (a) VALUES (%d)" % i) @@ -1465,7 +1279,7 @@ CREATE FUNCTION raise_custom_exception() RETURNS void AS $$ plpy.error("custom exception message", detail="some info about exception", hint="hint for users") -$$ LANGUAGE plpythonu; +$$ LANGUAGE plpython3u; =# SELECT raise_custom_exception(); ERROR: plpy.Error: custom exception message @@ -1496,6 +1310,17 @@ plpy.execute("UPDATE tbl SET %s = %s WHERE key = %s" % ( + + Python 2 vs. Python 3 + + + PL/Python supports only Python 3. Past versions of + PostgreSQL supported Python 2, using the + plpythonu and plpython2u language + names. 
+ + + Environment Variables diff --git a/doc/src/sgml/ref/comment.sgml b/doc/src/sgml/ref/comment.sgml index b12796095f..23d9029af9 100644 --- a/doc/src/sgml/ref/comment.sgml +++ b/doc/src/sgml/ref/comment.sgml @@ -349,7 +349,7 @@ COMMENT ON TEXT SEARCH CONFIGURATION my_config IS 'Special word filtering'; COMMENT ON TEXT SEARCH DICTIONARY swedish IS 'Snowball stemmer for Swedish language'; COMMENT ON TEXT SEARCH PARSER my_parser IS 'Splits text into words'; COMMENT ON TEXT SEARCH TEMPLATE snowball IS 'Snowball stemmer'; -COMMENT ON TRANSFORM FOR hstore LANGUAGE plpythonu IS 'Transform between hstore and Python dict'; +COMMENT ON TRANSFORM FOR hstore LANGUAGE plpython3u IS 'Transform between hstore and Python dict'; COMMENT ON TRIGGER my_trigger ON my_table IS 'Used for RI'; COMMENT ON TYPE complex IS 'Complex number data type'; COMMENT ON VIEW my_view IS 'View of departmental costs'; diff --git a/doc/src/sgml/ref/create_transform.sgml b/doc/src/sgml/ref/create_transform.sgml index 3f81dc6bba..34bdc60e13 100644 --- a/doc/src/sgml/ref/create_transform.sgml +++ b/doc/src/sgml/ref/create_transform.sgml @@ -156,11 +156,11 @@ CREATE [ OR REPLACE ] TRANSFORM FOR type_name LANGUAG To create a transform for type hstore and language - plpythonu, first set up the type and the language: + plpython3u, first set up the type and the language: CREATE TYPE hstore ...; -CREATE EXTENSION plpythonu; +CREATE EXTENSION plpython3u; Then create the necessary functions: @@ -174,7 +174,7 @@ AS ...; And finally create the transform to connect them all together: -CREATE TRANSFORM FOR hstore LANGUAGE plpythonu ( +CREATE TRANSFORM FOR hstore LANGUAGE plpython3u ( FROM SQL WITH FUNCTION hstore_to_plpython(internal), TO SQL WITH FUNCTION plpython_to_hstore(internal) ); diff --git a/doc/src/sgml/ref/drop_transform.sgml b/doc/src/sgml/ref/drop_transform.sgml index d25cb51604..544a9663d7 100644 --- a/doc/src/sgml/ref/drop_transform.sgml +++ b/doc/src/sgml/ref/drop_transform.sgml @@ -101,9 +101,9 @@ DROP TRANSFORM [ IF EXISTS ] FOR type_name LANGUAGE < To drop the transform for type hstore and language - plpythonu: + plpython3u: -DROP TRANSFORM FOR hstore LANGUAGE plpythonu; +DROP TRANSFORM FOR hstore LANGUAGE plpython3u;
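A minimal sketch of the renamed language in action (the function name is illustrative, not taken from the patch): a function written for the old plpythonu typically needs nothing beyond the new language name, since most Python-2-era bodies are already valid Python 3.

CREATE EXTENSION plpython3u;

CREATE FUNCTION pymax (a integer, b integer)
  RETURNS integer
AS $$
if a > b:
    return a
return b
$$ LANGUAGE plpython3u;

SELECT pymax(1, 2);  -- returns 2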
From d3e8368c4b6e5110d8b3d12859850aeaae08dffb Mon Sep 17 00:00:00 2001
From: Amit Kapila
Date: Tue, 8 Mar 2022 08:08:32 +0530
Subject: [PATCH 100/108] Add the additional information to the logical replication worker errcontext.

This commit adds both the finish LSN (commit_lsn in case the transaction
was committed, prepare_lsn in case of a prepared transaction, etc.) and
the replication origin name to the existing error context message. This
will help users specify the origin name and transaction finish LSN for the
pg_replication_origin_advance() SQL function to skip a particular
transaction.

Author: Masahiko Sawada
Reviewed-by: Takamichi Osumi, Euler Taveira, and Amit Kapila
Discussion: https://postgr.es/m/CAD21AoBarBf2oTF71ig2g_o=3Z_Dt6_sOpMQma1kFgbnA5OZ_w@mail.gmail.com
---
 doc/src/sgml/logical-replication.sgml    | 23 ++++++--
 src/backend/replication/logical/worker.c | 75 ++++++++++++++++++------
 2 files changed, 75 insertions(+), 23 deletions(-)

diff --git a/doc/src/sgml/logical-replication.sgml b/doc/src/sgml/logical-replication.sgml
index fb4472356d..82326c3901 100644
--- a/doc/src/sgml/logical-replication.sgml
+++ b/doc/src/sgml/logical-replication.sgml
@@ -352,11 +352,26 @@
   The resolution can be done either by changing data or permissions on the
   subscriber so that it does not conflict with the incoming change or by
   skipping the
-  transaction that conflicts with the existing data.  The transaction can be
-  skipped by calling the
+  transaction that conflicts with the existing data.  When a conflict produces
+  an error, the replication won't proceed, and the logical replication worker will
+  emit the following kind of message to the subscriber's server log:
+ERROR:  duplicate key value violates unique constraint "test_pkey"
+DETAIL:  Key (c)=(1) already exists.
+CONTEXT:  processing remote data for replication origin "pg_16395" during "INSERT" for replication target relation "public.test" in transaction 725 finished at 0/14C0378
+  The LSN of the transaction that contains the change violating the constraint and
+  the replication origin name can be found in the server log (LSN 0/14C0378 and
+  replication origin pg_16395 in the above case).  To skip the
+  transaction, the subscription needs to be disabled temporarily by
+  ALTER SUBSCRIPTION ... DISABLE first.  Then, the transaction
+  can be skipped by calling the
+  pg_replication_origin_advance() function with
-  a node_name corresponding to the subscription name,
-  and a position.  The current position of origins can be seen in the
+  the node_name (i.e., pg_16395) and the
+  LSN immediately following the transaction's finish LSN (i.e., LSN 0/14C0379).
+  After that, replication can be resumed by ALTER SUBSCRIPTION ... ENABLE.
+  The current position of origins can be seen in the
   pg_replication_origin_status system view.
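A minimal sketch of the skip procedure described in the documentation above, plugging in the origin name and LSN from the example log entry (the subscription name mysub is assumed):

ALTER SUBSCRIPTION mysub DISABLE;
SELECT pg_replication_origin_advance('pg_16395', '0/14C0379');
ALTER SUBSCRIPTION mysub ENABLE;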
diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c index 92aa794706..8653e1d840 100644 --- a/src/backend/replication/logical/worker.c +++ b/src/backend/replication/logical/worker.c @@ -226,6 +226,8 @@ typedef struct ApplyErrorCallbackArg /* Remote node information */ int remote_attnum; /* -1 if invalid */ TransactionId remote_xid; + XLogRecPtr finish_lsn; + char *origin_name; } ApplyErrorCallbackArg; static ApplyErrorCallbackArg apply_error_callback_arg = @@ -234,6 +236,8 @@ static ApplyErrorCallbackArg apply_error_callback_arg = .rel = NULL, .remote_attnum = -1, .remote_xid = InvalidTransactionId, + .finish_lsn = InvalidXLogRecPtr, + .origin_name = NULL, }; static MemoryContext ApplyMessageContext = NULL; @@ -332,7 +336,7 @@ static void apply_spooled_messages(TransactionId xid, XLogRecPtr lsn); /* Functions for apply error callback */ static void apply_error_callback(void *arg); -static inline void set_apply_error_context_xact(TransactionId xid); +static inline void set_apply_error_context_xact(TransactionId xid, XLogRecPtr lsn); static inline void reset_apply_error_context_info(void); /* @@ -785,7 +789,7 @@ apply_handle_begin(StringInfo s) LogicalRepBeginData begin_data; logicalrep_read_begin(s, &begin_data); - set_apply_error_context_xact(begin_data.xid); + set_apply_error_context_xact(begin_data.xid, begin_data.final_lsn); remote_final_lsn = begin_data.final_lsn; @@ -837,7 +841,7 @@ apply_handle_begin_prepare(StringInfo s) errmsg_internal("tablesync worker received a BEGIN PREPARE message"))); logicalrep_read_begin_prepare(s, &begin_data); - set_apply_error_context_xact(begin_data.xid); + set_apply_error_context_xact(begin_data.xid, begin_data.prepare_lsn); remote_final_lsn = begin_data.prepare_lsn; @@ -936,7 +940,7 @@ apply_handle_commit_prepared(StringInfo s) char gid[GIDSIZE]; logicalrep_read_commit_prepared(s, &prepare_data); - set_apply_error_context_xact(prepare_data.xid); + set_apply_error_context_xact(prepare_data.xid, prepare_data.commit_lsn); /* Compute GID for two_phase transactions. */ TwoPhaseTransactionGid(MySubscription->oid, prepare_data.xid, @@ -977,7 +981,7 @@ apply_handle_rollback_prepared(StringInfo s) char gid[GIDSIZE]; logicalrep_read_rollback_prepared(s, &rollback_data); - set_apply_error_context_xact(rollback_data.xid); + set_apply_error_context_xact(rollback_data.xid, rollback_data.rollback_end_lsn); /* Compute GID for two_phase transactions. */ TwoPhaseTransactionGid(MySubscription->oid, rollback_data.xid, @@ -1042,7 +1046,7 @@ apply_handle_stream_prepare(StringInfo s) errmsg_internal("tablesync worker received a STREAM PREPARE message"))); logicalrep_read_stream_prepare(s, &prepare_data); - set_apply_error_context_xact(prepare_data.xid); + set_apply_error_context_xact(prepare_data.xid, prepare_data.prepare_lsn); elog(DEBUG1, "received prepare for streamed transaction %u", prepare_data.xid); @@ -1124,7 +1128,7 @@ apply_handle_stream_start(StringInfo s) (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg_internal("invalid transaction ID in streamed replication transaction"))); - set_apply_error_context_xact(stream_xid); + set_apply_error_context_xact(stream_xid, InvalidXLogRecPtr); /* * Initialize the worker's stream_fileset if we haven't yet. 
This will be @@ -1213,7 +1217,7 @@ apply_handle_stream_abort(StringInfo s) */ if (xid == subxid) { - set_apply_error_context_xact(xid); + set_apply_error_context_xact(xid, InvalidXLogRecPtr); stream_cleanup_files(MyLogicalRepWorker->subid, xid); } else @@ -1239,7 +1243,7 @@ apply_handle_stream_abort(StringInfo s) bool found = false; char path[MAXPGPATH]; - set_apply_error_context_xact(subxid); + set_apply_error_context_xact(subxid, InvalidXLogRecPtr); subidx = -1; begin_replication_step(); @@ -1424,7 +1428,7 @@ apply_handle_stream_commit(StringInfo s) errmsg_internal("STREAM COMMIT message without STREAM STOP"))); xid = logicalrep_read_stream_commit(s, &commit_data); - set_apply_error_context_xact(xid); + set_apply_error_context_xact(xid, commit_data.commit_lsn); elog(DEBUG1, "received commit for streamed transaction %u", xid); @@ -3499,6 +3503,17 @@ ApplyWorkerMain(Datum main_arg) myslotname = MemoryContextStrdup(ApplyContext, syncslotname); pfree(syncslotname); + + /* + * Allocate the origin name in long-lived context for error context + * message. + */ + ReplicationOriginNameForTablesync(MySubscription->oid, + MyLogicalRepWorker->relid, + originname, + sizeof(originname)); + apply_error_callback_arg.origin_name = MemoryContextStrdup(ApplyContext, + originname); } else { @@ -3542,6 +3557,13 @@ ApplyWorkerMain(Datum main_arg) * does some initializations on the upstream so let's still call it. */ (void) walrcv_identify_system(LogRepWorkerWalRcvConn, &startpointTLI); + + /* + * Allocate the origin name in long-lived context for error context + * message. + */ + apply_error_callback_arg.origin_name = MemoryContextStrdup(ApplyContext, + originname); } /* @@ -3651,36 +3673,51 @@ apply_error_callback(void *arg) if (apply_error_callback_arg.command == 0) return; + Assert(errarg->origin_name); + if (errarg->rel == NULL) { if (!TransactionIdIsValid(errarg->remote_xid)) - errcontext("processing remote data during \"%s\"", + errcontext("processing remote data for replication origin \"%s\" during \"%s\"", + errarg->origin_name, logicalrep_message_type(errarg->command)); - else - errcontext("processing remote data during \"%s\" in transaction %u", + else if (XLogRecPtrIsInvalid(errarg->finish_lsn)) + errcontext("processing remote data for replication origin \"%s\" during \"%s\" in transaction %u", + errarg->origin_name, logicalrep_message_type(errarg->command), errarg->remote_xid); + else + errcontext("processing remote data for replication origin \"%s\" during \"%s\" in transaction %u finished at %X/%X", + errarg->origin_name, + logicalrep_message_type(errarg->command), + errarg->remote_xid, + LSN_FORMAT_ARGS(errarg->finish_lsn)); } else if (errarg->remote_attnum < 0) - errcontext("processing remote data during \"%s\" for replication target relation \"%s.%s\" in transaction %u", + errcontext("processing remote data for replication origin \"%s\" during \"%s\" for replication target relation \"%s.%s\" in transaction %u finished at %X/%X", + errarg->origin_name, logicalrep_message_type(errarg->command), errarg->rel->remoterel.nspname, errarg->rel->remoterel.relname, - errarg->remote_xid); + errarg->remote_xid, + LSN_FORMAT_ARGS(errarg->finish_lsn)); else - errcontext("processing remote data during \"%s\" for replication target relation \"%s.%s\" column \"%s\" in transaction %u", + errcontext("processing remote data for replication origin \"%s\" during \"%s\" for replication target relation \"%s.%s\" column \"%s\" in transaction %u finished at %X/%X", + errarg->origin_name, 
logicalrep_message_type(errarg->command), errarg->rel->remoterel.nspname, errarg->rel->remoterel.relname, errarg->rel->remoterel.attnames[errarg->remote_attnum], - errarg->remote_xid); + errarg->remote_xid, + LSN_FORMAT_ARGS(errarg->finish_lsn)); } /* Set transaction information of apply error callback */ static inline void -set_apply_error_context_xact(TransactionId xid) +set_apply_error_context_xact(TransactionId xid, XLogRecPtr lsn) { apply_error_callback_arg.remote_xid = xid; + apply_error_callback_arg.finish_lsn = lsn; } /* Reset all information of apply error callback */ @@ -3690,5 +3727,5 @@ reset_apply_error_context_info(void) apply_error_callback_arg.command = 0; apply_error_callback_arg.rel = NULL; apply_error_callback_arg.remote_attnum = -1; - set_apply_error_context_xact(InvalidTransactionId); + set_apply_error_context_xact(InvalidTransactionId, InvalidXLogRecPtr); } From c28839c8326155f25161ed42f23890c997e0b4a4 Mon Sep 17 00:00:00 2001 From: Michael Paquier Date: Tue, 8 Mar 2022 14:29:03 +0900 Subject: [PATCH 101/108] Improve comment in execReplication.c Author: Peter Smith Reviewed-by: Julien Rouhaud Discussion: https://postgr.es/m/CAHut+PuRVf3ghNTg8EV5XOQu6unGSZma0ahsRoz-haaOFZe-1A@mail.gmail.com --- src/backend/executor/execReplication.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/backend/executor/execReplication.c b/src/backend/executor/execReplication.c index de106d767d..09f78f2244 100644 --- a/src/backend/executor/execReplication.c +++ b/src/backend/executor/execReplication.c @@ -607,7 +607,7 @@ CheckCmdReplicaIdentity(Relation rel, CmdType cmd) return; /* - * This is either UPDATE OR DELETE and there is no replica identity. + * This is UPDATE/DELETE and there is no replica identity. * * Check if the table publishes UPDATES or DELETES. */ From 7cf085f077df8dd9b80cf1f5964b5b8c142be496 Mon Sep 17 00:00:00 2001 From: Robert Haas Date: Mon, 7 Mar 2022 15:08:45 -0500 Subject: [PATCH 102/108] Add support for zstd base backup compression. Both client-side compression and server-side compression are now supported for zstd. In addition, a backup compressed by the server using zstd can now be decompressed by the client in order to accommodate the use of -Fp. Jeevan Ladhe, with some edits by me. 
Discussion: http://postgr.es/m/CA+Tgmobyzfbz=gyze2_LL1ZumZunmaEKbHQxjrFkOR7APZGu-g@mail.gmail.com --- doc/src/sgml/protocol.sgml | 7 +- doc/src/sgml/ref/pg_basebackup.sgml | 41 ++- src/backend/replication/Makefile | 1 + src/backend/replication/basebackup.c | 7 +- src/backend/replication/basebackup_zstd.c | 299 ++++++++++++++++ src/bin/pg_basebackup/Makefile | 1 + src/bin/pg_basebackup/bbstreamer.h | 3 + src/bin/pg_basebackup/bbstreamer_zstd.c | 338 ++++++++++++++++++ src/bin/pg_basebackup/pg_basebackup.c | 49 ++- src/bin/pg_basebackup/pg_receivewal.c | 4 + src/bin/pg_basebackup/walmethods.h | 1 + src/bin/pg_verifybackup/Makefile | 1 + src/bin/pg_verifybackup/t/008_untar.pl | 9 + src/bin/pg_verifybackup/t/009_extract.pl | 5 + src/bin/pg_verifybackup/t/010_client_untar.pl | 8 + src/include/replication/basebackup_sink.h | 1 + src/tools/msvc/Mkvcbuild.pm | 1 + 17 files changed, 750 insertions(+), 26 deletions(-) create mode 100644 src/backend/replication/basebackup_zstd.c create mode 100644 src/bin/pg_basebackup/bbstreamer_zstd.c diff --git a/doc/src/sgml/protocol.sgml b/doc/src/sgml/protocol.sgml index c51c4254a7..0695bcd423 100644 --- a/doc/src/sgml/protocol.sgml +++ b/doc/src/sgml/protocol.sgml @@ -2724,8 +2724,8 @@ The commands accepted in replication mode are: Instructs the server to compress the backup using the specified - method. Currently, the supported methods are gzip - and lz4. + method. Currently, the supported methods are gzip, + lz4, and zstd. @@ -2737,7 +2737,8 @@ The commands accepted in replication mode are: Specifies the compression level to be used. This should only be used in conjunction with the COMPRESSION option. For gzip the value should be an integer between 1 - and 9, and for lz4 it should be between 1 and 12. + and 9, for lz4 between 1 and 12, and for + zstd it should be between 1 and 22. diff --git a/doc/src/sgml/ref/pg_basebackup.sgml b/doc/src/sgml/ref/pg_basebackup.sgml index 53aa40dcd1..4a630b59b7 100644 --- a/doc/src/sgml/ref/pg_basebackup.sgml +++ b/doc/src/sgml/ref/pg_basebackup.sgml @@ -417,30 +417,33 @@ PostgreSQL documentation specify -Xfetch. - The compression method can be set to gzip or - lz4, or none for no - compression. A compression level can be optionally specified, by - appending the level number after a colon (:). If no - level is specified, the default compression level will be used. If - only a level is specified without mentioning an algorithm, - gzip compression will be used if the level is - greater than 0, and no compression will be used if the level is 0. - - - When the tar format is used with gzip or - lz4, the suffix .gz or - .lz4 will automatically be added to all tar - filenames. When the plain format is used, client-side compression may - not be specified, but it is still possible to request server-side - compression. If this is done, the server will compress the backup for - transmission, and the client will decompress and extract it. + The compression method can be set to gzip, + lz4, zstd, or + none for no compression. A compression level can + optionally be specified, by appending the level number after a colon + (:). If no level is specified, the default + compression level will be used. If only a level is specified without + mentioning an algorithm, gzip compression will be + used if the level is greater than 0, and no compression will be used if + the level is 0. + + + When the tar format is used with gzip, + lz4, or zstd, the suffix + .gz, .lz4, or + .zst, respectively, will be automatically added to + all tar filenames. 
When the plain format is used, client-side + compression may not be specified, but it is still possible to request + server-side compression. If this is done, the server will compress the + backup for transmission, and the client will decompress and extract it. When this option is used in combination with -Xstream, pg_wal.tar will be compressed using gzip if client-side gzip - compression is selected, but will not be compressed if server-side - compresion or LZ4 compresion is selected. + compression is selected, but will not be compressed if any other + compression algorithm is selected, or if server-side compression + is selected. diff --git a/src/backend/replication/Makefile b/src/backend/replication/Makefile index 74043ff331..2e6de7007f 100644 --- a/src/backend/replication/Makefile +++ b/src/backend/replication/Makefile @@ -20,6 +20,7 @@ OBJS = \ basebackup_copy.o \ basebackup_gzip.o \ basebackup_lz4.o \ + basebackup_zstd.o \ basebackup_progress.o \ basebackup_server.o \ basebackup_sink.o \ diff --git a/src/backend/replication/basebackup.c b/src/backend/replication/basebackup.c index 0bf28b55d7..2378ce5c5e 100644 --- a/src/backend/replication/basebackup.c +++ b/src/backend/replication/basebackup.c @@ -64,7 +64,8 @@ typedef enum { BACKUP_COMPRESSION_NONE, BACKUP_COMPRESSION_GZIP, - BACKUP_COMPRESSION_LZ4 + BACKUP_COMPRESSION_LZ4, + BACKUP_COMPRESSION_ZSTD } basebackup_compression_type; typedef struct @@ -906,6 +907,8 @@ parse_basebackup_options(List *options, basebackup_options *opt) opt->compression = BACKUP_COMPRESSION_GZIP; else if (strcmp(optval, "lz4") == 0) opt->compression = BACKUP_COMPRESSION_LZ4; + else if (strcmp(optval, "zstd") == 0) + opt->compression = BACKUP_COMPRESSION_ZSTD; else ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), @@ -1026,6 +1029,8 @@ SendBaseBackup(BaseBackupCmd *cmd) sink = bbsink_gzip_new(sink, opt.compression_level); else if (opt.compression == BACKUP_COMPRESSION_LZ4) sink = bbsink_lz4_new(sink, opt.compression_level); + else if (opt.compression == BACKUP_COMPRESSION_ZSTD) + sink = bbsink_zstd_new(sink, opt.compression_level); /* Set up progress reporting. */ sink = bbsink_progress_new(sink, opt.progress); diff --git a/src/backend/replication/basebackup_zstd.c b/src/backend/replication/basebackup_zstd.c new file mode 100644 index 0000000000..e3f9b1d4dc --- /dev/null +++ b/src/backend/replication/basebackup_zstd.c @@ -0,0 +1,299 @@ +/*------------------------------------------------------------------------- + * + * basebackup_zstd.c + * Basebackup sink implementing zstd compression. + * + * Portions Copyright (c) 2010-2020, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/backend/replication/basebackup_zstd.c + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#ifdef HAVE_LIBZSTD +#include +#endif + +#include "replication/basebackup_sink.h" + +#ifdef HAVE_LIBZSTD + +typedef struct bbsink_zstd +{ + /* Common information for all types of sink. 
*/ + bbsink base; + + /* Compression level */ + int compresslevel; + + ZSTD_CCtx *cctx; + ZSTD_outBuffer zstd_outBuf; +} bbsink_zstd; + +static void bbsink_zstd_begin_backup(bbsink *sink); +static void bbsink_zstd_begin_archive(bbsink *sink, const char *archive_name); +static void bbsink_zstd_archive_contents(bbsink *sink, size_t avail_in); +static void bbsink_zstd_manifest_contents(bbsink *sink, size_t len); +static void bbsink_zstd_end_archive(bbsink *sink); +static void bbsink_zstd_cleanup(bbsink *sink); +static void bbsink_zstd_end_backup(bbsink *sink, XLogRecPtr endptr, + TimeLineID endtli); + +const bbsink_ops bbsink_zstd_ops = { + .begin_backup = bbsink_zstd_begin_backup, + .begin_archive = bbsink_zstd_begin_archive, + .archive_contents = bbsink_zstd_archive_contents, + .end_archive = bbsink_zstd_end_archive, + .begin_manifest = bbsink_forward_begin_manifest, + .manifest_contents = bbsink_zstd_manifest_contents, + .end_manifest = bbsink_forward_end_manifest, + .end_backup = bbsink_zstd_end_backup, + .cleanup = bbsink_zstd_cleanup +}; +#endif + +/* + * Create a new basebackup sink that performs zstd compression using the + * designated compression level. + */ +bbsink * +bbsink_zstd_new(bbsink *next, int compresslevel) +{ +#ifndef HAVE_LIBZSTD + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("zstd compression is not supported by this build"))); + return NULL; /* keep compiler quiet */ +#else + bbsink_zstd *sink; + + Assert(next != NULL); + + if (compresslevel < 0 || compresslevel > 22) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("zstd compression level %d is out of range", + compresslevel))); + + sink = palloc0(sizeof(bbsink_zstd)); + *((const bbsink_ops **) &sink->base.bbs_ops) = &bbsink_zstd_ops; + sink->base.bbs_next = next; + sink->compresslevel = compresslevel; + + return &sink->base; +#endif +} + +#ifdef HAVE_LIBZSTD + +/* + * Begin backup. + */ +static void +bbsink_zstd_begin_backup(bbsink *sink) +{ + bbsink_zstd *mysink = (bbsink_zstd *) sink; + size_t output_buffer_bound; + + mysink->cctx = ZSTD_createCCtx(); + if (!mysink->cctx) + elog(ERROR, "could not create zstd compression context"); + + ZSTD_CCtx_setParameter(mysink->cctx, ZSTD_c_compressionLevel, + mysink->compresslevel); + + /* + * We need our own buffer, because we're going to pass different data to + * the next sink than what gets passed to us. + */ + mysink->base.bbs_buffer = palloc(mysink->base.bbs_buffer_length); + + /* + * Make sure that the next sink's bbs_buffer is big enough to accommodate + * the compressed input buffer. + */ + output_buffer_bound = ZSTD_compressBound(mysink->base.bbs_buffer_length); + + /* + * The buffer length is expected to be a multiple of BLCKSZ, so round up. + */ + output_buffer_bound = output_buffer_bound + BLCKSZ - + (output_buffer_bound % BLCKSZ); + + bbsink_begin_backup(sink->bbs_next, sink->bbs_state, output_buffer_bound); +} + +/* + * Prepare to compress the next archive. + */ +static void +bbsink_zstd_begin_archive(bbsink *sink, const char *archive_name) +{ + bbsink_zstd *mysink = (bbsink_zstd *) sink; + char *zstd_archive_name; + + /* + * At the start of each archive we reset the state to start a new + * compression operation. The parameters are sticky and they will stick + * around as we are resetting with option ZSTD_reset_session_only. 
+ */ + ZSTD_CCtx_reset(mysink->cctx, ZSTD_reset_session_only); + + mysink->zstd_outBuf.dst = mysink->base.bbs_next->bbs_buffer; + mysink->zstd_outBuf.size = mysink->base.bbs_next->bbs_buffer_length; + mysink->zstd_outBuf.pos = 0; + + /* Add ".zst" to the archive name. */ + zstd_archive_name = psprintf("%s.zst", archive_name); + Assert(sink->bbs_next != NULL); + bbsink_begin_archive(sink->bbs_next, zstd_archive_name); + pfree(zstd_archive_name); +} + +/* + * Compress the input data to the output buffer until we run out of input + * data. Each time the output buffer falls below the compression bound for + * the input buffer, invoke the archive_contents() method for the next sink. + * + * Note that since we're compressing the input, it may very commonly happen + * that we consume all the input data without filling the output buffer. In + * that case, the compressed representation of the current input data won't + * actually be sent to the next bbsink until a later call to this function, + * or perhaps even not until bbsink_zstd_end_archive() is invoked. + */ +static void +bbsink_zstd_archive_contents(bbsink *sink, size_t len) +{ + bbsink_zstd *mysink = (bbsink_zstd *) sink; + ZSTD_inBuffer inBuf = {mysink->base.bbs_buffer, len, 0}; + + while (inBuf.pos < inBuf.size) + { + size_t yet_to_flush; + size_t max_needed = ZSTD_compressBound(inBuf.size - inBuf.pos); + + /* + * If the out buffer is not left with enough space, send the output + * buffer to the next sink, and reset it. + */ + if (mysink->zstd_outBuf.size - mysink->zstd_outBuf.pos < max_needed) + { + bbsink_archive_contents(mysink->base.bbs_next, + mysink->zstd_outBuf.pos); + mysink->zstd_outBuf.dst = mysink->base.bbs_next->bbs_buffer; + mysink->zstd_outBuf.size = + mysink->base.bbs_next->bbs_buffer_length; + mysink->zstd_outBuf.pos = 0; + } + + yet_to_flush = ZSTD_compressStream2(mysink->cctx, &mysink->zstd_outBuf, + &inBuf, ZSTD_e_continue); + + if (ZSTD_isError(yet_to_flush)) + elog(ERROR, + "could not compress data: %s", + ZSTD_getErrorName(yet_to_flush)); + } +} + +/* + * There might be some data inside zstd's internal buffers; we need to get that + * flushed out, also end the zstd frame and then get that forwarded to the + * successor sink as archive content. + * + * Then we can end processing for this archive. + */ +static void +bbsink_zstd_end_archive(bbsink *sink) +{ + bbsink_zstd *mysink = (bbsink_zstd *) sink; + size_t yet_to_flush; + + do + { + ZSTD_inBuffer in = {NULL, 0, 0}; + size_t max_needed = ZSTD_compressBound(0); + + /* + * If the out buffer is not left with enough space, send the output + * buffer to the next sink, and reset it. + */ + if (mysink->zstd_outBuf.size - mysink->zstd_outBuf.pos < max_needed) + { + bbsink_archive_contents(mysink->base.bbs_next, + mysink->zstd_outBuf.pos); + mysink->zstd_outBuf.dst = mysink->base.bbs_next->bbs_buffer; + mysink->zstd_outBuf.size = + mysink->base.bbs_next->bbs_buffer_length; + mysink->zstd_outBuf.pos = 0; + } + + yet_to_flush = ZSTD_compressStream2(mysink->cctx, + &mysink->zstd_outBuf, + &in, ZSTD_e_end); + + if (ZSTD_isError(yet_to_flush)) + elog(ERROR, "could not compress data: %s", + ZSTD_getErrorName(yet_to_flush)); + + } while (yet_to_flush > 0); + + /* Make sure to pass any remaining bytes to the next sink. */ + if (mysink->zstd_outBuf.pos > 0) + bbsink_archive_contents(mysink->base.bbs_next, + mysink->zstd_outBuf.pos); + + /* Pass on the information that this archive has ended. */ + bbsink_forward_end_archive(sink); +} + +/* + * Free the resources and context. 
+ */ +static void +bbsink_zstd_end_backup(bbsink *sink, XLogRecPtr endptr, + TimeLineID endtli) +{ + bbsink_zstd *mysink = (bbsink_zstd *) sink; + + /* Release the context. */ + if (mysink->cctx) + { + ZSTD_freeCCtx(mysink->cctx); + mysink->cctx = NULL; + } + + bbsink_forward_end_backup(sink, endptr, endtli); +} + +/* + * Manifest contents are not compressed, but we do need to copy them into + * the successor sink's buffer, because we have our own. + */ +static void +bbsink_zstd_manifest_contents(bbsink *sink, size_t len) +{ + memcpy(sink->bbs_next->bbs_buffer, sink->bbs_buffer, len); + bbsink_manifest_contents(sink->bbs_next, len); +} + +/* + * In case the backup fails, make sure we free any compression context that + * got allocated, so that we don't leak memory. + */ +static void +bbsink_zstd_cleanup(bbsink *sink) +{ + bbsink_zstd *mysink = (bbsink_zstd *) sink; + + /* Release the context if not already released. */ + if (mysink->cctx) + { + ZSTD_freeCCtx(mysink->cctx); + mysink->cctx = NULL; + } +} + +#endif diff --git a/src/bin/pg_basebackup/Makefile b/src/bin/pg_basebackup/Makefile index 1d0db4f9d0..0035ebcef5 100644 --- a/src/bin/pg_basebackup/Makefile +++ b/src/bin/pg_basebackup/Makefile @@ -44,6 +44,7 @@ BBOBJS = \ bbstreamer_gzip.o \ bbstreamer_inject.o \ bbstreamer_lz4.o \ + bbstreamer_zstd.o \ bbstreamer_tar.o all: pg_basebackup pg_receivewal pg_recvlogical diff --git a/src/bin/pg_basebackup/bbstreamer.h b/src/bin/pg_basebackup/bbstreamer.h index c2de77bacc..02d4c05df6 100644 --- a/src/bin/pg_basebackup/bbstreamer.h +++ b/src/bin/pg_basebackup/bbstreamer.h @@ -209,6 +209,9 @@ extern bbstreamer *bbstreamer_gzip_decompressor_new(bbstreamer *next); extern bbstreamer *bbstreamer_lz4_compressor_new(bbstreamer *next, int compresslevel); extern bbstreamer *bbstreamer_lz4_decompressor_new(bbstreamer *next); +extern bbstreamer *bbstreamer_zstd_compressor_new(bbstreamer *next, + int compresslevel); +extern bbstreamer *bbstreamer_zstd_decompressor_new(bbstreamer *next); extern bbstreamer *bbstreamer_tar_parser_new(bbstreamer *next); extern bbstreamer *bbstreamer_tar_terminator_new(bbstreamer *next); extern bbstreamer *bbstreamer_tar_archiver_new(bbstreamer *next); diff --git a/src/bin/pg_basebackup/bbstreamer_zstd.c b/src/bin/pg_basebackup/bbstreamer_zstd.c new file mode 100644 index 0000000000..cc68367dd5 --- /dev/null +++ b/src/bin/pg_basebackup/bbstreamer_zstd.c @@ -0,0 +1,338 @@ +/*------------------------------------------------------------------------- + * + * bbstreamer_zstd.c + * + * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/bin/pg_basebackup/bbstreamer_zstd.c + *------------------------------------------------------------------------- + */ + +#include "postgres_fe.h" + +#include + +#ifdef HAVE_LIBZSTD +#include +#endif + +#include "bbstreamer.h" +#include "common/logging.h" + +#ifdef HAVE_LIBZSTD + +typedef struct bbstreamer_zstd_frame +{ + bbstreamer base; + + ZSTD_CCtx *cctx; + ZSTD_DCtx *dctx; + ZSTD_outBuffer zstd_outBuf; +} bbstreamer_zstd_frame; + +static void bbstreamer_zstd_compressor_content(bbstreamer *streamer, + bbstreamer_member *member, + const char *data, int len, + bbstreamer_archive_context context); +static void bbstreamer_zstd_compressor_finalize(bbstreamer *streamer); +static void bbstreamer_zstd_compressor_free(bbstreamer *streamer); + +const bbstreamer_ops bbstreamer_zstd_compressor_ops = { + .content = bbstreamer_zstd_compressor_content, + .finalize = bbstreamer_zstd_compressor_finalize, + .free = 
bbstreamer_zstd_compressor_free
+};
+
+static void bbstreamer_zstd_decompressor_content(bbstreamer *streamer,
+                                                 bbstreamer_member *member,
+                                                 const char *data, int len,
+                                                 bbstreamer_archive_context context);
+static void bbstreamer_zstd_decompressor_finalize(bbstreamer *streamer);
+static void bbstreamer_zstd_decompressor_free(bbstreamer *streamer);
+
+const bbstreamer_ops bbstreamer_zstd_decompressor_ops = {
+    .content = bbstreamer_zstd_decompressor_content,
+    .finalize = bbstreamer_zstd_decompressor_finalize,
+    .free = bbstreamer_zstd_decompressor_free
+};
+#endif
+
+/*
+ * Create a new base backup streamer that performs zstd compression of tar
+ * blocks.
+ */
+bbstreamer *
+bbstreamer_zstd_compressor_new(bbstreamer *next, int compresslevel)
+{
+#ifdef HAVE_LIBZSTD
+    bbstreamer_zstd_frame *streamer;
+
+    Assert(next != NULL);
+
+    streamer = palloc0(sizeof(bbstreamer_zstd_frame));
+
+    *((const bbstreamer_ops **) &streamer->base.bbs_ops) =
+        &bbstreamer_zstd_compressor_ops;
+
+    streamer->base.bbs_next = next;
+    initStringInfo(&streamer->base.bbs_buffer);
+    enlargeStringInfo(&streamer->base.bbs_buffer, ZSTD_DStreamOutSize());
+
+    streamer->cctx = ZSTD_createCCtx();
+    if (!streamer->cctx)
+        pg_log_error("could not create zstd compression context");
+
+    /* Initialize stream compression preferences */
+    ZSTD_CCtx_setParameter(streamer->cctx, ZSTD_c_compressionLevel,
+                           compresslevel);
+
+    /* Initialize the ZSTD output buffer. */
+    streamer->zstd_outBuf.dst = streamer->base.bbs_buffer.data;
+    streamer->zstd_outBuf.size = streamer->base.bbs_buffer.maxlen;
+    streamer->zstd_outBuf.pos = 0;
+
+    return &streamer->base;
+#else
+    pg_log_error("this build does not support zstd compression");
+    exit(1);
+#endif
+}
+
+#ifdef HAVE_LIBZSTD
+/*
+ * Compress the input data into the output buffer.
+ *
+ * On each invocation, compute the compression bound for the remaining
+ * input to make sure that the output buffer has enough capacity to
+ * accommodate the compressed data.  If the output buffer falls short of
+ * the compression bound, forward the contents of the output buffer to
+ * the next streamer and empty the buffer.
+ */
+static void
+bbstreamer_zstd_compressor_content(bbstreamer *streamer,
+                                   bbstreamer_member *member,
+                                   const char *data, int len,
+                                   bbstreamer_archive_context context)
+{
+    bbstreamer_zstd_frame *mystreamer = (bbstreamer_zstd_frame *) streamer;
+    ZSTD_inBuffer inBuf = {data, len, 0};
+
+    while (inBuf.pos < inBuf.size)
+    {
+        size_t yet_to_flush;
+        size_t max_needed = ZSTD_compressBound(inBuf.size - inBuf.pos);
+
+        /*
+         * If the output buffer is not left with enough space, send the
+         * compressed bytes to the next streamer, and empty the buffer.
+         */
+        if (mystreamer->zstd_outBuf.size - mystreamer->zstd_outBuf.pos <
+            max_needed)
+        {
+            bbstreamer_content(mystreamer->base.bbs_next, member,
+                               mystreamer->zstd_outBuf.dst,
+                               mystreamer->zstd_outBuf.pos,
+                               context);
+
+            /* Reset the ZSTD output buffer. */
+            mystreamer->zstd_outBuf.dst = mystreamer->base.bbs_buffer.data;
+            mystreamer->zstd_outBuf.size = mystreamer->base.bbs_buffer.maxlen;
+            mystreamer->zstd_outBuf.pos = 0;
+        }
+
+        yet_to_flush =
+            ZSTD_compressStream2(mystreamer->cctx, &mystreamer->zstd_outBuf,
+                                 &inBuf, ZSTD_e_continue);
+
+        if (ZSTD_isError(yet_to_flush))
+            pg_log_error("could not compress data: %s",
+                         ZSTD_getErrorName(yet_to_flush));
+    }
+}
+
+/*
+ * End-of-stream processing.
+ */
+static void
+bbstreamer_zstd_compressor_finalize(bbstreamer *streamer)
+{
+	bbstreamer_zstd_frame *mystreamer = (bbstreamer_zstd_frame *) streamer;
+	size_t		yet_to_flush;
+
+	do
+	{
+		ZSTD_inBuffer in = {NULL, 0, 0};
+		size_t		max_needed = ZSTD_compressBound(0);
+
+		/*
+		 * If the output buffer is not left with enough space, send the
+		 * compressed bytes to the next streamer, and empty the buffer.
+		 */
+		if (mystreamer->zstd_outBuf.size - mystreamer->zstd_outBuf.pos <
+			max_needed)
+		{
+			bbstreamer_content(mystreamer->base.bbs_next, NULL,
+							   mystreamer->zstd_outBuf.dst,
+							   mystreamer->zstd_outBuf.pos,
+							   BBSTREAMER_UNKNOWN);
+
+			/* Reset the ZSTD output buffer. */
+			mystreamer->zstd_outBuf.dst = mystreamer->base.bbs_buffer.data;
+			mystreamer->zstd_outBuf.size = mystreamer->base.bbs_buffer.maxlen;
+			mystreamer->zstd_outBuf.pos = 0;
+		}
+
+		yet_to_flush = ZSTD_compressStream2(mystreamer->cctx,
+											&mystreamer->zstd_outBuf,
+											&in, ZSTD_e_end);
+
+		if (ZSTD_isError(yet_to_flush))
+		{
+			pg_log_error("could not compress data: %s",
+						 ZSTD_getErrorName(yet_to_flush));
+			exit(1);
+		}
+
+	} while (yet_to_flush > 0);
+
+	/* Make sure to pass any remaining bytes to the next streamer. */
+	if (mystreamer->zstd_outBuf.pos > 0)
+		bbstreamer_content(mystreamer->base.bbs_next, NULL,
+						   mystreamer->zstd_outBuf.dst,
+						   mystreamer->zstd_outBuf.pos,
+						   BBSTREAMER_UNKNOWN);
+
+	bbstreamer_finalize(mystreamer->base.bbs_next);
+}
+
+/*
+ * Free memory.
+ */
+static void
+bbstreamer_zstd_compressor_free(bbstreamer *streamer)
+{
+	bbstreamer_zstd_frame *mystreamer = (bbstreamer_zstd_frame *) streamer;
+
+	bbstreamer_free(streamer->bbs_next);
+	ZSTD_freeCCtx(mystreamer->cctx);
+	pfree(streamer->bbs_buffer.data);
+	pfree(streamer);
+}
+#endif
+
+/*
+ * Create a new base backup streamer that performs decompression of zstd
+ * compressed blocks.
+ */
+bbstreamer *
+bbstreamer_zstd_decompressor_new(bbstreamer *next)
+{
+#ifdef HAVE_LIBZSTD
+	bbstreamer_zstd_frame *streamer;
+
+	Assert(next != NULL);
+
+	streamer = palloc0(sizeof(bbstreamer_zstd_frame));
+	*((const bbstreamer_ops **) &streamer->base.bbs_ops) =
+		&bbstreamer_zstd_decompressor_ops;
+
+	streamer->base.bbs_next = next;
+	initStringInfo(&streamer->base.bbs_buffer);
+	enlargeStringInfo(&streamer->base.bbs_buffer, ZSTD_DStreamOutSize());
+
+	streamer->dctx = ZSTD_createDCtx();
+	if (!streamer->dctx)
+	{
+		pg_log_error("could not create zstd decompression context");
+		exit(1);
+	}
+
+	/* Initialize the ZSTD output buffer. */
+	streamer->zstd_outBuf.dst = streamer->base.bbs_buffer.data;
+	streamer->zstd_outBuf.size = streamer->base.bbs_buffer.maxlen;
+	streamer->zstd_outBuf.pos = 0;
+
+	return &streamer->base;
+#else
+	pg_log_error("this build does not support zstd compression");
+	exit(1);
+#endif
+}
+
+#ifdef HAVE_LIBZSTD
+/*
+ * Decompress the input data to the output buffer until we run out of input
+ * data.  Each time the output buffer is full, pass on the decompressed data
+ * to the next streamer.
+ */
+static void
+bbstreamer_zstd_decompressor_content(bbstreamer *streamer,
+									 bbstreamer_member *member,
+									 const char *data, int len,
+									 bbstreamer_archive_context context)
+{
+	bbstreamer_zstd_frame *mystreamer = (bbstreamer_zstd_frame *) streamer;
+	ZSTD_inBuffer inBuf = {data, len, 0};
+
+	while (inBuf.pos < inBuf.size)
+	{
+		size_t		ret;
+
+		/*
+		 * If the output buffer is full, forward its contents to the next
+		 * streamer and reset the buffer.
+		 */
+		if (mystreamer->zstd_outBuf.pos >= mystreamer->zstd_outBuf.size)
+		{
+			bbstreamer_content(mystreamer->base.bbs_next, member,
+							   mystreamer->zstd_outBuf.dst,
+							   mystreamer->zstd_outBuf.pos,
+							   context);
+
+			/* Reset the ZSTD output buffer. */
+			mystreamer->zstd_outBuf.dst = mystreamer->base.bbs_buffer.data;
+			mystreamer->zstd_outBuf.size = mystreamer->base.bbs_buffer.maxlen;
+			mystreamer->zstd_outBuf.pos = 0;
+		}
+
+		ret = ZSTD_decompressStream(mystreamer->dctx,
+									&mystreamer->zstd_outBuf, &inBuf);
+
+		if (ZSTD_isError(ret))
+		{
+			pg_log_error("could not decompress data: %s",
+						 ZSTD_getErrorName(ret));
+			exit(1);
+		}
+	}
+}
+
+/*
+ * End-of-stream processing.
+ */
+static void
+bbstreamer_zstd_decompressor_finalize(bbstreamer *streamer)
+{
+	bbstreamer_zstd_frame *mystreamer = (bbstreamer_zstd_frame *) streamer;
+
+	/*
+	 * At the end of the stream, forward any data still pending in the
+	 * output buffer to the next streamer.
+	 */
+	if (mystreamer->zstd_outBuf.pos > 0)
+		bbstreamer_content(mystreamer->base.bbs_next, NULL,
+						   mystreamer->base.bbs_buffer.data,
+						   mystreamer->zstd_outBuf.pos,
+						   BBSTREAMER_UNKNOWN);
+
+	bbstreamer_finalize(mystreamer->base.bbs_next);
+}
+
+/*
+ * Free memory.
+ */
+static void
+bbstreamer_zstd_decompressor_free(bbstreamer *streamer)
+{
+	bbstreamer_zstd_frame *mystreamer = (bbstreamer_zstd_frame *) streamer;
+
+	bbstreamer_free(streamer->bbs_next);
+	ZSTD_freeDCtx(mystreamer->dctx);
+	pfree(streamer->bbs_buffer.data);
+	pfree(streamer);
+}
+#endif
diff --git a/src/bin/pg_basebackup/pg_basebackup.c b/src/bin/pg_basebackup/pg_basebackup.c
index c1ed7aeeee..9f3ecc60fb 100644
--- a/src/bin/pg_basebackup/pg_basebackup.c
+++ b/src/bin/pg_basebackup/pg_basebackup.c
@@ -405,8 +405,9 @@ usage(void)
 	printf(_("  -X, --wal-method=none|fetch|stream\n"
 			 "                         include required WAL files with specified method\n"));
 	printf(_("  -z, --gzip             compress tar output\n"));
-	printf(_("  -Z, --compress={[{client,server}-]gzip,lz4,none}[:LEVEL] or [LEVEL]\n"
+	printf(_("  -Z, --compress=[{client|server}-]{gzip|lz4|zstd}[:LEVEL]\n"
 			 "                         compress tar output with given compression method or level\n"));
+	printf(_("  -Z, --compress=none    do not compress tar output\n"));
 	printf(_("\nGeneral options:\n"));
 	printf(_("  -c, --checkpoint=fast|spread\n"
 			 "                         set fast or spread checkpointing\n"));
@@ -1067,6 +1068,21 @@ parse_compress_options(char *src, WalCompressionMethod *methodres,
 		*methodres = COMPRESSION_LZ4;
 		*locationres = COMPRESS_LOCATION_SERVER;
 	}
+	else if (pg_strcasecmp(firstpart, "zstd") == 0)
+	{
+		*methodres = COMPRESSION_ZSTD;
+		*locationres = COMPRESS_LOCATION_UNSPECIFIED;
+	}
+	else if (pg_strcasecmp(firstpart, "client-zstd") == 0)
+	{
+		*methodres = COMPRESSION_ZSTD;
+		*locationres = COMPRESS_LOCATION_CLIENT;
+	}
+	else if (pg_strcasecmp(firstpart, "server-zstd") == 0)
+	{
+		*methodres = COMPRESSION_ZSTD;
+		*locationres = COMPRESS_LOCATION_SERVER;
+	}
 	else if (pg_strcasecmp(firstpart, "none") == 0)
 	{
 		*methodres = COMPRESSION_NONE;
@@ -1191,7 +1207,8 @@ CreateBackupStreamer(char *archive_name, char *spclocation,
 	bool		inject_manifest;
 	bool		is_tar,
 				is_tar_gz,
-				is_tar_lz4;
+				is_tar_lz4,
+				is_tar_zstd;
 	bool		must_parse_archive;
 	int			archive_name_len = strlen(archive_name);
 
@@ -1214,6 +1231,10 @@ CreateBackupStreamer(char *archive_name, char *spclocation,
 	is_tar_lz4 = (archive_name_len > 8 &&
 				  strcmp(archive_name + archive_name_len - 4, ".lz4") == 0);
 
+	/* Is this a ZSTD archive? */
+	is_tar_zstd = (archive_name_len > 8 &&
+				   strcmp(archive_name + archive_name_len - 4, ".zst") == 0);
+
 	/*
	 * We have to parse the archive if (1) we're supposed to extract it, or if
	 * (2) we need to inject backup_manifest or recovery configuration into it.
	 */
@@ -1223,7 +1244,8 @@ CreateBackupStreamer(char *archive_name, char *spclocation,
 		(spclocation == NULL && writerecoveryconf));
 
 	/* At present, we only know how to parse tar archives. */
-	if (must_parse_archive && !is_tar && !is_tar_gz && !is_tar_lz4)
+	if (must_parse_archive && !is_tar && !is_tar_gz && !is_tar_lz4
+		&& !is_tar_zstd)
 	{
 		pg_log_error("unable to parse archive: %s", archive_name);
 		pg_log_info("only tar archives can be parsed");
@@ -1295,6 +1317,14 @@ CreateBackupStreamer(char *archive_name, char *spclocation,
 			streamer = bbstreamer_lz4_compressor_new(streamer,
 													 compresslevel);
 		}
+		else if (compressmethod == COMPRESSION_ZSTD)
+		{
+			strlcat(archive_filename, ".zst", sizeof(archive_filename));
+			streamer = bbstreamer_plain_writer_new(archive_filename,
+												   archive_file);
+			streamer = bbstreamer_zstd_compressor_new(streamer,
+													  compresslevel);
+		}
 		else
 		{
 			Assert(false);		/* not reachable */
@@ -1353,6 +1383,8 @@ CreateBackupStreamer(char *archive_name, char *spclocation,
 			streamer = bbstreamer_gzip_decompressor_new(streamer);
 		else if (compressmethod == COMPRESSION_LZ4)
 			streamer = bbstreamer_lz4_decompressor_new(streamer);
+		else if (compressmethod == COMPRESSION_ZSTD)
+			streamer = bbstreamer_zstd_decompressor_new(streamer);
 	}
 
 	/* Return the results. */
@@ -2020,6 +2052,9 @@ BaseBackup(void)
 			case COMPRESSION_LZ4:
 				compressmethodstr = "lz4";
 				break;
+			case COMPRESSION_ZSTD:
+				compressmethodstr = "zstd";
+				break;
 			default:
 				Assert(false);
 				break;
@@ -2869,6 +2904,14 @@ main(int argc, char **argv)
 					exit(1);
 				}
 				break;
+			case COMPRESSION_ZSTD:
+				if (compresslevel > 22)
+				{
+					pg_log_error("compression level %d of method %s higher than maximum of 22",
+								 compresslevel, "zstd");
+					exit(1);
+				}
+				break;
 		}
 
 	/*
diff --git a/src/bin/pg_basebackup/pg_receivewal.c b/src/bin/pg_basebackup/pg_receivewal.c
index ce661a9ce4..8a4c2b8964 100644
--- a/src/bin/pg_basebackup/pg_receivewal.c
+++ b/src/bin/pg_basebackup/pg_receivewal.c
@@ -904,6 +904,10 @@ main(int argc, char **argv)
 			exit(1);
 #endif
 			break;
+		case COMPRESSION_ZSTD:
+			pg_log_error("compression with %s is not yet supported", "ZSTD");
+			exit(1);
 	}
diff --git a/src/bin/pg_basebackup/walmethods.h b/src/bin/pg_basebackup/walmethods.h
index 2dfb353baa..ec54019cfc 100644
--- a/src/bin/pg_basebackup/walmethods.h
+++ b/src/bin/pg_basebackup/walmethods.h
@@ -24,6 +24,7 @@ typedef enum
 {
 	COMPRESSION_GZIP,
 	COMPRESSION_LZ4,
+	COMPRESSION_ZSTD,
 	COMPRESSION_NONE
 } WalCompressionMethod;
diff --git a/src/bin/pg_verifybackup/Makefile b/src/bin/pg_verifybackup/Makefile
index 851233a6e0..596df15118 100644
--- a/src/bin/pg_verifybackup/Makefile
+++ b/src/bin/pg_verifybackup/Makefile
@@ -10,6 +10,7 @@ export TAR
 # name.
 export GZIP_PROGRAM=$(GZIP)
 export LZ4=$(LZ4)
+export ZSTD=$(ZSTD)
 
 subdir = src/bin/pg_verifybackup
 top_builddir = ../../..
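
For reference, the buffered-flush pattern that bbstreamer_zstd.c and
basebackup_zstd.c both rely on can be seen in isolation in the following
minimal sketch. It is illustrative only and not part of the patch series:
the stdin/stdout framing, the fixed level 5, and all local names are
assumptions made for the example, not anything the patches define.

#include <stdio.h>
#include <stdlib.h>

#include <zstd.h>

int
main(void)
{
	ZSTD_CCtx  *cctx = ZSTD_createCCtx();
	size_t		in_cap = ZSTD_CStreamInSize();
	size_t		out_cap = ZSTD_CStreamOutSize();
	char	   *in = malloc(in_cap);
	char	   *out = malloc(out_cap);
	ZSTD_outBuffer outBuf = {out, out_cap, 0};
	size_t		nread;

	if (cctx == NULL || in == NULL || out == NULL)
		return 1;

	/* Same knob the patch sets via ZSTD_c_compressionLevel. */
	ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 5);

	while ((nread = fread(in, 1, in_cap, stdin)) > 0)
	{
		ZSTD_inBuffer inBuf = {in, nread, 0};

		while (inBuf.pos < inBuf.size)
		{
			size_t		ret;

			/*
			 * Flush the staging buffer when its free space might not hold
			 * the compressed form of the remaining input.
			 */
			if (outBuf.size - outBuf.pos <
				ZSTD_compressBound(inBuf.size - inBuf.pos))
			{
				fwrite(outBuf.dst, 1, outBuf.pos, stdout);
				outBuf.pos = 0;
			}

			ret = ZSTD_compressStream2(cctx, &outBuf, &inBuf, ZSTD_e_continue);
			if (ZSTD_isError(ret))
				return 1;
		}
	}

	/* Drain: ZSTD_e_end returns 0 once the whole frame has been written. */
	for (;;)
	{
		ZSTD_inBuffer inBuf = {NULL, 0, 0};
		size_t		remaining = ZSTD_compressStream2(cctx, &outBuf,
													 &inBuf, ZSTD_e_end);

		if (ZSTD_isError(remaining))
			return 1;
		fwrite(outBuf.dst, 1, outBuf.pos, stdout);
		outBuf.pos = 0;
		if (remaining == 0)
			break;
	}

	ZSTD_freeCCtx(cctx);
	free(in);
	free(out);
	return 0;
}

Two details carry over directly to the patch code: the flush happens
whenever the staging buffer's free space drops below ZSTD_compressBound()
for the remaining input, and ZSTD_e_end must be pumped until
ZSTD_compressStream2() returns zero, since the frame epilogue may not fit
in a single flush.
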
diff --git a/src/bin/pg_verifybackup/t/008_untar.pl b/src/bin/pg_verifybackup/t/008_untar.pl index 383203d0b8..efbc910dfb 100644 --- a/src/bin/pg_verifybackup/t/008_untar.pl +++ b/src/bin/pg_verifybackup/t/008_untar.pl @@ -42,6 +42,14 @@ 'decompress_program' => $ENV{'LZ4'}, 'decompress_flags' => [ '-d', '-m'], 'enabled' => check_pg_config("#define HAVE_LIBLZ4 1") + }, + { + 'compression_method' => 'zstd', + 'backup_flags' => ['--compress', 'server-zstd'], + 'backup_archive' => 'base.tar.zst', + 'decompress_program' => $ENV{'ZSTD'}, + 'decompress_flags' => [ '-d' ], + 'enabled' => check_pg_config("#define HAVE_LIBZSTD 1") } ); @@ -107,6 +115,7 @@ # Cleanup. unlink($backup_path . '/backup_manifest'); unlink($backup_path . '/base.tar'); + unlink($backup_path . '/' . $tc->{'backup_archive'}); rmtree($extract_path); } } diff --git a/src/bin/pg_verifybackup/t/009_extract.pl b/src/bin/pg_verifybackup/t/009_extract.pl index c51cdf79f8..d30ba01742 100644 --- a/src/bin/pg_verifybackup/t/009_extract.pl +++ b/src/bin/pg_verifybackup/t/009_extract.pl @@ -31,6 +31,11 @@ 'compression_method' => 'lz4', 'backup_flags' => ['--compress', 'server-lz4:5'], 'enabled' => check_pg_config("#define HAVE_LIBLZ4 1") + }, + { + 'compression_method' => 'zstd', + 'backup_flags' => ['--compress', 'server-zstd:5'], + 'enabled' => check_pg_config("#define HAVE_LIBZSTD 1") } ); diff --git a/src/bin/pg_verifybackup/t/010_client_untar.pl b/src/bin/pg_verifybackup/t/010_client_untar.pl index 3616529390..c2a6161be6 100644 --- a/src/bin/pg_verifybackup/t/010_client_untar.pl +++ b/src/bin/pg_verifybackup/t/010_client_untar.pl @@ -42,6 +42,14 @@ 'decompress_flags' => [ '-d' ], 'output_file' => 'base.tar', 'enabled' => check_pg_config("#define HAVE_LIBLZ4 1") + }, + { + 'compression_method' => 'zstd', + 'backup_flags' => ['--compress', 'client-zstd:5'], + 'backup_archive' => 'base.tar.zst', + 'decompress_program' => $ENV{'ZSTD'}, + 'decompress_flags' => [ '-d' ], + 'enabled' => check_pg_config("#define HAVE_LIBZSTD 1") } ); diff --git a/src/include/replication/basebackup_sink.h b/src/include/replication/basebackup_sink.h index a3f8d37258..a7f16758a4 100644 --- a/src/include/replication/basebackup_sink.h +++ b/src/include/replication/basebackup_sink.h @@ -285,6 +285,7 @@ extern void bbsink_forward_cleanup(bbsink *sink); extern bbsink *bbsink_copystream_new(bool send_to_client); extern bbsink *bbsink_gzip_new(bbsink *next, int compresslevel); extern bbsink *bbsink_lz4_new(bbsink *next, int compresslevel); +extern bbsink *bbsink_zstd_new(bbsink *next, int compresslevel); extern bbsink *bbsink_progress_new(bbsink *next, bool estimate_backup_size); extern bbsink *bbsink_server_new(bbsink *next, char *pathname); extern bbsink *bbsink_throttle_new(bbsink *next, uint32 maxrate); diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm index 105f5c72a2..441d6ae6bf 100644 --- a/src/tools/msvc/Mkvcbuild.pm +++ b/src/tools/msvc/Mkvcbuild.pm @@ -380,6 +380,7 @@ sub mkvcbuild $pgbasebackup->AddFile('src/bin/pg_basebackup/bbstreamer_gzip.c'); $pgbasebackup->AddFile('src/bin/pg_basebackup/bbstreamer_inject.c'); $pgbasebackup->AddFile('src/bin/pg_basebackup/bbstreamer_lz4.c'); + $pgbasebackup->AddFile('src/bin/pg_basebackup/bbstreamer_zstd.c'); $pgbasebackup->AddFile('src/bin/pg_basebackup/bbstreamer_tar.c'); $pgbasebackup->AddLibrary('ws2_32.lib'); From 1d4be6be65ab18aa3b240d9bc912ebece255c53b Mon Sep 17 00:00:00 2001 From: Robert Haas Date: Tue, 8 Mar 2022 10:05:55 -0500 Subject: [PATCH 103/108] Fix LZ4 tests for remaining buffer 
space. We should flush the buffer when the remaining space is less than the maximum amount that we might need, not when it is less than or equal to the maximum amount we might need. Jeevan Ladhe, per an observation from me. Discussion: http://postgr.es/m/CANm22CgVMa85O1akgs+DOPE8NSrT1zbz5_vYfS83_r+6nCivLQ@mail.gmail.com --- src/backend/replication/basebackup_lz4.c | 4 ++-- src/bin/pg_basebackup/bbstreamer_lz4.c | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/backend/replication/basebackup_lz4.c b/src/backend/replication/basebackup_lz4.c index d26032783c..472b620d7c 100644 --- a/src/backend/replication/basebackup_lz4.c +++ b/src/backend/replication/basebackup_lz4.c @@ -193,7 +193,7 @@ bbsink_lz4_archive_contents(bbsink *sink, size_t avail_in) * LZ4F_compressBound(), ask the next sink to process the data so that we * can empty the buffer. */ - if ((mysink->base.bbs_next->bbs_buffer_length - mysink->bytes_written) <= + if ((mysink->base.bbs_next->bbs_buffer_length - mysink->bytes_written) < avail_in_bound) { bbsink_archive_contents(sink->bbs_next, mysink->bytes_written); @@ -238,7 +238,7 @@ bbsink_lz4_end_archive(bbsink *sink) Assert(mysink->base.bbs_next->bbs_buffer_length >= lz4_footer_bound); - if ((mysink->base.bbs_next->bbs_buffer_length - mysink->bytes_written) <= + if ((mysink->base.bbs_next->bbs_buffer_length - mysink->bytes_written) < lz4_footer_bound) { bbsink_archive_contents(sink->bbs_next, mysink->bytes_written); diff --git a/src/bin/pg_basebackup/bbstreamer_lz4.c b/src/bin/pg_basebackup/bbstreamer_lz4.c index f0bc226bf8..bde018246f 100644 --- a/src/bin/pg_basebackup/bbstreamer_lz4.c +++ b/src/bin/pg_basebackup/bbstreamer_lz4.c @@ -99,7 +99,7 @@ bbstreamer_lz4_compressor_new(bbstreamer *next, int compresslevel) compressed_bound = LZ4F_compressBound(streamer->base.bbs_buffer.maxlen, prefs); /* Enlarge buffer if it falls short of compression bound. */ - if (streamer->base.bbs_buffer.maxlen <= compressed_bound) + if (streamer->base.bbs_buffer.maxlen < compressed_bound) enlargeStringInfo(&streamer->base.bbs_buffer, compressed_bound); ctxError = LZ4F_createCompressionContext(&streamer->cctx, LZ4F_VERSION); @@ -170,7 +170,7 @@ bbstreamer_lz4_compressor_content(bbstreamer *streamer, */ out_bound = LZ4F_compressBound(len, &mystreamer->prefs); Assert(mystreamer->base.bbs_buffer.maxlen >= out_bound); - if (avail_out <= out_bound) + if (avail_out < out_bound) { bbstreamer_content(mystreamer->base.bbs_next, member, mystreamer->base.bbs_buffer.data, @@ -218,7 +218,7 @@ bbstreamer_lz4_compressor_finalize(bbstreamer *streamer) /* Find out the footer bound and update the output buffer. */ footer_bound = LZ4F_compressBound(0, &mystreamer->prefs); Assert(mystreamer->base.bbs_buffer.maxlen >= footer_bound); - if ((mystreamer->base.bbs_buffer.maxlen - mystreamer->bytes_written) <= + if ((mystreamer->base.bbs_buffer.maxlen - mystreamer->bytes_written) < footer_bound) { bbstreamer_content(mystreamer->base.bbs_next, NULL, From 54c72eb5e5e63f99f68c054900424724b0570b20 Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Tue, 8 Mar 2022 09:47:34 -0800 Subject: [PATCH 104/108] plpython: add missing plpython.h include to plpy_plpymodule.h The include was missing before 9b7e24a2cb3, but starting with that commit the missing include causes cpluspluscheck to fail because the use of PyMODINIT_FUNC isn't incidentally protected by an ifdef anymore. 
Discussion: https://postgr.es/m/20220308045916.7baapelbgftoqeop@alap3.anarazel.de --- src/pl/plpython/plpy_plpymodule.h | 1 + 1 file changed, 1 insertion(+) diff --git a/src/pl/plpython/plpy_plpymodule.h b/src/pl/plpython/plpy_plpymodule.h index ad6436aca7..1ca3823daf 100644 --- a/src/pl/plpython/plpy_plpymodule.h +++ b/src/pl/plpython/plpy_plpymodule.h @@ -5,6 +5,7 @@ #ifndef PLPY_PLPYMODULE_H #define PLPY_PLPYMODULE_H +#include "plpython.h" #include "utils/hsearch.h" /* A hash table mapping sqlstates to exceptions, for speedy lookup */ From a180c2b34de0989269fdb819bff241a249bf5380 Mon Sep 17 00:00:00 2001 From: Tomas Vondra Date: Tue, 8 Mar 2022 18:54:37 +0100 Subject: [PATCH 105/108] Stabilize test_decoding touching with sequences Some of the test_decoding regression tests are unstable due to modifying a sequence. The first increment of a sequence after a checkpoint is always logged (and thus decoded), which makes the output unpredictable. The runs are usually much shorter than a checkpoint internal, so these failures are rare, but we've seen a couple of them on animals that are either slow or are running with valgrind/clobber cache/... Fixed by skipping sequence decoding in most tests, with the exception of the test aimed at testing decoding of sequences. Reported-by: Amita Kapila Discussion: https://postgr.es/m/d045f3c2-6cfb-06d3-5540-e63c320df8bc@enterprisedb.com --- contrib/test_decoding/expected/ddl.out | 26 +++++++++---------- .../expected/decoding_in_xact.out | 2 +- contrib/test_decoding/expected/replorigin.out | 8 +++--- contrib/test_decoding/expected/rewrite.out | 2 +- contrib/test_decoding/expected/slot.out | 6 ++--- contrib/test_decoding/sql/ddl.sql | 26 +++++++++---------- .../test_decoding/sql/decoding_in_xact.sql | 2 +- contrib/test_decoding/sql/replorigin.sql | 8 +++--- contrib/test_decoding/sql/rewrite.sql | 2 +- contrib/test_decoding/sql/slot.sql | 6 ++--- 10 files changed, 44 insertions(+), 44 deletions(-) diff --git a/contrib/test_decoding/expected/ddl.out b/contrib/test_decoding/expected/ddl.out index 82898201ca..8bb52b559f 100644 --- a/contrib/test_decoding/expected/ddl.out +++ b/contrib/test_decoding/expected/ddl.out @@ -40,7 +40,7 @@ SELECT 'init' FROM pg_create_physical_replication_slot('repl'); init (1 row) -SELECT data FROM pg_logical_slot_get_changes('repl', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT data FROM pg_logical_slot_get_changes('repl', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0'); ERROR: cannot use physical replication slot for logical decoding SELECT pg_drop_replication_slot('repl'); pg_drop_replication_slot @@ -118,7 +118,7 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc ALTER TABLE replication_example ALTER COLUMN somenum TYPE int4 USING (somenum::int4); -- check that this doesn't produce any changes from the heap rewrite -SELECT count(data) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT count(data) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0'); count ------- 0 @@ -134,7 +134,7 @@ INSERT INTO replication_example(somedata, somenum, zaphod2) VALUES (6, 3, 1); INSERT INTO replication_example(somedata, somenum, zaphod1) VALUES (6, 4, 2); COMMIT; -- show changes -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT data 
FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0'); data ------------------------------------------------------------------------------------------------------------------------------------------ BEGIN @@ -154,7 +154,7 @@ INSERT INTO replication_example(id, somedata, somenum) SELECT i, i, i FROM gener ON CONFLICT (id) DO UPDATE SET somenum = excluded.somenum + 1; COMMIT; /* display results */ -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0'); data -------------------------------------------------------------------------------------------------------------------------------------------------- BEGIN @@ -211,7 +211,7 @@ INSERT INTO tr_pkey(data) VALUES(1); --show deletion with primary key DELETE FROM tr_pkey; /* display results */ -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0'); data ---------------------------------------------------------------------------- BEGIN @@ -264,7 +264,7 @@ DELETE FROM spoolme; DROP TABLE spoolme; COMMIT; SELECT data -FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1') +FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0') WHERE data ~ 'UPDATE'; data ------------------------------------------------------------------------------------------------------------- @@ -278,7 +278,7 @@ INSERT INTO tr_etoomuch (id, data) SELECT g.i, -g.i FROM generate_series(8000, 12000) g(i) ON CONFLICT(id) DO UPDATE SET data = EXCLUDED.data; SELECT substring(data, 1, 29), count(*) -FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1') WITH ORDINALITY +FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0') WITH ORDINALITY GROUP BY 1 ORDER BY min(ordinality); substring | count @@ -348,7 +348,7 @@ INSERT INTO tr_sub(path) VALUES ('2-top-1...--#3'); RELEASE SAVEPOINT subtop; INSERT INTO tr_sub(path) VALUES ('2-top-#1'); COMMIT; -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0'); data ------------------------------------------------------------------------ BEGIN @@ -369,7 +369,7 @@ INSERT INTO tr_sub(path) VALUES ('3-top-2-2-#1'); ROLLBACK TO SAVEPOINT b; INSERT INTO tr_sub(path) VALUES ('3-top-2-#2'); COMMIT; -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0'); data ----------------------------------------------------------------------- BEGIN @@ -398,7 +398,7 @@ BEGIN; SAVEPOINT a; INSERT INTO tr_sub(path) VALUES ('5-top-1-#1'); COMMIT; -SELECT data FROM 
pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0'); data --------------------------------------------------------------------- BEGIN @@ -419,7 +419,7 @@ ROLLBACK TO SAVEPOINT a; ALTER TABLE tr_sub_ddl ALTER COLUMN data TYPE bigint; INSERT INTO tr_sub_ddl VALUES(43); COMMIT; -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0'); data -------------------------------------------------- BEGIN @@ -768,7 +768,7 @@ UPDATE toasttable WHERE id = 1; -- make sure we decode correctly even if the toast table is gone DROP TABLE toasttable; -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0'); data -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- BEGIN @@ -780,7 +780,7 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc (6 rows) -- done, free logical replication slot -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0'); data ------ (0 rows) diff --git a/contrib/test_decoding/expected/decoding_in_xact.out b/contrib/test_decoding/expected/decoding_in_xact.out index 6e97b6e34b..0816c780fe 100644 --- a/contrib/test_decoding/expected/decoding_in_xact.out +++ b/contrib/test_decoding/expected/decoding_in_xact.out @@ -68,7 +68,7 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc COMMIT; INSERT INTO nobarf(data) VALUES('3'); -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0'); data ----------------------------------------------------------- BEGIN diff --git a/contrib/test_decoding/expected/replorigin.out b/contrib/test_decoding/expected/replorigin.out index fb96aa3172..7468c24f2b 100644 --- a/contrib/test_decoding/expected/replorigin.out +++ b/contrib/test_decoding/expected/replorigin.out @@ -110,7 +110,7 @@ SELECT pg_replication_origin_xact_setup('0/aabbccdd', '2013-01-01 00:00'); (1 row) INSERT INTO target_tbl(data) -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'only-local', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'only-local', '1', 'include-sequences', '0'); COMMIT; -- check replication progress for the session is correct SELECT pg_replication_origin_session_progress(false); @@ -154,14 +154,14 @@ SELECT pg_replication_origin_progress('regress_test_decoding: regression_slot', SELECT pg_replication_origin_session_reset(); ERROR: no replication origin is configured -- and magically the replayed xact will be filtered! 
-SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'only-local', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'only-local', '1', 'include-sequences', '0'); data ------ (0 rows) --but new original changes still show up INSERT INTO origin_tbl(data) VALUES ('will be replicated'); -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'only-local', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'only-local', '1', 'include-sequences', '0'); data -------------------------------------------------------------------------------- BEGIN @@ -227,7 +227,7 @@ SELECT local_id, external_id, 1 | regress_test_decoding: regression_slot_no_lsn | f | t (1 row) -SELECT data FROM pg_logical_slot_get_changes('regression_slot_no_lsn', NULL, NULL, 'skip-empty-xacts', '1', 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot_no_lsn', NULL, NULL, 'skip-empty-xacts', '1', 'include-xids', '0', 'include-sequences', '0'); data ------------------------------------------------------------------------------------- BEGIN diff --git a/contrib/test_decoding/expected/rewrite.out b/contrib/test_decoding/expected/rewrite.out index 0b5eade41f..5d15b192ed 100644 --- a/contrib/test_decoding/expected/rewrite.out +++ b/contrib/test_decoding/expected/rewrite.out @@ -115,7 +115,7 @@ INSERT INTO replication_example(somedata, testcolumn1, testcolumn3) VALUES (7, 5 COMMIT; -- make old files go away CHECKPOINT; -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0'); data ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- BEGIN diff --git a/contrib/test_decoding/expected/slot.out b/contrib/test_decoding/expected/slot.out index 93d7b95d47..de59d544ae 100644 --- a/contrib/test_decoding/expected/slot.out +++ b/contrib/test_decoding/expected/slot.out @@ -107,7 +107,7 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot1', NULL, NULL, 'in COMMIT (7 rows) -SELECT data FROM pg_logical_slot_get_changes('regression_slot2', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot2', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0'); data --------------------------------------------------------------------------------------------------------- BEGIN @@ -132,7 +132,7 @@ SELECT :'wal_lsn' = :'end_lsn'; t (1 row) -SELECT data FROM pg_logical_slot_get_changes('regression_slot1', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot1', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0'); data --------------------------------------------------------------------------------------------------------- BEGIN @@ -140,7 +140,7 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot1', NULL, NULL, 'in COMMIT (3 rows) -SELECT data FROM 
pg_logical_slot_get_changes('regression_slot2', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot2', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0'); data ------ (0 rows) diff --git a/contrib/test_decoding/sql/ddl.sql b/contrib/test_decoding/sql/ddl.sql index f677460d34..ea406b1303 100644 --- a/contrib/test_decoding/sql/ddl.sql +++ b/contrib/test_decoding/sql/ddl.sql @@ -19,7 +19,7 @@ SELECT pg_drop_replication_slot('regression_slot'); -- check that we're detecting a streaming rep slot used for logical decoding SELECT 'init' FROM pg_create_physical_replication_slot('repl'); -SELECT data FROM pg_logical_slot_get_changes('repl', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT data FROM pg_logical_slot_get_changes('repl', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0'); SELECT pg_drop_replication_slot('repl'); @@ -68,7 +68,7 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc ALTER TABLE replication_example ALTER COLUMN somenum TYPE int4 USING (somenum::int4); -- check that this doesn't produce any changes from the heap rewrite -SELECT count(data) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT count(data) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0'); INSERT INTO replication_example(somedata, somenum) VALUES (5, 1); @@ -82,7 +82,7 @@ INSERT INTO replication_example(somedata, somenum, zaphod1) VALUES (6, 4, 2); COMMIT; -- show changes -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0'); -- ON CONFLICT DO UPDATE support BEGIN; @@ -91,7 +91,7 @@ INSERT INTO replication_example(id, somedata, somenum) SELECT i, i, i FROM gener COMMIT; /* display results */ -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0'); CREATE TABLE tr_unique(id2 serial unique NOT NULL, data int); INSERT INTO tr_unique(data) VALUES(10); @@ -104,7 +104,7 @@ INSERT INTO tr_pkey(data) VALUES(1); DELETE FROM tr_pkey; /* display results */ -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0'); /* * check that disk spooling works (also for logical messages) @@ -136,7 +136,7 @@ DROP TABLE spoolme; COMMIT; SELECT data -FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1') +FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0') WHERE data ~ 'UPDATE'; -- check that a large, spooled, upsert works @@ -145,7 +145,7 @@ SELECT g.i, -g.i FROM generate_series(8000, 12000) g(i) ON CONFLICT(id) DO UPDATE SET data = EXCLUDED.data; SELECT substring(data, 1, 29), count(*) -FROM 
pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1') WITH ORDINALITY +FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0') WITH ORDINALITY GROUP BY 1 ORDER BY min(ordinality); @@ -202,7 +202,7 @@ RELEASE SAVEPOINT subtop; INSERT INTO tr_sub(path) VALUES ('2-top-#1'); COMMIT; -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0'); -- make sure rollbacked subtransactions aren't decoded BEGIN; @@ -215,7 +215,7 @@ ROLLBACK TO SAVEPOINT b; INSERT INTO tr_sub(path) VALUES ('3-top-2-#2'); COMMIT; -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0'); -- test whether a known, but not yet logged toplevel xact, followed by a -- subxact commit is handled correctly @@ -234,7 +234,7 @@ INSERT INTO tr_sub(path) VALUES ('5-top-1-#1'); COMMIT; -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0'); -- check that DDL in aborted subtransactions handled correctly CREATE TABLE tr_sub_ddl(data int); @@ -247,7 +247,7 @@ ALTER TABLE tr_sub_ddl ALTER COLUMN data TYPE bigint; INSERT INTO tr_sub_ddl VALUES(43); COMMIT; -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0'); /* @@ -410,10 +410,10 @@ WHERE id = 1; -- make sure we decode correctly even if the toast table is gone DROP TABLE toasttable; -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0'); -- done, free logical replication slot -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0'); SELECT pg_drop_replication_slot('regression_slot'); diff --git a/contrib/test_decoding/sql/decoding_in_xact.sql b/contrib/test_decoding/sql/decoding_in_xact.sql index 33a9c4a6c7..b343b74566 100644 --- a/contrib/test_decoding/sql/decoding_in_xact.sql +++ b/contrib/test_decoding/sql/decoding_in_xact.sql @@ -36,6 +36,6 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc COMMIT; INSERT INTO nobarf(data) VALUES('3'); -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0'); SELECT 'stop' FROM 
pg_drop_replication_slot('regression_slot'); diff --git a/contrib/test_decoding/sql/replorigin.sql b/contrib/test_decoding/sql/replorigin.sql index f0f4dd4964..cd0a370208 100644 --- a/contrib/test_decoding/sql/replorigin.sql +++ b/contrib/test_decoding/sql/replorigin.sql @@ -60,7 +60,7 @@ BEGIN; -- setup transaction origin SELECT pg_replication_origin_xact_setup('0/aabbccdd', '2013-01-01 00:00'); INSERT INTO target_tbl(data) -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'only-local', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'only-local', '1', 'include-sequences', '0'); COMMIT; -- check replication progress for the session is correct @@ -79,11 +79,11 @@ SELECT pg_replication_origin_progress('regress_test_decoding: regression_slot', SELECT pg_replication_origin_session_reset(); -- and magically the replayed xact will be filtered! -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'only-local', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'only-local', '1', 'include-sequences', '0'); --but new original changes still show up INSERT INTO origin_tbl(data) VALUES ('will be replicated'); -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'only-local', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'only-local', '1', 'include-sequences', '0'); SELECT pg_drop_replication_slot('regression_slot'); SELECT pg_replication_origin_drop('regress_test_decoding: regression_slot'); @@ -114,7 +114,7 @@ SELECT local_id, external_id, remote_lsn <> '0/0' AS valid_remote_lsn, local_lsn <> '0/0' AS valid_local_lsn FROM pg_replication_origin_status; -SELECT data FROM pg_logical_slot_get_changes('regression_slot_no_lsn', NULL, NULL, 'skip-empty-xacts', '1', 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot_no_lsn', NULL, NULL, 'skip-empty-xacts', '1', 'include-xids', '0', 'include-sequences', '0'); -- Clean up SELECT pg_replication_origin_session_reset(); SELECT pg_drop_replication_slot('regression_slot_no_lsn'); diff --git a/contrib/test_decoding/sql/rewrite.sql b/contrib/test_decoding/sql/rewrite.sql index 945c39eb41..1715bd289d 100644 --- a/contrib/test_decoding/sql/rewrite.sql +++ b/contrib/test_decoding/sql/rewrite.sql @@ -90,7 +90,7 @@ COMMIT; -- make old files go away CHECKPOINT; -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0'); -- trigger repeated rewrites of a system catalog with a toast table, -- that previously was buggy: 20180914021046.oi7dm4ra3ot2g2kt@alap3.anarazel.de diff --git a/contrib/test_decoding/sql/slot.sql b/contrib/test_decoding/sql/slot.sql index 70ea1603e6..52a740c43d 100644 --- a/contrib/test_decoding/sql/slot.sql +++ b/contrib/test_decoding/sql/slot.sql @@ -51,7 +51,7 @@ SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot2', 'test_ INSERT INTO replication_example(somedata, text) VALUES (1, 3); SELECT data FROM 
pg_logical_slot_get_changes('regression_slot1', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0'); -SELECT data FROM pg_logical_slot_get_changes('regression_slot2', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot2', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0'); INSERT INTO replication_example(somedata, text) VALUES (1, 4); INSERT INTO replication_example(somedata, text) VALUES (1, 5); @@ -65,8 +65,8 @@ SELECT slot_name FROM pg_replication_slot_advance('regression_slot2', pg_current SELECT :'wal_lsn' = :'end_lsn'; -SELECT data FROM pg_logical_slot_get_changes('regression_slot1', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); -SELECT data FROM pg_logical_slot_get_changes('regression_slot2', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot1', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot2', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-sequences', '0'); DROP TABLE replication_example; From 43e7787dd3387a7455fc4d9c353addb79a50ebe5 Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Tue, 8 Mar 2022 10:31:06 -0800 Subject: [PATCH 106/108] plpython: Restore alternative output for plpython_error test. In db23464715f I removed the alternative output for plpython_error. Wrongly so, because the output changed in Python 3.5, not Python 3. --- src/pl/plpython/expected/plpython_error.out | 2 +- src/pl/plpython/expected/plpython_error_5.out | 447 ++++++++++++++++++ 2 files changed, 448 insertions(+), 1 deletion(-) create mode 100644 src/pl/plpython/expected/plpython_error_5.out diff --git a/src/pl/plpython/expected/plpython_error.out b/src/pl/plpython/expected/plpython_error.out index 7fe864a1a5..9af7ea7292 100644 --- a/src/pl/plpython/expected/plpython_error.out +++ b/src/pl/plpython/expected/plpython_error.out @@ -243,7 +243,7 @@ $$ plpy.nonexistent $$ LANGUAGE plpython3u; SELECT toplevel_attribute_error(); -ERROR: AttributeError: module 'plpy' has no attribute 'nonexistent' +ERROR: AttributeError: 'module' object has no attribute 'nonexistent' CONTEXT: Traceback (most recent call last): PL/Python function "toplevel_attribute_error", line 2, in plpy.nonexistent diff --git a/src/pl/plpython/expected/plpython_error_5.out b/src/pl/plpython/expected/plpython_error_5.out new file mode 100644 index 0000000000..7fe864a1a5 --- /dev/null +++ b/src/pl/plpython/expected/plpython_error_5.out @@ -0,0 +1,447 @@ +-- test error handling, i forgot to restore Warn_restart in +-- the trigger handler once. the errors and subsequent core dump were +-- interesting. 
+/* Flat out Python syntax error
+ */
+CREATE FUNCTION python_syntax_error() RETURNS text
+	AS
+'.syntaxerror'
+	LANGUAGE plpython3u;
+ERROR:  could not compile PL/Python function "python_syntax_error"
+DETAIL:  SyntaxError: invalid syntax (<string>, line 2)
+/* With check_function_bodies = false the function should get defined
+ * and the error reported when called
+ */
+SET check_function_bodies = false;
+CREATE FUNCTION python_syntax_error() RETURNS text
+	AS
+'.syntaxerror'
+	LANGUAGE plpython3u;
+SELECT python_syntax_error();
+ERROR:  could not compile PL/Python function "python_syntax_error"
+DETAIL:  SyntaxError: invalid syntax (<string>, line 2)
+/* Run the function twice to check if the hashtable entry gets cleaned up */
+SELECT python_syntax_error();
+ERROR:  could not compile PL/Python function "python_syntax_error"
+DETAIL:  SyntaxError: invalid syntax (<string>, line 2)
+RESET check_function_bodies;
+/* Flat out syntax error
+ */
+CREATE FUNCTION sql_syntax_error() RETURNS text
+	AS
+'plpy.execute("syntax error")'
+	LANGUAGE plpython3u;
+SELECT sql_syntax_error();
+ERROR:  spiexceptions.SyntaxError: syntax error at or near "syntax"
+LINE 1: syntax error
+        ^
+QUERY:  syntax error
+CONTEXT:  Traceback (most recent call last):
+  PL/Python function "sql_syntax_error", line 1, in <module>
+    plpy.execute("syntax error")
+PL/Python function "sql_syntax_error"
+/* check the handling of uncaught python exceptions
+ */
+CREATE FUNCTION exception_index_invalid(text) RETURNS text
+	AS
+'return args[1]'
+	LANGUAGE plpython3u;
+SELECT exception_index_invalid('test');
+ERROR:  IndexError: list index out of range
+CONTEXT:  Traceback (most recent call last):
+  PL/Python function "exception_index_invalid", line 1, in <module>
+    return args[1]
+PL/Python function "exception_index_invalid"
+/* check handling of nested exceptions
+ */
+CREATE FUNCTION exception_index_invalid_nested() RETURNS text
+	AS
+'rv = plpy.execute("SELECT test5(''foo'')")
+return rv[0]'
+	LANGUAGE plpython3u;
+SELECT exception_index_invalid_nested();
+ERROR:  spiexceptions.UndefinedFunction: function test5(unknown) does not exist
+LINE 1: SELECT test5('foo')
+               ^
+HINT:  No function matches the given name and argument types. You might need to add explicit type casts.
+QUERY: SELECT test5('foo') +CONTEXT: Traceback (most recent call last): + PL/Python function "exception_index_invalid_nested", line 1, in + rv = plpy.execute("SELECT test5('foo')") +PL/Python function "exception_index_invalid_nested" +/* a typo + */ +CREATE FUNCTION invalid_type_uncaught(a text) RETURNS text + AS +'if "plan" not in SD: + q = "SELECT fname FROM users WHERE lname = $1" + SD["plan"] = plpy.prepare(q, [ "test" ]) +rv = plpy.execute(SD["plan"], [ a ]) +if len(rv): + return rv[0]["fname"] +return None +' + LANGUAGE plpython3u; +SELECT invalid_type_uncaught('rick'); +ERROR: spiexceptions.UndefinedObject: type "test" does not exist +CONTEXT: Traceback (most recent call last): + PL/Python function "invalid_type_uncaught", line 3, in + SD["plan"] = plpy.prepare(q, [ "test" ]) +PL/Python function "invalid_type_uncaught" +/* for what it's worth catch the exception generated by + * the typo, and return None + */ +CREATE FUNCTION invalid_type_caught(a text) RETURNS text + AS +'if "plan" not in SD: + q = "SELECT fname FROM users WHERE lname = $1" + try: + SD["plan"] = plpy.prepare(q, [ "test" ]) + except plpy.SPIError as ex: + plpy.notice(str(ex)) + return None +rv = plpy.execute(SD["plan"], [ a ]) +if len(rv): + return rv[0]["fname"] +return None +' + LANGUAGE plpython3u; +SELECT invalid_type_caught('rick'); +NOTICE: type "test" does not exist + invalid_type_caught +--------------------- + +(1 row) + +/* for what it's worth catch the exception generated by + * the typo, and reraise it as a plain error + */ +CREATE FUNCTION invalid_type_reraised(a text) RETURNS text + AS +'if "plan" not in SD: + q = "SELECT fname FROM users WHERE lname = $1" + try: + SD["plan"] = plpy.prepare(q, [ "test" ]) + except plpy.SPIError as ex: + plpy.error(str(ex)) +rv = plpy.execute(SD["plan"], [ a ]) +if len(rv): + return rv[0]["fname"] +return None +' + LANGUAGE plpython3u; +SELECT invalid_type_reraised('rick'); +ERROR: plpy.Error: type "test" does not exist +CONTEXT: Traceback (most recent call last): + PL/Python function "invalid_type_reraised", line 6, in + plpy.error(str(ex)) +PL/Python function "invalid_type_reraised" +/* no typo no messing about + */ +CREATE FUNCTION valid_type(a text) RETURNS text + AS +'if "plan" not in SD: + SD["plan"] = plpy.prepare("SELECT fname FROM users WHERE lname = $1", [ "text" ]) +rv = plpy.execute(SD["plan"], [ a ]) +if len(rv): + return rv[0]["fname"] +return None +' + LANGUAGE plpython3u; +SELECT valid_type('rick'); + valid_type +------------ + +(1 row) + +/* error in nested functions to get a traceback +*/ +CREATE FUNCTION nested_error() RETURNS text + AS +'def fun1(): + plpy.error("boom") + +def fun2(): + fun1() + +def fun3(): + fun2() + +fun3() +return "not reached" +' + LANGUAGE plpython3u; +SELECT nested_error(); +ERROR: plpy.Error: boom +CONTEXT: Traceback (most recent call last): + PL/Python function "nested_error", line 10, in + fun3() + PL/Python function "nested_error", line 8, in fun3 + fun2() + PL/Python function "nested_error", line 5, in fun2 + fun1() + PL/Python function "nested_error", line 2, in fun1 + plpy.error("boom") +PL/Python function "nested_error" +/* raising plpy.Error is just like calling plpy.error +*/ +CREATE FUNCTION nested_error_raise() RETURNS text + AS +'def fun1(): + raise plpy.Error("boom") + +def fun2(): + fun1() + +def fun3(): + fun2() + +fun3() +return "not reached" +' + LANGUAGE plpython3u; +SELECT nested_error_raise(); +ERROR: plpy.Error: boom +CONTEXT: Traceback (most recent call last): + PL/Python function "nested_error_raise", 
line 10, in + fun3() + PL/Python function "nested_error_raise", line 8, in fun3 + fun2() + PL/Python function "nested_error_raise", line 5, in fun2 + fun1() + PL/Python function "nested_error_raise", line 2, in fun1 + raise plpy.Error("boom") +PL/Python function "nested_error_raise" +/* using plpy.warning should not produce a traceback +*/ +CREATE FUNCTION nested_warning() RETURNS text + AS +'def fun1(): + plpy.warning("boom") + +def fun2(): + fun1() + +def fun3(): + fun2() + +fun3() +return "you''ve been warned" +' + LANGUAGE plpython3u; +SELECT nested_warning(); +WARNING: boom + nested_warning +-------------------- + you've been warned +(1 row) + +/* AttributeError at toplevel used to give segfaults with the traceback +*/ +CREATE FUNCTION toplevel_attribute_error() RETURNS void AS +$$ +plpy.nonexistent +$$ LANGUAGE plpython3u; +SELECT toplevel_attribute_error(); +ERROR: AttributeError: module 'plpy' has no attribute 'nonexistent' +CONTEXT: Traceback (most recent call last): + PL/Python function "toplevel_attribute_error", line 2, in + plpy.nonexistent +PL/Python function "toplevel_attribute_error" +/* Calling PL/Python functions from SQL and vice versa should not lose context. + */ +CREATE OR REPLACE FUNCTION python_traceback() RETURNS void AS $$ +def first(): + second() + +def second(): + third() + +def third(): + plpy.execute("select sql_error()") + +first() +$$ LANGUAGE plpython3u; +CREATE OR REPLACE FUNCTION sql_error() RETURNS void AS $$ +begin + select 1/0; +end +$$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION python_from_sql_error() RETURNS void AS $$ +begin + select python_traceback(); +end +$$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION sql_from_python_error() RETURNS void AS $$ +plpy.execute("select sql_error()") +$$ LANGUAGE plpython3u; +SELECT python_traceback(); +ERROR: spiexceptions.DivisionByZero: division by zero +CONTEXT: Traceback (most recent call last): + PL/Python function "python_traceback", line 11, in + first() + PL/Python function "python_traceback", line 3, in first + second() + PL/Python function "python_traceback", line 6, in second + third() + PL/Python function "python_traceback", line 9, in third + plpy.execute("select sql_error()") +PL/Python function "python_traceback" +SELECT sql_error(); +ERROR: division by zero +CONTEXT: SQL statement "select 1/0" +PL/pgSQL function sql_error() line 3 at SQL statement +SELECT python_from_sql_error(); +ERROR: spiexceptions.DivisionByZero: division by zero +CONTEXT: Traceback (most recent call last): + PL/Python function "python_traceback", line 11, in + first() + PL/Python function "python_traceback", line 3, in first + second() + PL/Python function "python_traceback", line 6, in second + third() + PL/Python function "python_traceback", line 9, in third + plpy.execute("select sql_error()") +PL/Python function "python_traceback" +SQL statement "select python_traceback()" +PL/pgSQL function python_from_sql_error() line 3 at SQL statement +SELECT sql_from_python_error(); +ERROR: spiexceptions.DivisionByZero: division by zero +CONTEXT: Traceback (most recent call last): + PL/Python function "sql_from_python_error", line 2, in + plpy.execute("select sql_error()") +PL/Python function "sql_from_python_error" +/* check catching specific types of exceptions + */ +CREATE TABLE specific ( + i integer PRIMARY KEY +); +CREATE FUNCTION specific_exception(i integer) RETURNS void AS +$$ +from plpy import spiexceptions +try: + plpy.execute("insert into specific values (%s)" % (i or "NULL")); +except spiexceptions.NotNullViolation as 
e: + plpy.notice("Violated the NOT NULL constraint, sqlstate %s" % e.sqlstate) +except spiexceptions.UniqueViolation as e: + plpy.notice("Violated the UNIQUE constraint, sqlstate %s" % e.sqlstate) +$$ LANGUAGE plpython3u; +SELECT specific_exception(2); + specific_exception +-------------------- + +(1 row) + +SELECT specific_exception(NULL); +NOTICE: Violated the NOT NULL constraint, sqlstate 23502 + specific_exception +-------------------- + +(1 row) + +SELECT specific_exception(2); +NOTICE: Violated the UNIQUE constraint, sqlstate 23505 + specific_exception +-------------------- + +(1 row) + +/* SPI errors in PL/Python functions should preserve the SQLSTATE value + */ +CREATE FUNCTION python_unique_violation() RETURNS void AS $$ +plpy.execute("insert into specific values (1)") +plpy.execute("insert into specific values (1)") +$$ LANGUAGE plpython3u; +CREATE FUNCTION catch_python_unique_violation() RETURNS text AS $$ +begin + begin + perform python_unique_violation(); + exception when unique_violation then + return 'ok'; + end; + return 'not reached'; +end; +$$ language plpgsql; +SELECT catch_python_unique_violation(); + catch_python_unique_violation +------------------------------- + ok +(1 row) + +/* manually starting subtransactions - a bad idea + */ +CREATE FUNCTION manual_subxact() RETURNS void AS $$ +plpy.execute("savepoint save") +plpy.execute("create table foo(x integer)") +plpy.execute("rollback to save") +$$ LANGUAGE plpython3u; +SELECT manual_subxact(); +ERROR: plpy.SPIError: SPI_execute failed: SPI_ERROR_TRANSACTION +CONTEXT: Traceback (most recent call last): + PL/Python function "manual_subxact", line 2, in + plpy.execute("savepoint save") +PL/Python function "manual_subxact" +/* same for prepared plans + */ +CREATE FUNCTION manual_subxact_prepared() RETURNS void AS $$ +save = plpy.prepare("savepoint save") +rollback = plpy.prepare("rollback to save") +plpy.execute(save) +plpy.execute("create table foo(x integer)") +plpy.execute(rollback) +$$ LANGUAGE plpython3u; +SELECT manual_subxact_prepared(); +ERROR: plpy.SPIError: SPI_execute_plan failed: SPI_ERROR_TRANSACTION +CONTEXT: Traceback (most recent call last): + PL/Python function "manual_subxact_prepared", line 4, in + plpy.execute(save) +PL/Python function "manual_subxact_prepared" +/* raising plpy.spiexception.* from python code should preserve sqlstate + */ +CREATE FUNCTION plpy_raise_spiexception() RETURNS void AS $$ +raise plpy.spiexceptions.DivisionByZero() +$$ LANGUAGE plpython3u; +DO $$ +BEGIN + SELECT plpy_raise_spiexception(); +EXCEPTION WHEN division_by_zero THEN + -- NOOP +END +$$ LANGUAGE plpgsql; +/* setting a custom sqlstate should be handled + */ +CREATE FUNCTION plpy_raise_spiexception_override() RETURNS void AS $$ +exc = plpy.spiexceptions.DivisionByZero() +exc.sqlstate = 'SILLY' +raise exc +$$ LANGUAGE plpython3u; +DO $$ +BEGIN + SELECT plpy_raise_spiexception_override(); +EXCEPTION WHEN SQLSTATE 'SILLY' THEN + -- NOOP +END +$$ LANGUAGE plpgsql; +/* test the context stack trace for nested execution levels + */ +CREATE FUNCTION notice_innerfunc() RETURNS int AS $$ +plpy.execute("DO LANGUAGE plpython3u $x$ plpy.notice('inside DO') $x$") +return 1 +$$ LANGUAGE plpython3u; +CREATE FUNCTION notice_outerfunc() RETURNS int AS $$ +plpy.execute("SELECT notice_innerfunc()") +return 1 +$$ LANGUAGE plpython3u; +\set SHOW_CONTEXT always +SELECT notice_outerfunc(); +NOTICE: inside DO +CONTEXT: PL/Python anonymous code block +SQL statement "DO LANGUAGE plpython3u $x$ plpy.notice('inside DO') $x$" +PL/Python function 
"notice_innerfunc" +SQL statement "SELECT notice_innerfunc()" +PL/Python function "notice_outerfunc" + notice_outerfunc +------------------ + 1 +(1 row) + From 15420b1a46e87e426d177700839e6d725c043b96 Mon Sep 17 00:00:00 2001 From: David Fetter Date: Tue, 22 Feb 2022 07:06:03 -0800 Subject: [PATCH 107/108] Crude re-jiggering for use elsewhere --- src/backend/utils/adt/numutils.c | 47 -------------------------------- src/include/port/pg_bitutils.h | 47 ++++++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+), 47 deletions(-) diff --git a/src/backend/utils/adt/numutils.c b/src/backend/utils/adt/numutils.c index cc3f95d399..e75b2a5329 100644 --- a/src/backend/utils/adt/numutils.c +++ b/src/backend/utils/adt/numutils.c @@ -38,53 +38,6 @@ static const char DIGIT_TABLE[200] = "80" "81" "82" "83" "84" "85" "86" "87" "88" "89" "90" "91" "92" "93" "94" "95" "96" "97" "98" "99"; -/* - * Adapted from http://graphics.stanford.edu/~seander/bithacks.html#IntegerLog10 - */ -static inline int -decimalLength32(const uint32 v) -{ - int t; - static const uint32 PowersOfTen[] = { - 1, 10, 100, - 1000, 10000, 100000, - 1000000, 10000000, 100000000, - 1000000000 - }; - - /* - * Compute base-10 logarithm by dividing the base-2 logarithm by a - * good-enough approximation of the base-2 logarithm of 10 - */ - t = (pg_leftmost_one_pos32(v) + 1) * 1233 / 4096; - return t + (v >= PowersOfTen[t]); -} - -static inline int -decimalLength64(const uint64 v) -{ - int t; - static const uint64 PowersOfTen[] = { - UINT64CONST(1), UINT64CONST(10), - UINT64CONST(100), UINT64CONST(1000), - UINT64CONST(10000), UINT64CONST(100000), - UINT64CONST(1000000), UINT64CONST(10000000), - UINT64CONST(100000000), UINT64CONST(1000000000), - UINT64CONST(10000000000), UINT64CONST(100000000000), - UINT64CONST(1000000000000), UINT64CONST(10000000000000), - UINT64CONST(100000000000000), UINT64CONST(1000000000000000), - UINT64CONST(10000000000000000), UINT64CONST(100000000000000000), - UINT64CONST(1000000000000000000), UINT64CONST(10000000000000000000) - }; - - /* - * Compute base-10 logarithm by dividing the base-2 logarithm by a - * good-enough approximation of the base-2 logarithm of 10 - */ - t = (pg_leftmost_one_pos64(v) + 1) * 1233 / 4096; - return t + (v >= PowersOfTen[t]); -} - /* * Convert input string to a signed 16 bit integer. 
* diff --git a/src/include/port/pg_bitutils.h b/src/include/port/pg_bitutils.h index 04e58cd1c4..ae16131f88 100644 --- a/src/include/port/pg_bitutils.h +++ b/src/include/port/pg_bitutils.h @@ -299,4 +299,51 @@ pg_rotate_left32(uint32 word, int n) return (word << n) | (word >> (32 - n)); } +/* + * Adapted from http://graphics.stanford.edu/~seander/bithacks.html#IntegerLog10 + */ +static inline int +decimalLength32(const uint32 v) +{ + int t; + static const uint32 PowersOfTen[] = { + 1, 10, 100, + 1000, 10000, 100000, + 1000000, 10000000, 100000000, + 1000000000 + }; + + /* + * Compute base-10 logarithm by dividing the base-2 logarithm by a + * good-enough approximation of the base-2 logarithm of 10 + */ + t = (pg_leftmost_one_pos32(v) + 1) * 1233 / 4096; + return t + (v >= PowersOfTen[t]); +} + +static inline int +decimalLength64(const uint64 v) +{ + int t; + static const uint64 PowersOfTen[] = { + UINT64CONST(1), UINT64CONST(10), + UINT64CONST(100), UINT64CONST(1000), + UINT64CONST(10000), UINT64CONST(100000), + UINT64CONST(1000000), UINT64CONST(10000000), + UINT64CONST(100000000), UINT64CONST(1000000000), + UINT64CONST(10000000000), UINT64CONST(100000000000), + UINT64CONST(1000000000000), UINT64CONST(10000000000000), + UINT64CONST(100000000000000), UINT64CONST(1000000000000000), + UINT64CONST(10000000000000000), UINT64CONST(100000000000000000), + UINT64CONST(1000000000000000000), UINT64CONST(10000000000000000000) + }; + + /* + * Compute base-10 logarithm by dividing the base-2 logarithm by a + * good-enough approximation of the base-2 logarithm of 10 + */ + t = (pg_leftmost_one_pos64(v) + 1) * 1233 / 4096; + return t + (v >= PowersOfTen[t]); +} + #endif /* PG_BITUTILS_H */ From 2720344de289fad9869439fb8f2d5f62dce61d75 Mon Sep 17 00:00:00 2001 From: David Fetter Date: Tue, 22 Feb 2022 07:06:40 -0800 Subject: [PATCH 108/108] Changed NBASE from 10_000 to 1_000_000_000 --- src/backend/utils/adt/numeric.c | 146 +++++++++++++++++++++++++++----- 1 file changed, 126 insertions(+), 20 deletions(-) diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c index 45547f6ae7..9525f36968 100644 --- a/src/backend/utils/adt/numeric.c +++ b/src/backend/utils/adt/numeric.c @@ -35,6 +35,7 @@ #include "miscadmin.h" #include "nodes/nodeFuncs.h" #include "nodes/supportnodes.h" +#include "port/pg_bitutils.h" #include "utils/array.h" #include "utils/builtins.h" #include "utils/float.h" @@ -57,18 +58,18 @@ * Numeric values are represented in a base-NBASE floating point format. * Each "digit" ranges from 0 to NBASE-1. The type NumericDigit is signed * and wide enough to store a digit. We assume that NBASE*NBASE can fit in - * an int. Although the purely calculational routines could handle any even - * NBASE that's less than sqrt(INT_MAX), in practice we are only interested + * an int64. Although the purely calculational routines could handle any even + * NBASE that's less than sqrt(INT64_MAX), in practice we are only interested * in NBASE a power of ten, so that I/O conversions and decimal rounding * are easy. Also, it's actually more efficient if NBASE is rather less than - * sqrt(INT_MAX), so that there is "headroom" for mul_var and div_var_fast to + * sqrt(INT64_MAX), so that there is "headroom" for mul_var and div_var_fast to * postpone processing carries. 
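+ *
+ * To put rough numbers on that headroom (illustrative arithmetic, not part
+ * of the original comment): one digit product is at most (NBASE - 1)^2, so
+ * with NBASE = 10^4 an int accumulator leaves room for about
+ * INT_MAX / 9999^2, i.e. ~21 deferred products per carry pass, while with
+ * NBASE = 10^9 an int64 accumulator leaves only about
+ * INT64_MAX / 999999999^2, i.e. ~9.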
* - * Values of NBASE other than 10000 are considered of historical interest only + * Values of NBASE other than 1000000000 are considered of historical interest only * and are no longer supported in any sense; no mechanism exists for the client * to discover the base, so every client supporting binary mode expects the - * base-10000 format. If you plan to change this, also note the numeric - * abbreviation code, which assumes NBASE=10000. + * base-1000000000 format. If you plan to change this, also note the numeric + * abbreviation code, which assumes NBASE=1000000000. * ---------- */ @@ -92,7 +93,7 @@ typedef signed char NumericDigit; typedef signed char NumericDigit; #endif -#if 1 +#if 0 #define NBASE 10000 #define HALF_NBASE 5000 #define DEC_DIGITS 4 /* decimal digits per NBASE digit */ @@ -102,6 +103,16 @@ typedef signed char NumericDigit; typedef int16 NumericDigit; #endif +#if 1 +#define NBASE 1000000000 +#define HALF_NBASE 500000000 +#define DEC_DIGITS 9 /* decimal digits per NBASE digit */ +#define MUL_GUARD_DIGITS 2 /* these are measured in NBASE digits */ +#define DIV_GUARD_DIGITS 4 + +typedef int32 NumericDigit; +#endif + /* * The Numeric type as stored on disk. * @@ -427,7 +438,9 @@ static const NumericDigit const_two_data[1] = {2}; static const NumericVar const_two = {1, 0, NUMERIC_POS, 0, NULL, (NumericDigit *) const_two_data}; -#if DEC_DIGITS == 4 +#if DEC_DIGITS == 9 +static const NumericDigit const_zero_point_nine_data[1] = {900000000}; +#elif DEC_DIGITS == 4 static const NumericDigit const_zero_point_nine_data[1] = {9000}; #elif DEC_DIGITS == 2 static const NumericDigit const_zero_point_nine_data[1] = {90}; @@ -437,7 +450,9 @@ static const NumericDigit const_zero_point_nine_data[1] = {9}; static const NumericVar const_zero_point_nine = {1, -1, NUMERIC_POS, 1, NULL, (NumericDigit *) const_zero_point_nine_data}; -#if DEC_DIGITS == 4 +#if DEC_DIGITS == 9 +static const NumericDigit const_one_point_one_data[2] = {1, 100000000}; +#elif DEC_DIGITS == 4 static const NumericDigit const_one_point_one_data[2] = {1, 1000}; #elif DEC_DIGITS == 2 static const NumericDigit const_one_point_one_data[2] = {1, 10}; @@ -456,7 +471,9 @@ static const NumericVar const_pinf = static const NumericVar const_ninf = {0, 0, NUMERIC_NINF, 0, NULL, NULL}; -#if DEC_DIGITS == 4 +#if DEC_DIGITS == 9 +static const int round_powers[9] = {0, 100000000, 10000000, 1000000, 100000, 10000, 1000, 100, 10}; +#elif DEC_DIGITS == 4 static const int round_powers[4] = {0, 1000, 100, 10}; #endif @@ -990,8 +1007,9 @@ numeric_normalize(Numeric num) /* * numeric_recv - converts external binary format to numeric * - * External format is a sequence of int16's: - * ndigits, weight, sign, dscale, NumericDigits. + * External format is a header sequence of int16's: + * ndigits, weight, sign, dscale + * followed by a sequence of int32's, which are NumericDigits. 
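+ *
+ * A worked example (illustrative, assuming NBASE = 10^9): the value
+ * 1234567890.123456789 travels as ndigits = 3, weight = 1, sign =
+ * NUMERIC_POS, dscale = 9 and digits = {1, 234567890, 123456789}, i.e.
+ * 1*NBASE^1 + 234567890*NBASE^0 + 123456789*NBASE^-1.  Each digit is now
+ * four bytes on the wire instead of two, but carries nine decimal digits
+ * instead of four.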
*/ Datum numeric_recv(PG_FUNCTION_ARGS) @@ -1095,7 +1113,7 @@ numeric_send(PG_FUNCTION_ARGS) pq_sendint16(&buf, x.sign); pq_sendint16(&buf, x.dscale); for (i = 0; i < x.ndigits; i++) - pq_sendint16(&buf, x.digits[i]); + pq_sendint32(&buf, x.digits[i]); PG_RETURN_BYTEA_P(pq_endtypsend(&buf)); } @@ -4174,7 +4192,7 @@ int64_div_fast_to_numeric(int64 val1, int log10val2) */ if (m > 0) { - static int pow10[] = {1, 10, 100, 1000}; + static int pow10[] = {1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000 }; StaticAssertStmt(lengthof(pow10) == DEC_DIGITS, "mismatch with DEC_DIGITS"); if (unlikely(pg_mul_s64_overflow(val1, pow10[DEC_DIGITS - m], &val1))) @@ -6944,7 +6962,17 @@ set_var_from_str(const char *str, const char *cp, NumericVar *dest) while (ndigits-- > 0) { -#if DEC_DIGITS == 4 +#if DEC_DIGITS == 9 + *digits++ = 100000000 * decdigits[i] + + 10000000 * decdigits[i+1] + + 1000000 * decdigits[i+2] + + 100000 * decdigits[i+3] + + 10000 * decdigits[i+4] + + 1000 * decdigits[i+5] + + 100 * decdigits[i+6] + + 10 * decdigits[i+7] + + decdigits[i+8]; +#elif DEC_DIGITS == 4 *digits++ = ((decdigits[i] * 10 + decdigits[i + 1]) * 10 + decdigits[i + 2]) * 10 + decdigits[i + 3]; #elif DEC_DIGITS == 2 @@ -7098,7 +7126,53 @@ get_str_from_var(const NumericVar *var) { dig = (d < var->ndigits) ? var->digits[d] : 0; /* In the first digit, suppress extra leading decimal zeroes */ -#if DEC_DIGITS == 4 +#if DEC_DIGITS == 9 + { + bool putit = (d > 0); + + d1 = dig / 100000000; + dig -= d1 * 100000000; + putit |= (d1 > 0); + if (putit) + *cp++ = d1 + '0'; + d1 = dig / 10000000; + dig -= d1 * 10000000; + putit |= (d1 > 0); + if (putit) + *cp++ = d1 + '0'; + d1 = dig / 1000000; + dig -= d1 * 1000000; + putit |= (d1 > 0); + if (putit) + *cp++ = d1 + '0'; + d1 = dig / 100000; + dig -= d1 * 100000; + putit |= (d1 > 0); + if (putit) + *cp++ = d1 + '0'; + d1 = dig / 10000; + dig -= d1 * 10000; + putit |= (d1 > 0); + if (putit) + *cp++ = d1 + '0'; + d1 = dig / 1000; + dig -= d1 * 1000; + putit |= (d1 > 0); + if (putit) + *cp++ = d1 + '0'; + d1 = dig / 100; + dig -= d1 * 100; + putit |= (d1 > 0); + if (putit) + *cp++ = d1 + '0'; + d1 = dig / 10; + dig -= d1 * 10; + putit |= (d1 > 0); + if (putit) + *cp++ = d1 + '0'; + *cp++ = dig + '0'; + } +#elif DEC_DIGITS == 4 { bool putit = (d > 0); @@ -7145,7 +7219,33 @@ get_str_from_var(const NumericVar *var) for (i = 0; i < dscale; d++, i += DEC_DIGITS) { dig = (d >= 0 && d < var->ndigits) ? 
var->digits[d] : 0; -#if DEC_DIGITS == 4 +#if DEC_DIGITS == 9 + d1 = dig / 100000000; + dig -= d1 * 100000000; + *cp++ = d1 + '0'; + d1 = dig / 10000000; + dig -= d1 * 10000000; + *cp++ = d1 + '0'; + d1 = dig / 1000000; + dig -= d1 * 1000000; + *cp++ = d1 + '0'; + d1 = dig / 100000; + dig -= d1 * 100000; + *cp++ = d1 + '0'; + d1 = dig / 10000; + dig -= d1 * 10000; + *cp++ = d1 + '0'; + d1 = dig / 1000; + dig -= d1 * 1000; + *cp++ = d1 + '0'; + d1 = dig / 100; + dig -= d1 * 100; + *cp++ = d1 + '0'; + d1 = dig / 10; + dig -= d1 * 10; + *cp++ = d1 + '0'; + *cp++ = dig + '0'; +#elif DEC_DIGITS == 4 d1 = dig / 1000; dig -= d1 * 1000; *cp++ = d1 + '0'; @@ -7498,7 +7598,9 @@ apply_typmod(NumericVar *var, int32 typmod) if (dig) { /* Adjust for any high-order decimal zero digits */ -#if DEC_DIGITS == 4 +#if DEC_DIGITS == 9 + ddigits -= DEC_DIGITS - decimalLength32(dig); +#elif DEC_DIGITS == 4 if (dig < 10) ddigits -= 3; else if (dig < 100) @@ -10988,7 +11090,9 @@ round_var(NumericVar *var, int rscale) int extra, pow10; -#if DEC_DIGITS == 4 +#if DEC_DIGITS == 9 + pow10 = round_powers[di]; +#elif DEC_DIGITS == 4 pow10 = round_powers[di]; #elif DEC_DIGITS == 2 pow10 = 10; @@ -11088,7 +11192,9 @@ trunc_var(NumericVar *var, int rscale) int extra, pow10; -#if DEC_DIGITS == 4 +#if DEC_DIGITS == 9 + pow10 = round_powers[di]; +#elif DEC_DIGITS == 4 pow10 = round_powers[di]; #elif DEC_DIGITS == 2 pow10 = 10;
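
In both round_var and trunc_var, round_powers[di] is 10^(DEC_DIGITS - di): the power of ten used to chop off a decimal position that falls inside a single NBASE digit. A minimal sketch of that intra-digit step for the new base (round_nbase9_digit is a hypothetical helper for illustration only; the patch's round_var additionally propagates the carry across the whole digit array and handles the digit-aligned case where di == 0):

#include <assert.h>
#include <stdint.h>

/* Keep the leading `keep` decimal digits (1..8) of one base-10^9 digit and
 * round the remainder to nearest; reports a carry into the next digit. */
static int32_t
round_nbase9_digit(int32_t digit, int keep, int *carry)
{
    static const int32_t round_powers9[9] = {
        0, 100000000, 10000000, 1000000, 100000,
        10000, 1000, 100, 10
    };
    int32_t pow10 = round_powers9[keep];
    int32_t extra = digit % pow10;

    digit -= extra;
    if (extra >= pow10 / 2)
        digit += pow10;         /* round half up */
    /* Rounding up may overflow the digit (999999999 -> 10^9), which must
     * carry into the next more significant NBASE digit. */
    *carry = (digit >= 1000000000);
    return *carry ? digit - 1000000000 : digit;
}

int
main(void)
{
    int carry;

    /* keep the leading four decimal digits, round the rest */
    assert(round_nbase9_digit(123456789, 4, &carry) == 123500000 && !carry);
    /* rounding can ripple out of the digit entirely */
    assert(round_nbase9_digit(999999999, 1, &carry) == 0 && carry);
    return 0;
}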