diff --git a/doc/src/sgml/ref/pg_restore.sgml b/doc/src/sgml/ref/pg_restore.sgml
index 261ead150395..8290c2d105e3 100644
--- a/doc/src/sgml/ref/pg_restore.sgml
+++ b/doc/src/sgml/ref/pg_restore.sgml
@@ -118,6 +118,21 @@ PostgreSQL documentation
ignorable error messages will be reported,
unless <option>--if-exists</option> is also specified.
+
+ If the schema is not restored but the data is restored (for example
+ using <option>--data-only</option> or <option>--section=data</option>),
+ then instead of a <command>DROP</command>, a <command>TRUNCATE</command>
+ will be attempted before <command>COPY</command>ing the data. So if you
+ want to overwrite the data of an existing database without re-creating
+ the schema, specify <option>--clean</option> together with
+ <option>--data-only</option>.
+
+
+ This is also a high performance way to load the tables, because the
+ <command>TRUNCATE</command> and the subsequent <command>COPY</command>
+ are wrapped in a single transaction, which avoids WAL-logging the data
+ (if the server is configured with <varname>wal_level</varname> =
+ <literal>minimal</literal>).
+ Warning: foreign key constraints referencing a table might cause its
+ truncation to fail.
+
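
A minimal sketch of the documented workflow, in the style of the new TAP test further down (not part of the patch): it assumes the $node cluster, the db1_restored database, and a custom-format archive $archive of db1 as set up there; the t1_ref table is made up here to illustrate the foreign key caveat.

# Refresh the data of an already-populated database without touching its
# schema: each table is truncated and then copied, instead of being dropped.
$node->command_ok(
	[ 'pg_restore', '--clean', '--data-only', '--dbname', 'db1_restored',
	  $archive ],
	'data refreshed in place with --clean --data-only');

# The caveat from the documentation: a foreign key referencing a table makes
# its TRUNCATE (and, with --exit-on-error, the whole restore) fail.
$node->safe_psql('db1_restored', '
	ALTER TABLE t1 ADD PRIMARY KEY (i);
	CREATE TABLE t1_ref (i integer REFERENCES t1);');
$node->command_fails(
	[ 'pg_restore', '--clean', '--data-only', '--exit-on-error',
	  '--dbname', 'db1_restored', $archive ],
	'TRUNCATE of a table referenced by a foreign key fails');
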
diff --git a/src/bin/pg_dump/pg_backup.h b/src/bin/pg_dump/pg_backup.h
index 4ebef1e86445..26ee733f6831 100644
--- a/src/bin/pg_dump/pg_backup.h
+++ b/src/bin/pg_dump/pg_backup.h
@@ -105,6 +105,7 @@ typedef struct _restoreOptions
* instead of OWNER TO */
char *superuser; /* Username to use as superuser */
char *use_role; /* Issue SET ROLE to this */
+ int clean; /* -c/--clean was given; truncate table data if the schema is not restored */
int dropSchema;
int disable_dollar_quoting;
int dump_inserts; /* 0 = COPY, otherwise rows per INSERT */
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index dce88f040ace..8fad5dae8338 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -981,7 +981,12 @@ restore_toc_entry(ArchiveHandle *AH, TocEntry *te, bool is_parallel)
* this run (so that we know it is empty) and we are not
* restoring a load-via-partition-root data item then we
* wrap the COPY in a transaction and precede it with a
- * TRUNCATE. If wal_level is set to minimal this prevents
+ * TRUNCATE.
+ *
+ * Likewise, if the table is pre-existing and its data is
+ * being restored with --clean.
+ *
+ * If wal_level is set to minimal this prevents
* WAL-logging the COPY. This obtains a speedup similar
* to that from using single_txn mode in non-parallel
* restores.
@@ -992,11 +997,14 @@ restore_toc_entry(ArchiveHandle *AH, TocEntry *te, bool is_parallel)
* loaded data. (We assume that all partitions of a
* partitioned table will be treated the same way.)
*/
- use_truncate = is_parallel && te->created &&
+ use_truncate = ((is_parallel && te->created) || ropt->clean) &&
!is_load_via_partition_root(te);
if (use_truncate)
{
+ pg_log_debug("BEGIN transaction and TRUNCATE table \"%s.%s\"",
+ te->namespace, te->tag);
+
/*
* Parallel restore is always talking directly to a
* server, so no need to see if we should issue BEGIN.
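
To make the WAL claim above concrete, here is a rough, hedged way to observe it from a TAP test (not part of the patch): it assumes the cluster was initialized with wal_level = minimal (which also requires max_wal_senders = 0), and it reuses the $node, db1_restored and $archive names from the new test added below.

my $lsn_before =
  $node->safe_psql('postgres', 'SELECT pg_current_wal_insert_lsn()');
$node->command_ok(
	[ 'pg_restore', '--clean', '--data-only', '--dbname', 'db1_restored',
	  $archive ],
	'refresh data with --clean --data-only');
my $lsn_after =
  $node->safe_psql('postgres', 'SELECT pg_current_wal_insert_lsn()');
# With wal_level = minimal the COPY after the TRUNCATE should not be
# WAL-logged, so this number should stay small compared to a
# wal_level = replica run.
note "WAL bytes written by the restore: "
  . $node->safe_psql('postgres',
	"SELECT pg_wal_lsn_diff('$lsn_after', '$lsn_before')");
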
diff --git a/src/bin/pg_dump/pg_restore.c b/src/bin/pg_dump/pg_restore.c
index 6c129278bc52..31604c7b8093 100644
--- a/src/bin/pg_dump/pg_restore.c
+++ b/src/bin/pg_dump/pg_restore.c
@@ -177,7 +177,7 @@ main(int argc, char **argv)
data_only = true;
break;
case 'c': /* clean (i.e., drop) schema prior to create */
- opts->dropSchema = 1;
+ opts->clean = 1;
break;
case 'C':
opts->createDB = 1;
@@ -381,9 +381,6 @@ main(int argc, char **argv)
pg_fatal("options %s and %s cannot be used together",
"-s/--schema-only", "--statistics");
- if (data_only && opts->dropSchema)
- pg_fatal("options -c/--clean and -a/--data-only cannot be used together");
-
if (opts->single_txn && opts->txn_size > 0)
pg_fatal("options -1/--single-transaction and --transaction-size cannot be used together");
@@ -410,6 +407,16 @@ main(int argc, char **argv)
opts->dumpStatistics = ((opts->dumpStatistics && !schema_only && !data_only) ||
(statistics_only || with_statistics)) && !no_statistics;
+ /*
+ * If --clean was given and the schema is being restored, drop the schema
+ * objects first and recreate them; there is no need to clear the data
+ * separately.
+ */
+ if (opts->clean && opts->dumpSchema)
+ {
+ opts->clean = 0;
+ opts->dropSchema = 1;
+ }
+
opts->disable_triggers = disable_triggers;
opts->enable_row_security = enable_row_security;
opts->noDataForFailedTables = no_data_for_failed_tables;
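
A small sketch (not part of the patch) of what the normalization above means for the emitted script, assuming the $node cluster and the custom-format $archive from the new TAP test below: when the schema is being restored, --clean keeps its old DROP behaviour.

my $script;
$node->run_log(
	[ 'pg_restore', '--clean', '--file', '-', $archive ],
	'>' => \$script);
like($script, qr/DROP TABLE/,
	'--clean still emits DROP commands when the schema is being restored');
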
diff --git a/src/bin/pg_dump/t/001_basic.pl b/src/bin/pg_dump/t/001_basic.pl
index 37d893d5e6a5..9cfa25dc0ec3 100644
--- a/src/bin/pg_dump/t/001_basic.pl
+++ b/src/bin/pg_dump/t/001_basic.pl
@@ -101,12 +101,6 @@
qr/\Qpg_dump: error: options -c\/--clean and -a\/--data-only cannot be used together\E/,
'pg_dump: options -c/--clean and -a/--data-only cannot be used together');
-command_fails_like(
- [ 'pg_restore', '-c', '-a', '-f -' ],
- qr/\Qpg_restore: error: options -c\/--clean and -a\/--data-only cannot be used together\E/,
- 'pg_restore: options -c/--clean and -a/--data-only cannot be used together'
-);
-
command_fails_like(
[ 'pg_dump', '--if-exists' ],
qr/\Qpg_dump: error: option --if-exists requires option -c\/--clean\E/,
diff --git a/src/bin/pg_dump/t/007_pg_restore.pl b/src/bin/pg_dump/t/007_pg_restore.pl
new file mode 100644
index 000000000000..402d97fad81e
--- /dev/null
+++ b/src/bin/pg_dump/t/007_pg_restore.pl
@@ -0,0 +1,150 @@
+# Copyright (c) 2021-2025, PostgreSQL Global Development Group
+
+use strict;
+use warnings FATAL => 'all';
+
+use File::Basename;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+my $tempdir = PostgreSQL::Test::Utils::tempdir;
+
+my $node = PostgreSQL::Test::Cluster->new('main');
+$node->init;
+$node->start;
+
+# TODO: test that --clean together with --section=post-data does not
+# truncate any tables (a possible shape is sketched after this file).
+
+# Create database
+$node->safe_psql('postgres','
+ CREATE DATABASE db1;
+ \c db1
+ CREATE TABLE t1 (
+ i integer
+ );
+ INSERT INTO t1 VALUES (1), (2), (3), (4);
+ CREATE TABLE t2 (
+ t text
+ );
+ INSERT INTO t2 VALUES (\'a\'), (\'bb\'), (\'ccc\');');
+
+# Function to compare two databases of the above kind.
+sub compare_db_contents
+{
+ my ($db1, $db2, $should_match) = @_;
+ $should_match = 1
+ unless defined $should_match;
+
+ my $query = "
+ SELECT * FROM t1 ORDER BY i;
+ SELECT * FROM t2 ORDER BY t;
+ ";
+ my $result1 = $node->safe_psql($db1, $query);
+ my $result2 = $node->safe_psql($db2, $query);
+
+ if ($should_match)
+ {
+ is($result2, $result1, "The database contents should match");
+ }
+ else
+ {
+ isnt($result2, $result1, "The database contents should NOT match");
+ }
+}
+
+sub test_pg_restore
+{
+ my ($dump_file, %args) = @_;
+ my $file_basename = File::Basename::basename($dump_file);
+
+ my @cmd = ( 'pg_restore', '--dbname', 'db1_restored' );
+ my $cmd_s = "pg_restore";
+
+ # Optionally this function takes extra pg_restore arguments, passed as
+ # 'extra_args' => '...'.
+ if (defined $args{extra_args})
+ {
+ my @extra_args = $args{extra_args};
+ @cmd = (@cmd, @extra_args);
+ $cmd_s = "$cmd_s @extra_args";
+ }
+
+ $node->safe_psql(
+ 'postgres',
+ 'DROP DATABASE IF EXISTS db1_restored;');
+ ok(1, "clean up");
+
+ # Restore into a new database
+ $node->safe_psql('postgres',
+ 'CREATE DATABASE db1_restored;');
+ $node->command_ok([@cmd,
+ $dump_file],
+ "$cmd_s $file_basename");
+
+ # Verify restored db matches the dumped one
+ compare_db_contents('db1', 'db1_restored');
+
+ # Restore again with --data-only.
+ # Now the rows should be duplicate, the databases shouldn't match.
+ $node->command_ok([@cmd, '--data-only',
+ $dump_file],
+ "$cmd_s --data-only $file_basename");
+ compare_db_contents('db1', 'db1_restored', 0);
+
+ # Restore again with --data-only --clean.
+ # The database contents should match.
+ $node->command_ok([@cmd, '--clean', '--data-only',
+ $dump_file],
+ "$cmd_s --clean --data-only $file_basename");
+ compare_db_contents('db1', 'db1_restored');
+
+ # Restore from stdin.
+ my $stderr;
+ my $result = $node->run_log([@cmd, '--clean', '--data-only'],
+ ('<' => $dump_file,
+ '2>' => \$stderr));
+ if (grep {/^-j/} @cmd)
+ {
+ ok(!$result, "should fail: $cmd_s --clean --data-only < $file_basename");
+ chomp($stderr);
+ like($stderr,
+ qr/parallel restore from standard input is not supported$/,
+ "stderr: parallel restore from standard input is not supported");
+ }
+ else
+ {
+ ok($result, "$cmd_s --clean --data-only < $file_basename");
+ compare_db_contents('db1', 'db1_restored');
+ }
+}
+
+
+# Basic dump
+my $d1 = "$tempdir/dump_file";
+$node->command_ok(['pg_dump', '--format=custom',
+ '--file', $d1, 'db1'],
+ 'pg_dump --format=custom --file dump_file');
+# Also dump to stdout: with a non-seekable output, the custom archive's TOC
+# does not contain data offsets, which exercises a different restore path.
+my $d2 = "$tempdir/dump_file_stdout";
+my $result = $node->run_log(['pg_dump', '--format=custom', 'db1'],
+ ('>' => $d2));
+ok($result, "pg_dump --format=custom > dump_file_stdout");
+
+
+# Run all pg_restore testcases against each archive.
+test_pg_restore($d1);
+test_pg_restore($d2);
+test_pg_restore($d1, ('extra_args' => ('-j2')));
+test_pg_restore($d2, ('extra_args' => ('-j2')));
+test_pg_restore($d1, ('extra_args' => ('--single-transaction')));
+test_pg_restore($d2, ('extra_args' => ('--single-transaction')));
+
+
+$node->stop('fast');
+
+done_testing();
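
One possible shape for the TODO near the top of the new test file, kept here as a hedged sketch rather than added to the patch: it reuses $node, $tempdir and compare_db_contents from above, assumes db1_restored currently holds a full, matching restore of db1, and the index and file names ($d3, t1_i_idx) are made up for illustration.

# Give the post-data section something to drop and recreate.
$node->safe_psql('db1', 'CREATE INDEX t1_i_idx ON t1 (i);');
my $d3 = "$tempdir/dump_file_postdata";
$node->command_ok([ 'pg_dump', '--format=custom', '--file', $d3, 'db1' ],
	'pg_dump db1 with an index');

# Restoring only the post-data section with --clean should drop and recreate
# the index but must not truncate (or otherwise touch) the table contents.
$node->command_ok(
	[ 'pg_restore', '--dbname', 'db1_restored', '--clean', '--if-exists',
	  '--section', 'post-data', $d3 ],
	'pg_restore --clean --if-exists --section=post-data');
compare_db_contents('db1', 'db1_restored');
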