>
>
> This thread got slightly mixed up, so I'm replying to the v45-0001
> posted here, and also in response to Michael's and Corey's comments
> from:
>
> https://www.postgresql.org/message-id/Z5H0iRaJc1wnDVLE%40paquier.xyz


Thanks.


>
> which makes sense to me. The v45-0001 patch itself could use some
> cleanup, but I can take care of that at commit time if we agree on the
> locking scheme.


While no changes were made to 0001 since v45, 0002 fixes the regression
tests that were failing in v45. Other cleanup and a review of the
documentation wording have also been done, some of which I'm still not
totally satisfied with, so I'm going to give it another look tomorrow,
but am putting this out for reviewers in the meantime.
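
As a quick illustration of the locking question in 0001, the new
regression tests exercise calls like the one below, which now takes a
ShareUpdateExclusiveLock on stats_import.test before the AccessShareLock
on the index itself:

SELECT
    pg_catalog.pg_restore_relation_stats(
        'relation', 'stats_import.test_i'::regclass,
        'relpages', 19::integer );
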
From fb095b7ea75ef366eaae5eb7b5322b9869a760ea Mon Sep 17 00:00:00 2001
From: Corey Huinker <corey.huin...@gmail.com>
Date: Tue, 21 Jan 2025 11:52:58 -0500
Subject: [PATCH v46 1/2] Lock table first when setting index relation
 statistics.

Jian He reported [1] a missing relation lock in pg_restore_relation_stats()
when restoring stats to an index.

This fix follows the steps for proper locking prior to an inplace update
of a relation, as specified in aac2c9b4fde8, and mimics the locking
behavior of the ANALYZE command. It also corrects the ACL checks to be
performed against the underlying relation of an index rather than the
index itself.

There is no such special case for partitioned indexes, so while we still
want to do the ACL checks against the underlying relation, we need to take
the more restrictive ShareUpdateExclusiveLock on the partitioned index
itself.

[1] https://www.postgresql.org/message-id/CACJufxGreTY7qsCV8%2BBkuv0p5SXGTScgh%3DD%2BDq6%3D%2B_%3DXTp7FWg%40mail.gmail.com
---
 src/backend/statistics/stat_utils.c        | 45 +++++++++++++++---
 src/test/regress/expected/stats_import.out | 53 ++++++++++++++++++++++
 src/test/regress/sql/stats_import.sql      | 36 +++++++++++++++
 3 files changed, 128 insertions(+), 6 deletions(-)

diff --git a/src/backend/statistics/stat_utils.c b/src/backend/statistics/stat_utils.c
index 0d446f55b0..f87007e72c 100644
--- a/src/backend/statistics/stat_utils.c
+++ b/src/backend/statistics/stat_utils.c
@@ -17,13 +17,16 @@
 #include "postgres.h"
 
 #include "access/relation.h"
+#include "catalog/index.h"
 #include "catalog/pg_database.h"
 #include "funcapi.h"
 #include "miscadmin.h"
 #include "statistics/stat_utils.h"
+#include "storage/lmgr.h"
 #include "utils/acl.h"
 #include "utils/array.h"
 #include "utils/builtins.h"
+#include "utils/lsyscache.h"
 #include "utils/rel.h"
 
 /*
@@ -126,18 +129,45 @@ stats_check_arg_pair(FunctionCallInfo fcinfo,
 void
 stats_lock_check_privileges(Oid reloid)
 {
-	Relation	rel = relation_open(reloid, ShareUpdateExclusiveLock);
-	const char	relkind = rel->rd_rel->relkind;
+	Relation	rel;
+	Oid			relation_oid = reloid;
+	Oid			index_oid = InvalidOid;
+	LOCKMODE	index_lockmode = AccessShareLock;
 
-	/* All of the types that can be used with ANALYZE, plus indexes */
-	switch (relkind)
+	/*
+	 * For indexes, we follow what do_analyze_rel() does to avoid deadlocks
+	 * with analyze/vacuum: take a ShareUpdateExclusiveLock on the
+	 * table/matview first, then an AccessShareLock on the index itself.
+	 * See check_inplace_rel_lock() for how this special case is implemented.
+	 *
+	 * Partitioned indexes have no such exception in check_inplace_rel_lock(),
+	 * so we take the more restrictive ShareUpdateExclusiveLock on them
+	 * instead.
+	 */
+	switch (get_rel_relkind(reloid))
 	{
-		case RELKIND_RELATION:
 		case RELKIND_INDEX:
+			relation_oid = IndexGetRelation(reloid, false);
+			index_oid = reloid;
+			break;
+		case RELKIND_PARTITIONED_INDEX:
+			relation_oid = IndexGetRelation(reloid, false);
+			index_oid = reloid;
+			index_lockmode = ShareUpdateExclusiveLock;
+			break;
+		default:
+			break;
+	}
+
+	rel = relation_open(relation_oid, ShareUpdateExclusiveLock);
+
+	switch (rel->rd_rel->relkind)
+	{
+		/* All of the types that can be used with ANALYZE */
+		case RELKIND_RELATION:
 		case RELKIND_MATVIEW:
 		case RELKIND_FOREIGN_TABLE:
 		case RELKIND_PARTITIONED_TABLE:
-		case RELKIND_PARTITIONED_INDEX:
 			break;
 		default:
 			ereport(ERROR,
@@ -164,6 +194,9 @@ stats_lock_check_privileges(Oid reloid)
 						   NameStr(rel->rd_rel->relname));
 	}
 
+	if (OidIsValid(index_oid))
+		LockRelationOid(index_oid, index_lockmode);
+
 	relation_close(rel, NoLock);
 }
 
diff --git a/src/test/regress/expected/stats_import.out b/src/test/regress/expected/stats_import.out
index fb50da1cd8..6cd584da68 100644
--- a/src/test/regress/expected/stats_import.out
+++ b/src/test/regress/expected/stats_import.out
@@ -85,6 +85,26 @@ WHERE oid = 'stats_import.test'::regclass;
        17 |       400 |             4
 (1 row)
 
+CREATE INDEX test_i ON stats_import.test(id);
+-- regular indexes have special case locking rules
+SELECT
+    pg_catalog.pg_set_relation_stats(
+        relation => 'stats_import.test_i'::regclass,
+        relpages => 18::integer);
+ pg_set_relation_stats 
+-----------------------
+ 
+(1 row)
+
+SELECT
+    pg_catalog.pg_restore_relation_stats(
+        'relation', 'stats_import.test_i'::regclass,
+        'relpages', 19::integer );
+ pg_restore_relation_stats 
+---------------------------
+ t
+(1 row)
+
 -- positional arguments
 SELECT
     pg_catalog.pg_set_relation_stats(
@@ -182,6 +202,7 @@ CREATE TABLE stats_import.part_child_1
   PARTITION OF stats_import.part_parent
   FOR VALUES FROM (0) TO (10)
   WITH (autovacuum_enabled = false);
+CREATE INDEX part_parent_i ON stats_import.part_parent(i);
 ANALYZE stats_import.part_parent;
 SELECT relpages
 FROM pg_class
@@ -202,6 +223,25 @@ SELECT
  
 (1 row)
 
+-- Partitioned indexes aren't analyzed but it is possible to set stats.
+SELECT
+    pg_catalog.pg_set_relation_stats(
+        relation => 'stats_import.part_parent_i'::regclass,
+        relpages => 2::integer);
+ pg_set_relation_stats 
+-----------------------
+ 
+(1 row)
+
+SELECT
+    pg_catalog.pg_restore_relation_stats(
+        'relation', 'stats_import.part_parent_i'::regclass,
+        'relpages', 2::integer);
+ pg_restore_relation_stats 
+---------------------------
+ t
+(1 row)
+
 -- nothing stops us from setting it to -1
 SELECT
     pg_catalog.pg_set_relation_stats(
@@ -1414,6 +1454,19 @@ SELECT 3, 'tre', (3, 3.3, 'TRE', '2003-03-03', NULL)::stats_import.complex_type,
 UNION ALL
 SELECT 4, 'four', NULL, int4range(0,100), NULL;
 CREATE INDEX is_odd ON stats_import.test(((comp).a % 2 = 1));
+-- Test for proper locking
+SELECT * FROM pg_catalog.pg_restore_relation_stats(
+    'relation', 'stats_import.is_odd'::regclass,
+    'version', '180000'::integer,
+    'relpages', '11'::integer,
+    'reltuples', '10000'::real,
+    'relallvisible', '0'::integer
+);
+ pg_restore_relation_stats 
+---------------------------
+ t
+(1 row)
+
 -- Generate statistics on table with data
 ANALYZE stats_import.test;
 CREATE TABLE stats_import.test_clone ( LIKE stats_import.test )
diff --git a/src/test/regress/sql/stats_import.sql b/src/test/regress/sql/stats_import.sql
index d3058bf8f6..23e85fc6ba 100644
--- a/src/test/regress/sql/stats_import.sql
+++ b/src/test/regress/sql/stats_import.sql
@@ -64,6 +64,19 @@ SELECT relpages, reltuples, relallvisible
 FROM pg_class
 WHERE oid = 'stats_import.test'::regclass;
 
+CREATE INDEX test_i ON stats_import.test(id);
+
+-- regular indexes have special case locking rules
+SELECT
+    pg_catalog.pg_set_relation_stats(
+        relation => 'stats_import.test_i'::regclass,
+        relpages => 18::integer);
+
+SELECT
+    pg_catalog.pg_restore_relation_stats(
+        'relation', 'stats_import.test_i'::regclass,
+        'relpages', 19::integer );
+
 -- positional arguments
 SELECT
     pg_catalog.pg_set_relation_stats(
@@ -127,6 +140,8 @@ CREATE TABLE stats_import.part_child_1
   FOR VALUES FROM (0) TO (10)
   WITH (autovacuum_enabled = false);
 
+CREATE INDEX part_parent_i ON stats_import.part_parent(i);
+
 ANALYZE stats_import.part_parent;
 
 SELECT relpages
@@ -140,6 +155,17 @@ SELECT
         relation => 'stats_import.part_parent'::regclass,
         relpages => 2::integer);
 
+-- Partitioned indexes aren't analyzed but it is possible to set stats.
+SELECT
+    pg_catalog.pg_set_relation_stats(
+        relation => 'stats_import.part_parent_i'::regclass,
+        relpages => 2::integer);
+
+SELECT
+    pg_catalog.pg_restore_relation_stats(
+        'relation', 'stats_import.part_parent_i'::regclass,
+        'relpages', 2::integer);
+
 -- nothing stops us from setting it to -1
 SELECT
     pg_catalog.pg_set_relation_stats(
@@ -1062,6 +1088,16 @@ SELECT 4, 'four', NULL, int4range(0,100), NULL;
 
 CREATE INDEX is_odd ON stats_import.test(((comp).a % 2 = 1));
 
+
+-- Test for proper locking
+SELECT * FROM pg_catalog.pg_restore_relation_stats(
+    'relation', 'stats_import.is_odd'::regclass,
+    'version', '180000'::integer,
+    'relpages', '11'::integer,
+    'reltuples', '10000'::real,
+    'relallvisible', '0'::integer
+);
+
 -- Generate statistics on table with data
 ANALYZE stats_import.test;
 

base-commit: 9e020050b8fa8e184bc1d58e6a4bc1edfa76cb8c
-- 
2.48.1

From 983d67a266ce3f66df7e8e8579db38b13eaf9816 Mon Sep 17 00:00:00 2001
From: Corey Huinker <corey.huin...@gmail.com>
Date: Sat, 16 Mar 2024 17:21:10 -0400
Subject: [PATCH v46 2/2] Enable dumping of table/index stats in pg_dump.

For each table/matview/index dumped, pg_dump will generate a statement
that calls pg_restore_relation_stats(), and a series of statements that
call pg_restore_attribute_stats(), one per attribute that has a
corresponding row in pg_statistic. These statements will restore the
statistics of the source system onto the destination system.

Add the -X/--statistics-only command-line option, which is mutually
exclusive with --schema-only and --data-only.

Statistics are not dumped when --schema-only is specified, except during
a binary upgrade.

As is the pattern with pg_dump options, statistics can be disabled using
--no-statistics.

Table statistics are dumped in the data section. This is true even if
dumping stats in a binary upgrade. Index statistics are dumped in the
post-data section.

Add --no-data option.  This option is useful for situations where
someone wishes to test query plans from a production database without
copying production data.

This also makes the corresponding change to the simulated pg_upgrade in
the TAP tests for pg_dump.

Add --no-schema option to pg_dump, etc.  Previously, users could use
--data-only when they wanted to suppress the schema from a dump. That no
longer suffices now that the binary data/schema choice has become the
three-way data/schema/statistics choice.
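
For reference, the generated statements take the following form (relation
name and values here are illustrative):

SELECT * FROM pg_catalog.pg_restore_relation_stats(
    'relation', 'public.example_table'::regclass,
    'version', '180000'::integer,
    'relpages', '1'::integer,
    'reltuples', '100'::real,
    'relallvisible', '0'::integer
);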
---
 src/bin/pg_dump/pg_backup.h           |  10 +-
 src/bin/pg_dump/pg_backup_archiver.c  |  97 +++++--
 src/bin/pg_dump/pg_backup_archiver.h  |   3 +-
 src/bin/pg_dump/pg_backup_directory.c |   2 +-
 src/bin/pg_dump/pg_dump.c             | 390 +++++++++++++++++++++++++-
 src/bin/pg_dump/pg_dump.h             |  11 +
 src/bin/pg_dump/pg_dump_sort.c        |  36 ++-
 src/bin/pg_dump/pg_dumpall.c          |   5 +
 src/bin/pg_dump/pg_restore.c          |  31 +-
 src/bin/pg_dump/t/001_basic.pl        |  18 ++
 src/bin/pg_dump/t/002_pg_dump.pl      | 111 +++++++-
 src/bin/pg_upgrade/dump.c             |   6 +-
 src/bin/pg_upgrade/option.c           |  12 +
 src/bin/pg_upgrade/pg_upgrade.h       |   1 +
 doc/src/sgml/ref/pg_dump.sgml         |  69 ++++-
 doc/src/sgml/ref/pg_dumpall.sgml      |  38 +++
 doc/src/sgml/ref/pg_restore.sgml      |  47 +++-
 doc/src/sgml/ref/pgupgrade.sgml       |  18 ++
 src/tools/pgindent/typedefs.list      |   1 +
 19 files changed, 841 insertions(+), 65 deletions(-)

diff --git a/src/bin/pg_dump/pg_backup.h b/src/bin/pg_dump/pg_backup.h
index f0f19bb0b2..3fa1474fad 100644
--- a/src/bin/pg_dump/pg_backup.h
+++ b/src/bin/pg_dump/pg_backup.h
@@ -110,9 +110,12 @@ typedef struct _restoreOptions
 	int			column_inserts;
 	int			if_exists;
 	int			no_comments;	/* Skip comments */
+	int			no_data;			/* Skip data */
 	int			no_publications;	/* Skip publication entries */
+	int			no_schema;			/* Skip schema generation */
 	int			no_security_labels; /* Skip security label entries */
 	int			no_subscriptions;	/* Skip subscription entries */
+	int			no_statistics;		/* Skip statistics import */
 	int			strict_names;
 
 	const char *filename;
@@ -160,6 +163,7 @@ typedef struct _restoreOptions
 	/* flags derived from the user-settable flags */
 	bool		dumpSchema;
 	bool		dumpData;
+	bool		dumpStatistics;
 } RestoreOptions;
 
 typedef struct _dumpOptions
@@ -179,8 +183,11 @@ typedef struct _dumpOptions
 	int			column_inserts;
 	int			if_exists;
 	int			no_comments;
-	int			no_security_labels;
+	int			no_data;
 	int			no_publications;
+	int			no_schema;
+	int			no_security_labels;
+	int			no_statistics;
 	int			no_subscriptions;
 	int			no_toast_compression;
 	int			no_unlogged_table_data;
@@ -208,6 +215,7 @@ typedef struct _dumpOptions
 	/* flags derived from the user-settable flags */
 	bool		dumpSchema;
 	bool		dumpData;
+	bool		dumpStatistics;
 } DumpOptions;
 
 /*
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index 707a3fc844..cfa1349b8a 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -46,6 +46,12 @@
 #define TEXT_DUMP_HEADER "--\n-- PostgreSQL database dump\n--\n\n"
 #define TEXT_DUMPALL_HEADER "--\n-- PostgreSQL database cluster dump\n--\n\n"
 
+typedef enum entryType
+{
+	default_entry,
+	data_entry,
+	statistics_entry
+}			entryType;
 
 static ArchiveHandle *_allocAH(const char *FileSpec, const ArchiveFormat fmt,
 							   const pg_compress_specification compression_spec,
@@ -53,7 +59,7 @@ static ArchiveHandle *_allocAH(const char *FileSpec, const ArchiveFormat fmt,
 							   SetupWorkerPtrType setupWorkerPtr,
 							   DataDirSyncMethod sync_method);
 static void _getObjectDescription(PQExpBuffer buf, const TocEntry *te);
-static void _printTocEntry(ArchiveHandle *AH, TocEntry *te, bool isData);
+static void _printTocEntry(ArchiveHandle *AH, TocEntry *te, entryType entry_type);
 static char *sanitize_line(const char *str, bool want_hyphen);
 static void _doSetFixedOutputState(ArchiveHandle *AH);
 static void _doSetSessionAuth(ArchiveHandle *AH, const char *user);
@@ -149,6 +155,7 @@ InitDumpOptions(DumpOptions *opts)
 	opts->dumpSections = DUMP_UNSECTIONED;
 	opts->dumpSchema = true;
 	opts->dumpData = true;
+	opts->dumpStatistics = true;
 }
 
 /*
@@ -169,9 +176,10 @@ dumpOptionsFromRestoreOptions(RestoreOptions *ropt)
 	dopt->outputClean = ropt->dropSchema;
 	dopt->dumpData = ropt->dumpData;
 	dopt->dumpSchema = ropt->dumpSchema;
+	dopt->dumpSections = ropt->dumpSections;
+	dopt->dumpStatistics = ropt->dumpStatistics;
 	dopt->if_exists = ropt->if_exists;
 	dopt->column_inserts = ropt->column_inserts;
-	dopt->dumpSections = ropt->dumpSections;
 	dopt->aclsSkip = ropt->aclsSkip;
 	dopt->outputSuperuser = ropt->superuser;
 	dopt->outputCreateDB = ropt->createDB;
@@ -186,6 +194,9 @@ dumpOptionsFromRestoreOptions(RestoreOptions *ropt)
 	dopt->no_publications = ropt->no_publications;
 	dopt->no_security_labels = ropt->no_security_labels;
 	dopt->no_subscriptions = ropt->no_subscriptions;
+	dopt->no_data = ropt->no_data;
+	dopt->no_schema = ropt->no_schema;
+	dopt->no_statistics = ropt->no_statistics;
 	dopt->lockWaitTimeout = ropt->lockWaitTimeout;
 	dopt->include_everything = ropt->include_everything;
 	dopt->enable_row_security = ropt->enable_row_security;
@@ -418,30 +429,30 @@ RestoreArchive(Archive *AHX)
 	}
 
 	/*
-	 * Work out if we have an implied data-only restore. This can happen if
-	 * the dump was data only or if the user has used a toc list to exclude
-	 * all of the schema data. All we do is look for schema entries - if none
-	 * are found then we unset the dumpSchema flag.
+	 * Work out if we have a schema-less restore. This can happen if the dump
+	 * was data-only, statistics-only, or no-schema, or if the user has used a
+	 * toc list to exclude all of the schema data. All we do is look for
+	 * schema entries - if none are found then we unset the dumpSchema flag.
 	 *
 	 * We could scan for wanted TABLE entries, but that is not the same as
 	 * data-only. At this stage, it seems unnecessary (6-Mar-2001).
 	 */
 	if (ropt->dumpSchema)
 	{
-		int			impliedDataOnly = 1;
+		bool		no_schema_found = true;
 
 		for (te = AH->toc->next; te != AH->toc; te = te->next)
 		{
 			if ((te->reqs & REQ_SCHEMA) != 0)
-			{					/* It's schema, and it's wanted */
-				impliedDataOnly = 0;
+			{
+				no_schema_found = false;
 				break;
 			}
 		}
-		if (impliedDataOnly)
+		if (no_schema_found)
 		{
 			ropt->dumpSchema = false;
-			pg_log_info("implied data-only restore");
+			pg_log_info("implied no-schema restore");
 		}
 	}
 
@@ -739,7 +750,7 @@ RestoreArchive(Archive *AHX)
 
 		for (te = AH->toc->next; te != AH->toc; te = te->next)
 		{
-			if ((te->reqs & (REQ_SCHEMA | REQ_DATA)) == 0)
+			if ((te->reqs & (REQ_SCHEMA | REQ_DATA | REQ_STATS)) == 0)
 				continue;		/* ignore if not to be dumped at all */
 
 			switch (_tocEntryRestorePass(te))
@@ -760,7 +771,7 @@ RestoreArchive(Archive *AHX)
 		{
 			for (te = AH->toc->next; te != AH->toc; te = te->next)
 			{
-				if ((te->reqs & (REQ_SCHEMA | REQ_DATA)) != 0 &&
+				if ((te->reqs & (REQ_SCHEMA | REQ_DATA | REQ_STATS)) != 0 &&
 					_tocEntryRestorePass(te) == RESTORE_PASS_ACL)
 					(void) restore_toc_entry(AH, te, false);
 			}
@@ -770,7 +781,7 @@ RestoreArchive(Archive *AHX)
 		{
 			for (te = AH->toc->next; te != AH->toc; te = te->next)
 			{
-				if ((te->reqs & (REQ_SCHEMA | REQ_DATA)) != 0 &&
+				if ((te->reqs & (REQ_SCHEMA | REQ_DATA | REQ_STATS)) != 0 &&
 					_tocEntryRestorePass(te) == RESTORE_PASS_POST_ACL)
 					(void) restore_toc_entry(AH, te, false);
 			}
@@ -869,7 +880,7 @@ restore_toc_entry(ArchiveHandle *AH, TocEntry *te, bool is_parallel)
 			pg_log_info("creating %s \"%s\"",
 						te->desc, te->tag);
 
-		_printTocEntry(AH, te, false);
+		_printTocEntry(AH, te, default_entry);
 		defnDumped = true;
 
 		if (strcmp(te->desc, "TABLE") == 0)
@@ -938,7 +949,7 @@ restore_toc_entry(ArchiveHandle *AH, TocEntry *te, bool is_parallel)
 			 */
 			if (AH->PrintTocDataPtr != NULL)
 			{
-				_printTocEntry(AH, te, true);
+				_printTocEntry(AH, te, data_entry);
 
 				if (strcmp(te->desc, "BLOBS") == 0 ||
 					strcmp(te->desc, "BLOB COMMENTS") == 0)
@@ -1036,15 +1047,21 @@ restore_toc_entry(ArchiveHandle *AH, TocEntry *te, bool is_parallel)
 		{
 			/* If we haven't already dumped the defn part, do so now */
 			pg_log_info("executing %s %s", te->desc, te->tag);
-			_printTocEntry(AH, te, false);
+			_printTocEntry(AH, te, default_entry);
 		}
 	}
 
+	/*
+	 * If it has a statistics component that we want, then process that
+	 */
+	if ((reqs & REQ_STATS) != 0)
+		_printTocEntry(AH, te, statistics_entry);
+
 	/*
 	 * If we emitted anything for this TOC entry, that counts as one action
 	 * against the transaction-size limit.  Commit if it's time to.
 	 */
-	if ((reqs & (REQ_SCHEMA | REQ_DATA)) != 0 && ropt->txn_size > 0)
+	if ((reqs & (REQ_SCHEMA | REQ_DATA | REQ_STATS)) != 0 && ropt->txn_size > 0)
 	{
 		if (++AH->txnCount >= ropt->txn_size)
 		{
@@ -1084,6 +1101,7 @@ NewRestoreOptions(void)
 	opts->compression_spec.level = 0;
 	opts->dumpSchema = true;
 	opts->dumpData = true;
+	opts->dumpStatistics = true;
 
 	return opts;
 }
@@ -1329,7 +1347,7 @@ PrintTOCSummary(Archive *AHX)
 		te->reqs = _tocEntryRequired(te, curSection, AH);
 		/* Now, should we print it? */
 		if (ropt->verbose ||
-			(te->reqs & (REQ_SCHEMA | REQ_DATA)) != 0)
+			(te->reqs & (REQ_SCHEMA | REQ_DATA | REQ_STATS)) != 0)
 		{
 			char	   *sanitized_name;
 			char	   *sanitized_schema;
@@ -2582,7 +2600,7 @@ WriteToc(ArchiveHandle *AH)
 	tocCount = 0;
 	for (te = AH->toc->next; te != AH->toc; te = te->next)
 	{
-		if ((te->reqs & (REQ_SCHEMA | REQ_DATA | REQ_SPECIAL)) != 0)
+		if ((te->reqs & (REQ_SCHEMA | REQ_DATA | REQ_STATS | REQ_SPECIAL)) != 0)
 			tocCount++;
 	}
 
@@ -2592,7 +2610,7 @@ WriteToc(ArchiveHandle *AH)
 
 	for (te = AH->toc->next; te != AH->toc; te = te->next)
 	{
-		if ((te->reqs & (REQ_SCHEMA | REQ_DATA | REQ_SPECIAL)) == 0)
+		if ((te->reqs & (REQ_SCHEMA | REQ_DATA | REQ_STATS | REQ_SPECIAL)) == 0)
 			continue;
 
 		WriteInt(AH, te->dumpId);
@@ -2918,6 +2936,14 @@ _tocEntryRequired(TocEntry *te, teSection curSection, ArchiveHandle *AH)
 		strcmp(te->desc, "SEARCHPATH") == 0)
 		return REQ_SPECIAL;
 
+	if (strcmp(te->desc, "STATISTICS DATA") == 0)
+	{
+		if (!ropt->dumpStatistics)
+			return 0;
+		else
+			res = REQ_STATS;
+	}
+
 	/*
 	 * DATABASE and DATABASE PROPERTIES also have a special rule: they are
 	 * restored in createDB mode, and not restored otherwise, independently of
@@ -2962,6 +2988,10 @@ _tocEntryRequired(TocEntry *te, teSection curSection, ArchiveHandle *AH)
 	if (ropt->no_subscriptions && strcmp(te->desc, "SUBSCRIPTION") == 0)
 		return 0;
 
+	/* If it's statistics and we don't want statistics, maybe ignore it */
+	if (!ropt->dumpStatistics && strcmp(te->desc, "STATISTICS DATA") == 0)
+		return 0;
+
 	/* Ignore it if section is not to be dumped/restored */
 	switch (curSection)
 	{
@@ -2991,6 +3021,7 @@ _tocEntryRequired(TocEntry *te, teSection curSection, ArchiveHandle *AH)
 	 */
 	if (strcmp(te->desc, "ACL") == 0 ||
 		strcmp(te->desc, "COMMENT") == 0 ||
+		strcmp(te->desc, "STATISTICS DATA") == 0 ||
 		strcmp(te->desc, "SECURITY LABEL") == 0)
 	{
 		/* Database properties react to createDB, not selectivity options. */
@@ -3107,6 +3138,7 @@ _tocEntryRequired(TocEntry *te, teSection curSection, ArchiveHandle *AH)
 		}
 	}
 
+
 	/*
 	 * Determine whether the TOC entry contains schema and/or data components,
 	 * and mask off inapplicable REQ bits.  If it had a dataDumper, assume
@@ -3172,12 +3204,12 @@ _tocEntryRequired(TocEntry *te, teSection curSection, ArchiveHandle *AH)
 				strncmp(te->tag, "LARGE OBJECT", 12) == 0) ||
 			   (strcmp(te->desc, "SECURITY LABEL") == 0 &&
 				strncmp(te->tag, "LARGE OBJECT", 12) == 0))))
-			res = res & REQ_SCHEMA;
+			res = res & (REQ_SCHEMA | REQ_STATS);
 	}
 
 	/* Mask it if we don't want schema */
 	if (!ropt->dumpSchema)
-		res = res & REQ_DATA;
+		res = res & (REQ_DATA | REQ_STATS);
 
 	return res;
 }
@@ -3729,7 +3761,7 @@ _getObjectDescription(PQExpBuffer buf, const TocEntry *te)
  * will remain at default, until the matching ACL TOC entry is restored.
  */
 static void
-_printTocEntry(ArchiveHandle *AH, TocEntry *te, bool isData)
+_printTocEntry(ArchiveHandle *AH, TocEntry *te, entryType entry_type)
 {
 	RestoreOptions *ropt = AH->public.ropt;
 
@@ -3753,10 +3785,17 @@ _printTocEntry(ArchiveHandle *AH, TocEntry *te, bool isData)
 		char	   *sanitized_schema;
 		char	   *sanitized_owner;
 
-		if (isData)
-			pfx = "Data for ";
-		else
-			pfx = "";
+		switch (entry_type)
+		{
+			case data_entry:
+				pfx = "Data for ";
+				break;
+			case statistics_entry:
+				pfx = "Statistics for ";
+				break;
+			default:
+				pfx = "";
+		}
 
 		ahprintf(AH, "--\n");
 		if (AH->public.verbose)
@@ -4324,7 +4363,7 @@ restore_toc_entries_parallel(ArchiveHandle *AH, ParallelState *pstate,
 		if (next_work_item != NULL)
 		{
 			/* If not to be restored, don't waste time launching a worker */
-			if ((next_work_item->reqs & (REQ_SCHEMA | REQ_DATA)) == 0)
+			if ((next_work_item->reqs & (REQ_SCHEMA | REQ_DATA | REQ_STATS)) == 0)
 			{
 				pg_log_info("skipping item %d %s %s",
 							next_work_item->dumpId,
diff --git a/src/bin/pg_dump/pg_backup_archiver.h b/src/bin/pg_dump/pg_backup_archiver.h
index ce5ed1dd39..a2064f471e 100644
--- a/src/bin/pg_dump/pg_backup_archiver.h
+++ b/src/bin/pg_dump/pg_backup_archiver.h
@@ -209,7 +209,8 @@ typedef enum
 
 #define REQ_SCHEMA	0x01		/* want schema */
 #define REQ_DATA	0x02		/* want data */
-#define REQ_SPECIAL	0x04		/* for special TOC entries */
+#define REQ_STATS	0x04		/* want statistics */
+#define REQ_SPECIAL	0x08		/* for special TOC entries */
 
 struct _archiveHandle
 {
diff --git a/src/bin/pg_dump/pg_backup_directory.c b/src/bin/pg_dump/pg_backup_directory.c
index 240a1d4106..b2a841bb0f 100644
--- a/src/bin/pg_dump/pg_backup_directory.c
+++ b/src/bin/pg_dump/pg_backup_directory.c
@@ -780,7 +780,7 @@ _PrepParallelRestore(ArchiveHandle *AH)
 			continue;
 
 		/* We may ignore items not due to be restored */
-		if ((te->reqs & REQ_DATA) == 0)
+		if ((te->reqs & (REQ_DATA | REQ_STATS)) == 0)
 			continue;
 
 		/*
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index 02e1fdf8f7..c92a31902a 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -431,6 +431,7 @@ main(int argc, char **argv)
 	DataDirSyncMethod sync_method = DATA_DIR_SYNC_METHOD_FSYNC;
 	bool		data_only = false;
 	bool		schema_only = false;
+	bool		statistics_only = false;
 
 	static DumpOptions dopt;
 
@@ -467,6 +468,7 @@ main(int argc, char **argv)
 		{"encoding", required_argument, NULL, 'E'},
 		{"help", no_argument, NULL, '?'},
 		{"version", no_argument, NULL, 'V'},
+		{"statistics-only", no_argument, NULL, 'X'},
 
 		/*
 		 * the following options don't have an equivalent short option letter
@@ -493,8 +495,11 @@ main(int argc, char **argv)
 		{"strict-names", no_argument, &strict_names, 1},
 		{"use-set-session-authorization", no_argument, &dopt.use_setsessauth, 1},
 		{"no-comments", no_argument, &dopt.no_comments, 1},
+		{"no-data", no_argument, &dopt.no_data, 1},
 		{"no-publications", no_argument, &dopt.no_publications, 1},
+		{"no-schema", no_argument, &dopt.no_schema, 1},
 		{"no-security-labels", no_argument, &dopt.no_security_labels, 1},
+		{"no-statistics", no_argument, &dopt.no_statistics, 1},
 		{"no-subscriptions", no_argument, &dopt.no_subscriptions, 1},
 		{"no-toast-compression", no_argument, &dopt.no_toast_compression, 1},
 		{"no-unlogged-table-data", no_argument, &dopt.no_unlogged_table_data, 1},
@@ -540,7 +545,7 @@ main(int argc, char **argv)
 
 	InitDumpOptions(&dopt);
 
-	while ((c = getopt_long(argc, argv, "abBcCd:e:E:f:F:h:j:n:N:Op:RsS:t:T:U:vwWxZ:",
+	while ((c = getopt_long(argc, argv, "abBcCd:e:E:f:F:h:j:n:N:Op:RsS:t:T:U:vwWxXZ:",
 							long_options, &optindex)) != -1)
 	{
 		switch (c)
@@ -614,6 +619,10 @@ main(int argc, char **argv)
 				dopt.cparams.pgport = pg_strdup(optarg);
 				break;
 
+			case 'X':			/* Dump statistics only */
+				statistics_only = true;
+				break;
+
 			case 'R':
 				/* no-op, still accepted for backwards compatibility */
 				break;
@@ -785,6 +794,17 @@ main(int argc, char **argv)
 
 	if (data_only && schema_only)
 		pg_fatal("options -s/--schema-only and -a/--data-only cannot be used together");
+	if (schema_only && statistics_only)
+		pg_fatal("options -s/--schema-only and -X/--statistics-only cannot be used together");
+	if (data_only && statistics_only)
+		pg_fatal("options -a/--data-only and -X/--statistics-only cannot be used together");
+
+	if (data_only && dopt.no_data)
+		pg_fatal("options -a/--data-only and --no-data cannot be used together");
+	if (schema_only && dopt.no_schema)
+		pg_fatal("options -s/--schema-only and --no-schema cannot be used together");
+	if (statistics_only && dopt.no_statistics)
+		pg_fatal("options -X/--statistics-only and --no-statistics cannot be used together");
 
 	if (schema_only && foreign_servers_include_patterns.head != NULL)
 		pg_fatal("options -s/--schema-only and --include-foreign-data cannot be used together");
@@ -799,8 +819,9 @@ main(int argc, char **argv)
 		pg_fatal("option --if-exists requires option -c/--clean");
 
 	/* set derivative flags */
-	dopt.dumpSchema = (!data_only);
-	dopt.dumpData = (!schema_only);
+	dopt.dumpData = data_only || (!schema_only && !statistics_only && !dopt.no_data);
+	dopt.dumpSchema = schema_only || (!data_only && !statistics_only && !dopt.no_schema);
+	dopt.dumpStatistics = statistics_only || (!data_only && !schema_only && !dopt.no_statistics);
 
 	/*
 	 * --inserts are already implied above if --column-inserts or
@@ -1100,6 +1121,7 @@ main(int argc, char **argv)
 	ropt->dropSchema = dopt.outputClean;
 	ropt->dumpData = dopt.dumpData;
 	ropt->dumpSchema = dopt.dumpSchema;
+	ropt->dumpStatistics = dopt.dumpStatistics;
 	ropt->if_exists = dopt.if_exists;
 	ropt->column_inserts = dopt.column_inserts;
 	ropt->dumpSections = dopt.dumpSections;
@@ -1178,7 +1200,7 @@ help(const char *progname)
 	printf(_("  -?, --help                   show this help, then exit\n"));
 
 	printf(_("\nOptions controlling the output content:\n"));
-	printf(_("  -a, --data-only              dump only the data, not the schema\n"));
+	printf(_("  -a, --data-only              dump only the data, not the schema or statistics\n"));
 	printf(_("  -b, --large-objects          include large objects in dump\n"));
 	printf(_("  --blobs                      (same as --large-objects, deprecated)\n"));
 	printf(_("  -B, --no-large-objects       exclude large objects in dump\n"));
@@ -1191,11 +1213,12 @@ help(const char *progname)
 	printf(_("  -N, --exclude-schema=PATTERN do NOT dump the specified schema(s)\n"));
 	printf(_("  -O, --no-owner               skip restoration of object ownership in\n"
 			 "                               plain-text format\n"));
-	printf(_("  -s, --schema-only            dump only the schema, no data\n"));
+	printf(_("  -s, --schema-only            dump only the schema, no data or statistics\n"));
 	printf(_("  -S, --superuser=NAME         superuser user name to use in plain-text format\n"));
 	printf(_("  -t, --table=PATTERN          dump only the specified table(s)\n"));
 	printf(_("  -T, --exclude-table=PATTERN  do NOT dump the specified table(s)\n"));
 	printf(_("  -x, --no-privileges          do not dump privileges (grant/revoke)\n"));
+	printf(_("  -X, --statistics-only        dump only the statistics, not schema or data\n"));
 	printf(_("  --binary-upgrade             for use by upgrade utilities only\n"));
 	printf(_("  --column-inserts             dump data as INSERT commands with column names\n"));
 	printf(_("  --disable-dollar-quoting     disable dollar quoting, use SQL standard quoting\n"));
@@ -1220,8 +1243,11 @@ help(const char *progname)
 	printf(_("  --inserts                    dump data as INSERT commands, rather than COPY\n"));
 	printf(_("  --load-via-partition-root    load partitions via the root table\n"));
 	printf(_("  --no-comments                do not dump comment commands\n"));
+	printf(_("  --no-data                    do not dump data\n"));
 	printf(_("  --no-publications            do not dump publications\n"));
+	printf(_("  --no-schema                  do not dump schema\n"));
 	printf(_("  --no-security-labels         do not dump security label assignments\n"));
+	printf(_("  --no-statistics              do not dump statistics\n"));
 	printf(_("  --no-subscriptions           do not dump subscriptions\n"));
 	printf(_("  --no-table-access-method     do not dump table access methods\n"));
 	printf(_("  --no-tablespaces             do not dump tablespace assignments\n"));
@@ -6778,6 +6804,43 @@ getFuncs(Archive *fout)
 	destroyPQExpBuffer(query);
 }
 
+/*
+ * getRelationStatistics
+ *    register the statistics object as a dependent of the relation.
+ *    Returns NULL if the relation kind is not supported.
+ */
+static RelStatsInfo *
+getRelationStatistics(Archive *fout, DumpableObject *rel, char relkind)
+{
+	if ((relkind == RELKIND_RELATION) ||
+		(relkind == RELKIND_PARTITIONED_TABLE) ||
+		(relkind == RELKIND_INDEX) ||
+		(relkind == RELKIND_PARTITIONED_INDEX) ||
+		(relkind == RELKIND_MATVIEW))
+	{
+		RelStatsInfo	   *info = pg_malloc0(sizeof(RelStatsInfo));
+		DumpableObject	   *dobj = &info->dobj;
+
+		dobj->objType = DO_REL_STATS;
+		dobj->catId.tableoid = 0;
+		dobj->catId.oid = 0;
+		AssignDumpId(dobj);
+		dobj->dependencies = (DumpId *) pg_malloc(sizeof(DumpId));
+		dobj->dependencies[0] = rel->dumpId;
+		dobj->nDeps = 1;
+		dobj->allocDeps = 1;
+		dobj->components |= DUMP_COMPONENT_STATISTICS;
+		dobj->dump = rel->dump;
+		dobj->name = pg_strdup(rel->name);
+		dobj->namespace = rel->namespace;
+		info->relkind = relkind;
+		info->postponed_def = false;
+
+		return info;
+	}
+	return NULL;
+}
+
 /*
  * getTables
  *	  read all the tables (no indexes) in the system catalogs,
@@ -7203,6 +7266,8 @@ getTables(Archive *fout, int *numTables)
 				}
 			}
 		}
+		if (tblinfo[i].interesting || dopt->dumpStatistics)
+			getRelationStatistics(fout, &tblinfo[i].dobj, tblinfo[i].relkind);
 	}
 
 	if (query->len != 0)
@@ -7649,11 +7714,14 @@ getIndexes(Archive *fout, TableInfo tblinfo[], int numTables)
 		for (int c = 0; c < numinds; c++, j++)
 		{
 			char		contype;
+			char		indexkind;
+			RelStatsInfo   *relstats;
 
 			indxinfo[j].dobj.objType = DO_INDEX;
 			indxinfo[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_tableoid));
 			indxinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
 			AssignDumpId(&indxinfo[j].dobj);
+			indxinfo[j].dobj.components |= DUMP_COMPONENT_STATISTICS;
 			indxinfo[j].dobj.dump = tbinfo->dobj.dump;
 			indxinfo[j].dobj.name = pg_strdup(PQgetvalue(res, j, i_indexname));
 			indxinfo[j].dobj.namespace = tbinfo->dobj.namespace;
@@ -7676,7 +7744,14 @@ getIndexes(Archive *fout, TableInfo tblinfo[], int numTables)
 			{
 				NULL, NULL
 			};
+
+			if (indxinfo[j].parentidx == 0)
+				indexkind = RELKIND_INDEX;
+			else
+				indexkind = RELKIND_PARTITIONED_INDEX;
+
 			contype = *(PQgetvalue(res, j, i_contype));
+			relstats = getRelationStatistics(fout, &indxinfo[j].dobj, indexkind);
 
 			if (contype == 'p' || contype == 'u' || contype == 'x')
 			{
@@ -7710,6 +7785,8 @@ getIndexes(Archive *fout, TableInfo tblinfo[], int numTables)
 				constrinfo->separate = true;
 
 				indxinfo[j].indexconstraint = constrinfo->dobj.dumpId;
+				if (relstats != NULL)
+					addObjectDependency(&relstats->dobj, constrinfo->dobj.dumpId);
 			}
 			else
 			{
@@ -10297,6 +10374,296 @@ dumpComment(Archive *fout, const char *type,
 						catalogId, subid, dumpId, NULL);
 }
 
+/*
+ * Tabular description of the parameters to pg_restore_relation_stats()
+ * param_name, param_type
+ */
+static const char *rel_stats_arginfo[][2] = {
+	{"relation", "regclass"},
+	{"version", "integer"},
+	{"relpages", "integer"},
+	{"reltuples", "real"},
+	{"relallvisible", "integer"},
+};
+
+/*
+ * Tabular description of the parameters to pg_restore_attribute_stats()
+ * param_name, param_type
+ */
+static const char *att_stats_arginfo[][2] = {
+	{"relation", "regclass"},
+	{"attname", "name"},
+	{"inherited", "boolean"},
+	{"version", "integer"},
+	{"null_frac", "float4"},
+	{"avg_width", "integer"},
+	{"n_distinct", "float4"},
+	{"most_common_vals", "text"},
+	{"most_common_freqs", "float4[]"},
+	{"histogram_bounds", "text"},
+	{"correlation", "float4"},
+	{"most_common_elems", "text"},
+	{"most_common_elem_freqs", "float4[]"},
+	{"elem_count_histogram", "float4[]"},
+	{"range_length_histogram", "text"},
+	{"range_empty_frac", "float4"},
+	{"range_bounds_histogram", "text"},
+};
+
+/*
+ * getRelStatsExportQuery --
+ *
+ * Generate a query that will fetch all relation (e.g. pg_class)
+ * stats for a given relation.
+ */
+static void
+getRelStatsExportQuery(PQExpBuffer query, Archive *fout,
+					   const char *schemaname, const char *relname)
+{
+	resetPQExpBuffer(query);
+	appendPQExpBufferStr(query,
+						 "SELECT c.oid::regclass AS relation, "
+						 "current_setting('server_version_num') AS version, "
+						 "c.relpages, c.reltuples, c.relallvisible "
+						 "FROM pg_class c "
+						 "JOIN pg_namespace n "
+						 "ON n.oid = c.relnamespace "
+						 "WHERE n.nspname = ");
+	appendStringLiteralAH(query, schemaname, fout);
+	appendPQExpBufferStr(query, " AND c.relname = ");
+	appendStringLiteralAH(query, relname, fout);
+}
+
+/*
+ * getAttStatsExportQuery --
+ *
+ * Generate a query that will fetch all attribute (e.g. pg_statistic)
+ * stats for a given relation.
+ */
+static void
+getAttStatsExportQuery(PQExpBuffer query, Archive *fout,
+					   const char *schemaname, const char *relname)
+{
+	resetPQExpBuffer(query);
+	appendPQExpBufferStr(query,
+						 "SELECT c.oid::regclass AS relation, "
+						 "s.attname,"
+						 "s.inherited,"
+						 "current_setting('server_version_num') AS version, "
+						 "s.null_frac,"
+						 "s.avg_width,"
+						 "s.n_distinct,"
+						 "s.most_common_vals,"
+						 "s.most_common_freqs,"
+						 "s.histogram_bounds,"
+						 "s.correlation,"
+						 "s.most_common_elems,"
+						 "s.most_common_elem_freqs,"
+						 "s.elem_count_histogram,");
+
+	if (fout->remoteVersion >= 170000)
+		appendPQExpBufferStr(query,
+							 "s.range_length_histogram,"
+							 "s.range_empty_frac,"
+							 "s.range_bounds_histogram ");
+	else
+		appendPQExpBufferStr(query,
+							 "NULL AS range_length_histogram,"
+							 "NULL AS range_empty_frac,"
+							 "NULL AS range_bounds_histogram ");
+
+	appendPQExpBufferStr(query,
+						 "FROM pg_stats s "
+						 "JOIN pg_namespace n "
+						 "ON n.nspname = s.schemaname "
+						 "JOIN pg_class c "
+						 "ON c.relname = s.tablename "
+						 "AND c.relnamespace = n.oid "
+						 "WHERE s.schemaname = ");
+	appendStringLiteralAH(query, schemaname, fout);
+	appendPQExpBufferStr(query, " AND s.tablename = ");
+	appendStringLiteralAH(query, relname, fout);
+	appendPQExpBufferStr(query, " ORDER BY s.attname, s.inherited");
+}
+
+
+/*
+ * appendNamedArgument --
+ *
+ * Convenience routine for constructing parameters of the form:
+ * 'paraname', 'value'::type
+ */
+static void
+appendNamedArgument(PQExpBuffer out, Archive *fout, const char *argname,
+					const char *argval, const char *argtype)
+{
+	appendPQExpBufferStr(out, "\t");
+
+	appendStringLiteralAH(out, argname, fout);
+	appendPQExpBufferStr(out, ", ");
+
+	appendStringLiteralAH(out, argval, fout);
+	appendPQExpBuffer(out, "::%s", argtype);
+}
+
+/*
+ * appendRelStatsImport --
+ *
+ * Append a formatted pg_restore_relation_stats statement.
+ */
+static void
+appendRelStatsImport(PQExpBuffer out, Archive *fout, PGresult *res)
+{
+	const char *sep = "";
+
+	if (PQntuples(res) == 0)
+		return;
+
+	appendPQExpBufferStr(out, "SELECT * FROM pg_catalog.pg_restore_relation_stats(\n");
+
+	for (int argno = 0; argno < lengthof(rel_stats_arginfo); argno++)
+	{
+		const char *argname = rel_stats_arginfo[argno][0];
+		const char *argtype = rel_stats_arginfo[argno][1];
+		int			fieldno = PQfnumber(res, argname);
+
+		if (fieldno < 0)
+			pg_fatal("relation stats export query missing field '%s'",
+					 argname);
+
+		if (PQgetisnull(res, 0, fieldno))
+			continue;
+
+		appendPQExpBufferStr(out, sep);
+		appendNamedArgument(out, fout, argname, PQgetvalue(res, 0, fieldno), argtype);
+
+		sep = ",\n";
+	}
+	appendPQExpBufferStr(out, "\n);\n");
+}
+
+/*
+ * appendAttStatsImport --
+ *
+ * Append a series of formatted pg_restore_attribute_stats statements.
+ */
+static void
+appendAttStatsImport(PQExpBuffer out, Archive *fout, PGresult *res)
+{
+	for (int rownum = 0; rownum < PQntuples(res); rownum++)
+	{
+		const char *sep = "";
+
+		appendPQExpBufferStr(out, "SELECT * FROM pg_catalog.pg_restore_attribute_stats(\n");
+		for (int argno = 0; argno < lengthof(att_stats_arginfo); argno++)
+		{
+			const char *argname = att_stats_arginfo[argno][0];
+			const char *argtype = att_stats_arginfo[argno][1];
+			int			fieldno = PQfnumber(res, argname);
+
+			if (fieldno < 0)
+				pg_fatal("attribute stats export query missing field '%s'",
+						 argname);
+
+			if (PQgetisnull(res, rownum, fieldno))
+				continue;
+
+			appendPQExpBufferStr(out, sep);
+			appendNamedArgument(out, fout, argname, PQgetvalue(res, rownum, fieldno), argtype);
+			sep = ",\n";
+		}
+		appendPQExpBufferStr(out, "\n);\n");
+	}
+}
+
+/*
+ * Decide which section to use based on the relkind of the parent object.
+ *
+ * NB: materialized views may be postponed from SECTION_PRE_DATA to
+ * SECTION_POST_DATA to resolve some kinds of dependency problems. If so, the
+ * matview stats will also be postponed to SECTION_POST_DATA. See
+ * repairMatViewBoundaryMultiLoop().
+ */
+static teSection
+statisticsDumpSection(const RelStatsInfo *rsinfo)
+{
+	switch (rsinfo->relkind)
+	{
+		case RELKIND_RELATION:
+		case RELKIND_PARTITIONED_TABLE:
+		case RELKIND_MATVIEW:
+			return SECTION_DATA;
+		case RELKIND_INDEX:
+		case RELKIND_PARTITIONED_INDEX:
+			return SECTION_POST_DATA;
+		default:
+			pg_fatal("cannot dump statistics for relation kind '%c'",
+					 rsinfo->relkind);
+	}
+
+	return 0;				/* keep compiler quiet */
+}
+
+/*
+ * dumpRelationStats --
+ *
+ * Dump command to import stats into the relation on the new database.
+ */
+static void
+dumpRelationStats(Archive *fout, const RelStatsInfo *rsinfo)
+{
+	PGresult	   *res;
+	PQExpBuffer		query;
+	PQExpBuffer		out;
+	PQExpBuffer		tag;
+	DumpableObject *dobj = (DumpableObject *) &rsinfo->dobj;
+	DumpId		   *deps = NULL;
+	int				ndeps = 0;
+
+	/* nothing to do if we are not dumping statistics */
+	if (!fout->dopt->dumpStatistics)
+		return;
+
+	/* dependent on the relation definition, if doing schema */
+	if (fout->dopt->dumpSchema)
+	{
+		deps = dobj->dependencies;
+		ndeps = dobj->nDeps;
+	}
+
+	tag = createPQExpBuffer();
+	appendPQExpBuffer(tag, "%s %s", "STATISTICS DATA", fmtId(dobj->name));
+
+	query = createPQExpBuffer();
+	out = createPQExpBuffer();
+
+	getRelStatsExportQuery(query, fout, dobj->namespace->dobj.name,
+						   dobj->name);
+	res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
+	appendRelStatsImport(out, fout, res);
+	PQclear(res);
+
+	getAttStatsExportQuery(query, fout, dobj->namespace->dobj.name,
+						   dobj->name);
+	res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
+	appendAttStatsImport(out, fout, res);
+	PQclear(res);
+
+	ArchiveEntry(fout, nilCatalogId, createDumpId(),
+				 ARCHIVE_OPTS(.tag = tag->data,
+							  .namespace = dobj->namespace->dobj.name,
+							  .description = "STATISTICS DATA",
+							  .section = rsinfo->postponed_def ?
+								SECTION_POST_DATA :  statisticsDumpSection(rsinfo),
+							  .createStmt = out->data,
+							  .deps = deps,
+							  .nDeps = ndeps));
+
+	destroyPQExpBuffer(query);
+	destroyPQExpBuffer(out);
+	destroyPQExpBuffer(tag);
+}
+
 /*
  * dumpTableComment --
  *
@@ -10745,6 +11112,9 @@ dumpDumpableObject(Archive *fout, DumpableObject *dobj)
 		case DO_SUBSCRIPTION_REL:
 			dumpSubscriptionTable(fout, (const SubRelInfo *) dobj);
 			break;
+		case DO_REL_STATS:
+			dumpRelationStats(fout, (const RelStatsInfo *) dobj);
+			break;
 		case DO_PRE_DATA_BOUNDARY:
 		case DO_POST_DATA_BOUNDARY:
 			/* never dumped, nothing to do */
@@ -18971,6 +19341,16 @@ addBoundaryDependencies(DumpableObject **dobjs, int numObjs,
 				/* must come after the pre-data boundary */
 				addObjectDependency(dobj, preDataBound->dumpId);
 				break;
+			case DO_REL_STATS:
+				/* stats section varies by parent object type, DATA or POST */
+				if (statisticsDumpSection((RelStatsInfo *) dobj) == SECTION_DATA)
+				{
+					addObjectDependency(dobj, preDataBound->dumpId);
+					addObjectDependency(postDataBound, dobj->dumpId);
+				}
+				else
+					addObjectDependency(dobj, postDataBound->dumpId);
+				break;
 		}
 	}
 }
diff --git a/src/bin/pg_dump/pg_dump.h b/src/bin/pg_dump/pg_dump.h
index 7139c88a69..410c162a85 100644
--- a/src/bin/pg_dump/pg_dump.h
+++ b/src/bin/pg_dump/pg_dump.h
@@ -83,10 +83,13 @@ typedef enum
 	DO_PUBLICATION,
 	DO_PUBLICATION_REL,
 	DO_PUBLICATION_TABLE_IN_SCHEMA,
+	DO_REL_STATS,
 	DO_SUBSCRIPTION,
 	DO_SUBSCRIPTION_REL,		/* see note for SubRelInfo */
 } DumpableObjectType;
 
+#define NUM_DUMPABLE_OBJECT_TYPES (DO_SUBSCRIPTION_REL + 1)
+
 /*
  * DumpComponents is a bitmask of the potentially dumpable components of
  * a database object: its core definition, plus optional attributes such
@@ -110,6 +113,7 @@ typedef uint32 DumpComponents;
 #define DUMP_COMPONENT_ACL			(1 << 4)
 #define DUMP_COMPONENT_POLICY		(1 << 5)
 #define DUMP_COMPONENT_USERMAP		(1 << 6)
+#define DUMP_COMPONENT_STATISTICS	(1 << 7)
 #define DUMP_COMPONENT_ALL			(0xFFFF)
 
 /*
@@ -430,6 +434,13 @@ typedef struct _indexAttachInfo
 	IndxInfo   *partitionIdx;	/* link to index on partition */
 } IndexAttachInfo;
 
+typedef struct _relStatsInfo
+{
+	DumpableObject	dobj;
+	char			relkind;		/* 'r', 'm', 'i', etc */
+	bool			postponed_def;  /* stats must be postponed into post-data */
+} RelStatsInfo;
+
 typedef struct _statsExtInfo
 {
 	DumpableObject dobj;
diff --git a/src/bin/pg_dump/pg_dump_sort.c b/src/bin/pg_dump/pg_dump_sort.c
index dc9a28924b..201eeedde7 100644
--- a/src/bin/pg_dump/pg_dump_sort.c
+++ b/src/bin/pg_dump/pg_dump_sort.c
@@ -81,6 +81,7 @@ enum dbObjectTypePriorities
 	PRIO_TABLE_DATA,
 	PRIO_SEQUENCE_SET,
 	PRIO_LARGE_OBJECT_DATA,
+	PRIO_STATISTICS_DATA_DATA,
 	PRIO_POST_DATA_BOUNDARY,	/* boundary! */
 	PRIO_CONSTRAINT,
 	PRIO_INDEX,
@@ -148,11 +149,12 @@ static const int dbObjectTypePriority[] =
 	[DO_PUBLICATION] = PRIO_PUBLICATION,
 	[DO_PUBLICATION_REL] = PRIO_PUBLICATION_REL,
 	[DO_PUBLICATION_TABLE_IN_SCHEMA] = PRIO_PUBLICATION_TABLE_IN_SCHEMA,
+	[DO_REL_STATS] = PRIO_STATISTICS_DATA_DATA,
 	[DO_SUBSCRIPTION] = PRIO_SUBSCRIPTION,
 	[DO_SUBSCRIPTION_REL] = PRIO_SUBSCRIPTION_REL,
 };
 
-StaticAssertDecl(lengthof(dbObjectTypePriority) == (DO_SUBSCRIPTION_REL + 1),
+StaticAssertDecl(lengthof(dbObjectTypePriority) == NUM_DUMPABLE_OBJECT_TYPES,
 				 "array length mismatch");
 
 static DumpId preDataBoundId;
@@ -801,11 +803,21 @@ repairMatViewBoundaryMultiLoop(DumpableObject *boundaryobj,
 {
 	/* remove boundary's dependency on object after it in loop */
 	removeObjectDependency(boundaryobj, nextobj->dumpId);
-	/* if that object is a matview, mark it as postponed into post-data */
+	/*
+	 * If that object is a matview or matview statistics object, mark it as
+	 * postponed into post-data.
+	 */
 	if (nextobj->objType == DO_TABLE)
 	{
 		TableInfo  *nextinfo = (TableInfo *) nextobj;
 
+		if (nextinfo->relkind == RELKIND_MATVIEW)
+			nextinfo->postponed_def = true;
+	}
+	else if (nextobj->objType == DO_REL_STATS)
+	{
+		RelStatsInfo *nextinfo = (RelStatsInfo *) nextobj;
+
 		if (nextinfo->relkind == RELKIND_MATVIEW)
 			nextinfo->postponed_def = true;
 	}
@@ -1018,6 +1030,21 @@ repairDependencyLoop(DumpableObject **loop,
 					{
 						DumpableObject *nextobj;
 
+						nextobj = (j < nLoop - 1) ? loop[j + 1] : loop[0];
+						repairMatViewBoundaryMultiLoop(loop[j], nextobj);
+						return;
+					}
+				}
+			}
+			else if (loop[i]->objType == DO_REL_STATS &&
+					 ((RelStatsInfo *) loop[i])->relkind == RELKIND_MATVIEW)
+			{
+				for (j = 0; j < nLoop; j++)
+				{
+					if (loop[j]->objType == DO_POST_DATA_BOUNDARY)
+					{
+						DumpableObject *nextobj;
+
 						nextobj = (j < nLoop - 1) ? loop[j + 1] : loop[0];
 						repairMatViewBoundaryMultiLoop(loop[j], nextobj);
 						return;
@@ -1500,6 +1527,11 @@ describeDumpableObject(DumpableObject *obj, char *buf, int bufsize)
 					 "POST-DATA BOUNDARY  (ID %d)",
 					 obj->dumpId);
 			return;
+		case DO_REL_STATS:
+			snprintf(buf, bufsize,
+					 "RELATION STATISTICS FOR %s  (ID %d OID %u)",
+					 obj->name, obj->dumpId, obj->catId.oid);
+			return;
 	}
 	/* shouldn't get here */
 	snprintf(buf, bufsize,
diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c
index 396f79781c..7effb70490 100644
--- a/src/bin/pg_dump/pg_dumpall.c
+++ b/src/bin/pg_dump/pg_dumpall.c
@@ -103,6 +103,7 @@ static int	use_setsessauth = 0;
 static int	no_comments = 0;
 static int	no_publications = 0;
 static int	no_security_labels = 0;
+static int	no_statistics = 0;
 static int	no_subscriptions = 0;
 static int	no_toast_compression = 0;
 static int	no_unlogged_table_data = 0;
@@ -172,6 +173,7 @@ main(int argc, char *argv[])
 		{"no-role-passwords", no_argument, &no_role_passwords, 1},
 		{"no-security-labels", no_argument, &no_security_labels, 1},
 		{"no-subscriptions", no_argument, &no_subscriptions, 1},
+		{"no-statistics", no_argument, &no_statistics, 1},
 		{"no-sync", no_argument, NULL, 4},
 		{"no-toast-compression", no_argument, &no_toast_compression, 1},
 		{"no-unlogged-table-data", no_argument, &no_unlogged_table_data, 1},
@@ -451,6 +453,8 @@ main(int argc, char *argv[])
 		appendPQExpBufferStr(pgdumpopts, " --no-publications");
 	if (no_security_labels)
 		appendPQExpBufferStr(pgdumpopts, " --no-security-labels");
+	if (no_statistics)
+		appendPQExpBufferStr(pgdumpopts, " --no-statistics");
 	if (no_subscriptions)
 		appendPQExpBufferStr(pgdumpopts, " --no-subscriptions");
 	if (no_toast_compression)
@@ -666,6 +670,7 @@ help(void)
 	printf(_("  --no-publications            do not dump publications\n"));
 	printf(_("  --no-role-passwords          do not dump passwords for roles\n"));
 	printf(_("  --no-security-labels         do not dump security label assignments\n"));
+	printf(_("  --no-statistics              do not dump statistics\n"));
 	printf(_("  --no-subscriptions           do not dump subscriptions\n"));
 	printf(_("  --no-sync                    do not wait for changes to be written safely to disk\n"));
 	printf(_("  --no-table-access-method     do not dump table access methods\n"));
diff --git a/src/bin/pg_dump/pg_restore.c b/src/bin/pg_dump/pg_restore.c
index c602272d7d..02af89bae1 100644
--- a/src/bin/pg_dump/pg_restore.c
+++ b/src/bin/pg_dump/pg_restore.c
@@ -63,6 +63,9 @@ main(int argc, char **argv)
 	int			numWorkers = 1;
 	Archive    *AH;
 	char	   *inputFileSpec;
+	bool		data_only = false;
+	bool		schema_only = false;
+	bool		statistics_only = false;
 	static int	disable_triggers = 0;
 	static int	enable_row_security = 0;
 	static int	if_exists = 0;
@@ -71,12 +74,13 @@ main(int argc, char **argv)
 	static int	outputNoTablespaces = 0;
 	static int	use_setsessauth = 0;
 	static int	no_comments = 0;
+	static int	no_data = 0;
 	static int	no_publications = 0;
+	static int	no_schema = 0;
 	static int	no_security_labels = 0;
+	static int  no_statistics = 0;
 	static int	no_subscriptions = 0;
 	static int	strict_names = 0;
-	bool		data_only = false;
-	bool		schema_only = false;
 
 	struct option cmdopts[] = {
 		{"clean", 0, NULL, 'c'},
@@ -108,6 +112,7 @@ main(int argc, char **argv)
 		{"username", 1, NULL, 'U'},
 		{"verbose", 0, NULL, 'v'},
 		{"single-transaction", 0, NULL, '1'},
+		{"statistics-only", no_argument, NULL, 'X'},
 
 		/*
 		 * the following options don't have an equivalent short option letter
@@ -124,9 +129,12 @@ main(int argc, char **argv)
 		{"transaction-size", required_argument, NULL, 5},
 		{"use-set-session-authorization", no_argument, &use_setsessauth, 1},
 		{"no-comments", no_argument, &no_comments, 1},
+		{"no-data", no_argument, &no_data, 1},
 		{"no-publications", no_argument, &no_publications, 1},
+		{"no-schema", no_argument, &no_schema, 1},
 		{"no-security-labels", no_argument, &no_security_labels, 1},
 		{"no-subscriptions", no_argument, &no_subscriptions, 1},
+		{"no-statistics", no_argument, &no_statistics, 1},
 		{"filter", required_argument, NULL, 4},
 
 		{NULL, 0, NULL, 0}
@@ -271,6 +279,10 @@ main(int argc, char **argv)
 				opts->aclsSkip = 1;
 				break;
 
+			case 'X':			/* Restore statistics only */
+				statistics_only = true;
+				break;
+
 			case '1':			/* Restore data in a single transaction */
 				opts->single_txn = true;
 				opts->exit_on_error = true;
@@ -343,6 +355,10 @@ main(int argc, char **argv)
 
 	if (data_only && schema_only)
 		pg_fatal("options -s/--schema-only and -a/--data-only cannot be used together");
+	if (data_only && statistics_only)
+		pg_fatal("options -a/--data-only and -X/--statistics-only cannot be used together");
+	if (schema_only && statistics_only)
+		pg_fatal("options -s/--schema-only and -X/--statistics-only cannot be used together");
 
 	if (data_only && opts->dropSchema)
 		pg_fatal("options -c/--clean and -a/--data-only cannot be used together");
@@ -362,8 +378,9 @@ main(int argc, char **argv)
 		pg_fatal("cannot specify both --single-transaction and multiple jobs");
 
 	/* set derivative flags */
-	opts->dumpSchema = (!data_only);
-	opts->dumpData = (!schema_only);
+	opts->dumpData = data_only || (!no_data && !schema_only && !statistics_only);
+	opts->dumpSchema = schema_only || (!no_schema && !data_only && !statistics_only);
+	opts->dumpStatistics = statistics_only || (!no_statistics && !data_only && !schema_only);
 
 	opts->disable_triggers = disable_triggers;
 	opts->enable_row_security = enable_row_security;
@@ -375,6 +392,8 @@ main(int argc, char **argv)
 	opts->no_publications = no_publications;
 	opts->no_security_labels = no_security_labels;
 	opts->no_subscriptions = no_subscriptions;
+	opts->no_statistics = no_statistics;
+	opts->no_data = no_data;
 
 	if (if_exists && !opts->dropSchema)
 		pg_fatal("option --if-exists requires option -c/--clean");
@@ -482,6 +501,7 @@ usage(const char *progname)
 	printf(_("  -t, --table=NAME             restore named relation (table, view, etc.)\n"));
 	printf(_("  -T, --trigger=NAME           restore named trigger\n"));
 	printf(_("  -x, --no-privileges          skip restoration of access privileges (grant/revoke)\n"));
+	printf(_("  -X, --statistics-only        restore only the statistics, not schema or data\n"));
 	printf(_("  -1, --single-transaction     restore as a single transaction\n"));
 	printf(_("  --disable-triggers           disable triggers during data-only restore\n"));
 	printf(_("  --enable-row-security        enable row security\n"));
@@ -489,10 +509,13 @@ usage(const char *progname)
 			 "                               in FILENAME\n"));
 	printf(_("  --if-exists                  use IF EXISTS when dropping objects\n"));
 	printf(_("  --no-comments                do not restore comment commands\n"));
+	printf(_("  --no-data                    do not restore data\n"));
 	printf(_("  --no-data-for-failed-tables  do not restore data of tables that could not be\n"
 			 "                               created\n"));
 	printf(_("  --no-publications            do not restore publications\n"));
+	printf(_("  --no-schema                  do not restore schema\n"));
 	printf(_("  --no-security-labels         do not restore security labels\n"));
+	printf(_("  --no-statistics              do not restore statistics\n"));
 	printf(_("  --no-subscriptions           do not restore subscriptions\n"));
 	printf(_("  --no-table-access-method     do not restore table access methods\n"));
 	printf(_("  --no-tablespaces             do not restore tablespace assignments\n"));
diff --git a/src/bin/pg_dump/t/001_basic.pl b/src/bin/pg_dump/t/001_basic.pl
index 214240f1ae..f29da06ed2 100644
--- a/src/bin/pg_dump/t/001_basic.pl
+++ b/src/bin/pg_dump/t/001_basic.pl
@@ -50,12 +50,30 @@ command_fails_like(
 	'pg_dump: options -s/--schema-only and -a/--data-only cannot be used together'
 );
 
+command_fails_like(
+	[ 'pg_dump', '-s', '-X' ],
+	qr/\Qpg_dump: error: options -s\/--schema-only and -X\/--statistics-only cannot be used together\E/,
+	'pg_dump: error: options -s/--schema-only and -X/--statistics-only cannot be used together'
+);
+
+command_fails_like(
+	[ 'pg_dump', '-a', '-X' ],
+	qr/\Qpg_dump: error: options -a\/--data-only and -X\/--statistics-only cannot be used together\E/,
+	'pg_dump: error: options -a/--data-only and -X/--statistics-only cannot be used together'
+);
+
 command_fails_like(
 	[ 'pg_dump', '-s', '--include-foreign-data=xxx' ],
 	qr/\Qpg_dump: error: options -s\/--schema-only and --include-foreign-data cannot be used together\E/,
 	'pg_dump: options -s/--schema-only and --include-foreign-data cannot be used together'
 );
 
+command_fails_like(
+	[ 'pg_dump', '--statistics-only', '--no-statistics' ],
+	qr/\Qpg_dump: error: options -X\/--statistics-only and --no-statistics cannot be used together\E/,
+	'pg_dump: options -X/--statistics-only and --no-statistics cannot be used together'
+);
+
 command_fails_like(
 	[ 'pg_dump', '-j2', '--include-foreign-data=xxx' ],
 	qr/\Qpg_dump: error: option --include-foreign-data is not supported with parallel backup\E/,
diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl
index 805ba9f49f..d5f0f92dfa 100644
--- a/src/bin/pg_dump/t/002_pg_dump.pl
+++ b/src/bin/pg_dump/t/002_pg_dump.pl
@@ -65,7 +65,7 @@ my %pgdump_runs = (
 			'--format' => 'custom',
 			'--file' => "$tempdir/binary_upgrade.dump",
 			'--no-password',
-			'--schema-only',
+			'--no-data',
 			'--binary-upgrade',
 			'--dbname' => 'postgres',    # alternative way to specify database
 		],
@@ -710,6 +710,39 @@ my %pgdump_runs = (
 			'--no-large-objects',
 			'postgres',
 		],
+	},
+	no_statistics => {
+		dump_cmd => [
+			'pg_dump', '--no-sync',
+			"--file=$tempdir/no_statistics.sql",
+			'--no-statistics',
+			'postgres',
+		],
+	},
+	no_data_no_schema => {
+		dump_cmd => [
+			'pg_dump', '--no-sync',
+			"--file=$tempdir/no_data_no_schema.sql",
+			'--no-data',
+			'--no-schema',
+			'postgres',
+		],
+	},
+	statistics_only => {
+		dump_cmd => [
+			'pg_dump', '--no-sync',
+			"--file=$tempdir/statistics_only.sql",
+			'--statistics-only',
+			'postgres',
+		],
+	},
+	no_schema => {
+		dump_cmd => [
+			'pg_dump', '--no-sync',
+			"--file=$tempdir/no_schema.sql",
+			'--no-schema',
+			'postgres',
+		],
 	},);
 
 ###############################################################
@@ -776,6 +809,7 @@ my %full_runs = (
 	no_large_objects => 1,
 	no_owner => 1,
 	no_privs => 1,
+	no_statistics => 1,
 	no_table_access_method => 1,
 	pg_dumpall_dbprivs => 1,
 	pg_dumpall_exclude => 1,
@@ -977,6 +1011,7 @@ my %tests = (
 			column_inserts => 1,
 			data_only => 1,
 			inserts => 1,
+			no_schema => 1,
 			section_data => 1,
 			test_schema_plus_large_objects => 1,
 		},
@@ -1390,6 +1425,7 @@ my %tests = (
 			column_inserts => 1,
 			data_only => 1,
 			inserts => 1,
+			no_schema => 1,
 			section_data => 1,
 			test_schema_plus_large_objects => 1,
 		},
@@ -1411,6 +1447,7 @@ my %tests = (
 			column_inserts => 1,
 			data_only => 1,
 			inserts => 1,
+			no_schema => 1,
 			section_data => 1,
 			test_schema_plus_large_objects => 1,
 		},
@@ -1432,6 +1469,7 @@ my %tests = (
 			column_inserts => 1,
 			data_only => 1,
 			inserts => 1,
+			no_schema => 1,
 			section_data => 1,
 			test_schema_plus_large_objects => 1,
 		},
@@ -1598,6 +1636,7 @@ my %tests = (
 			column_inserts => 1,
 			data_only => 1,
 			inserts => 1,
+			no_schema => 1,
 			section_data => 1,
 			test_schema_plus_large_objects => 1,
 		},
@@ -1751,6 +1790,7 @@ my %tests = (
 			%full_runs,
 			%dump_test_schema_runs,
 			data_only => 1,
+			no_schema => 1,
 			only_dump_test_table => 1,
 			section_data => 1,
 		},
@@ -1778,6 +1818,7 @@ my %tests = (
 			data_only => 1,
 			exclude_test_table => 1,
 			exclude_test_table_data => 1,
+			no_schema => 1,
 			section_data => 1,
 		},
 		unlike => {
@@ -1798,7 +1839,10 @@ my %tests = (
 			\QCOPY dump_test.fk_reference_test_table (col1) FROM stdin;\E
 			\n(?:\d\n){5}\\\.\n
 			/xms,
-		like => { data_only => 1, },
+		like => {
+			data_only => 1,
+			no_schema => 1,
+		},
 	},
 
 	'COPY test_second_table' => {
@@ -1814,6 +1858,7 @@ my %tests = (
 			%full_runs,
 			%dump_test_schema_runs,
 			data_only => 1,
+			no_schema => 1,
 			section_data => 1,
 		},
 		unlike => {
@@ -1836,6 +1881,7 @@ my %tests = (
 			%full_runs,
 			%dump_test_schema_runs,
 			data_only => 1,
+			no_schema => 1,
 			section_data => 1,
 		},
 		unlike => {
@@ -1859,6 +1905,7 @@ my %tests = (
 			%full_runs,
 			%dump_test_schema_runs,
 			data_only => 1,
+			no_schema => 1,
 			section_data => 1,
 		},
 		unlike => {
@@ -1881,6 +1928,7 @@ my %tests = (
 			%full_runs,
 			%dump_test_schema_runs,
 			data_only => 1,
+			no_schema => 1,
 			section_data => 1,
 		},
 		unlike => {
@@ -1903,6 +1951,7 @@ my %tests = (
 			%full_runs,
 			%dump_test_schema_runs,
 			data_only => 1,
+			no_schema => 1,
 			section_data => 1,
 		},
 		unlike => {
@@ -3299,6 +3348,7 @@ my %tests = (
 		like => {
 			%full_runs,
 			data_only => 1,
+			no_schema => 1,
 			section_data => 1,
 			only_dump_test_schema => 1,
 			test_schema_plus_large_objects => 1,
@@ -3469,6 +3519,7 @@ my %tests = (
 			%full_runs,
 			%dump_test_schema_runs,
 			data_only => 1,
+			no_schema => 1,
 			only_dump_measurement => 1,
 			section_data => 1,
 			only_dump_test_schema => 1,
@@ -4351,6 +4402,7 @@ my %tests = (
 			column_inserts => 1,
 			data_only => 1,
 			inserts => 1,
+			no_schema => 1,
 			section_data => 1,
 			test_schema_plus_large_objects => 1,
 			binary_upgrade => 1,
@@ -4651,6 +4703,61 @@ my %tests = (
 		},
 	},
 
+	#
+	# TABLE and MATVIEW stats will end up in SECTION_DATA.
+	# INDEX stats (expression columns only) will end up in SECTION_POST_DATA.
+	#
+	'statistics_import' => {
+		create_sql => '
+			CREATE TABLE dump_test.has_stats
+			AS SELECT g.g AS x, g.g / 2 AS y FROM generate_series(1,100) AS g(g);
+			CREATE MATERIALIZED VIEW dump_test.has_stats_mv AS SELECT * FROM dump_test.has_stats;
+			CREATE INDEX dup_test_post_data_ix ON dump_test.has_stats((x - 1));
+			ANALYZE dump_test.has_stats, dump_test.has_stats_mv;',
+		regexp => qr/pg_catalog.pg_restore_attribute_stats/,
+		like => {
+			%full_runs,
+			%dump_test_schema_runs,
+			no_data_no_schema => 1,
+			no_schema => 1,
+			section_data => 1,
+			section_post_data => 1,
+			statistics_only => 1,
+		},
+		unlike => {
+			exclude_dump_test_schema => 1,
+			no_statistics => 1,
+			only_dump_measurement => 1,
+			schema_only => 1,
+		},
+	},
+
+	#
+	# While attribute stats (aka pg_statistic stats) only appear for tables
+	# that have been analyzed, all tables will have relation stats because
+	# those come from pg_class.
+	#
+	'relstats_on_unanalyzed_tables' => {
+		regexp => qr/pg_catalog.pg_restore_relation_stats/,
+
+		like => {
+			%full_runs,
+			%dump_test_schema_runs,
+			no_data_no_schema => 1,
+			no_schema => 1,
+			only_dump_test_table => 1,
+			role => 1,
+			role_parallel => 1,
+			section_data => 1,
+			section_post_data => 1,
+			statistics_only => 1,
+		},
+		unlike => {
+			no_statistics => 1,
+			schema_only => 1,
+		},
+	},
+
 	# CREATE TABLE with partitioned table and various AMs.  One
 	# partition uses the same default as the parent, and a second
 	# uses its own AM.
diff --git a/src/bin/pg_upgrade/dump.c b/src/bin/pg_upgrade/dump.c
index 8ce0fa3020..a29cd2cca9 100644
--- a/src/bin/pg_upgrade/dump.c
+++ b/src/bin/pg_upgrade/dump.c
@@ -21,10 +21,11 @@ generate_old_dump(void)
 
 	/* run new pg_dumpall binary for globals */
 	exec_prog(UTILITY_LOG_FILE, NULL, true, true,
-			  "\"%s/pg_dumpall\" %s --globals-only --quote-all-identifiers "
+			  "\"%s/pg_dumpall\" %s %s --globals-only --quote-all-identifiers "
 			  "--binary-upgrade %s --no-sync -f \"%s/%s\"",
 			  new_cluster.bindir, cluster_conn_opts(&old_cluster),
 			  log_opts.verbose ? "--verbose" : "",
+			  user_opts.do_statistics ? "" : "--no-statistics",
 			  log_opts.dumpdir,
 			  GLOBALS_DUMP_FILE);
 	check_ok();
@@ -52,10 +53,11 @@ generate_old_dump(void)
 		snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);
 
 		parallel_exec_prog(log_file_name, NULL,
-						   "\"%s/pg_dump\" %s --schema-only --quote-all-identifiers "
+						   "\"%s/pg_dump\" %s --no-data %s --quote-all-identifiers "
 						   "--binary-upgrade --format=custom %s --no-sync --file=\"%s/%s\" %s",
 						   new_cluster.bindir, cluster_conn_opts(&old_cluster),
 						   log_opts.verbose ? "--verbose" : "",
+						   user_opts.do_statistics ? "" : "--no-statistics",
 						   log_opts.dumpdir,
 						   sql_file_name, escaped_connstr.data);
 
diff --git a/src/bin/pg_upgrade/option.c b/src/bin/pg_upgrade/option.c
index 108eb7a1ba..3b6c7ec994 100644
--- a/src/bin/pg_upgrade/option.c
+++ b/src/bin/pg_upgrade/option.c
@@ -60,6 +60,8 @@ parseCommandLine(int argc, char *argv[])
 		{"copy", no_argument, NULL, 2},
 		{"copy-file-range", no_argument, NULL, 3},
 		{"sync-method", required_argument, NULL, 4},
+		{"with-statistics", no_argument, NULL, 5},
+		{"no-statistics", no_argument, NULL, 6},
 
 		{NULL, 0, NULL, 0}
 	};
@@ -70,6 +72,7 @@ parseCommandLine(int argc, char *argv[])
 
 	user_opts.do_sync = true;
 	user_opts.transfer_mode = TRANSFER_MODE_COPY;
+	user_opts.do_statistics = true;
 
 	os_info.progname = get_progname(argv[0]);
 
@@ -212,6 +215,13 @@ parseCommandLine(int argc, char *argv[])
 				user_opts.sync_method = pg_strdup(optarg);
 				break;
 
+			case 5:
+				user_opts.do_statistics = true;
+				break;
+			case 6:
+				user_opts.do_statistics = false;
+				break;
+
 			default:
 				fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
 						os_info.progname);
@@ -306,7 +316,9 @@ usage(void)
 	printf(_("  --clone                       clone instead of copying files to new cluster\n"));
 	printf(_("  --copy                        copy files to new cluster (default)\n"));
 	printf(_("  --copy-file-range             copy files to new cluster with copy_file_range\n"));
+	printf(_("  --no-statistics               do not import statistics from old cluster\n"));
 	printf(_("  --sync-method=METHOD          set method for syncing files to disk\n"));
+	printf(_("  --with-statistics             import statistics from old cluster (default)\n"));
 	printf(_("  -?, --help                    show this help, then exit\n"));
 	printf(_("\n"
 			 "Before running pg_upgrade you must:\n"
diff --git a/src/bin/pg_upgrade/pg_upgrade.h b/src/bin/pg_upgrade/pg_upgrade.h
index 0cdd675e4f..3fe111fbde 100644
--- a/src/bin/pg_upgrade/pg_upgrade.h
+++ b/src/bin/pg_upgrade/pg_upgrade.h
@@ -327,6 +327,7 @@ typedef struct
 	int			jobs;			/* number of processes/threads to use */
 	char	   *socketdir;		/* directory to use for Unix sockets */
 	char	   *sync_method;
+	bool		do_statistics;	/* carry over statistics from old cluster */
 } UserOpts;
 
 typedef struct
diff --git a/doc/src/sgml/ref/pg_dump.sgml b/doc/src/sgml/ref/pg_dump.sgml
index 24fcc76d72..ecf7d3d632 100644
--- a/doc/src/sgml/ref/pg_dump.sgml
+++ b/doc/src/sgml/ref/pg_dump.sgml
@@ -123,7 +123,7 @@ PostgreSQL documentation
       <term><option>--data-only</option></term>
       <listitem>
        <para>
-        Dump only the data, not the schema (data definitions).
+        Dump only the data, not the schema (data definitions) or statistics.
         Table data, large objects, and sequence values are dumped.
        </para>
 
@@ -141,13 +141,11 @@ PostgreSQL documentation
       <listitem>
        <para>
         Include large objects in the dump.  This is the default behavior
-        except when <option>--schema</option>, <option>--table</option>, or
-        <option>--schema-only</option> is specified.  The <option>-b</option>
-        switch is therefore only useful to add large objects to dumps
-        where a specific schema or table has been requested.  Note that
-        large objects are considered data and therefore will be included when
-        <option>--data-only</option> is used, but not
-        when <option>--schema-only</option> is.
+        except when <option>--schema</option>, <option>--table</option>,
+        <option>--schema-only</option>, <option>--statistics-only</option>, or
+        <option>--no-data</option> is specified.  The <option>-b</option>
+        switch is therefore only useful to add large objects to dumps where a
+        specific schema or table has been requested.
        </para>
       </listitem>
      </varlistentry>
@@ -516,10 +514,11 @@ PostgreSQL documentation
       <term><option>--schema-only</option></term>
       <listitem>
        <para>
-        Dump only the object definitions (schema), not data.
+        Dump only the object definitions (schema), not data or statistics.
        </para>
        <para>
-        This option is the inverse of <option>--data-only</option>.
+        This option is mutually exclusive with <option>--data-only</option>
+        and <option>--statistics-only</option>.
         It is similar to, but for historical reasons not identical to,
         specifying
         <option>--section=pre-data --section=post-data</option>.
@@ -652,6 +651,18 @@ PostgreSQL documentation
       </listitem>
      </varlistentry>
 
+     <varlistentry>
+      <term><option>-X</option></term>
+      <term><option>--statistics-only</option></term>
+      <listitem>
+       <para>
+        Dump only the statistics, not the schema (data definitions) or data.
+        Statistics for tables, materialized views, and indexes are dumped.
+       </para>
+
+      </listitem>
+     </varlistentry>
+
      <varlistentry>
       <term><option>-Z <replaceable class="parameter">level</replaceable></option></term>
       <term><option>-Z <replaceable class="parameter">method</replaceable></option>[:<replaceable>detail</replaceable>]</term>
@@ -824,16 +835,17 @@ PostgreSQL documentation
       <term><option>--exclude-table-data=<replaceable class="parameter">pattern</replaceable></option></term>
       <listitem>
        <para>
-        Do not dump data for any tables matching <replaceable
+        Do not dump data or statistics for any tables matching <replaceable
         class="parameter">pattern</replaceable>. The pattern is
         interpreted according to the same rules as for <option>-t</option>.
         <option>--exclude-table-data</option> can be given more than once to
-        exclude tables matching any of several patterns. This option is
-        useful when you need the definition of a particular table even
-        though you do not need the data in it.
+        exclude tables matching any of several patterns. This option is useful
+        when you need the definition of a particular table even though you do
+        not need the data in it.
        </para>
        <para>
-        To exclude data for all tables in the database, see <option>--schema-only</option>.
+        To exclude data for all tables in the database, see <option>--schema-only</option>
+        or <option>--statistics-only</option>.
        </para>
       </listitem>
      </varlistentry>
@@ -1080,6 +1092,15 @@ PostgreSQL documentation
       </listitem>
      </varlistentry>
 
+     <varlistentry>
+      <term><option>--no-data</option></term>
+      <listitem>
+       <para>
+        Do not dump data.
+       </para>
+      </listitem>
+     </varlistentry>
+
      <varlistentry>
       <term><option>--no-publications</option></term>
       <listitem>
@@ -1098,6 +1119,24 @@ PostgreSQL documentation
       </listitem>
      </varlistentry>
 
+     <varlistentry>
+      <term><option>--no-schema</option></term>
+      <listitem>
+       <para>
+        Do not dump schema (data definitions).
+       </para>
+      </listitem>
+     </varlistentry>
+
+     <varlistentry>
+      <term><option>--no-statistics</option></term>
+      <listitem>
+       <para>
+        Do not dump statistics.
+       </para>
+      </listitem>
+     </varlistentry>
+
      <varlistentry>
       <term><option>--no-subscriptions</option></term>
       <listitem>
diff --git a/doc/src/sgml/ref/pg_dumpall.sgml b/doc/src/sgml/ref/pg_dumpall.sgml
index 39d93c2c0e..3834756973 100644
--- a/doc/src/sgml/ref/pg_dumpall.sgml
+++ b/doc/src/sgml/ref/pg_dumpall.sgml
@@ -265,6 +265,17 @@ exclude database <replaceable class="parameter">PATTERN</replaceable>
       </listitem>
      </varlistentry>
 
+     <varlistentry>
+      <term><option>-X</option></term>
+      <term><option>--statistics-only</option></term>
+      <listitem>
+       <para>
+        Dump only the statistics, not the schema (data definitions) or data.
+        Statistics for tables, materialized views, and indexes are dumped.
+       </para>
+      </listitem>
+     </varlistentry>
+
      <varlistentry>
       <term><option>--binary-upgrade</option></term>
       <listitem>
@@ -422,6 +433,15 @@ exclude database <replaceable class="parameter">PATTERN</replaceable>
       </listitem>
      </varlistentry>
 
+     <varlistentry>
+      <term><option>--no-data</option></term>
+      <listitem>
+       <para>
+        Do not dump data.
+       </para>
+      </listitem>
+     </varlistentry>
+
      <varlistentry>
       <term><option>--no-publications</option></term>
       <listitem>
@@ -447,6 +467,15 @@ exclude database <replaceable class="parameter">PATTERN</replaceable>
       </listitem>
      </varlistentry>
 
+     <varlistentry>
+      <term><option>--no-schema</option></term>
+      <listitem>
+       <para>
+        Do not dump schema (data definitions).
+       </para>
+      </listitem>
+     </varlistentry>
+
      <varlistentry>
       <term><option>--no-security-labels</option></term>
       <listitem>
@@ -456,6 +485,15 @@ exclude database <replaceable class="parameter">PATTERN</replaceable>
       </listitem>
      </varlistentry>
 
+     <varlistentry>
+      <term><option>--no-statistics</option></term>
+      <listitem>
+       <para>
+        Do not dump statistics.
+       </para>
+      </listitem>
+     </varlistentry>
+
      <varlistentry>
       <term><option>--no-subscriptions</option></term>
       <listitem>
diff --git a/doc/src/sgml/ref/pg_restore.sgml b/doc/src/sgml/ref/pg_restore.sgml
index b8b27e1719..3c381db1aa 100644
--- a/doc/src/sgml/ref/pg_restore.sgml
+++ b/doc/src/sgml/ref/pg_restore.sgml
@@ -94,7 +94,7 @@ PostgreSQL documentation
       <term><option>--data-only</option></term>
       <listitem>
        <para>
-        Restore only the data, not the schema (data definitions).
+        Restore only the data, not the schema (data definitions) or statistics.
         Table data, large objects, and sequence values are restored,
         if present in the archive.
        </para>
@@ -483,10 +483,11 @@ PostgreSQL documentation
         to the extent that schema entries are present in the archive.
        </para>
        <para>
-        This option is the inverse of <option>--data-only</option>.
+        This option is mutually exclusive with <option>--data-only</option>
+        and <option>--statistics-only</option>.
         It is similar to, but for historical reasons not identical to,
         specifying
-        <option>--section=pre-data --section=post-data</option>.
+        <option>--section=pre-data --section=post-data --no-statistics</option>.
        </para>
        <para>
         (Do not confuse this with the <option>--schema</option> option, which
@@ -599,6 +600,16 @@ PostgreSQL documentation
       </listitem>
      </varlistentry>
 
+     <varlistentry>
+      <term><option>-X</option></term>
+      <term><option>--statistics-only</option></term>
+      <listitem>
+       <para>
+        Restore only the statistics, not the schema (data definitions) or data.
+       </para>
+      </listitem>
+     </varlistentry>
+
      <varlistentry>
       <term><option>-1</option></term>
       <term><option>--single-transaction</option></term>
@@ -681,6 +692,16 @@ PostgreSQL documentation
       </listitem>
      </varlistentry>
 
+     <varlistentry>
+      <term><option>--no-data</option></term>
+      <listitem>
+       <para>
+        Do not output commands to restore data, even if the archive
+        contains them.
+       </para>
+      </listitem>
+     </varlistentry>
+
      <varlistentry>
       <term><option>--no-data-for-failed-tables</option></term>
       <listitem>
@@ -713,6 +734,16 @@ PostgreSQL documentation
       </listitem>
      </varlistentry>
 
+     <varlistentry>
+      <term><option>--no-schema</option></term>
+      <listitem>
+       <para>
+        Do not output commands to restore schema (data definitions), even if
+        the archive contains them.
+       </para>
+      </listitem>
+     </varlistentry>
+
      <varlistentry>
       <term><option>--no-security-labels</option></term>
       <listitem>
@@ -723,6 +754,16 @@ PostgreSQL documentation
       </listitem>
      </varlistentry>
 
+     <varlistentry>
+      <term><option>--no-statistics</option></term>
+      <listitem>
+       <para>
+        Do not output commands to restore statistics, even if the archive
+        contains them.
+       </para>
+      </listitem>
+     </varlistentry>
+
      <varlistentry>
       <term><option>--no-subscriptions</option></term>
       <listitem>
diff --git a/doc/src/sgml/ref/pgupgrade.sgml b/doc/src/sgml/ref/pgupgrade.sgml
index 4777381dac..64a1ebd613 100644
--- a/doc/src/sgml/ref/pgupgrade.sgml
+++ b/doc/src/sgml/ref/pgupgrade.sgml
@@ -145,6 +145,24 @@ PostgreSQL documentation
       </listitem>
      </varlistentry>
 
+     <varlistentry>
+      <term><option>--with-statistics</option></term>
+      <listitem>
+       <para>
+        Restore statistics from the old cluster into the new cluster.  This is the default.
+       </para>
+      </listitem>
+     </varlistentry>
+
+     <varlistentry>
+      <term><option>--no-statistics</option></term>
+      <listitem>
+       <para>
+        Do not restore statistics from the old cluster into the new cluster.
+       </para>
+      </listitem>
+     </varlistentry>
+
      <varlistentry>
       <term><option>-o</option> <replaceable class="parameter">options</replaceable></term>
       <term><option>--old-options</option> <replaceable class="parameter">options</replaceable></term>
diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list
index 9a3bee93de..f3351342a2 100644
--- a/src/tools/pgindent/typedefs.list
+++ b/src/tools/pgindent/typedefs.list
@@ -2400,6 +2400,7 @@ RelMapFile
 RelMapping
 RelOptInfo
 RelOptKind
+RelStatsInfo
 RelToCheck
 RelToCluster
 RelabelType
-- 
2.48.1

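To make the new switch combinations concrete, here is a rough usage sketch
(purely illustrative: database names, paths, and environment variables are
placeholders, and the options are only those introduced by this patch set):

    # custom-format dump; with this patch set, statistics are included by default
    pg_dump -Fc -f /tmp/full.dump mydb

    # dump only the statistics, no schema or data
    pg_dump --statistics-only -f /tmp/stats.sql mydb

    # restore the archive but skip the statistics it contains
    pg_restore --no-statistics -d mydb_copy /tmp/full.dump

    # restore only the statistics from the same archive
    pg_restore --statistics-only -d mydb_copy /tmp/full.dump

    # pg_upgrade carries statistics over by default; opt out with
    pg_upgrade -b "$OLDBIN" -B "$NEWBIN" -d "$OLDDATA" -D "$NEWDATA" --no-statistics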