Am 03.01.22 um 20:11 schrieb Alvaro Herrera:
On 2022-Jan-03, Gunnar "Nick" Bluth wrote:
9:38 $ git format-patch PGDG/master -v5 -o ..
../v5-0001-ping-pong-of-thougths.patch
../v5-0002-ping-pong-of-thougths.patch
../v5-0003-adds-some-debugging-messages-in-toast_helper.c.patch
...
Hmm, in such cases I would suggest to create a separate branch and then
"git merge --squash" for submission. You can keep your development
branch separate, with other merges if you want.
I've found this to be easier to manage, though I don't always follow
that workflow myself.
Using --stdout does help ;-)
I wonder why "track_toast.sql" test fails on Windows (with "ERROR:
compression method lz4 not supported"), but "compression.sql" doesn't.
Any hints?
Anyway, I shamelessly copied "wait_for_stats()" from the "stats.sql"
file and the tests _should_ now work at least on the platforms with lz4.
v6 attached!
--
Gunnar "Nick" Bluth
Eimermacherweg 106
D-48159 Münster
Mobil +49 172 8853339
Email: gunnar.bl...@pro-open.de
__________________________________________________________________________
"Ceterum censeo SystemD esse delendam" - Cato
From e743587fbd8f6592bbfa15f53733f79c405000e2 Mon Sep 17 00:00:00 2001
From: "Gunnar \"Nick\" Bluth" <gunnar.bl...@pro-open.de>
Date: Mon, 3 Jan 2022 20:35:05 +0100
Subject: [PATCH v6] pg_stat_toast v6
---
doc/src/sgml/config.sgml | 26 ++
doc/src/sgml/monitoring.sgml | 163 +++++++++
doc/src/sgml/storage.sgml | 12 +-
src/backend/access/table/toast_helper.c | 24 ++
src/backend/catalog/system_views.sql | 20 ++
src/backend/postmaster/pgstat.c | 309 +++++++++++++++++-
src/backend/utils/adt/pgstatfuncs.c | 72 ++++
src/backend/utils/misc/guc.c | 9 +
src/backend/utils/misc/postgresql.conf.sample | 1 +
src/include/catalog/pg_proc.dat | 25 ++
src/include/pgstat.h | 108 ++++++
src/test/regress/expected/rules.out | 17 +
src/test/regress/expected/track_toast.out | 102 ++++++
src/test/regress/parallel_schedule | 2 +-
src/test/regress/sql/track_toast.sql | 64 ++++
15 files changed, 946 insertions(+), 8 deletions(-)
create mode 100644 src/test/regress/expected/track_toast.out
create mode 100644 src/test/regress/sql/track_toast.sql
diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index afbb6c35e3..fa40befc16 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -7668,6 +7668,32 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
</listitem>
</varlistentry>
+ <varlistentry id="guc-track-toast" xreflabel="track_toast">
+ <term><varname>track_toast</varname> (<type>boolean</type>)
+ <indexterm>
+ <primary><varname>track_toast</varname> configuration parameter</primary>
+ </indexterm>
+ </term>
+ <listitem>
+ <para>
+ Enables tracking of <link linkend="storage-toast">TOAST</link> activities.
+ Compressions and externalizations are tracked.
+ The default is <literal>off</literal>.
+ Only superusers can change this setting.
+ </para>
+
+ <note>
+ <para>
+ Be aware that this feature, depending on the number of TOASTable columns in
+ your databases, may significantly increase the size of the statistics files
+ and the workload of the statistics collector. It is recommended to only
+ activate this temporarily in order to assess the right compression and
+ storage method for a column.
+ </para>
+ </note>
+ </listitem>
+ </varlistentry>
+
<varlistentry id="guc-stats-temp-directory" xreflabel="stats_temp_directory">
<term><varname>stats_temp_directory</varname> (<type>string</type>)
<indexterm>
diff --git a/doc/src/sgml/monitoring.sgml b/doc/src/sgml/monitoring.sgml
index 62f2a3332b..32d7818096 100644
--- a/doc/src/sgml/monitoring.sgml
+++ b/doc/src/sgml/monitoring.sgml
@@ -610,6 +610,17 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
yet included in <structname>pg_stat_user_functions</structname>).</entry>
</row>
+ <row>
+ <entry><structname>pg_stat_toast</structname><indexterm><primary>pg_stat_toast</primary></indexterm></entry>
+ <entry>
+ One row for each column that has ever been TOASTed (compressed and/or externalized),
+ showing the number of externalizations, compression attempts / successes, compressed and
+ uncompressed sizes etc. See
+ <link linkend="monitoring-pg-stat-toast-view">
+ <structname>pg_stat_toast</structname></link> for details.
+ </entry>
+ </row>
+
<row>
<entry><structname>pg_stat_slru</structname><indexterm><primary>pg_stat_slru</primary></indexterm></entry>
<entry>One row per SLRU, showing statistics of operations. See
@@ -4969,6 +4980,158 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i
</sect2>
+ <sect2 id="monitoring-pg-stat-toast-view">
+ <title><structname>pg_stat_toast</structname></title>
+
+ <indexterm>
+ <primary>pg_stat_toast</primary>
+ </indexterm>
+
+ <para>
+ The <structname>pg_stat_toast</structname> view will contain
+ one row for each column of variable size that has been TOASTed since
+ the last statistics reset. The <xref linkend="guc-track-toast"/> parameter
+ controls whether TOAST activities are tracked or not.
+ </para>
+
+ <table id="pg-stat-toast-view" xreflabel="pg_stat_toast">
+ <title><structname>pg_stat_toast</structname> View</title>
+ <tgroup cols="1">
+ <thead>
+ <row>
+ <entry role="catalog_table_entry"><para role="column_definition">
+ Column Type
+ </para>
+ <para>
+ Description
+ </para></entry>
+ </row>
+ </thead>
+
+ <tbody>
+ <row>
+ <entry role="catalog_table_entry"><para role="column_definition">
+ <structfield>schemaname</structfield> <type>name</type>
+ </para>
+ <para>
+ Name of the schema the relation is in
+ </para></entry>
+ </row>
+
+ <row>
+ <entry role="catalog_table_entry"><para role="column_definition">
+ <structfield>reloid</structfield> <type>oid</type>
+ </para>
+ <para>
+ OID of the relation
+ </para></entry>
+ </row>
+
+ <row>
+ <entry role="catalog_table_entry"><para role="column_definition">
+ <structfield>attnum</structfield> <type>int</type>
+ </para>
+ <para>
+ Attribute (column) number in the relation
+ </para></entry>
+ </row>
+
+ <row>
+ <entry role="catalog_table_entry"><para role="column_definition">
+ <structfield>relname</structfield> <type>name</type>
+ </para>
+ <para>
+ Name of the relation
+ </para></entry>
+ </row>
+
+ <row>
+ <entry role="catalog_table_entry"><para role="column_definition">
+ <structfield>attname</structfield> <type>name</type>
+ </para>
+ <para>
+ Name of the attribute (column)
+ </para></entry>
+ </row>
+
+ <row>
+ <entry role="catalog_table_entry"><para role="column_definition">
+ <structfield>storagemethod</structfield> <type>char</type>
+ </para>
+ <para>
+ Storage method of the attribute
+ </para></entry>
+ </row>
+
+ <row>
+ <entry role="catalog_table_entry"><para role="column_definition">
+ <structfield>externalized</structfield> <type>bigint</type>
+ </para>
+ <para>
+ Number of times this attribute was externalized (pushed to TOAST relation)
+ </para></entry>
+ </row>
+
+ <row>
+ <entry role="catalog_table_entry"><para role="column_definition">
+ <structfield>compressmethod</structfield> <type>char</type>
+ </para>
+ <para>
+ Compression method of the attribute (empty means default)
+ </para></entry>
+ </row>
+
+ <row>
+ <entry role="catalog_table_entry"><para role="column_definition">
+ <structfield>compressattempts</structfield> <type>bigint</type>
+ </para>
+ <para>
+ Number of times this attribute was compressed
+ </para></entry>
+ </row>
+
+ <row>
+ <entry role="catalog_table_entry"><para role="column_definition">
+ <structfield>compresssuccesses</structfield> <type>bigint</type>
+ </para>
+ <para>
+ Number of times the compression was successful (gained a size reduction)
+ </para></entry>
+ </row>
+
+ <row>
+ <entry role="catalog_table_entry"><para role="column_definition">
+ <structfield>compressedsize</structfield> <type>bigint</type>
+ </para>
+ <para>
+ Total size of all compressed datums
+ </para></entry>
+ </row>
+
+ <row>
+ <entry role="catalog_table_entry"><para role="column_definition">
+ <structfield>originalsize</structfield> <type>bigint</type>
+ </para>
+ <para>
+ Total size of all compressed datums before compression
+ </para></entry>
+ </row>
+
+ <row>
+ <entry role="catalog_table_entry"><para role="column_definition">
+ <structfield>total_time</structfield> <type>double precision</type>
+ </para>
+ <para>
+ Total time spent TOASTing this attribute, in microseconds.
+ </para></entry>
+ </row>
+
+ </tbody>
+ </tgroup>
+ </table>
+
+ </sect2>
+
<sect2 id="monitoring-pg-stat-slru-view">
<title><structname>pg_stat_slru</structname></title>
diff --git a/doc/src/sgml/storage.sgml b/doc/src/sgml/storage.sgml
index 7136bbe7a3..2a47922573 100644
--- a/doc/src/sgml/storage.sgml
+++ b/doc/src/sgml/storage.sgml
@@ -517,6 +517,15 @@ pages). There was no run time difference compared to an un-<acronym>TOAST</acron
comparison table, in which all the HTML pages were cut down to 7 kB to fit.
</para>
+<para>
+When you enable <xref linkend="guc-track-toast"/>, the system view
+<link linkend="monitoring-pg-stat-toast-view"><structname>pg_stat_toast</structname>
+</link> provides details on the number and effect of compression attempts,
+number of externalizations and some more useful information that enables you
+to decide if a different storage method and/or compression method would suit a
+column better.
+</para>
+
</sect2>
<sect2 id="storage-toast-inmemory">
@@ -1068,7 +1077,8 @@ data. Empty in ordinary tables.</entry>
<type>struct varlena</type>, which includes the total length of the stored
value and some flag bits. Depending on the flags, the data can be either
inline or in a <acronym>TOAST</acronym> table;
- it might be compressed, too (see <xref linkend="storage-toast"/>).
+ it might be compressed, too (see <xref linkend="storage-toast"/> and
+ <xref linkend="monitoring-pg-stat-toast-view"/>).
</para>
</sect2>
diff --git a/src/backend/access/table/toast_helper.c b/src/backend/access/table/toast_helper.c
index 013236b73d..f6b06e3329 100644
--- a/src/backend/access/table/toast_helper.c
+++ b/src/backend/access/table/toast_helper.c
@@ -19,6 +19,7 @@
#include "access/toast_helper.h"
#include "access/toast_internals.h"
#include "catalog/pg_type_d.h"
+#include "pgstat.h"
/*
@@ -229,7 +230,9 @@ toast_tuple_try_compression(ToastTupleContext *ttc, int attribute)
Datum *value = &ttc->ttc_values[attribute];
Datum new_value;
ToastAttrInfo *attr = &ttc->ttc_attr[attribute];
+ instr_time start_time;
+ INSTR_TIME_SET_CURRENT(start_time);
new_value = toast_compress_datum(*value, attr->tai_compression);
if (DatumGetPointer(new_value) != NULL)
@@ -239,6 +242,12 @@ toast_tuple_try_compression(ToastTupleContext *ttc, int attribute)
pfree(DatumGetPointer(*value));
*value = new_value;
attr->tai_colflags |= TOASTCOL_NEEDS_FREE;
+ pgstat_report_toast_activity(ttc->ttc_rel->rd_rel->oid, attribute,
+ false,
+ true,
+ attr->tai_size,
+ VARSIZE(DatumGetPointer(*value)),
+ start_time);
attr->tai_size = VARSIZE(DatumGetPointer(*value));
ttc->ttc_flags |= (TOAST_NEEDS_CHANGE | TOAST_NEEDS_FREE);
}
@@ -246,6 +255,12 @@ toast_tuple_try_compression(ToastTupleContext *ttc, int attribute)
{
/* incompressible, ignore on subsequent compression passes */
attr->tai_colflags |= TOASTCOL_INCOMPRESSIBLE;
+ pgstat_report_toast_activity(ttc->ttc_rel->rd_rel->oid, attribute,
+ false,
+ true,
+ 0,
+ 0,
+ start_time);
}
}
@@ -258,6 +273,9 @@ toast_tuple_externalize(ToastTupleContext *ttc, int attribute, int options)
Datum *value = &ttc->ttc_values[attribute];
Datum old_value = *value;
ToastAttrInfo *attr = &ttc->ttc_attr[attribute];
+ instr_time start_time;
+
+ INSTR_TIME_SET_CURRENT(start_time);
attr->tai_colflags |= TOASTCOL_IGNORE;
*value = toast_save_datum(ttc->ttc_rel, old_value, attr->tai_oldexternal,
@@ -266,6 +284,12 @@ toast_tuple_externalize(ToastTupleContext *ttc, int attribute, int options)
pfree(DatumGetPointer(old_value));
attr->tai_colflags |= TOASTCOL_NEEDS_FREE;
ttc->ttc_flags |= (TOAST_NEEDS_CHANGE | TOAST_NEEDS_FREE);
+ pgstat_report_toast_activity(ttc->ttc_rel->rd_rel->oid, attribute,
+ true,
+ false,
+ 0,
+ 0,
+ start_time);
}
/*
diff --git a/src/backend/catalog/system_views.sql b/src/backend/catalog/system_views.sql
index 61b515cdb8..3de3025488 100644
--- a/src/backend/catalog/system_views.sql
+++ b/src/backend/catalog/system_views.sql
@@ -1039,6 +1039,26 @@ CREATE VIEW pg_stat_user_functions AS
WHERE P.prolang != 12 -- fast check to eliminate built-in functions
AND pg_stat_get_function_calls(P.oid) IS NOT NULL;
+
+CREATE OR REPLACE VIEW pg_stat_toast AS
+ SELECT
+ n.nspname AS schemaname,
+ a.attrelid AS reloid,
+ a.attnum AS attnum,
+ c.relname AS relname,
+ a.attname AS attname,
+ attstorage AS storagemethod,
+ pg_stat_get_toast_externalizations(a.attrelid,a.attnum) AS externalized,
+ attcompression AS compressmethod,
+ pg_stat_get_toast_compressions(a.attrelid,a.attnum) AS compressattempts,
+ pg_stat_get_toast_compressionsuccesses(a.attrelid,a.attnum) AS compresssuccesses,
+ pg_stat_get_toast_compressedsizesum(a.attrelid,a.attnum) AS compressedsize,
+ pg_stat_get_toast_originalsizesum(a.attrelid,a.attnum) AS originalsize,
+ pg_stat_get_toast_total_time(a.attrelid,a.attnum) AS total_time
+ FROM pg_attribute a
+ JOIN pg_class c ON c.oid = a.attrelid
+ LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
+ WHERE pg_stat_get_toast_externalizations(a.attrelid,a.attnum) IS NOT NULL;
CREATE VIEW pg_stat_xact_user_functions AS
SELECT
P.oid AS funcid,
diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c
index 7264d2c727..c4ce619257 100644
--- a/src/backend/postmaster/pgstat.c
+++ b/src/backend/postmaster/pgstat.c
@@ -106,6 +106,7 @@
#define PGSTAT_DB_HASH_SIZE 16
#define PGSTAT_TAB_HASH_SIZE 512
#define PGSTAT_FUNCTION_HASH_SIZE 512
+#define PGSTAT_TOAST_HASH_SIZE 64
#define PGSTAT_SUBWORKER_HASH_SIZE 32
#define PGSTAT_REPLSLOT_HASH_SIZE 32
@@ -116,6 +117,7 @@
*/
bool pgstat_track_counts = false;
int pgstat_track_functions = TRACK_FUNC_OFF;
+bool pgstat_track_toast = false;
/* ----------
* Built from GUC parameter
@@ -228,6 +230,19 @@ static HTAB *pgStatFunctions = NULL;
*/
static bool have_function_stats = false;
+/*
+ * Backends store per-toast-column info that's waiting to be sent to the collector
+ * in this hash table (indexed by column's PgStat_BackendAttrIdentifier).
+ */
+static HTAB *pgStatToastActions = NULL;
+
+
+/*
+ * Indicates if backend has some toast stats that it hasn't yet
+ * sent to the collector.
+ */
+static bool have_toast_stats = false;
+
/*
* Tuple insertion/deletion counts for an open transaction can't be propagated
* into PgStat_TableStatus counters until we know if it is going to commit
@@ -328,7 +343,7 @@ static PgStat_StatSubWorkerEntry *pgstat_get_subworker_entry(PgStat_StatDBEntry
static void pgstat_write_statsfiles(bool permanent, bool allDbs);
static void pgstat_write_db_statsfile(PgStat_StatDBEntry *dbentry, bool permanent);
static HTAB *pgstat_read_statsfiles(Oid onlydb, bool permanent, bool deep);
-static void pgstat_read_db_statsfile(Oid databaseid, HTAB *tabhash, HTAB *funchash,
+static void pgstat_read_db_statsfile(Oid databaseid, HTAB *tabhash, HTAB *funchash, HTAB *toasthash,
HTAB *subworkerhash, bool permanent);
static void backend_read_statsfile(void);
@@ -340,6 +355,7 @@ static void pgstat_reset_replslot(PgStat_StatReplSlotEntry *slotstats, Timestamp
static void pgstat_send_tabstat(PgStat_MsgTabstat *tsmsg, TimestampTz now);
static void pgstat_send_funcstats(void);
+static void pgstat_send_toaststats(void);
static void pgstat_send_slru(void);
static void pgstat_send_subscription_purge(PgStat_MsgSubscriptionPurge *msg);
static HTAB *pgstat_collect_oids(Oid catalogid, AttrNumber anum_oid);
@@ -373,6 +389,7 @@ static void pgstat_recv_wal(PgStat_MsgWal *msg, int len);
static void pgstat_recv_slru(PgStat_MsgSLRU *msg, int len);
static void pgstat_recv_funcstat(PgStat_MsgFuncstat *msg, int len);
static void pgstat_recv_funcpurge(PgStat_MsgFuncpurge *msg, int len);
+static void pgstat_recv_toaststat(PgStat_MsgToaststat *msg, int len);
static void pgstat_recv_recoveryconflict(PgStat_MsgRecoveryConflict *msg, int len);
static void pgstat_recv_deadlock(PgStat_MsgDeadlock *msg, int len);
static void pgstat_recv_checksum_failure(PgStat_MsgChecksumFailure *msg, int len);
@@ -891,7 +908,7 @@ pgstat_report_stat(bool disconnect)
pgStatXactCommit == 0 && pgStatXactRollback == 0 &&
pgWalUsage.wal_records == prevWalUsage.wal_records &&
WalStats.m_wal_write == 0 && WalStats.m_wal_sync == 0 &&
- !have_function_stats && !disconnect)
+ !have_function_stats && !have_toast_stats && !disconnect)
return;
/*
@@ -983,6 +1000,9 @@ pgstat_report_stat(bool disconnect)
/* Now, send function statistics */
pgstat_send_funcstats();
+ /* Now, send TOAST statistics */
+ pgstat_send_toaststats();
+
/* Send WAL statistics */
pgstat_send_wal(true);
@@ -1116,6 +1136,64 @@ pgstat_send_funcstats(void)
have_function_stats = false;
}
+/*
+ * Subroutine for pgstat_report_stat: populate and send a toast stat message
+ */
+static void
+pgstat_send_toaststats(void)
+{
+ /* we assume this inits to all zeroes: */
+ static const PgStat_ToastCounts all_zeroes;
+
+ PgStat_MsgToaststat msg;
+ PgStat_BackendToastEntry *entry;
+ HASH_SEQ_STATUS tstat;
+
+ if (pgStatToastActions == NULL)
+ return;
+
+ pgstat_setheader(&msg.m_hdr, PGSTAT_MTYPE_TOASTSTAT);
+ msg.m_databaseid = MyDatabaseId;
+ msg.m_nentries = 0;
+
+ hash_seq_init(&tstat, pgStatToastActions);
+ while ((entry = (PgStat_BackendToastEntry *) hash_seq_search(&tstat)) != NULL)
+ {
+ PgStat_ToastEntry *m_ent;
+
+ /* Skip it if no counts accumulated since last time */
+ if (memcmp(&entry->t_counts, &all_zeroes,
+ sizeof(PgStat_ToastCounts)) == 0)
+ continue;
+
+ /* need to convert format of time accumulators */
+ m_ent = &msg.m_entry[msg.m_nentries];
+ m_ent->attr = entry->attr;
+ m_ent->t_numexternalized = entry->t_counts.t_numexternalized;
+ m_ent->t_numcompressed = entry->t_counts.t_numcompressed;
+ m_ent->t_numcompressionsuccess = entry->t_counts.t_numcompressionsuccess;
+ m_ent->t_size_orig = entry->t_counts.t_size_orig;
+ m_ent->t_size_compressed = entry->t_counts.t_size_compressed;
+ m_ent->t_comp_time = INSTR_TIME_GET_MICROSEC(entry->t_counts.t_comp_time);
+
+ if (++msg.m_nentries >= PGSTAT_NUM_TOASTENTRIES)
+ {
+ pgstat_send(&msg, offsetof(PgStat_MsgToaststat, m_entry[0]) +
+ msg.m_nentries * sizeof(PgStat_ToastEntry));
+ msg.m_nentries = 0;
+ }
+
+ /* reset the entry's counts */
+ MemSet(&entry->t_counts, 0, sizeof(PgStat_ToastCounts));
+ }
+
+ if (msg.m_nentries > 0)
+ pgstat_send(&msg, offsetof(PgStat_MsgToaststat, m_entry[0]) +
+ msg.m_nentries * sizeof(PgStat_ToastEntry));
+
+ have_toast_stats = false;
+}
+
/* ----------
* pgstat_vacuum_stat() -
@@ -2151,6 +2229,75 @@ pgstat_end_function_usage(PgStat_FunctionCallUsage *fcu, bool finalize)
have_function_stats = true;
}
+/*
+ * Report TOAST activity
+ * Called by toast_helper functions.
+ */
+void
+pgstat_report_toast_activity(Oid relid, int attr,
+ bool externalized,
+ bool compressed,
+ int32 old_size,
+ int32 new_size,
+ instr_time start_time)
+{
+ PgStat_BackendAttrIdentifier toastattr = { relid, attr };
+ PgStat_BackendToastEntry *htabent;
+ instr_time time_spent;
+ bool found;
+
+ if (pgStatSock == PGINVALID_SOCKET || !pgstat_track_toast)
+ return;
+
+ INSTR_TIME_SET_CURRENT(time_spent);
+ INSTR_TIME_SUBTRACT(time_spent, start_time);
+
+ if (!pgStatToastActions)
+ {
+ /* First time through - initialize toast stat table */
+ HASHCTL hash_ctl;
+
+ hash_ctl.keysize = sizeof(PgStat_BackendAttrIdentifier);
+ hash_ctl.entrysize = sizeof(PgStat_BackendToastEntry);
+ pgStatToastActions = hash_create("TOAST stat entries",
+ PGSTAT_TOAST_HASH_SIZE,
+ &hash_ctl,
+ HASH_ELEM | HASH_BLOBS);
+ }
+
+ /* Get the stats entry for this TOAST attribute, create if necessary */
+ htabent = hash_search(pgStatToastActions, &toastattr,
+ HASH_ENTER, &found);
+ if (!found)
+ {
+ MemSet(&htabent->t_counts, 0, sizeof(PgStat_ToastCounts));
+ }
+
+ /* update counters */
+ if (externalized)
+ {
+ htabent->t_counts.t_numexternalized++;
+ }
+ if (compressed)
+ {
+ htabent->t_counts.t_numcompressed++;
+ if (new_size)
+ {
+ htabent->t_counts.t_size_orig+=old_size;
+ if (new_size)
+ {
+ htabent->t_counts.t_numcompressionsuccess++;
+ htabent->t_counts.t_size_compressed+=new_size;
+ }
+ }
+ }
+ /* record time spent */
+ INSTR_TIME_ADD(htabent->t_counts.t_comp_time, time_spent);
+
+ /* indicate that we have something to send */
+ have_toast_stats = true;
+}
+
/* ----------
* pgstat_initstats() -
@@ -3028,6 +3175,35 @@ pgstat_fetch_stat_subworker_entry(Oid subid, Oid subrelid)
return wentry;
}
+/* ----------
+ * pgstat_fetch_stat_toastentry() -
+ *
+ * Support function for the SQL-callable pgstat* functions. Returns
+ * the collected statistics for one TOAST attribute or NULL.
+ * ----------
+ */
+PgStat_StatToastEntry *
+pgstat_fetch_stat_toastentry(Oid rel_id, int attr)
+{
+ PgStat_StatDBEntry *dbentry;
+ PgStat_BackendAttrIdentifier toast_id = { rel_id, attr };
+ PgStat_StatToastEntry *toastentry = NULL;
+
+ /* load the stats file if needed */
+ backend_read_statsfile();
+
+ /* Lookup our database, then find the requested TOAST activity stats. */
+ dbentry = pgstat_fetch_stat_dbentry(MyDatabaseId);
+ if (dbentry != NULL && dbentry->toastactivity != NULL)
+ {
+ toastentry = (PgStat_StatToastEntry *) hash_search(dbentry->toastactivity,
+ (void *) &toast_id,
+ HASH_FIND, NULL);
+ }
+
+ return toastentry;
+}
+
/*
* ---------
* pgstat_fetch_stat_archiver() -
@@ -3708,6 +3884,10 @@ PgstatCollectorMain(int argc, char *argv[])
pgstat_recv_funcpurge(&msg.msg_funcpurge, len);
break;
+ case PGSTAT_MTYPE_TOASTSTAT:
+ pgstat_recv_toaststat(&msg.msg_toaststat, len);
+ break;
+
case PGSTAT_MTYPE_RECOVERYCONFLICT:
pgstat_recv_recoveryconflict(&msg.msg_recoveryconflict,
len);
@@ -3852,6 +4032,14 @@ reset_dbentry_counters(PgStat_StatDBEntry *dbentry)
PGSTAT_SUBWORKER_HASH_SIZE,
&hash_ctl,
HASH_ELEM | HASH_BLOBS);
+
+ hash_ctl.keysize = sizeof(PgStat_BackendAttrIdentifier);
+ hash_ctl.entrysize = sizeof(PgStat_StatToastEntry);
+ dbentry->toastactivity = hash_create("Per-database TOAST",
+ PGSTAT_TOAST_HASH_SIZE,
+ &hash_ctl,
+ HASH_ELEM | HASH_BLOBS);
+
}
/*
@@ -4059,8 +4247,8 @@ pgstat_write_statsfiles(bool permanent, bool allDbs)
while ((dbentry = (PgStat_StatDBEntry *) hash_seq_search(&hstat)) != NULL)
{
/*
- * Write out the table, function, and subscription-worker stats for
- * this DB into the appropriate per-DB stat file, if required.
+ * Write out the table, function, TOAST and subscription-worker stats for this DB into the
+ * appropriate per-DB stat file, if required.
*/
if (allDbs || pgstat_db_requested(dbentry->databaseid))
{
@@ -4175,9 +4363,11 @@ pgstat_write_db_statsfile(PgStat_StatDBEntry *dbentry, bool permanent)
HASH_SEQ_STATUS tstat;
HASH_SEQ_STATUS fstat;
HASH_SEQ_STATUS sstat;
+ HASH_SEQ_STATUS ostat;
PgStat_StatTabEntry *tabentry;
PgStat_StatFuncEntry *funcentry;
PgStat_StatSubWorkerEntry *subwentry;
+ PgStat_StatToastEntry *toastentry;
FILE *fpout;
int32 format_id;
Oid dbid = dbentry->databaseid;
@@ -4243,6 +4433,17 @@ pgstat_write_db_statsfile(PgStat_StatDBEntry *dbentry, bool permanent)
(void) rc; /* we'll check for error with ferror */
}
+ /*
+ * Walk through the database's TOAST stats table.
+ */
+ hash_seq_init(&ostat, dbentry->toastactivity);
+ while ((toastentry = (PgStat_StatToastEntry *) hash_seq_search(&ostat)) != NULL)
+ {
+ fputc('O', fpout);
+ rc = fwrite(toastentry, sizeof(PgStat_StatToastEntry), 1, fpout);
+ (void) rc; /* we'll check for error with ferror */
+ }
+
/*
* No more output to be done. Close the temp file and replace the old
* pgstat.stat with it. The ferror() check replaces testing for error
@@ -4483,6 +4684,7 @@ pgstat_read_statsfiles(Oid onlydb, bool permanent, bool deep)
dbentry->tables = NULL;
dbentry->functions = NULL;
dbentry->subworkers = NULL;
+ dbentry->toastactivity = NULL;
/*
* In the collector, disregard the timestamp we read from the
@@ -4528,6 +4730,14 @@ pgstat_read_statsfiles(Oid onlydb, bool permanent, bool deep)
&hash_ctl,
HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+ hash_ctl.keysize = sizeof(PgStat_BackendAttrIdentifier);
+ hash_ctl.entrysize = sizeof(PgStat_StatToastEntry);
+ hash_ctl.hcxt = pgStatLocalContext;
+ dbentry->toastactivity = hash_create("Per-database toast information",
+ PGSTAT_TOAST_HASH_SIZE,
+ &hash_ctl,
+ HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+
/*
* If requested, read the data from the database-specific
* file. Otherwise we just leave the hashtables empty.
@@ -4536,6 +4746,7 @@ pgstat_read_statsfiles(Oid onlydb, bool permanent, bool deep)
pgstat_read_db_statsfile(dbentry->databaseid,
dbentry->tables,
dbentry->functions,
+ dbentry->toastactivity,
dbentry->subworkers,
permanent);
@@ -4620,7 +4831,7 @@ done:
* ----------
*/
static void
-pgstat_read_db_statsfile(Oid databaseid, HTAB *tabhash, HTAB *funchash,
+pgstat_read_db_statsfile(Oid databaseid, HTAB *tabhash, HTAB *funchash, HTAB *toasthash,
HTAB *subworkerhash, bool permanent)
{
PgStat_StatTabEntry *tabentry;
@@ -4629,6 +4840,8 @@ pgstat_read_db_statsfile(Oid databaseid, HTAB *tabhash, HTAB *funchash,
PgStat_StatFuncEntry *funcentry;
PgStat_StatSubWorkerEntry subwbuf;
PgStat_StatSubWorkerEntry *subwentry;
+ PgStat_StatToastEntry toastbuf;
+ PgStat_StatToastEntry *toastentry;
FILE *fpin;
int32 format_id;
bool found;
@@ -4777,6 +4990,32 @@ pgstat_read_db_statsfile(Oid databaseid, HTAB *tabhash, HTAB *funchash,
memcpy(subwentry, &subwbuf, sizeof(subwbuf));
break;
+
+ /*
+ * 'O' A PgStat_StatToastEntry follows (tOast)
+ */
+ case 'O':
+ if (fread(&toastbuf, 1, sizeof(PgStat_StatToastEntry),
+ fpin) != sizeof(PgStat_StatToastEntry))
+ {
+ ereport(pgStatRunningInCollector ? LOG : WARNING,
+ (errmsg("corrupted statistics file \"%s\"",
+ statfile)));
+ goto done;
+ }
+
+ /*
+ * Skip if TOAST data not wanted.
+ */
+ if (toasthash == NULL)
+ break;
+
+ toastentry = (PgStat_StatToastEntry *) hash_search(toasthash,
+ (void *) &toastbuf.t_id,
+ HASH_ENTER, &found);
+ memcpy(toastentry, &toastbuf, sizeof(toastbuf));
+ break;
+
/*
* 'E' The EOF marker of a complete stats file.
*/
@@ -5452,6 +5691,8 @@ pgstat_recv_dropdb(PgStat_MsgDropdb *msg, int len)
hash_destroy(dbentry->functions);
if (dbentry->subworkers != NULL)
hash_destroy(dbentry->subworkers);
+ if (dbentry->toastactivity != NULL)
+ hash_destroy(dbentry->toastactivity);
if (hash_search(pgStatDBHash,
(void *) &dbid,
@@ -5491,10 +5732,12 @@ pgstat_recv_resetcounter(PgStat_MsgResetcounter *msg, int len)
hash_destroy(dbentry->functions);
if (dbentry->subworkers != NULL)
hash_destroy(dbentry->subworkers);
-
+ if (dbentry->toastactivity != NULL)
+ hash_destroy(dbentry->toastactivity);
dbentry->tables = NULL;
dbentry->functions = NULL;
dbentry->subworkers = NULL;
+ dbentry->toastactivity = NULL;
/*
* Reset database-level stats, too. This creates empty hash tables for
@@ -6152,6 +6395,60 @@ pgstat_recv_subscription_purge(PgStat_MsgSubscriptionPurge *msg, int len)
}
}
+/* ----------
+ * pgstat_recv_toaststat() -
+ *
+ * Count what the backend has done.
+ * ----------
+ */
+static void
+pgstat_recv_toaststat(PgStat_MsgToaststat *msg, int len)
+{
+ PgStat_ToastEntry *toastmsg = &(msg->m_entry[0]);
+ PgStat_StatDBEntry *dbentry;
+ PgStat_StatToastEntry *toastentry;
+ int i;
+ bool found;
+
+ dbentry = pgstat_get_db_entry(msg->m_databaseid, true);
+
+ /*
+ * Process all TOAST entries in the message.
+ */
+ for (i = 0; i < msg->m_nentries; i++, toastmsg++)
+ {
+ toastentry = (PgStat_StatToastEntry *) hash_search(dbentry->toastactivity,
+ (void *) &(toastmsg->attr),
+ HASH_ENTER, &found);
+
+ if (!found)
+ {
+ /*
+ * If it's a new entry, initialize counters to the values
+ * we just got.
+ */
+ toastentry->t_numexternalized = toastmsg->t_numexternalized;
+ toastentry->t_numcompressed = toastmsg->t_numcompressed;
+ toastentry->t_numcompressionsuccess = toastmsg->t_numcompressionsuccess;
+ toastentry->t_size_orig = toastmsg->t_size_orig;
+ toastentry->t_size_compressed = toastmsg->t_size_compressed;
+ toastentry->t_comp_time = toastmsg->t_comp_time;
+ }
+ else
+ {
+ /*
+ * Otherwise add the values to the existing entry.
+ */
+ toastentry->t_numexternalized += toastmsg->t_numexternalized;
+ toastentry->t_numcompressed += toastmsg->t_numcompressed;
+ toastentry->t_numcompressionsuccess += toastmsg->t_numcompressionsuccess;
+ toastentry->t_size_orig += toastmsg->t_size_orig;
+ toastentry->t_size_compressed += toastmsg->t_size_compressed;
+ toastentry->t_comp_time += toastmsg->t_comp_time;
+ }
+ }
+}
+
/* ----------
* pgstat_recv_subworker_error() -
*
diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c
index f529c1561a..8b40c43626 100644
--- a/src/backend/utils/adt/pgstatfuncs.c
+++ b/src/backend/utils/adt/pgstatfuncs.c
@@ -410,6 +410,78 @@ pg_stat_get_function_self_time(PG_FUNCTION_ARGS)
PG_RETURN_FLOAT8(((double) funcentry->f_self_time) / 1000.0);
}
+Datum
+pg_stat_get_toast_externalizations(PG_FUNCTION_ARGS)
+{
+ Oid relid = PG_GETARG_OID(0);
+ int attr = PG_GETARG_INT16(1);
+ PgStat_StatToastEntry *toastentry;
+
+ if ((toastentry = pgstat_fetch_stat_toastentry(relid,attr - 1)) == NULL)
+ PG_RETURN_NULL();
+ PG_RETURN_INT64(toastentry->t_numexternalized);
+}
+
+Datum
+pg_stat_get_toast_compressions(PG_FUNCTION_ARGS)
+{
+ Oid relid = PG_GETARG_OID(0);
+ int attr = PG_GETARG_INT16(1);
+ PgStat_StatToastEntry *toastentry;
+
+ if ((toastentry = pgstat_fetch_stat_toastentry(relid,attr - 1)) == NULL)
+ PG_RETURN_NULL();
+ PG_RETURN_INT64(toastentry->t_numcompressed);
+}
+
+Datum
+pg_stat_get_toast_compressionsuccesses(PG_FUNCTION_ARGS)
+{
+ Oid relid = PG_GETARG_OID(0);
+ int attr = PG_GETARG_INT16(1);
+ PgStat_StatToastEntry *toastentry;
+
+ if ((toastentry = pgstat_fetch_stat_toastentry(relid,attr - 1)) == NULL)
+ PG_RETURN_NULL();
+ PG_RETURN_INT64(toastentry->t_numcompressionsuccess);
+}
+
+Datum
+pg_stat_get_toast_originalsizesum(PG_FUNCTION_ARGS)
+{
+ Oid relid = PG_GETARG_OID(0);
+ int attr = PG_GETARG_INT16(1);
+ PgStat_StatToastEntry *toastentry;
+
+ if ((toastentry = pgstat_fetch_stat_toastentry(relid,attr - 1)) == NULL)
+ PG_RETURN_NULL();
+ PG_RETURN_INT64(toastentry->t_size_orig);
+}
+
+Datum
+pg_stat_get_toast_compressedsizesum(PG_FUNCTION_ARGS)
+{
+ Oid relid = PG_GETARG_OID(0);
+ int attr = PG_GETARG_INT16(1);
+ PgStat_StatToastEntry *toastentry;
+
+ if ((toastentry = pgstat_fetch_stat_toastentry(relid,attr - 1)) == NULL)
+ PG_RETURN_NULL();
+ PG_RETURN_INT64(toastentry->t_size_compressed);
+}
+
+Datum
+pg_stat_get_toast_total_time(PG_FUNCTION_ARGS)
+{
+ Oid relid = PG_GETARG_OID(0);
+ int attr = PG_GETARG_INT16(1);
+ PgStat_StatToastEntry *toastentry;
+
+ if ((toastentry = pgstat_fetch_stat_toastentry(relid,attr - 1)) == NULL)
+ PG_RETURN_NULL();
+ PG_RETURN_INT64(toastentry->t_comp_time);
+}
+
Datum
pg_stat_get_backend_idset(PG_FUNCTION_ARGS)
{
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index f9504d3aec..f7ff934d4e 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -1538,6 +1538,15 @@ static struct config_bool ConfigureNamesBool[] =
true,
NULL, NULL, NULL
},
+ {
+ {"track_toast", PGC_SUSET, STATS_COLLECTOR,
+ gettext_noop("Collects statistics on TOAST activity."),
+ NULL
+ },
+ &pgstat_track_toast,
+ false,
+ NULL, NULL, NULL
+ },
{
{"track_io_timing", PGC_SUSET, STATS_COLLECTOR,
gettext_noop("Collects timing statistics for database I/O activity."),
diff --git a/src/backend/utils/misc/postgresql.conf.sample b/src/backend/utils/misc/postgresql.conf.sample
index a1acd46b61..12f4bb1a38 100644
--- a/src/backend/utils/misc/postgresql.conf.sample
+++ b/src/backend/utils/misc/postgresql.conf.sample
@@ -607,6 +607,7 @@
#track_io_timing = off
#track_wal_io_timing = off
#track_functions = none # none, pl, all
+#track_toast = off
#stats_temp_directory = 'pg_stat_tmp'
diff --git a/src/include/catalog/pg_proc.dat b/src/include/catalog/pg_proc.dat
index 4d992dc224..22c34b96a6 100644
--- a/src/include/catalog/pg_proc.dat
+++ b/src/include/catalog/pg_proc.dat
@@ -5668,6 +5668,31 @@
proparallel => 'r', prorettype => 'float8', proargtypes => 'oid',
prosrc => 'pg_stat_get_function_self_time' },
+{ oid => '9700', descr => 'statistics: number of TOAST externalizations',
+ proname => 'pg_stat_get_toast_externalizations', provolatile => 's',
+ proparallel => 'r', prorettype => 'int8', proargtypes => 'oid int4',
+ prosrc => 'pg_stat_get_toast_externalizations' },
+{ oid => '9701', descr => 'statistics: number of TOAST compressions',
+ proname => 'pg_stat_get_toast_compressions', provolatile => 's',
+ proparallel => 'r', prorettype => 'int8', proargtypes => 'oid int4',
+ prosrc => 'pg_stat_get_toast_compressions' },
+{ oid => '9702', descr => 'statistics: number of successful TOAST compressions',
+ proname => 'pg_stat_get_toast_compressionsuccesses', provolatile => 's',
+ proparallel => 'r', prorettype => 'int8', proargtypes => 'oid int4',
+ prosrc => 'pg_stat_get_toast_compressionsuccesses' },
+{ oid => '9703', descr => 'statistics: total original size of compressed TOAST data',
+ proname => 'pg_stat_get_toast_originalsizesum', provolatile => 's',
+ proparallel => 'r', prorettype => 'int8', proargtypes => 'oid int4',
+ prosrc => 'pg_stat_get_toast_originalsizesum' },
+{ oid => '9704', descr => 'statistics: total compressed size of compressed TOAST data',
+ proname => 'pg_stat_get_toast_compressedsizesum', provolatile => 's',
+ proparallel => 'r', prorettype => 'int8', proargtypes => 'oid int4',
+ prosrc => 'pg_stat_get_toast_compressedsizesum' },
+{ oid => '9705', descr => 'statistics: total time spent TOASTing data',
+ proname => 'pg_stat_get_toast_total_time', provolatile => 's',
+ proparallel => 'r', prorettype => 'int8', proargtypes => 'oid int4',
+ prosrc => 'pg_stat_get_toast_total_time' },
+
{ oid => '3037',
descr => 'statistics: number of scans done for table/index in current transaction',
proname => 'pg_stat_get_xact_numscans', provolatile => 'v',
diff --git a/src/include/pgstat.h b/src/include/pgstat.h
index 5b51b58e5a..ff26aec404 100644
--- a/src/include/pgstat.h
+++ b/src/include/pgstat.h
@@ -82,10 +82,11 @@ typedef enum StatMsgType
PGSTAT_MTYPE_DEADLOCK,
PGSTAT_MTYPE_CHECKSUMFAILURE,
PGSTAT_MTYPE_REPLSLOT,
PGSTAT_MTYPE_CONNECT,
PGSTAT_MTYPE_DISCONNECT,
PGSTAT_MTYPE_SUBSCRIPTIONPURGE,
PGSTAT_MTYPE_SUBWORKERERROR,
+ PGSTAT_MTYPE_TOASTSTAT,
} StatMsgType;
/* ----------
@@ -733,6 +735,80 @@ typedef struct PgStat_MsgDisconnect
SessionEndType m_cause;
} PgStat_MsgDisconnect;
+/* ----------
+ * PgStat_BackendAttrIdentifier Identifier for a single attribute/column (OID + attr)
+ * Used as a hashable identifier for (e.g.) TOAST columns
+ * ----------
+ */
+typedef struct PgStat_BackendAttrIdentifier
+{
+ Oid relid;
+ int attr;
+} PgStat_BackendAttrIdentifier;
+
+/* ----------
+ * PgStat_ToastCounts The actual per-TOAST counts kept by a backend
+ *
+ * This struct should contain only actual event counters, because we memcmp
+ * it against zeroes to detect whether there are any counts to transmit.
+ *
+ * Note that the time counters are in instr_time format here. We convert to
+ * microseconds in PgStat_Counter format when transmitting to the collector.
+ * ----------
+ */
+typedef struct PgStat_ToastCounts
+{
+ PgStat_Counter t_numexternalized;
+ PgStat_Counter t_numcompressed;
+ PgStat_Counter t_numcompressionsuccess;
+ uint64 t_size_orig;
+ uint64 t_size_compressed;
+ instr_time t_comp_time;
+} PgStat_ToastCounts;
+
+/* ----------
+ * PgStat_BackendToastEntry Entry in backend's per-toast-attr hash table
+ * ----------
+ */
+typedef struct PgStat_BackendToastEntry
+{
+ PgStat_BackendAttrIdentifier attr;
+ PgStat_ToastCounts t_counts;
+} PgStat_BackendToastEntry;
+
+/* ----------
+ * PgStat_ToastEntry Per-TOAST-column info in a MsgToaststat
+ * ----------
+ */
+typedef struct PgStat_ToastEntry
+{
+ PgStat_BackendAttrIdentifier attr;
+ PgStat_Counter t_numexternalized;
+ PgStat_Counter t_numcompressed;
+ PgStat_Counter t_numcompressionsuccess;
+ uint64 t_size_orig;
+ uint64 t_size_compressed;
+ PgStat_Counter t_comp_time; /* time in microseconds */
+} PgStat_ToastEntry;
+
+/* ----------
+ * PgStat_MsgToaststat Sent by the backend to report TOAST
+ * activity statistics.
+ * ----------
+ */
+#define PGSTAT_NUM_TOASTENTRIES \
+ ((PGSTAT_MSG_PAYLOAD - sizeof(Oid) - sizeof(int)) \
+ / sizeof(PgStat_ToastEntry))
+
+typedef struct PgStat_MsgToaststat
+{
+ PgStat_MsgHdr m_hdr;
+ Oid m_databaseid;
+ int m_nentries;
+ PgStat_ToastEntry m_entry[PGSTAT_NUM_TOASTENTRIES];
+} PgStat_MsgToaststat;
+
+
/* ----------
* PgStat_Msg Union over all possible messages.
* ----------
@@ -760,6 +836,7 @@ typedef union PgStat_Msg
PgStat_MsgSLRU msg_slru;
PgStat_MsgFuncstat msg_funcstat;
PgStat_MsgFuncpurge msg_funcpurge;
+ PgStat_MsgToaststat msg_toaststat;
PgStat_MsgRecoveryConflict msg_recoveryconflict;
PgStat_MsgDeadlock msg_deadlock;
PgStat_MsgTempFile msg_tempfile;
@@ -833,6 +910,7 @@ typedef struct PgStat_StatDBEntry
HTAB *tables;
HTAB *functions;
HTAB *subworkers;
+ HTAB *toastactivity;
} PgStat_StatDBEntry;
@@ -1022,6 +1100,22 @@ typedef struct PgStat_StatSubWorkerEntry
char last_error_message[PGSTAT_SUBWORKERERROR_MSGLEN];
} PgStat_StatSubWorkerEntry;
+/* ----------
+ * PgStat_StatToastEntry The collector's data per TOAST attribute
+ * ----------
+ */
+typedef struct PgStat_StatToastEntry
+{
+ PgStat_BackendAttrIdentifier t_id;
+ PgStat_Counter t_numexternalized;
+ PgStat_Counter t_numcompressed;
+ PgStat_Counter t_numcompressionsuccess;
+ uint64 t_size_orig;
+ uint64 t_size_compressed;
+ PgStat_Counter t_comp_time; /* time in microseconds */
+} PgStat_StatToastEntry;
+
+
/*
* Working state needed to accumulate per-function-call timing statistics.
*/
@@ -1045,6 +1139,7 @@ typedef struct PgStat_FunctionCallUsage
*/
extern PGDLLIMPORT bool pgstat_track_counts;
extern PGDLLIMPORT int pgstat_track_functions;
+extern PGDLLIMPORT bool pgstat_track_toast;
extern char *pgstat_stat_directory;
extern char *pgstat_stat_tmpname;
extern char *pgstat_stat_filename;
@@ -1196,12 +1291,22 @@ extern void pgstat_count_heap_delete(Relation rel);
extern void pgstat_count_truncate(Relation rel);
extern void pgstat_update_heap_dead_tuples(Relation rel, int delta);
+extern void pgstat_count_toast_insert(Relation rel, PgStat_Counter n);
+
struct FunctionCallInfoBaseData;
extern void pgstat_init_function_usage(struct FunctionCallInfoBaseData *fcinfo,
PgStat_FunctionCallUsage *fcu);
extern void pgstat_end_function_usage(PgStat_FunctionCallUsage *fcu,
bool finalize);
+extern void
+pgstat_report_toast_activity(Oid relid, int attr,
+ bool externalized,
+ bool compressed,
+ int32 old_size,
+ int32 new_size,
+ instr_time start_time);
+
extern void AtEOXact_PgStat(bool isCommit, bool parallel);
extern void AtEOSubXact_PgStat(bool isCommit, int nestDepth);
@@ -1228,9 +1333,11 @@ extern PgStat_StatTabEntry *pgstat_fetch_stat_tabentry(Oid relid);
extern PgStat_StatFuncEntry *pgstat_fetch_stat_funcentry(Oid funcid);
extern PgStat_StatSubWorkerEntry *pgstat_fetch_stat_subworker_entry(Oid subid,
Oid subrelid);
+extern PgStat_StatToastEntry *pgstat_fetch_stat_toastentry(Oid rel_id, int attr);
extern PgStat_ArchiverStats *pgstat_fetch_stat_archiver(void);
extern PgStat_BgWriterStats *pgstat_fetch_stat_bgwriter(void);
extern PgStat_CheckpointerStats *pgstat_fetch_stat_checkpointer(void);
+
extern PgStat_GlobalStats *pgstat_fetch_global(void);
extern PgStat_WalStats *pgstat_fetch_stat_wal(void);
extern PgStat_SLRUStats *pgstat_fetch_slru(void);
diff --git a/src/test/regress/expected/rules.out b/src/test/regress/expected/rules.out
index b58b062b10..2b49bebcb8 100644
--- a/src/test/regress/expected/rules.out
+++ b/src/test/regress/expected/rules.out
@@ -2147,6 +2147,23 @@ pg_stat_sys_tables| SELECT pg_stat_all_tables.relid,
pg_stat_all_tables.autoanalyze_count
FROM pg_stat_all_tables
WHERE ((pg_stat_all_tables.schemaname = ANY (ARRAY['pg_catalog'::name, 'information_schema'::name])) OR (pg_stat_all_tables.schemaname ~ '^pg_toast'::text));
+pg_stat_toast| SELECT n.nspname AS schemaname,
+ a.attrelid AS reloid,
+ a.attnum,
+ c.relname,
+ a.attname,
+ a.attstorage AS storagemethod,
+ pg_stat_get_toast_externalizations(a.attrelid, (a.attnum)::integer) AS externalized,
+ a.attcompression AS compressmethod,
+ pg_stat_get_toast_compressions(a.attrelid, (a.attnum)::integer) AS compressattempts,
+ pg_stat_get_toast_compressionsuccesses(a.attrelid, (a.attnum)::integer) AS compresssuccesses,
+ pg_stat_get_toast_compressedsizesum(a.attrelid, (a.attnum)::integer) AS compressedsize,
+ pg_stat_get_toast_originalsizesum(a.attrelid, (a.attnum)::integer) AS originalsize,
+ pg_stat_get_toast_total_time(a.attrelid, (a.attnum)::integer) AS total_time
+ FROM ((pg_attribute a
+ JOIN pg_class c ON ((c.oid = a.attrelid)))
+ LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace)))
+ WHERE (pg_stat_get_toast_externalizations(a.attrelid, (a.attnum)::integer) IS NOT NULL);
pg_stat_user_functions| SELECT p.oid AS funcid,
n.nspname AS schemaname,
p.proname AS funcname,
diff --git a/src/test/regress/expected/track_toast.out b/src/test/regress/expected/track_toast.out
new file mode 100644
index 0000000000..8e9815fbb8
--- /dev/null
+++ b/src/test/regress/expected/track_toast.out
@@ -0,0 +1,102 @@
+SHOW track_toast;
+ track_toast
+-------------
+ off
+(1 row)
+
+SET track_toast TO on;
+SHOW track_toast;
+ track_toast
+-------------
+ on
+(1 row)
+
+TABLE pg_stat_toast; -- view exists
+ schemaname | reloid | attnum | relname | attname | storagemethod | externalized | compressmethod | compressattempts | compresssuccesses | compressedsize | originalsize | total_time
+------------+--------+--------+---------+---------+---------------+--------------+----------------+------------------+-------------------+----------------+--------------+------------
+(0 rows)
+
+-- function to wait for counters to advance
+create function wait_for_stats() returns void as $$
+declare
+ start_time timestamptz := clock_timestamp();
+ updated1 bool;
+begin
+ -- we don't want to wait forever; loop will exit after 30 seconds
+ for i in 1 .. 300 loop
+
+ -- check to see if the insert has been counted
+ SELECT (st.n_tup_ins > 0) INTO updated1
+ FROM pg_stat_user_tables AS st
+ WHERE st.relname='toast_test';
+
+ exit when updated1;
+
+ -- wait a little
+ perform pg_sleep_for('100 milliseconds');
+
+ -- reset stats snapshot so we can test again
+ perform pg_stat_clear_snapshot();
+
+ end loop;
+
+ -- report time waited in postmaster log (where it won't change test output)
+ raise log 'wait_for_stats delayed % seconds',
+ extract(epoch from clock_timestamp() - start_time);
+end
+$$ language plpgsql;
+CREATE TABLE toast_test (cola TEXT, colb TEXT COMPRESSION lz4, colc TEXT , cold TEXT, cole TEXT);
+ALTER TABLE toast_test ALTER colc SET STORAGE EXTERNAL;
+ALTER TABLE toast_test ALTER cold SET STORAGE MAIN;
+ALTER TABLE toast_test ALTER cole SET STORAGE PLAIN;
+INSERT INTO toast_test VALUES (repeat(md5('a'),100), repeat(md5('a'),100), repeat(md5('a'),100), repeat(md5('a'),100), repeat(md5('a'),100) );
+-- We can't just call wait_for_stats() at this point, because we only
+-- transmit stats when the session goes idle, and we probably didn't
+-- transmit the last couple of counts yet thanks to the rate-limiting logic
+-- in pgstat_report_stat(). But instead of waiting for the rate limiter's
+-- timeout to elapse, let's just start a new session. The old one will
+-- then send its stats before dying.
+\c -
+SELECT wait_for_stats();
+ wait_for_stats
+----------------
+
+(1 row)
+
+SELECT attname
+ ,storagemethod
+ ,externalized
+ ,compressmethod
+ ,compressattempts
+ ,compresssuccesses
+ ,compressedsize < originalsize AS compression_works
+ , total_time > 0 AS takes_time
+FROM pg_stat_toast WHERE relname = 'toast_test' ORDER BY attname;
+ attname | storagemethod | externalized | compressmethod | compressattempts | compresssuccesses | compression_works | takes_time
+---------+---------------+--------------+----------------+------------------+-------------------+-------------------+------------
+ cola | x | 1 | | 1 | 1 | t | t
+ colb | x | 1 | l | 1 | 1 | t | t
+ colc | e | 1 | | 0 | 0 | f | t
+ cold | m | 0 | | 1 | 1 | t | t
+(4 rows)
+
+SELECT compressattempts=0 AS external_doesnt_compress FROM pg_stat_toast WHERE relname = 'toast_test' AND storagemethod = 'e';
+ external_doesnt_compress
+--------------------------
+ t
+(1 row)
+
+SELECT externalized=0 AS main_doesnt_externalize FROM pg_stat_toast WHERE relname = 'toast_test' AND storagemethod = 'm';
+ main_doesnt_externalize
+-------------------------
+ t
+(1 row)
+
+DROP TABLE toast_test;
+SELECT count(*) FROM pg_stat_toast WHERE relname = 'toast_test';
+ count
+-------
+ 0
+(1 row)
+
+DROP FUNCTION wait_for_stats();
diff --git a/src/test/regress/parallel_schedule b/src/test/regress/parallel_schedule
index 5b0c73d7e3..ec5fa7c562 100644
--- a/src/test/regress/parallel_schedule
+++ b/src/test/regress/parallel_schedule
@@ -124,7 +124,7 @@ test: plancache limit plpgsql copy2 temp domain rangefuncs prepare conversion tr
# ----------
# Another group of parallel tests
# ----------
-test: partition_join partition_prune reloptions hash_part indexing partition_aggregate partition_info tuplesort explain compression memoize
+test: partition_join partition_prune reloptions hash_part indexing partition_aggregate partition_info tuplesort explain compression memoize track_toast
# event triggers cannot run concurrently with any test that runs DDL
# oidjoins is read-only, though, and should run late for best coverage
diff --git a/src/test/regress/sql/track_toast.sql b/src/test/regress/sql/track_toast.sql
new file mode 100644
index 0000000000..7096719ae8
--- /dev/null
+++ b/src/test/regress/sql/track_toast.sql
@@ -0,0 +1,64 @@
+SHOW track_toast;
+SET track_toast TO on;
+SHOW track_toast;
+TABLE pg_stat_toast; -- view exists
+
+-- function to wait for counters to advance
+create function wait_for_stats() returns void as $$
+declare
+ start_time timestamptz := clock_timestamp();
+ updated1 bool;
+begin
+ -- we don't want to wait forever; loop will exit after 30 seconds
+ for i in 1 .. 300 loop
+
+ -- check to see if the insert has been counted
+ SELECT (st.n_tup_ins > 0) INTO updated1
+ FROM pg_stat_user_tables AS st
+ WHERE st.relname='toast_test';
+
+ exit when updated1;
+
+ -- wait a little
+ perform pg_sleep_for('100 milliseconds');
+
+ -- reset stats snapshot so we can test again
+ perform pg_stat_clear_snapshot();
+
+ end loop;
+
+ -- report time waited in postmaster log (where it won't change test output)
+ raise log 'wait_for_stats delayed % seconds',
+ extract(epoch from clock_timestamp() - start_time);
+end
+$$ language plpgsql;
+
+CREATE TABLE toast_test (cola TEXT, colb TEXT COMPRESSION lz4, colc TEXT , cold TEXT, cole TEXT);
+ALTER TABLE toast_test ALTER colc SET STORAGE EXTERNAL;
+ALTER TABLE toast_test ALTER cold SET STORAGE MAIN;
+ALTER TABLE toast_test ALTER cole SET STORAGE PLAIN;
+INSERT INTO toast_test VALUES (repeat(md5('a'),100), repeat(md5('a'),100), repeat(md5('a'),100), repeat(md5('a'),100), repeat(md5('a'),100) );
+
+-- We can't just call wait_for_stats() at this point, because we only
+-- transmit stats when the session goes idle, and we probably didn't
+-- transmit the last couple of counts yet thanks to the rate-limiting logic
+-- in pgstat_report_stat(). But instead of waiting for the rate limiter's
+-- timeout to elapse, let's just start a new session. The old one will
+-- then send its stats before dying.
+\c -
+SELECT wait_for_stats();
+
+SELECT attname
+ ,storagemethod
+ ,externalized
+ ,compressmethod
+ ,compressattempts
+ ,compresssuccesses
+ ,compressedsize < originalsize AS compression_works
+ , total_time > 0 AS takes_time
+FROM pg_stat_toast WHERE relname = 'toast_test' ORDER BY attname;
+SELECT compressattempts=0 AS external_doesnt_compress FROM pg_stat_toast WHERE relname = 'toast_test' AND storagemethod = 'e';
+SELECT externalized=0 AS main_doesnt_externalize FROM pg_stat_toast WHERE relname = 'toast_test' AND storagemethod = 'm';
+DROP TABLE toast_test;
+SELECT count(*) FROM pg_stat_toast WHERE relname = 'toast_test';
+DROP FUNCTION wait_for_stats();
--
2.32.0