Here's the diff from a pgindent run. The results look kosher to me - I
had to do a little surgery on queryjumble.h because it had an unused
typedef.
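
For context, the queryjumble.h surgery is visible in the hunk further
down; it just drops the unused typedef name and keeps a plain enum.
A minimal before/after sketch of that change:

	#if 0
	/* before: the typedef name ComputeQueryIdType was never used */
	typedef enum
	{
		COMPUTE_QUERY_ID_OFF,
		COMPUTE_QUERY_ID_ON,
		COMPUTE_QUERY_ID_AUTO
	} ComputeQueryIdType;
	#else
	/* after: plain enum; the GUC stays "extern int compute_query_id" */
	enum ComputeQueryIdType
	{
		COMPUTE_QUERY_ID_OFF,
		COMPUTE_QUERY_ID_ON,
		COMPUTE_QUERY_ID_AUTO
	};
	#endif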


cheers


andrew

--
Andrew Dunstan
EDB: https://www.enterprisedb.com

diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index ffc89685bf..d34edb4190 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -410,8 +410,8 @@ RelationGetBufferForTuple(Relation relation, Size len,
 	}
 
 	/*
-	 * If the FSM knows nothing of the rel, try the last page before we
-	 * give up and extend.  This avoids one-tuple-per-page syndrome during
+	 * If the FSM knows nothing of the rel, try the last page before we give
+	 * up and extend.  This avoids one-tuple-per-page syndrome during
 	 * bootstrapping or in a recently-started system.
 	 */
 	if (targetBlock == InvalidBlockNumber)
diff --git a/src/backend/catalog/genbki.pl b/src/backend/catalog/genbki.pl
index 81363a0710..f023cb1209 100644
--- a/src/backend/catalog/genbki.pl
+++ b/src/backend/catalog/genbki.pl
@@ -890,11 +890,11 @@ sub morph_row_for_pgattr
 	# Copy the type data from pg_type, and add some type-dependent items
 	my $type = $types{$atttype};
 
-	$row->{atttypid}   = $type->{oid};
-	$row->{attlen}     = $type->{typlen};
-	$row->{attbyval}   = $type->{typbyval};
-	$row->{attalign}   = $type->{typalign};
-	$row->{attstorage} = $type->{typstorage};
+	$row->{atttypid}       = $type->{oid};
+	$row->{attlen}         = $type->{typlen};
+	$row->{attbyval}       = $type->{typbyval};
+	$row->{attalign}       = $type->{typalign};
+	$row->{attstorage}     = $type->{typstorage};
 	$row->{attcompression} = '\0';
 
 	# set attndims if it's an array type
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index 1293dc04ca..09370a8a5a 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -2294,7 +2294,7 @@ StoreAttrDefault(Relation rel, AttrNumber attnum,
 		valuesAtt[Anum_pg_attribute_atthasdef - 1] = true;
 		replacesAtt[Anum_pg_attribute_atthasdef - 1] = true;
 
-		if (rel->rd_rel->relkind == RELKIND_RELATION  && add_column_mode &&
+		if (rel->rd_rel->relkind == RELKIND_RELATION && add_column_mode &&
 			!attgenerated)
 		{
 			expr2 = expression_planner(expr2);
diff --git a/src/backend/commands/policy.c b/src/backend/commands/policy.c
index fc27fd013e..cebf6bcd0f 100644
--- a/src/backend/commands/policy.c
+++ b/src/backend/commands/policy.c
@@ -587,65 +587,65 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid classid, Oid policy_id)
 		/* If any roles remain, update the policy entry. */
 		if (num_roles > 0)
 		{
-		/* This is the array for the new tuple */
-		role_ids = construct_array(role_oids, num_roles, OIDOID,
-								   sizeof(Oid), true, TYPALIGN_INT);
+			/* This is the array for the new tuple */
+			role_ids = construct_array(role_oids, num_roles, OIDOID,
+									   sizeof(Oid), true, TYPALIGN_INT);
 
-		replaces[Anum_pg_policy_polroles - 1] = true;
-		values[Anum_pg_policy_polroles - 1] = PointerGetDatum(role_ids);
+			replaces[Anum_pg_policy_polroles - 1] = true;
+			values[Anum_pg_policy_polroles - 1] = PointerGetDatum(role_ids);
 
-		new_tuple = heap_modify_tuple(tuple,
-									  RelationGetDescr(pg_policy_rel),
-									  values, isnull, replaces);
-		CatalogTupleUpdate(pg_policy_rel, &new_tuple->t_self, new_tuple);
+			new_tuple = heap_modify_tuple(tuple,
+										  RelationGetDescr(pg_policy_rel),
+										  values, isnull, replaces);
+			CatalogTupleUpdate(pg_policy_rel, &new_tuple->t_self, new_tuple);
 
-		/* Remove all old dependencies. */
-		deleteDependencyRecordsFor(PolicyRelationId, policy_id, false);
+			/* Remove all old dependencies. */
+			deleteDependencyRecordsFor(PolicyRelationId, policy_id, false);
 
-		/* Record the new set of dependencies */
-		target.classId = RelationRelationId;
-		target.objectId = relid;
-		target.objectSubId = 0;
+			/* Record the new set of dependencies */
+			target.classId = RelationRelationId;
+			target.objectId = relid;
+			target.objectSubId = 0;
 
-		myself.classId = PolicyRelationId;
-		myself.objectId = policy_id;
-		myself.objectSubId = 0;
+			myself.classId = PolicyRelationId;
+			myself.objectId = policy_id;
+			myself.objectSubId = 0;
 
-		recordDependencyOn(&myself, &target, DEPENDENCY_AUTO);
+			recordDependencyOn(&myself, &target, DEPENDENCY_AUTO);
 
-		if (qual_expr)
-			recordDependencyOnExpr(&myself, qual_expr, qual_parse_rtable,
-								   DEPENDENCY_NORMAL);
+			if (qual_expr)
+				recordDependencyOnExpr(&myself, qual_expr, qual_parse_rtable,
+									   DEPENDENCY_NORMAL);
 
-		if (with_check_qual)
-			recordDependencyOnExpr(&myself, with_check_qual,
-								   with_check_parse_rtable,
-								   DEPENDENCY_NORMAL);
+			if (with_check_qual)
+				recordDependencyOnExpr(&myself, with_check_qual,
+									   with_check_parse_rtable,
+									   DEPENDENCY_NORMAL);
 
-		/* Remove all the old shared dependencies (roles) */
-		deleteSharedDependencyRecordsFor(PolicyRelationId, policy_id, 0);
+			/* Remove all the old shared dependencies (roles) */
+			deleteSharedDependencyRecordsFor(PolicyRelationId, policy_id, 0);
 
-		/* Record the new shared dependencies (roles) */
-		target.classId = AuthIdRelationId;
-		target.objectSubId = 0;
-		for (i = 0; i < num_roles; i++)
-		{
-			target.objectId = DatumGetObjectId(role_oids[i]);
-			/* no need for dependency on the public role */
-			if (target.objectId != ACL_ID_PUBLIC)
-				recordSharedDependencyOn(&myself, &target,
-										 SHARED_DEPENDENCY_POLICY);
-		}
+			/* Record the new shared dependencies (roles) */
+			target.classId = AuthIdRelationId;
+			target.objectSubId = 0;
+			for (i = 0; i < num_roles; i++)
+			{
+				target.objectId = DatumGetObjectId(role_oids[i]);
+				/* no need for dependency on the public role */
+				if (target.objectId != ACL_ID_PUBLIC)
+					recordSharedDependencyOn(&myself, &target,
+											 SHARED_DEPENDENCY_POLICY);
+			}
 
-		InvokeObjectPostAlterHook(PolicyRelationId, policy_id, 0);
+			InvokeObjectPostAlterHook(PolicyRelationId, policy_id, 0);
 
-		heap_freetuple(new_tuple);
+			heap_freetuple(new_tuple);
 
-		/* Make updates visible */
-		CommandCounterIncrement();
+			/* Make updates visible */
+			CommandCounterIncrement();
 
-		/* Invalidate Relation Cache */
-		CacheInvalidateRelcache(rel);
+			/* Invalidate Relation Cache */
+			CacheInvalidateRelcache(rel);
 		}
 		else
 		{
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index 143517bc76..c24684aa6f 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -704,16 +704,16 @@ ExecInsert(ModifyTableState *mtstate,
 			}
 
 			/*
-			 * Initialize the batch slots. We don't know how many slots will be
-			 * needed, so we initialize them as the batch grows, and we keep
-			 * them across batches. To mitigate an inefficiency in how resource
-			 * owner handles objects with many references (as with many slots
-			 * all referencing the same tuple descriptor) we copy the tuple
-			 * descriptor for each slot.
+			 * Initialize the batch slots. We don't know how many slots will
+			 * be needed, so we initialize them as the batch grows, and we
+			 * keep them across batches. To mitigate an inefficiency in how
+			 * resource owner handles objects with many references (as with
+			 * many slots all referencing the same tuple descriptor) we copy
+			 * the tuple descriptor for each slot.
 			 */
 			if (resultRelInfo->ri_NumSlots >= resultRelInfo->ri_NumSlotsInitialized)
 			{
-				TupleDesc tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
+				TupleDesc	tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
 
 				resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
 					MakeSingleTupleTableSlot(tdesc, slot->tts_ops);
@@ -3173,7 +3173,7 @@ ExecEndModifyTable(ModifyTableState *node)
 	 */
 	for (i = 0; i < node->mt_nrels; i++)
 	{
-		int j;
+		int			j;
 		ResultRelInfo *resultRelInfo = node->resultRelInfo + i;
 
 		if (!resultRelInfo->ri_usesFdwDirectModify &&
@@ -3183,8 +3183,9 @@ ExecEndModifyTable(ModifyTableState *node)
 														   resultRelInfo);
 
 		/*
-		 * Cleanup the initialized batch slots. This only matters for FDWs with
-		 * batching, but the other cases will have ri_NumSlotsInitialized == 0.
+		 * Cleanup the initialized batch slots. This only matters for FDWs
+		 * with batching, but the other cases will have ri_NumSlotsInitialized
+		 * == 0.
 		 */
 		for (j = 0; j < resultRelInfo->ri_NumSlotsInitialized; j++)
 		{
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
index 19e96f3fd9..ad1c2bad01 100644
--- a/src/backend/replication/logical/reorderbuffer.c
+++ b/src/backend/replication/logical/reorderbuffer.c
@@ -2215,8 +2215,8 @@ ReorderBufferProcessTXN(ReorderBuffer *rb, ReorderBufferTXN *txn,
 			change_done:
 
 					/*
-					 * If speculative insertion was confirmed, the record isn't
-					 * needed anymore.
+					 * If speculative insertion was confirmed, the record
+					 * isn't needed anymore.
 					 */
 					if (specinsert != NULL)
 					{
diff --git a/src/backend/replication/logical/tablesync.c b/src/backend/replication/logical/tablesync.c
index cc50eb875b..682c107e74 100644
--- a/src/backend/replication/logical/tablesync.c
+++ b/src/backend/replication/logical/tablesync.c
@@ -759,7 +759,7 @@ fetch_remote_table_info(char *nspname, char *relname,
 					 " ORDER BY a.attnum",
 					 lrel->remoteid,
 					 (walrcv_server_version(LogRepWorkerWalRcvConn) >= 120000 ?
-						 "AND a.attgenerated = ''" : ""),
+					  "AND a.attgenerated = ''" : ""),
 					 lrel->remoteid);
 	res = walrcv_exec(LogRepWorkerWalRcvConn, cmd.data,
 					  lengthof(attrRow), attrRow);
diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c
index 63f108f960..abd5217ab1 100644
--- a/src/backend/replication/pgoutput/pgoutput.c
+++ b/src/backend/replication/pgoutput/pgoutput.c
@@ -1031,7 +1031,8 @@ get_rel_sync_entry(PGOutputData *data, Oid relid)
 		entry->pubactions.pubinsert = entry->pubactions.pubupdate =
 			entry->pubactions.pubdelete = entry->pubactions.pubtruncate = false;
 		entry->publish_as_relid = InvalidOid;
-		entry->map = NULL;	/* will be set by maybe_send_schema() if needed */
+		entry->map = NULL;		/* will be set by maybe_send_schema() if
+								 * needed */
 	}
 
 	/* Validate the entry */
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index e4c008e443..793df973b4 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -1974,7 +1974,7 @@ GetOldestNonRemovableTransactionId(Relation rel)
 	if (rel == NULL || rel->rd_rel->relisshared || RecoveryInProgress())
 		return horizons.shared_oldest_nonremovable;
 	else if (IsCatalogRelation(rel) ||
-		 RelationIsAccessibleInLogicalDecoding(rel))
+			 RelationIsAccessibleInLogicalDecoding(rel))
 		return horizons.catalog_oldest_nonremovable;
 	else if (RELATION_IS_LOCAL(rel))
 		return horizons.temp_oldest_nonremovable;
diff --git a/src/bin/pgbench/t/001_pgbench_with_server.pl b/src/bin/pgbench/t/001_pgbench_with_server.pl
index 7f473018cc..c40dc58834 100644
--- a/src/bin/pgbench/t/001_pgbench_with_server.pl
+++ b/src/bin/pgbench/t/001_pgbench_with_server.pl
@@ -1093,7 +1093,7 @@ SELECT LEAST(} . join(', ', (':i') x 256) . q{)}
 	],
 	[ 'gset alone', 1, [qr{gset must follow an SQL command}], q{\gset} ],
 	[
-		'gset no SQL',                        1,
+		'gset no SQL',                         1,
 		[qr{gset must follow an SQL command}], q{\set i +1
 \gset}
 	],
@@ -1102,7 +1102,7 @@ SELECT LEAST(} . join(', ', (':i') x 256) . q{)}
 		[qr{too many arguments}],  q{SELECT 1 \gset a b}
 	],
 	[
-		'gset after gset',                    1,
+		'gset after gset',                     1,
 		[qr{gset must follow an SQL command}], q{SELECT 1 AS i \gset
 \gset}
 	],
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index 9a5ca7b3db..0ec5509e7e 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -462,7 +462,7 @@ typedef struct ResultRelInfo
 
 	/* batch insert stuff */
 	int			ri_NumSlots;	/* number of slots in the array */
-	int			ri_NumSlotsInitialized;	/* number of initialized slots */
+	int			ri_NumSlotsInitialized; /* number of initialized slots */
 	int			ri_BatchSize;	/* max slots inserted in a single batch */
 	TupleTableSlot **ri_Slots;	/* input tuples for batch insert */
 	TupleTableSlot **ri_PlanSlots;
diff --git a/src/include/utils/queryjumble.h b/src/include/utils/queryjumble.h
index 1f4d062bab..7af6652f3e 100644
--- a/src/include/utils/queryjumble.h
+++ b/src/include/utils/queryjumble.h
@@ -53,12 +53,12 @@ typedef struct JumbleState
 } JumbleState;
 
 /* Values for the compute_query_id GUC */
-typedef enum
+enum ComputeQueryIdType
 {
 	COMPUTE_QUERY_ID_OFF,
 	COMPUTE_QUERY_ID_ON,
 	COMPUTE_QUERY_ID_AUTO
-} ComputeQueryIdType;
+};
 
 /* GUC parameters */
 extern int	compute_query_id;
diff --git a/src/test/perl/PostgresNode.pm b/src/test/perl/PostgresNode.pm
index 0528ae147b..0fbb665be1 100644
--- a/src/test/perl/PostgresNode.pm
+++ b/src/test/perl/PostgresNode.pm
@@ -539,7 +539,7 @@ sub adjust_conf
 	my $conffile = $self->data_dir . '/' . $filename;
 
 	my $contents = TestLib::slurp_file($conffile);
-	my @lines    = split(/\n/, $contents);
+	my @lines = split(/\n/, $contents);
 	my @result;
 	my $eq = $skip_equals ? '' : '= ';
 	foreach my $line (@lines)
@@ -827,8 +827,10 @@ sub start
 	# sub init) so that it does not get copied to standbys.
 	# -w is now the default but having it here does no harm and helps
 	# compatibility with older versions.
-	$ret = TestLib::system_log('pg_ctl', '-w', '-D', $self->data_dir, '-l',
-		$self->logfile, '-o', "--cluster-name=$name", 'start');
+	$ret = TestLib::system_log(
+		'pg_ctl', '-w',           '-D', $self->data_dir,
+		'-l',     $self->logfile, '-o', "--cluster-name=$name",
+		'start');
 
 	if ($ret != 0)
 	{
@@ -2161,8 +2163,8 @@ sub poll_query_until
 	$expected = 't' unless defined($expected);    # default value
 
 	my $cmd = [
-		$self->installed_command('psql'),
-		'-XAt', '-d', $self->connstr($dbname)
+		$self->installed_command('psql'), '-XAt',
+		'-d',                             $self->connstr($dbname)
 	];
 	my ($stdout, $stderr);
 	my $max_attempts = 180 * 10;
diff --git a/src/test/perl/PostgresVersion.pm b/src/test/perl/PostgresVersion.pm
index 5ff701ce11..71af44fa94 100644
--- a/src/test/perl/PostgresVersion.pm
+++ b/src/test/perl/PostgresVersion.pm
@@ -151,14 +151,14 @@ a dot unless the separator argument is given.
 
 sub major
 {
-    my ($self, %params) = @_;
-    my $result = $self->{num}->[0];
-    if ($result + 0 < 10)
-    {
-        my $sep = $params{separator} || '.';
-        $result .= "$sep$self->{num}->[1]";
-    }
-    return $result;
+	my ($self, %params) = @_;
+	my $result = $self->{num}->[0];
+	if ($result + 0 < 10)
+	{
+		my $sep = $params{separator} || '.';
+		$result .= "$sep$self->{num}->[1]";
+	}
+	return $result;
 }
 
 1;
diff --git a/src/test/recovery/t/005_replay_delay.pl b/src/test/recovery/t/005_replay_delay.pl
index 0bb67a935e..0b56380e0a 100644
--- a/src/test/recovery/t/005_replay_delay.pl
+++ b/src/test/recovery/t/005_replay_delay.pl
@@ -64,9 +64,10 @@ $node_standby2->init_from_backup($node_primary, $backup_name,
 $node_standby2->start;
 
 # Recovery is not yet paused.
-is($node_standby2->safe_psql('postgres',
-	"SELECT pg_get_wal_replay_pause_state()"),
-	'not paused', 'pg_get_wal_replay_pause_state() reports not paused');
+is( $node_standby2->safe_psql(
+		'postgres', "SELECT pg_get_wal_replay_pause_state()"),
+	'not paused',
+	'pg_get_wal_replay_pause_state() reports not paused');
 
 # Request to pause recovery and wait until it's actually paused.
 $node_standby2->safe_psql('postgres', "SELECT pg_wal_replay_pause()");
@@ -74,28 +75,28 @@ $node_primary->safe_psql('postgres',
 	"INSERT INTO tab_int VALUES (generate_series(21,30))");
 $node_standby2->poll_query_until('postgres',
 	"SELECT pg_get_wal_replay_pause_state() = 'paused'")
-	or die "Timed out while waiting for recovery to be paused";
+  or die "Timed out while waiting for recovery to be paused";
 
 # Even if new WAL records are streamed from the primary,
 # recovery in the paused state doesn't replay them.
-my $receive_lsn = $node_standby2->safe_psql('postgres',
-	"SELECT pg_last_wal_receive_lsn()");
-my $replay_lsn = $node_standby2->safe_psql('postgres',
-	"SELECT pg_last_wal_replay_lsn()");
+my $receive_lsn =
+  $node_standby2->safe_psql('postgres', "SELECT pg_last_wal_receive_lsn()");
+my $replay_lsn =
+  $node_standby2->safe_psql('postgres', "SELECT pg_last_wal_replay_lsn()");
 $node_primary->safe_psql('postgres',
 	"INSERT INTO tab_int VALUES (generate_series(31,40))");
 $node_standby2->poll_query_until('postgres',
 	"SELECT '$receive_lsn'::pg_lsn < pg_last_wal_receive_lsn()")
-	or die "Timed out while waiting for new WAL to be streamed";
-is($node_standby2->safe_psql('postgres',
-	"SELECT pg_last_wal_replay_lsn()"),
-	qq($replay_lsn), 'no WAL is replayed in the paused state');
+  or die "Timed out while waiting for new WAL to be streamed";
+is( $node_standby2->safe_psql('postgres', "SELECT pg_last_wal_replay_lsn()"),
+	qq($replay_lsn),
+	'no WAL is replayed in the paused state');
 
 # Request to resume recovery and wait until it's actually resumed.
 $node_standby2->safe_psql('postgres', "SELECT pg_wal_replay_resume()");
 $node_standby2->poll_query_until('postgres',
-	"SELECT pg_get_wal_replay_pause_state() = 'not paused' AND pg_last_wal_replay_lsn() > '$replay_lsn'::pg_lsn")
-	or die "Timed out while waiting for recovery to be resumed";
+	"SELECT pg_get_wal_replay_pause_state() = 'not paused' AND pg_last_wal_replay_lsn() > '$replay_lsn'::pg_lsn"
+) or die "Timed out while waiting for recovery to be resumed";
 
 # Check that the paused state ends and promotion continues if a promotion
 # is triggered while recovery is paused.
@@ -107,6 +108,5 @@ $node_standby2->poll_query_until('postgres',
   or die "Timed out while waiting for recovery to be paused";
 
 $node_standby2->promote;
-$node_standby2->poll_query_until('postgres',
-	"SELECT NOT pg_is_in_recovery()")
+$node_standby2->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery()")
   or die "Timed out while waiting for promotion to finish";
diff --git a/src/test/recovery/t/025_stuck_on_old_timeline.pl b/src/test/recovery/t/025_stuck_on_old_timeline.pl
index 375cce4479..00ee9fcaed 100644
--- a/src/test/recovery/t/025_stuck_on_old_timeline.pl
+++ b/src/test/recovery/t/025_stuck_on_old_timeline.pl
@@ -32,13 +32,14 @@ my $perlbin = TestLib::perl2host($^X);
 $perlbin =~ s!\\!/!g if $TestLib::windows_os;
 my $archivedir_primary = $node_primary->archive_dir;
 $archivedir_primary =~ s!\\!/!g if $TestLib::windows_os;
-$node_primary->append_conf('postgresql.conf', qq(
+$node_primary->append_conf(
+	'postgresql.conf', qq(
 archive_command = '"$perlbin" "$FindBin::RealBin/cp_history_files" "%p" "$archivedir_primary/%f"'
 wal_keep_size=128MB
 ));
 # Make sure that Msys perl doesn't complain about difficulty in setting locale
 # when called from the archive_command.
-local $ENV{PERL_BADLANG}=0;
+local $ENV{PERL_BADLANG} = 0;
 $node_primary->start;
 
 # Take backup from primary
@@ -47,8 +48,11 @@ $node_primary->backup($backup_name);
 
 # Create streaming standby linking to primary
 my $node_standby = PostgresNode->new('standby');
-$node_standby->init_from_backup($node_primary, $backup_name,
-	allows_streaming => 1, has_streaming => 1, has_archiving => 1);
+$node_standby->init_from_backup(
+	$node_primary, $backup_name,
+	allows_streaming => 1,
+	has_streaming    => 1,
+	has_archiving    => 1);
 $node_standby->start;
 
 # Take backup of standby, use -Xnone so that pg_wal is empty.
@@ -60,7 +64,8 @@ my $node_cascade = PostgresNode->new('cascade');
 $node_cascade->init_from_backup($node_standby, $backup_name,
 	has_streaming => 1);
 $node_cascade->enable_restoring($node_primary);
-$node_cascade->append_conf('postgresql.conf', qq(
+$node_cascade->append_conf(
+	'postgresql.conf', qq(
 recovery_target_timeline='latest'
 ));
 
@@ -68,9 +73,8 @@ recovery_target_timeline='latest'
 $node_standby->promote;
 
 # Wait for promotion to complete
-$node_standby->poll_query_until('postgres',
-								"SELECT NOT pg_is_in_recovery();")
-	or die "Timed out while waiting for promotion";
+$node_standby->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery();")
+  or die "Timed out while waiting for promotion";
 
 # Find next WAL segment to be archived
 my $walfile_to_be_archived = $node_standby->safe_psql('postgres',
diff --git a/src/test/subscription/t/001_rep_changes.pl b/src/test/subscription/t/001_rep_changes.pl
index 3fdd9c917b..0c84d87873 100644
--- a/src/test/subscription/t/001_rep_changes.pl
+++ b/src/test/subscription/t/001_rep_changes.pl
@@ -51,8 +51,11 @@ $node_publisher->safe_psql('postgres',
 	"ALTER TABLE tab_nothing REPLICA IDENTITY NOTHING");
 
 # Replicate the changes without replica identity index
-$node_publisher->safe_psql('postgres', "CREATE TABLE tab_no_replidentity_index(c1 int)");
-$node_publisher->safe_psql('postgres', "CREATE INDEX idx_no_replidentity_index ON tab_no_replidentity_index(c1)");
+$node_publisher->safe_psql('postgres',
+	"CREATE TABLE tab_no_replidentity_index(c1 int)");
+$node_publisher->safe_psql('postgres',
+	"CREATE INDEX idx_no_replidentity_index ON tab_no_replidentity_index(c1)"
+);
 
 # Setup structure on subscriber
 $node_subscriber->safe_psql('postgres', "CREATE TABLE tab_notrep (a int)");
@@ -78,8 +81,11 @@ $node_subscriber->safe_psql('postgres',
 );
 
 # replication of the table without replica identity index
-$node_subscriber->safe_psql('postgres', "CREATE TABLE tab_no_replidentity_index(c1 int)");
-$node_subscriber->safe_psql('postgres', "CREATE INDEX idx_no_replidentity_index ON tab_no_replidentity_index(c1)");
+$node_subscriber->safe_psql('postgres',
+	"CREATE TABLE tab_no_replidentity_index(c1 int)");
+$node_subscriber->safe_psql('postgres',
+	"CREATE INDEX idx_no_replidentity_index ON tab_no_replidentity_index(c1)"
+);
 
 # Setup logical replication
 my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
@@ -137,7 +143,8 @@ $node_publisher->safe_psql('postgres',
 	"DELETE FROM tab_include WHERE a > 20");
 $node_publisher->safe_psql('postgres', "UPDATE tab_include SET a = -a");
 
-$node_publisher->safe_psql('postgres', "INSERT INTO tab_no_replidentity_index VALUES(1)");
+$node_publisher->safe_psql('postgres',
+	"INSERT INTO tab_no_replidentity_index VALUES(1)");
 
 $node_publisher->wait_for_catchup('tap_sub');
 
@@ -162,8 +169,10 @@ $result = $node_subscriber->safe_psql('postgres',
 is($result, qq(20|-20|-1),
 	'check replicated changes with primary key index with included columns');
 
-is($node_subscriber->safe_psql('postgres', q(SELECT c1 FROM tab_no_replidentity_index)),
-   1, "value replicated to subscriber without replica identity index");
+is( $node_subscriber->safe_psql(
+		'postgres', q(SELECT c1 FROM tab_no_replidentity_index)),
+	1,
+	"value replicated to subscriber without replica identity index");
 
 # insert some duplicate rows
 $node_publisher->safe_psql('postgres',
diff --git a/src/test/subscription/t/010_truncate.pl b/src/test/subscription/t/010_truncate.pl
index d2902cda81..0e6ecf9c2f 100644
--- a/src/test/subscription/t/010_truncate.pl
+++ b/src/test/subscription/t/010_truncate.pl
@@ -197,11 +197,9 @@ $node_publisher->safe_psql('postgres', "SELECT pg_reload_conf()");
 # test that truncate works for logical replication when there are multiple
 # subscriptions for a single table
 
-$node_publisher->safe_psql('postgres',
-	"CREATE TABLE tab5 (a int)");
+$node_publisher->safe_psql('postgres', "CREATE TABLE tab5 (a int)");
 
-$node_subscriber->safe_psql('postgres',
-	"CREATE TABLE tab5 (a int)");
+$node_subscriber->safe_psql('postgres', "CREATE TABLE tab5 (a int)");
 
 $node_publisher->safe_psql('postgres',
 	"CREATE PUBLICATION pub5 FOR TABLE tab5");
@@ -235,8 +233,7 @@ $node_publisher->wait_for_catchup('sub5_2');
 
 $result = $node_subscriber->safe_psql('postgres',
 	"SELECT count(*), min(a), max(a) FROM tab5");
-is($result, qq(0||),
-	'truncate replicated for multiple subscriptions');
+is($result, qq(0||), 'truncate replicated for multiple subscriptions');
 
 # check deadlocks
 $result = $node_subscriber->safe_psql('postgres',
diff --git a/src/test/subscription/t/013_partition.pl b/src/test/subscription/t/013_partition.pl
index dfadb5b18e..c89d495221 100644
--- a/src/test/subscription/t/013_partition.pl
+++ b/src/test/subscription/t/013_partition.pl
@@ -673,7 +673,8 @@ is($result, qq(), 'truncate of tab3_1 replicated');
 # check that the map to convert tuples from leaf partition to the root
 # table is correctly rebuilt when a new column is added
 $node_publisher->safe_psql('postgres',
-	"ALTER TABLE tab2 DROP b, ADD COLUMN c text DEFAULT 'pub_tab2', ADD b text");
+	"ALTER TABLE tab2 DROP b, ADD COLUMN c text DEFAULT 'pub_tab2', ADD b text"
+);
 $node_publisher->safe_psql('postgres',
 	"INSERT INTO tab2 (a, b) VALUES (1, 'xxx'), (3, 'yyy'), (5, 'zzz')");
 $node_publisher->safe_psql('postgres',
diff --git a/src/test/subscription/t/020_messages.pl b/src/test/subscription/t/020_messages.pl
index cee7f912bd..ecf9b192a3 100644
--- a/src/test/subscription/t/020_messages.pl
+++ b/src/test/subscription/t/020_messages.pl
@@ -11,8 +11,7 @@ use Test::More tests => 5;
 # Create publisher node
 my $node_publisher = PostgresNode->new('publisher');
 $node_publisher->init(allows_streaming => 'logical');
-$node_publisher->append_conf('postgresql.conf',
-	'autovacuum = off');
+$node_publisher->append_conf('postgresql.conf', 'autovacuum = off');
 $node_publisher->start;
 
 # Create subscriber node
@@ -43,8 +42,10 @@ $node_publisher->wait_for_catchup('tap_sub');
 $node_subscriber->safe_psql('postgres', "ALTER SUBSCRIPTION tap_sub DISABLE");
 
 # wait for the replication slot to become inactive in the publisher
-$node_publisher->poll_query_until('postgres',
-	"SELECT COUNT(*) FROM pg_catalog.pg_replication_slots WHERE slot_name = 'tap_sub' AND active='f'", 1);
+$node_publisher->poll_query_until(
+	'postgres',
+	"SELECT COUNT(*) FROM pg_catalog.pg_replication_slots WHERE slot_name = 'tap_sub' AND active='f'",
+	1);
 
 $node_publisher->safe_psql('postgres',
 	"SELECT pg_logical_emit_message(true, 'pgoutput', 'a transactional message')"
diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list
index abdb08319c..1b3da85421 100644
--- a/src/tools/pgindent/typedefs.list
+++ b/src/tools/pgindent/typedefs.list
@@ -113,6 +113,7 @@ Append
 AppendPath
 AppendRelInfo
 AppendState
+ApplyExecutionData
 ApplySubXactData
 Archive
 ArchiveEntryPtrType
@@ -1163,6 +1164,7 @@ IpcSemaphoreKey
 IsForeignPathAsyncCapable_function
 IsForeignRelUpdatable_function
 IsForeignScanParallelSafe_function
+IsoConnInfo
 IspellDict
 Item
 ItemId
@@ -1683,7 +1685,6 @@ PLpgSQL_stmt_return
 PLpgSQL_stmt_return_next
 PLpgSQL_stmt_return_query
 PLpgSQL_stmt_rollback
-PLpgSQL_stmt_set
 PLpgSQL_stmt_type
 PLpgSQL_stmt_while
 PLpgSQL_trigtype
@@ -1872,6 +1873,9 @@ PerlInterpreter
 Perl_check_t
 Perl_ppaddr_t
 Permutation
+PermutationStep
+PermutationStepBlocker
+PermutationStepBlockerType
 PgArchData
 PgBackendGSSStatus
 PgBackendSSLStatus
@@ -2416,7 +2420,6 @@ SlabBlock
 SlabChunk
 SlabContext
 SlabSlot
-SlotAcquireBehavior
 SlotErrCallbackArg
 SlotNumber
 SlruCtl
@@ -2496,6 +2499,7 @@ StatsData
 StatsElem
 StatsExtInfo
 StdAnalyzeData
+StdRdOptIndexCleanup
 StdRdOptions
 Step
 StopList
@@ -2777,7 +2781,7 @@ UserOpts
 VacAttrStats
 VacAttrStatsP
 VacErrPhase
-VacOptTernaryValue
+VacOptValue
 VacuumParams
 VacuumRelation
 VacuumStmt
