From edd281079c65ab821f28416a3ef497d047cdbd98 Mon Sep 17 00:00:00 2001
From: SadhuPrasad <b.sadhuprasadp@enterprisedb.com>
Date: Thu, 9 Oct 2025 17:53:27 +0000
Subject: [PATCH v1] Improve TAP test uses of Test::More functions

Many TAP tests make sub-optimal use of the ok() function, which
should almost never be used for expressions involving a comparison
operator or a pattern match.

Other Test::More functions report the actual and expected values
(or the tested string and the pattern) when a test fails, which
gives far more useful diagnostics than a bare "not ok".

Replace such ok() calls with appropriate uses of is(), isnt(),
like(), unlike() and cmp_ok().
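
As an illustration (not taken from any particular test file), a check
written as

    ok($ret == 0, 'command succeeded');

only reports "not ok" when it fails, whereas the equivalent

    is($ret, 0, 'command succeeded');

also prints got/expected diagnostics showing the actual value of $ret,
which makes buildfarm failures much easier to analyze.  Similarly,
like() and unlike() show the tested string and the pattern on failure.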
---
 contrib/amcheck/t/004_verify_nbtree_unique.pl |  15 ++-
 contrib/pg_visibility/t/002_corrupt_vm.pl     |   2 +-
 src/bin/initdb/t/001_initdb.pl                |   6 +-
 src/bin/pg_basebackup/t/010_pg_basebackup.pl  |  13 ++-
 .../t/040_pg_createsubscriber.pl              |   7 +-
 src/bin/pg_combinebackup/t/010_hardlink.pl    |   4 +-
 src/bin/pg_dump/t/002_pg_dump.pl              |   6 +-
 src/bin/pg_dump/t/005_pg_dump_filterfile.pl   | 102 ++++++++---------
 src/bin/pgbench/t/001_pgbench_with_server.pl  |  22 ++--
 src/bin/psql/t/001_basic.pl                   |   4 +-
 src/bin/scripts/t/020_createdb.pl             |   4 +-
 .../libpq/t/003_load_balance_host_list.pl     |   8 +-
 .../libpq/t/004_load_balance_dns.pl           |   8 +-
 src/test/modules/test_aio/t/002_io_workers.pl |   4 +-
 .../t/004_test_parser_perf.pl                 |   4 +-
 .../test_misc/t/001_constraint_validation.pl  | 103 +++++++++---------
 .../modules/test_misc/t/002_tablespace.pl     |  40 +++----
 src/test/modules/test_pg_dump/t/001_base.pl   |   4 +-
 .../modules/xid_wraparound/t/002_limits.pl    |   2 +-
 src/test/recovery/t/001_stream_rep.pl         |  18 +--
 src/test/recovery/t/003_recovery_targets.pl   |   4 +-
 src/test/recovery/t/005_replay_delay.pl       |   6 +-
 src/test/recovery/t/006_logical_decoding.pl   |   8 +-
 src/test/recovery/t/020_archive_status.pl     |   2 +-
 src/test/recovery/t/024_archive_recovery.pl   |   4 +-
 .../t/035_standby_logical_decoding.pl         |  14 +--
 .../t/040_standby_failover_slots_sync.pl      |  40 +++----
 src/test/recovery/t/042_low_level_backup.pl   |   6 +-
 .../t/044_invalidate_inactive_slots.pl        |   2 +-
 src/test/ssl/t/001_ssltests.pl                |   2 +-
 src/test/subscription/t/001_rep_changes.pl    |  18 +--
 src/test/subscription/t/007_ddl.pl            |  12 +-
 src/test/subscription/t/013_partition.pl      |  14 +--
 src/test/subscription/t/027_nosuperuser.pl    |   4 +-
 src/test/subscription/t/031_column_list.pl    |   2 +-
 src/test/subscription/t/035_conflicts.pl      |  42 +++----
 36 files changed, 279 insertions(+), 277 deletions(-)

diff --git a/contrib/amcheck/t/004_verify_nbtree_unique.pl b/contrib/amcheck/t/004_verify_nbtree_unique.pl
index 6be08e3f38f..d2cc81aac6a 100644
--- a/contrib/amcheck/t/004_verify_nbtree_unique.pl
+++ b/contrib/amcheck/t/004_verify_nbtree_unique.pl
@@ -159,7 +159,8 @@ $node->safe_psql(
 	'postgres', q(
 	SELECT bt_index_check('bttest_unique_idx1', true, true);
 ));
-ok( $stderr =~ /index uniqueness is violated for index "bttest_unique_idx1"/,
+like($stderr,
+	qr/index uniqueness is violated for index "bttest_unique_idx1"/,
 	'detected uniqueness violation for index "bttest_unique_idx1"');
 
 #
@@ -177,7 +178,8 @@ ok( $stderr =~ /index uniqueness is violated for index "bttest_unique_idx1"/,
 	'postgres', q(
 	SELECT bt_index_check('bttest_unique_idx2', true, true);
 ));
-ok( $stderr =~ /item order invariant violated for index "bttest_unique_idx2"/,
+like($stderr,
+	qr/item order invariant violated for index "bttest_unique_idx2"/,
 	'detected item order invariant violation for index "bttest_unique_idx2"');
 
 $node->safe_psql(
@@ -191,7 +193,8 @@ $node->safe_psql(
 	'postgres', q(
 	SELECT bt_index_check('bttest_unique_idx2', true, true);
 ));
-ok( $stderr =~ /index uniqueness is violated for index "bttest_unique_idx2"/,
+like($stderr,
+	qr/index uniqueness is violated for index "bttest_unique_idx2"/,
 	'detected uniqueness violation for index "bttest_unique_idx2"');
 
 #
@@ -208,7 +211,8 @@ ok( $stderr =~ /index uniqueness is violated for index "bttest_unique_idx2"/,
 	'postgres', q(
 	SELECT bt_index_check('bttest_unique_idx3', true, true);
 ));
-ok( $stderr =~ /item order invariant violated for index "bttest_unique_idx3"/,
+like($stderr,
+	qr/item order invariant violated for index "bttest_unique_idx3"/,
 	'detected item order invariant violation for index "bttest_unique_idx3"');
 
 # For unique index deduplication is possible only for same values, but
@@ -237,7 +241,8 @@ $node->safe_psql(
 	'postgres', q(
 	SELECT bt_index_check('bttest_unique_idx3', true, true);
 ));
-ok( $stderr =~ /index uniqueness is violated for index "bttest_unique_idx3"/,
+like($stderr,
+	qr/index uniqueness is violated for index "bttest_unique_idx3"/,
 	'detected uniqueness violation for index "bttest_unique_idx3"');
 
 $node->stop;
diff --git a/contrib/pg_visibility/t/002_corrupt_vm.pl b/contrib/pg_visibility/t/002_corrupt_vm.pl
index b9b31956466..e558b2c13dc 100644
--- a/contrib/pg_visibility/t/002_corrupt_vm.pl
+++ b/contrib/pg_visibility/t/002_corrupt_vm.pl
@@ -40,7 +40,7 @@ my $npages = $node->safe_psql(
 	"SELECT relpages FROM pg_class
 		WHERE relname = 'corruption_test';"
 );
-ok($npages >= 10, 'table has at least 10 pages');
+cmp_ok($npages, '>=', 10, 'table has at least 10 pages');
 
 my $file = $node->safe_psql("postgres",
 	"SELECT pg_relation_filepath('corruption_test');");
diff --git a/src/bin/initdb/t/001_initdb.pl b/src/bin/initdb/t/001_initdb.pl
index b7ef7ed8d06..1e9543c2585 100644
--- a/src/bin/initdb/t/001_initdb.pl
+++ b/src/bin/initdb/t/001_initdb.pl
@@ -308,9 +308,9 @@ command_ok(
 	'multiple --set options with different case');
 
 my $conf = slurp_file("$tempdir/dataY/postgresql.conf");
-ok($conf !~ qr/^WORK_MEM = /m, "WORK_MEM should not be configured");
-ok($conf !~ qr/^Work_Mem = /m, "Work_Mem should not be configured");
-ok($conf =~ qr/^work_mem = 512/m, "work_mem should be in config");
+unlike($conf, qr/^WORK_MEM = /m, "WORK_MEM should not be configured");
+unlike($conf, qr/^Work_Mem = /m, "Work_Mem should not be configured");
+like($conf, qr/^work_mem = 512/m, "work_mem should be in config");
 
 # Test the no-data-checksums flag
 my $datadir_nochecksums = "$tempdir/data_no_checksums";
diff --git a/src/bin/pg_basebackup/t/010_pg_basebackup.pl b/src/bin/pg_basebackup/t/010_pg_basebackup.pl
index 7cdd4442755..0c9669eebcc 100644
--- a/src/bin/pg_basebackup/t/010_pg_basebackup.pl
+++ b/src/bin/pg_basebackup/t/010_pg_basebackup.pl
@@ -531,12 +531,13 @@ SKIP:
 	skip "symlink check not implemented on Windows", 1
 	  if ($windows_os);
 	opendir(my $dh, "$pgdata/pg_tblspc") or die;
-	ok( (   grep {
-				-l "$tempdir/backup1/pg_tblspc/$_"
-				  and readlink "$tempdir/backup1/pg_tblspc/$_" eq
-				  "$tempdir/tbackup/tblspc1"
-			} readdir($dh)),
-		"tablespace symlink was updated");
+	is( scalar(grep {
+				-l "$tempdir/backup1/pg_tblspc/$_"
+				  and readlink "$tempdir/backup1/pg_tblspc/$_" eq
+				  "$tempdir/tbackup/tblspc1"
+			} readdir($dh)),
+		1,
+		"tablespace symlink count and target correct");
 	closedir $dh;
 }
 
diff --git a/src/bin/pg_basebackup/t/040_pg_createsubscriber.pl b/src/bin/pg_basebackup/t/040_pg_createsubscriber.pl
index 229fef5b3b5..bf963e54412 100644
--- a/src/bin/pg_basebackup/t/040_pg_createsubscriber.pl
+++ b/src/bin/pg_basebackup/t/040_pg_createsubscriber.pl
@@ -341,8 +341,9 @@ $node_p->safe_psql(
 
 $node_p->wait_for_replay_catchup($node_s);
 
-ok($node_s->safe_psql($db1, "SELECT COUNT(*) = 2 FROM pg_publication"),
-	'two pre-existing publications on subscriber');
+is($node_s->safe_psql($db1, "SELECT COUNT(*) = 2 FROM pg_publication"),
+	't',
+	'two pre-existing publications on subscriber');
 
 $node_s->stop;
 
@@ -535,7 +536,7 @@ my $sysid_p = $node_p->safe_psql('postgres',
 	'SELECT system_identifier FROM pg_control_system()');
 my $sysid_s = $node_s->safe_psql('postgres',
 	'SELECT system_identifier FROM pg_control_system()');
-ok($sysid_p != $sysid_s, 'system identifier was changed');
+isnt($sysid_p, $sysid_s, 'system identifier was changed');
 
 # clean up
 $node_p->teardown_node;
diff --git a/src/bin/pg_combinebackup/t/010_hardlink.pl b/src/bin/pg_combinebackup/t/010_hardlink.pl
index 4f92d6676bd..23acf72d25f 100644
--- a/src/bin/pg_combinebackup/t/010_hardlink.pl
+++ b/src/bin/pg_combinebackup/t/010_hardlink.pl
@@ -144,12 +144,12 @@ sub check_data_file
 	{
 		# Get the file's stat information of each segment
 		my $nlink_count = get_hard_link_count($segment);
-		ok($nlink_count == 2, "File '$segment' has 2 hard links");
+		is($nlink_count, 2, "File '$segment' has 2 hard links");
 	}
 
 	# Get the file's stat information of the last segment
 	my $nlink_count = get_hard_link_count($last_segment);
-	ok($nlink_count == $last_segment_nlinks,
+	is($nlink_count, $last_segment_nlinks,
 		"File '$last_segment' has $last_segment_nlinks hard link(s)");
 }
 
diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl
index e7a2d64f741..21a2d1e5fe1 100644
--- a/src/bin/pg_dump/t/002_pg_dump.pl
+++ b/src/bin/pg_dump/t/002_pg_dump.pl
@@ -5477,15 +5477,15 @@ foreach my $run (sort keys %pgdump_runs)
 		if (($tests{$test}->{like}->{$test_key} || $tests{$test}->{all_runs})
 			&& !defined($tests{$test}->{unlike}->{$test_key}))
 		{
-			if (!ok($output_file =~ $tests{$test}->{regexp},
-					"$run: should dump $test"))
+			if (!like($output_file, $tests{$test}->{regexp},
+				"$run: should dump $test"))
 			{
 				diag("Review $run results in $tempdir");
 			}
 		}
 		else
 		{
-			if (!ok($output_file !~ $tests{$test}->{regexp},
+			if (!unlike($output_file, $tests{$test}->{regexp},
 					"$run: should not dump $test"))
 			{
 				diag("Review $run results in $tempdir");
diff --git a/src/bin/pg_dump/t/005_pg_dump_filterfile.pl b/src/bin/pg_dump/t/005_pg_dump_filterfile.pl
index 5c69ec31c39..5e48fa58d60 100644
--- a/src/bin/pg_dump/t/005_pg_dump_filterfile.pl
+++ b/src/bin/pg_dump/t/005_pg_dump_filterfile.pl
@@ -100,10 +100,10 @@ command_ok(
 
 my $dump = slurp_file($plainfile);
 
-ok($dump =~ qr/^CREATE TABLE public\.table_one/m, "table one dumped");
-ok($dump =~ qr/^CREATE TABLE public\.table_two/m, "table two dumped");
-ok($dump =~ qr/^CREATE TABLE public\.table_three/m, "table three dumped");
-ok($dump =~ qr/^CREATE TABLE public\.table_three_one/m,
+like($dump, qr/^CREATE TABLE public\.table_one/m, "table one dumped");
+like($dump, qr/^CREATE TABLE public\.table_two/m, "table two dumped");
+like($dump, qr/^CREATE TABLE public\.table_three/m, "table three dumped");
+like($dump, qr/^CREATE TABLE public\.table_three_one/m,
 	"table three one dumped");
 
 # Test various combinations of whitespace, comments and correct filters
@@ -130,14 +130,14 @@ command_ok(
 
 $dump = slurp_file($plainfile);
 
-ok($dump =~ qr/^CREATE TABLE public\.table_one/m, "dumped table one");
-ok($dump =~ qr/^CREATE TABLE public\.table_two/m, "dumped table two");
-ok($dump !~ qr/^CREATE TABLE public\.table_three/m, "table three not dumped");
-ok($dump !~ qr/^CREATE TABLE public\.table_three_one/m,
+like($dump, qr/^CREATE TABLE public\.table_one/m, "dumped table one");
+like($dump, qr/^CREATE TABLE public\.table_two/m, "dumped table two");
+unlike($dump, qr/^CREATE TABLE public\.table_three/m, "table three not dumped");
+unlike($dump, qr/^CREATE TABLE public\.table_three_one/m,
 	"table three_one not dumped");
-ok( $dump !~ qr/^COPY public\.table_one/m,
+unlike($dump, qr/^COPY public\.table_one/m,
 	"content of table one is not included");
-ok($dump =~ qr/^COPY public\.table_two/m, "content of table two is included");
+like($dump, qr/^COPY public\.table_two/m, "content of table two is included");
 
 # Test dumping tables specified by qualified names
 open $inputfile, '>', "$tempdir/inputfile.txt"
@@ -159,9 +159,9 @@ command_ok(
 
 $dump = slurp_file($plainfile);
 
-ok($dump =~ qr/^CREATE TABLE public\.table_one/m, "dumped table one");
-ok($dump =~ qr/^CREATE TABLE public\.table_two/m, "dumped table two");
-ok($dump =~ qr/^CREATE TABLE public\.table_three/m, "dumped table three");
+like($dump, qr/^CREATE TABLE public\.table_one/m, "dumped table one");
+like($dump, qr/^CREATE TABLE public\.table_two/m, "dumped table two");
+like($dump, qr/^CREATE TABLE public\.table_three/m, "dumped table three");
 
 # Test dumping all tables except one
 open $inputfile, '>', "$tempdir/inputfile.txt"
@@ -181,10 +181,10 @@ command_ok(
 
 $dump = slurp_file($plainfile);
 
-ok($dump !~ qr/^CREATE TABLE public\.table_one/m, "table one not dumped");
-ok($dump =~ qr/^CREATE TABLE public\.table_two/m, "dumped table two");
-ok($dump =~ qr/^CREATE TABLE public\.table_three/m, "dumped table three");
-ok($dump =~ qr/^CREATE TABLE public\.table_three_one/m,
+unlike($dump, qr/^CREATE TABLE public\.table_one/m, "table one not dumped");
+like($dump, qr/^CREATE TABLE public\.table_two/m, "dumped table two");
+like($dump, qr/^CREATE TABLE public\.table_three/m, "dumped table three");
+like($dump, qr/^CREATE TABLE public\.table_three_one/m,
 	"dumped table three_one");
 
 # Test dumping tables with a wildcard pattern
@@ -205,10 +205,10 @@ command_ok(
 
 $dump = slurp_file($plainfile);
 
-ok($dump !~ qr/^CREATE TABLE public\.table_one/m, "table one not dumped");
-ok($dump !~ qr/^CREATE TABLE public\.table_two/m, "table two not dumped");
-ok($dump =~ qr/^CREATE TABLE public\.table_three/m, "dumped table three");
-ok($dump =~ qr/^CREATE TABLE public\.table_three_one/m,
+unlike($dump, qr/^CREATE TABLE public\.table_one/m, "table one not dumped");
+unlike($dump, qr/^CREATE TABLE public\.table_two/m, "table two not dumped");
+like($dump, qr/^CREATE TABLE public\.table_three/m, "dumped table three");
+like($dump, qr/^CREATE TABLE public\.table_three_one/m,
 	"dumped table three_one");
 
 # Test dumping table with multiline quoted tablename
@@ -230,7 +230,7 @@ command_ok(
 
 $dump = slurp_file($plainfile);
 
-ok($dump =~ qr/^CREATE TABLE public.\"strange aaa/m,
+like($dump, qr/^CREATE TABLE public.\"strange aaa/m,
 	"dump table with new line in name");
 
 # Test excluding multiline quoted tablename from dump
@@ -251,7 +251,7 @@ command_ok(
 
 $dump = slurp_file($plainfile);
 
-ok($dump !~ qr/^CREATE TABLE public.\"strange aaa/m,
+unlike($dump, qr/^CREATE TABLE public.\"strange aaa/m,
 	"dump table with new line in name");
 
 # Test excluding an entire schema
@@ -272,7 +272,7 @@ command_ok(
 
 $dump = slurp_file($plainfile);
 
-ok($dump !~ qr/^CREATE TABLE/m, "no table dumped");
+unlike($dump, qr/^CREATE TABLE/m, "no table dumped");
 
 # Test including and excluding an entire schema by multiple filterfiles
 open $inputfile, '>', "$tempdir/inputfile.txt"
@@ -298,7 +298,7 @@ command_ok(
 
 $dump = slurp_file($plainfile);
 
-ok($dump !~ qr/^CREATE TABLE/m, "no table dumped");
+unlike($dump, qr/^CREATE TABLE/m, "no table dumped");
 
 # Test dumping a table with a single leading newline on a row
 open $inputfile, '>', "$tempdir/inputfile.txt"
@@ -321,7 +321,7 @@ command_ok(
 
 $dump = slurp_file($plainfile);
 
-ok($dump =~ qr/^CREATE TABLE public.\"\nt\nt\n\" \($/ms,
+like($dump, qr/^CREATE TABLE public.\"\nt\nt\n\" \($/ms,
 	"dump table with multiline strange name");
 
 open $inputfile, '>', "$tempdir/inputfile.txt"
@@ -341,7 +341,7 @@ command_ok(
 
 $dump = slurp_file($plainfile);
 
-ok($dump =~ qr/^CREATE TABLE public.\"\nt\nt\n\" \($/ms,
+like($dump, qr/^CREATE TABLE public.\"\nt\nt\n\" \($/ms,
 	"dump table with multiline strange name");
 
 #########################################
@@ -380,7 +380,7 @@ command_ok(
 
 $dump = slurp_file($plainfile);
 
-ok($dump =~ qr/^CREATE SERVER dummyserver/m, "dump foreign server");
+like($dump, qr/^CREATE SERVER dummyserver/m, "dump foreign server");
 
 open $inputfile, '>', "$tempdir/inputfile.txt"
   or die "unable to open filterfile for writing";
@@ -497,7 +497,7 @@ command_ok(
 
 $dump = slurp_file($plainfile);
 
-ok($dump =~ qr/^CREATE TABLE public\.table_one/m, "no table dumped");
+like($dump, qr/^CREATE TABLE public\.table_one/m, "no table dumped");
 
 # Now append a pattern to the filter file which doesn't resolve
 open $inputfile, '>>', "$tempdir/inputfile.txt"
@@ -537,8 +537,8 @@ command_ok(
 
 $dump = slurp_file($plainfile);
 
-ok($dump !~ qr/^\\connect postgres/m, "database postgres is not dumped");
-ok($dump =~ qr/^\\connect template1/m, "database template1 is dumped");
+unlike($dump, qr/^\\connect postgres/m, "database postgres is not dumped");
+like($dump, qr/^\\connect template1/m, "database template1 is dumped");
 
 # Make sure this option dont break the existing limitation of using
 # --globals-only with exclusions
@@ -632,8 +632,8 @@ command_ok(
 
 $dump = slurp_file($plainfile);
 
-ok($dump =~ qr/^CREATE TABLE public\.table_two/m, "wanted table restored");
-ok($dump !~ qr/^CREATE TABLE public\.table_one/m,
+like($dump, qr/^CREATE TABLE public\.table_two/m, "wanted table restored");
+unlike($dump, qr/^CREATE TABLE public\.table_one/m,
 	"unwanted table is not restored");
 
 open $inputfile, '>', "$tempdir/inputfile.txt"
@@ -727,8 +727,8 @@ command_ok(
 
 $dump = slurp_file($plainfile);
 
-ok($dump =~ qr/^CREATE FUNCTION public\.foo1/m, "wanted function restored");
-ok( $dump !~ qr/^CREATE TABLE public\.foo2/m,
+like($dump, qr/^CREATE FUNCTION public\.foo1/m, "wanted function restored");
+unlike($dump, qr/^CREATE TABLE public\.foo2/m,
 	"unwanted function is not restored");
 
 # this should be white space tolerant (against the -P argument)
@@ -751,7 +751,7 @@ command_ok(
 
 $dump = slurp_file($plainfile);
 
-ok($dump =~ qr/^CREATE FUNCTION public\.foo3/m, "wanted function restored");
+like($dump, qr/^CREATE FUNCTION public\.foo3/m, "wanted function restored");
 
 open $inputfile, '>', "$tempdir/inputfile.txt"
   or die "unable to open filterfile for writing";
@@ -775,10 +775,10 @@ command_ok(
 
 $dump = slurp_file($plainfile);
 
-ok($dump =~ qr/^CREATE INDEX t1_idx1/m, "wanted index restored");
-ok($dump !~ qr/^CREATE INDEX t2_idx2/m, "unwanted index are not restored");
-ok($dump =~ qr/^CREATE TRIGGER trg1/m, "wanted trigger restored");
-ok($dump !~ qr/^CREATE TRIGGER trg2/m, "unwanted trigger is not restored");
+like($dump, qr/^CREATE INDEX t1_idx1/m, "wanted index restored");
+unlike($dump, qr/^CREATE INDEX t2_idx2/m, "unwanted index is not restored");
+like($dump, qr/^CREATE TRIGGER trg1/m, "wanted trigger restored");
+unlike($dump, qr/^CREATE TRIGGER trg2/m, "unwanted trigger is not restored");
 
 open $inputfile, '>', "$tempdir/inputfile.txt"
   or die "unable to open filterfile for writing";
@@ -798,10 +798,10 @@ command_ok(
 
 $dump = slurp_file($plainfile);
 
-ok($dump =~ qr/^CREATE TABLE s1\.t1/m, "wanted table from schema restored");
-ok( $dump =~ qr/^CREATE SEQUENCE s1\.s1/m,
+like($dump, qr/^CREATE TABLE s1\.t1/m, "wanted table from schema restored");
+like($dump, qr/^CREATE SEQUENCE s1\.s1/m,
 	"wanted sequence from schema restored");
-ok($dump !~ qr/^CREATE TABLE s2\t2/m, "unwanted table is not restored");
+unlike($dump, qr/^CREATE TABLE s2\t2/m, "unwanted table is not restored");
 
 open $inputfile, '>', "$tempdir/inputfile.txt"
   or die "unable to open filterfile for writing";
@@ -821,12 +821,12 @@ command_ok(
 
 $dump = slurp_file($plainfile);
 
-ok($dump !~ qr/^CREATE TABLE s1\.t1/m,
+unlike($dump, qr/^CREATE TABLE s1\.t1/m,
 	"unwanted table from schema is not restored");
-ok($dump !~ qr/^CREATE SEQUENCE s1\.s1/m,
+unlike($dump, qr/^CREATE SEQUENCE s1\.s1/m,
 	"unwanted sequence from schema is not restored");
-ok($dump =~ qr/^CREATE TABLE s2\.t2/m, "wanted table restored");
-ok($dump =~ qr/^CREATE TABLE public\.t1/m, "wanted table restored");
+like($dump, qr/^CREATE TABLE s2\.t2/m, "wanted table restored");
+like($dump, qr/^CREATE TABLE public\.t1/m, "wanted table restored");
 
 #########################################
 # test of supported syntax
@@ -849,7 +849,7 @@ command_ok(
 
 $dump = slurp_file($plainfile);
 
-ok($dump =~ qr/^CREATE TABLE public\.bootab/m, "dumped children table");
+like($dump, qr/^CREATE TABLE public\.bootab/m, "dumped children table");
 
 open $inputfile, '>', "$tempdir/inputfile.txt"
   or die "unable to open filterfile for writing";
@@ -869,7 +869,7 @@ command_ok(
 
 $dump = slurp_file($plainfile);
 
-ok($dump !~ qr/^CREATE TABLE public\.bootab/m,
+unlike($dump, qr/^CREATE TABLE public\.bootab/m,
 	"exclude dumped children table");
 
 open $inputfile, '>', "$tempdir/inputfile.txt"
@@ -890,8 +890,8 @@ command_ok(
 
 $dump = slurp_file($plainfile);
 
-ok($dump =~ qr/^CREATE TABLE public\.bootab/m, "dumped children table");
-ok($dump !~ qr/^COPY public\.bootab/m, "exclude dumped children table");
+like($dump, qr/^CREATE TABLE public\.bootab/m, "dumped children table");
+unlike($dump, qr/^COPY public\.bootab/m, "exclude dumped children table");
 
 #########################################
 # Test extension
diff --git a/src/bin/pgbench/t/001_pgbench_with_server.pl b/src/bin/pgbench/t/001_pgbench_with_server.pl
index 7dd78940300..0e075bb40bf 100644
--- a/src/bin/pgbench/t/001_pgbench_with_server.pl
+++ b/src/bin/pgbench/t/001_pgbench_with_server.pl
@@ -753,15 +753,15 @@ my ($ret, $out, $err) = $node->psql('postgres',
 	'SELECT seed, rand, val, COUNT(*) FROM seeded_random GROUP BY seed, rand, val'
 );
 
-ok($ret == 0, "psql seeded_random count ok");
-ok($err eq '', "psql seeded_random count stderr is empty");
-ok($out =~ /\b$seed\|uniform\|1\d\d\d\|2/,
+is($ret, 0, "psql seeded_random count ok");
+is($err, '', "psql seeded_random count stderr is empty");
+like($out, qr/\b$seed\|uniform\|1\d\d\d\|2/,
 	"psql seeded_random count uniform");
-ok( $out =~ /\b$seed\|exponential\|2\d\d\d\|2/,
+like($out, qr/\b$seed\|exponential\|2\d\d\d\|2/,
 	"psql seeded_random count exponential");
-ok( $out =~ /\b$seed\|gaussian\|3\d\d\d\|2/,
+like($out, qr/\b$seed\|gaussian\|3\d\d\d\|2/,
 	"psql seeded_random count gaussian");
-ok($out =~ /\b$seed\|zipfian\|4\d\d\d\|2/,
+like($out, qr/\b$seed\|zipfian\|4\d\d\d\|2/,
 	"psql seeded_random count zipfian");
 
 $node->safe_psql('postgres', 'DROP TABLE seeded_random;');
@@ -1521,8 +1521,8 @@ sub check_pgbench_logs
 
 	# $prefix is simple enough, thus does not need escaping
 	my @logs = list_files($dir, qr{^$prefix\..*$});
-	ok(@logs == $nb, "number of log files");
-	ok(grep(/\/$prefix\.\d+(\.\d+)?$/, @logs) == $nb, "file name format");
+	is(scalar @logs, $nb, "number of log files");
+	is(scalar(grep(/\/$prefix\.\d+(\.\d+)?$/, @logs)), $nb, "file name format");
 
 	my $log_number = 0;
 	for my $log (sort @logs)
@@ -1532,10 +1532,10 @@ sub check_pgbench_logs
 
 		my @contents = split(/\n/, $contents_raw);
 		my $clen = @contents;
-		ok( $min <= $clen && $clen <= $max,
-			"transaction count for $log ($clen)");
+		cmp_ok($clen, '>=', $min, "transaction count for $log ($clen) at least $min");
+		cmp_ok($clen, '<=', $max, "transaction count for $log ($clen) at most $max");
 		my $clen_match = grep(/$re/, @contents);
-		ok($clen_match == $clen, "transaction format for $prefix");
+		is($clen_match, $clen, "transaction format for $prefix");
 
 		# Show more information if some logs don't match
 		# to help with debugging.
diff --git a/src/bin/psql/t/001_basic.pl b/src/bin/psql/t/001_basic.pl
index cf07a9dbd5e..677e9df3f22 100644
--- a/src/bin/psql/t/001_basic.pl
+++ b/src/bin/psql/t/001_basic.pl
@@ -57,7 +57,7 @@ foreach my $arg (qw(commands variables))
 	$result = IPC::Run::run [ 'psql', "--help=$arg" ],
 	  '>' => \$stdout,
 	  '2>' => \$stderr;
-	ok($result, "psql --help=$arg exit code 0");
+	is($result, 1, "psql --help=$arg exit code 0");
 	isnt($stdout, '', "psql --help=$arg goes to stdout");
 	is($stderr, '', "psql --help=$arg nothing to stderr");
 }
@@ -141,7 +141,7 @@ my ($ret, $out, $err) = $node->psql('postgres',
 
 is($ret, 2, 'server crash: psql exit code');
 like($out, qr/before/, 'server crash: output before crash');
-ok($out !~ qr/AFTER/, 'server crash: no output after crash');
+unlike($out, qr/AFTER/, 'server crash: no output after crash');
 is( $err,
 	'psql:<stdin>:2: FATAL:  terminating connection due to administrator command
 psql:<stdin>:2: server closed the connection unexpectedly
diff --git a/src/bin/scripts/t/020_createdb.pl b/src/bin/scripts/t/020_createdb.pl
index a8293390ede..1eb45faafab 100644
--- a/src/bin/scripts/t/020_createdb.pl
+++ b/src/bin/scripts/t/020_createdb.pl
@@ -351,9 +351,9 @@ $node->issues_sql_like(
 	'create database with owner role_foobar');
 ($ret, $stdout, $stderr) =
   $node->psql('foobar2', 'DROP OWNED BY role_foobar;', on_error_die => 1,);
-ok($ret == 0, "DROP OWNED BY role_foobar");
+is($ret, 0, "DROP OWNED BY role_foobar");
 ($ret, $stdout, $stderr) =
   $node->psql('foobar2', 'DROP DATABASE foobar8;', on_error_die => 1,);
-ok($ret == 0, "DROP DATABASE foobar8");
+is($ret, 0, "DROP DATABASE foobar8");
 
 done_testing();
diff --git a/src/interfaces/libpq/t/003_load_balance_host_list.pl b/src/interfaces/libpq/t/003_load_balance_host_list.pl
index 6e859c49351..a291588f34b 100644
--- a/src/interfaces/libpq/t/003_load_balance_host_list.pl
+++ b/src/interfaces/libpq/t/003_load_balance_host_list.pl
@@ -61,10 +61,10 @@ my $node3_occurrences = () =
 my $total_occurrences =
   $node1_occurrences + $node2_occurrences + $node3_occurrences;
 
-ok($node1_occurrences > 1, "received at least one connection on node1");
-ok($node2_occurrences > 1, "received at least one connection on node2");
-ok($node3_occurrences > 1, "received at least one connection on node3");
-ok($total_occurrences == 50, "received 50 connections across all nodes");
+cmp_ok($node1_occurrences, '>', 1, "received at least one connection on node1");
+cmp_ok($node2_occurrences, '>', 1, "received at least one connection on node2");
+cmp_ok($node3_occurrences, '>', 1, "received at least one connection on node3");
+is($total_occurrences, 50, "received 50 connections across all nodes");
 
 $node1->stop();
 $node2->stop();
diff --git a/src/interfaces/libpq/t/004_load_balance_dns.pl b/src/interfaces/libpq/t/004_load_balance_dns.pl
index 19a4f80fd7f..feb5e71b2c6 100644
--- a/src/interfaces/libpq/t/004_load_balance_dns.pl
+++ b/src/interfaces/libpq/t/004_load_balance_dns.pl
@@ -111,10 +111,10 @@ my $node3_occurrences = () =
 my $total_occurrences =
   $node1_occurrences + $node2_occurrences + $node3_occurrences;
 
-ok($node1_occurrences > 1, "received at least one connection on node1");
-ok($node2_occurrences > 1, "received at least one connection on node2");
-ok($node3_occurrences > 1, "received at least one connection on node3");
-ok($total_occurrences == 50, "received 50 connections across all nodes");
+cmp_ok($node1_occurrences, '>', 1, "received at least one connection on node1");
+cmp_ok($node2_occurrences, '>', 1, "received at least one connection on node2");
+cmp_ok($node3_occurrences, '>', 1, "received at least one connection on node3");
+is($total_occurrences, 50, "received 50 connections across all nodes");
 
 $node1->stop();
 $node2->stop();
diff --git a/src/test/modules/test_aio/t/002_io_workers.pl b/src/test/modules/test_aio/t/002_io_workers.pl
index af5fae15ea7..c8e919c62b3 100644
--- a/src/test/modules/test_aio/t/002_io_workers.pl
+++ b/src/test/modules/test_aio/t/002_io_workers.pl
@@ -67,8 +67,8 @@ sub change_number_of_io_workers
 
 	if ($expect_failure)
 	{
-		ok( $stderr =~
-			  /$worker_count is outside the valid range for parameter "io_workers"/,
+		like($stderr,
+			qr/$worker_count is outside the valid range for parameter "io_workers"/,
 			"updating number of io_workers to $worker_count failed, as expected"
 		);
 
diff --git a/src/test/modules/test_json_parser/t/004_test_parser_perf.pl b/src/test/modules/test_json_parser/t/004_test_parser_perf.pl
index 9ea21ee8990..fc8c1a42864 100644
--- a/src/test/modules/test_json_parser/t/004_test_parser_perf.pl
+++ b/src/test/modules/test_json_parser/t/004_test_parser_perf.pl
@@ -34,10 +34,10 @@ close($fh);
 
 my ($result) = run_log([ $exe, "1", $fname ]);
 
-ok($result == 0, "perf test runs with recursive descent parser");
+cmp_ok($result, '==', 0, "perf test runs with recursive descent parser");
 
 $result = run_log([ $exe, "-i", "1", $fname ]);
 
-ok($result == 0, "perf test runs with table driven parser");
+cmp_ok($result, '==', 0, "perf test runs with table driven parser");
 
 done_testing();
diff --git a/src/test/modules/test_misc/t/001_constraint_validation.pl b/src/test/modules/test_misc/t/001_constraint_validation.pl
index 1d86936ec69..2ffcb317a41 100644
--- a/src/test/modules/test_misc/t/001_constraint_validation.pl
+++ b/src/test/modules/test_misc/t/001_constraint_validation.pl
@@ -58,8 +58,8 @@ run_sql_command(
 # normal run will verify table data
 $output = run_sql_command('alter table atacc1 alter test_a set not null;');
 ok(!is_table_verified($output), 'with constraint will not scan table');
-ok( $output =~
-	  m/existing constraints on column "atacc1.test_a" are sufficient to prove that it does not contain nulls/,
+like($output,
+	 qr/existing constraints on column "atacc1.test_a" are sufficient to prove that it does not contain nulls/,
 	'test_a proved by constraints');
 
 run_sql_command('alter table atacc1 alter test_a drop not null;');
@@ -70,9 +70,8 @@ $output = run_sql_command(
 );
 ok(is_table_verified($output), 'table was scanned');
 # we may miss debug message for test_a constraint because we need verify table due test_b
-ok( !(  $output =~
-		m/existing constraints on column "atacc1.test_b" are sufficient to prove that it does not contain nulls/
-	),
+unlike($output,
+	qr/existing constraints on column "atacc1.test_b" are sufficient to prove that it does not contain nulls/,
 	'test_b not proved by wrong constraints');
 run_sql_command(
 	'alter table atacc1 alter test_a drop not null, alter test_b drop not null;'
@@ -86,11 +85,11 @@ $output = run_sql_command(
 	'alter table atacc1 alter test_b set not null, alter test_a set not null;'
 );
 ok(!is_table_verified($output), 'table was not scanned for both columns');
-ok( $output =~
-	  m/existing constraints on column "atacc1.test_a" are sufficient to prove that it does not contain nulls/,
+like($output,
+	qr/existing constraints on column "atacc1.test_a" are sufficient to prove that it does not contain nulls/,
 	'test_a proved by constraints');
-ok( $output =~
-	  m/existing constraints on column "atacc1.test_b" are sufficient to prove that it does not contain nulls/,
+like($output,
+	qr/existing constraints on column "atacc1.test_b" are sufficient to prove that it does not contain nulls/,
 	'test_b proved by constraints');
 run_sql_command('drop table atacc1;');
 
@@ -119,8 +118,8 @@ $output = run_sql_command(
 	'ALTER TABLE list_parted2 ATTACH PARTITION part_3_4 FOR VALUES IN (3, 4);'
 );
 ok(!is_table_verified($output), 'table part_3_4 not scanned');
-ok( $output =~
-	  m/partition constraint for table "part_3_4" is implied by existing constraints/,
+like($output,
+	qr/partition constraint for table "part_3_4" is implied by existing constraints/,
 	'part_3_4 verified by existing constraints');
 
 # test attach default partition
@@ -131,16 +130,16 @@ run_sql_command(
 $output = run_sql_command(
 	'ALTER TABLE list_parted2 ATTACH PARTITION list_parted2_def default;');
 ok(!is_table_verified($output), 'table list_parted2_def not scanned');
-ok( $output =~
-	  m/partition constraint for table "list_parted2_def" is implied by existing constraints/,
+like($output,
+	qr/partition constraint for table "list_parted2_def" is implied by existing constraints/,
 	'list_parted2_def verified by existing constraints');
 
 $output = run_sql_command(
 	'CREATE TABLE part_55_66 PARTITION OF list_parted2 FOR VALUES IN (55, 66);'
 );
 ok(!is_table_verified($output), 'table list_parted2_def not scanned');
-ok( $output =~
-	  m/updated partition constraint for default partition "list_parted2_def" is implied by existing constraints/,
+like($output,
+	qr/updated partition constraint for default partition "list_parted2_def" is implied by existing constraints/,
 	'updated partition constraint for default partition list_parted2_def');
 
 # test attach another partitioned table
@@ -153,11 +152,11 @@ run_sql_command(
 );
 $output = run_sql_command(
 	'ALTER TABLE list_parted2 ATTACH PARTITION part_5 FOR VALUES IN (5);');
-ok(!($output =~ m/verifying table "part_5"/), 'table part_5 not scanned');
-ok($output =~ m/verifying table "list_parted2_def"/,
+unlike($output, qr/verifying table "part_5"/, 'table part_5 not scanned');
+like($output, qr/verifying table "list_parted2_def"/,
 	'list_parted2_def scanned');
-ok( $output =~
-	  m/partition constraint for table "part_5" is implied by existing constraints/,
+like($output,
+	qr/partition constraint for table "part_5" is implied by existing constraints/,
 	'part_5 verified by existing constraints');
 
 run_sql_command(
@@ -171,11 +170,11 @@ run_sql_command(
 );
 $output = run_sql_command(
 	'ALTER TABLE list_parted2 ATTACH PARTITION part_5 FOR VALUES IN (5);');
-ok(!($output =~ m/verifying table "part_5"/), 'table part_5 not scanned');
-ok($output =~ m/verifying table "list_parted2_def"/,
+unlike($output, qr/verifying table "part_5"/, 'table part_5 not scanned');
+like($output, qr/verifying table "list_parted2_def"/,
 	'list_parted2_def scanned');
-ok( $output =~
-	  m/partition constraint for table "part_5" is implied by existing constraints/,
+like($output,
+	qr/partition constraint for table "part_5" is implied by existing constraints/,
 	'part_5 verified by existing constraints');
 
 # Check the case where attnos of the partitioning columns in the table being
@@ -190,11 +189,11 @@ run_sql_command(
 	ALTER TABLE part_6 DROP c;');
 $output = run_sql_command(
 	'ALTER TABLE list_parted2 ATTACH PARTITION part_6 FOR VALUES IN (6);');
-ok(!($output =~ m/verifying table "part_6"/), 'table part_6 not scanned');
-ok($output =~ m/verifying table "list_parted2_def"/,
+unlike($output, qr/verifying table "part_6"/, 'table part_6 not scanned');
+like($output, qr/verifying table "list_parted2_def"/,
 	'list_parted2_def scanned');
-ok( $output =~
-	  m/partition constraint for table "part_6" is implied by existing constraints/,
+like($output,
+	qr/partition constraint for table "part_6" is implied by existing constraints/,
 	'part_6 verified by existing constraints');
 
 # Similar to above, but the table being attached is a partitioned table
@@ -219,17 +218,17 @@ $output = run_sql_command(
 	'ALTER TABLE part_7 ATTACH PARTITION part_7_a_null FOR VALUES IN (\'a\', null);'
 );
 ok(!is_table_verified($output), 'table not scanned');
-ok( $output =~
-	  m/partition constraint for table "part_7_a_null" is implied by existing constraints/,
+like($output,
+	qr/partition constraint for table "part_7_a_null" is implied by existing constraints/,
 	'part_7_a_null verified by existing constraints');
 $output = run_sql_command(
 	'ALTER TABLE list_parted2 ATTACH PARTITION part_7 FOR VALUES IN (7);');
 ok(!is_table_verified($output), 'tables not scanned');
-ok( $output =~
-	  m/partition constraint for table "part_7" is implied by existing constraints/,
+like($output,
+	qr/partition constraint for table "part_7" is implied by existing constraints/,
 	'part_7 verified by existing constraints');
-ok( $output =~
-	  m/updated partition constraint for default partition "list_parted2_def" is implied by existing constraints/,
+like($output,
+	qr/updated partition constraint for default partition "list_parted2_def" is implied by existing constraints/,
 	'updated partition constraint for default partition list_parted2_def');
 
 run_sql_command(
@@ -245,10 +244,9 @@ $output = run_sql_command(
 	'ALTER TABLE range_parted ATTACH PARTITION range_part1 FOR VALUES FROM (1, 1) TO (1, 10);'
 );
 ok(is_table_verified($output), 'table range_part1 scanned');
-ok( !(  $output =~
-		m/partition constraint for table "range_part1" is implied by existing constraints/
-	),
-	'range_part1 not verified by existing constraints');
+unlike($output,
+	   qr/partition constraint for table "range_part1" is implied by existing constraints/,
+	   'range_part1 not verified by existing constraints');
 
 run_sql_command(
 	'CREATE TABLE range_part2 (
@@ -259,8 +257,8 @@ $output = run_sql_command(
 	'ALTER TABLE range_parted ATTACH PARTITION range_part2 FOR VALUES FROM (1, 10) TO (1, 20);'
 );
 ok(!is_table_verified($output), 'table range_part2 not scanned');
-ok( $output =~
-	  m/partition constraint for table "range_part2" is implied by existing constraints/,
+like($output,
+	qr/partition constraint for table "range_part2" is implied by existing constraints/,
 	'range_part2 verified by existing constraints');
 
 # If a partitioned table being created or an existing table being attached
@@ -278,19 +276,17 @@ run_sql_command(
 $output = run_sql_command(
 	'ALTER TABLE quuux ATTACH PARTITION quuux1 FOR VALUES IN (1);');
 ok(is_table_verified($output), 'quuux1 table scanned');
-ok( !(  $output =~
-		m/partition constraint for table "quuux1" is implied by existing constraints/
-	),
-	'quuux1 verified by existing constraints');
+unlike($output,
+	   qr/partition constraint for table "quuux1" is implied by existing constraints/,
+	   'quuux1 verified by existing constraints');
 
 run_sql_command('CREATE TABLE quuux2 (a int, b text);');
 $output = run_sql_command(
 	'ALTER TABLE quuux ATTACH PARTITION quuux2 FOR VALUES IN (2);');
-ok(!($output =~ m/verifying table "quuux_default1"/),
-	'quuux_default1 not scanned');
-ok($output =~ m/verifying table "quuux2"/, 'quuux2 scanned');
-ok( $output =~
-	  m/updated partition constraint for default partition "quuux_default1" is implied by existing constraints/,
+unlike($output, qr/verifying table "quuux_default1"/, 'quuux_default1 not scanned');
+like($output, qr/verifying table "quuux2"/, 'quuux2 scanned');
+like($output,
+	qr/updated partition constraint for default partition "quuux_default1" is implied by existing constraints/,
 	'updated partition constraint for default partition quuux_default1');
 run_sql_command('DROP TABLE quuux1, quuux2;');
 
@@ -298,15 +294,14 @@ run_sql_command('DROP TABLE quuux1, quuux2;');
 $output = run_sql_command(
 	'CREATE TABLE quuux1 PARTITION OF quuux FOR VALUES IN (1);');
 ok(!is_table_verified($output), 'tables not scanned');
-ok( !(  $output =~
-		m/partition constraint for table "quuux1" is implied by existing constraints/
-	),
-	'quuux1 verified by existing constraints');
+unlike($output,
+	   qr/partition constraint for table "quuux1" is implied by existing constraints/,
+	   'quuux1 verified by existing constraints');
 $output = run_sql_command(
 	'CREATE TABLE quuux2 PARTITION OF quuux FOR VALUES IN (2);');
 ok(!is_table_verified($output), 'tables not scanned');
-ok( $output =~
-	  m/updated partition constraint for default partition "quuux_default1" is implied by existing constraints/,
+like($output,
+	qr/updated partition constraint for default partition "quuux_default1" is implied by existing constraints/,
 	'updated partition constraint for default partition quuux_default1');
 run_sql_command('DROP TABLE quuux;');
 
diff --git a/src/test/modules/test_misc/t/002_tablespace.pl b/src/test/modules/test_misc/t/002_tablespace.pl
index b8a5617c788..972215b76c6 100644
--- a/src/test/modules/test_misc/t/002_tablespace.pl
+++ b/src/test/modules/test_misc/t/002_tablespace.pl
@@ -29,69 +29,69 @@ my $result;
 # Create a tablespace with an absolute path
 $result = $node->psql('postgres',
 	"CREATE TABLESPACE regress_ts1 LOCATION '$TS1_LOCATION'");
-ok($result == 0, 'create tablespace with absolute path');
+is($result, 0, 'create tablespace with absolute path');
 
 # Can't create a tablespace where there is one already
 $result = $node->psql('postgres',
 	"CREATE TABLESPACE regress_ts1 LOCATION '$TS1_LOCATION'");
-ok($result != 0, 'clobber tablespace with absolute path');
+isnt($result, 0, 'clobber tablespace with absolute path');
 
 # Create table in it
 $result = $node->psql('postgres', "CREATE TABLE t () TABLESPACE regress_ts1");
-ok($result == 0, 'create table in tablespace with absolute path');
+is($result, 0, 'create table in tablespace with absolute path');
 
 # Can't drop a tablespace that still has a table in it
 $result = $node->psql('postgres', "DROP TABLESPACE regress_ts1");
-ok($result != 0, 'drop tablespace with absolute path');
+isnt($result, 0, 'drop tablespace with absolute path');
 
 # Drop the table
 $result = $node->psql('postgres', "DROP TABLE t");
-ok($result == 0, 'drop table in tablespace with absolute path');
+is($result, 0, 'drop table in tablespace with absolute path');
 
 # Drop the tablespace
 $result = $node->psql('postgres', "DROP TABLESPACE regress_ts1");
-ok($result == 0, 'drop tablespace with absolute path');
+is($result, 0, 'drop tablespace with absolute path');
 
 # Create two absolute tablespaces and two in-place tablespaces, so we can
 # testing various kinds of tablespace moves.
 $result = $node->psql('postgres',
 	"CREATE TABLESPACE regress_ts1 LOCATION '$TS1_LOCATION'");
-ok($result == 0, 'create tablespace 1 with absolute path');
+is($result, 0, 'create tablespace 1 with absolute path');
 $result = $node->psql('postgres',
 	"CREATE TABLESPACE regress_ts2 LOCATION '$TS2_LOCATION'");
-ok($result == 0, 'create tablespace 2 with absolute path');
+is($result, 0, 'create tablespace 2 with absolute path');
 $result = $node->psql('postgres',
 	"SET allow_in_place_tablespaces=on; CREATE TABLESPACE regress_ts3 LOCATION ''"
 );
-ok($result == 0, 'create tablespace 3 with in-place directory');
+is($result, 0, 'create tablespace 3 with in-place directory');
 $result = $node->psql('postgres',
 	"SET allow_in_place_tablespaces=on; CREATE TABLESPACE regress_ts4 LOCATION ''"
 );
-ok($result == 0, 'create tablespace 4 with in-place directory');
+is($result, 0, 'create tablespace 4 with in-place directory');
 
 # Create a table and test moving between absolute and in-place tablespaces
 $result = $node->psql('postgres', "CREATE TABLE t () TABLESPACE regress_ts1");
-ok($result == 0, 'create table in tablespace 1');
+is($result, 0, 'create table in tablespace 1');
 $result = $node->psql('postgres', "ALTER TABLE t SET tablespace regress_ts2");
-ok($result == 0, 'move table abs->abs');
+is($result, 0, 'move table abs->abs');
 $result = $node->psql('postgres', "ALTER TABLE t SET tablespace regress_ts3");
-ok($result == 0, 'move table abs->in-place');
+is($result, 0, 'move table abs->in-place');
 $result = $node->psql('postgres', "ALTER TABLE t SET tablespace regress_ts4");
-ok($result == 0, 'move table in-place->in-place');
+is($result, 0, 'move table in-place->in-place');
 $result = $node->psql('postgres', "ALTER TABLE t SET tablespace regress_ts1");
-ok($result == 0, 'move table in-place->abs');
+is($result, 0, 'move table in-place->abs');
 
 # Drop everything
 $result = $node->psql('postgres', "DROP TABLE t");
-ok($result == 0, 'create table in tablespace 1');
+is($result, 0, 'create table in tablespace 1');
 $result = $node->psql('postgres', "DROP TABLESPACE regress_ts1");
-ok($result == 0, 'drop tablespace 1');
+is($result, 0, 'drop tablespace 1');
 $result = $node->psql('postgres', "DROP TABLESPACE regress_ts2");
-ok($result == 0, 'drop tablespace 2');
+is($result, 0, 'drop tablespace 2');
 $result = $node->psql('postgres', "DROP TABLESPACE regress_ts3");
-ok($result == 0, 'drop tablespace 3');
+is($result, 0, 'drop tablespace 3');
 $result = $node->psql('postgres', "DROP TABLESPACE regress_ts4");
-ok($result == 0, 'drop tablespace 4');
+is($result, 0, 'drop tablespace 4');
 
 $node->stop;
 
diff --git a/src/test/modules/test_pg_dump/t/001_base.pl b/src/test/modules/test_pg_dump/t/001_base.pl
index adcaa419616..8127f7f01b0 100644
--- a/src/test/modules/test_pg_dump/t/001_base.pl
+++ b/src/test/modules/test_pg_dump/t/001_base.pl
@@ -981,7 +981,7 @@ foreach my $run (sort keys %pgdump_runs)
 		if ($tests{$test}->{like}->{$test_key}
 			&& !defined($tests{$test}->{unlike}->{$test_key}))
 		{
-			if (!ok($output_file =~ $tests{$test}->{regexp},
+			if (!like($output_file, $tests{$test}->{regexp},
 					"$run: should dump $test"))
 			{
 				diag("Review $run results in $tempdir");
@@ -989,7 +989,7 @@ foreach my $run (sort keys %pgdump_runs)
 		}
 		else
 		{
-			if (!ok($output_file !~ $tests{$test}->{regexp},
+			if (!unlike($output_file, $tests{$test}->{regexp},
 					"$run: should not dump $test"))
 			{
 				diag("Review $run results in $tempdir");
diff --git a/src/test/modules/xid_wraparound/t/002_limits.pl b/src/test/modules/xid_wraparound/t/002_limits.pl
index aa1d8765d3a..8dd7f89a7d3 100644
--- a/src/test/modules/xid_wraparound/t/002_limits.pl
+++ b/src/test/modules/xid_wraparound/t/002_limits.pl
@@ -90,7 +90,7 @@ for my $i (1 .. 15)
 		last;
 	}
 }
-ok($warn_limit == 1, "warn-limit reached");
+is($warn_limit, 1, "warn-limit reached");
 
 # We can still INSERT, despite the warnings.
 $node->safe_psql('postgres',
diff --git a/src/test/recovery/t/001_stream_rep.pl b/src/test/recovery/t/001_stream_rep.pl
index 2cbcc509d76..c9c213001ee 100644
--- a/src/test/recovery/t/001_stream_rep.pl
+++ b/src/test/recovery/t/001_stream_rep.pl
@@ -265,26 +265,26 @@ my ($ret, $stdout, $stderr) = $node_primary->psql(
 	'postgres', 'SHOW ALL;',
 	on_error_die => 1,
 	extra_params => [ '--dbname' => $connstr_rep ]);
-ok($ret == 0, "SHOW ALL with replication role and physical replication");
+is($ret, 0, "SHOW ALL with replication role and physical replication");
 ($ret, $stdout, $stderr) = $node_primary->psql(
 	'postgres', 'SHOW ALL;',
 	on_error_die => 1,
 	extra_params => [ '--dbname' => $connstr_db ]);
-ok($ret == 0, "SHOW ALL with replication role and logical replication");
+is($ret, 0, "SHOW ALL with replication role and logical replication");
 
 # Test SHOW with a user-settable parameter
 ($ret, $stdout, $stderr) = $node_primary->psql(
 	'postgres', 'SHOW work_mem;',
 	on_error_die => 1,
 	extra_params => [ '--dbname' => $connstr_rep ]);
-ok( $ret == 0,
+is($ret, 0,
 	"SHOW with user-settable parameter, replication role and physical replication"
 );
 ($ret, $stdout, $stderr) = $node_primary->psql(
 	'postgres', 'SHOW work_mem;',
 	on_error_die => 1,
 	extra_params => [ '--dbname' => $connstr_db ]);
-ok( $ret == 0,
+is($ret, 0,
 	"SHOW with user-settable parameter, replication role and logical replication"
 );
 
@@ -293,14 +293,14 @@ ok( $ret == 0,
 	'postgres', 'SHOW primary_conninfo;',
 	on_error_die => 1,
 	extra_params => [ '--dbname' => $connstr_rep ]);
-ok( $ret == 0,
+is($ret, 0,
 	"SHOW with superuser-settable parameter, replication role and physical replication"
 );
 ($ret, $stdout, $stderr) = $node_primary->psql(
 	'postgres', 'SHOW primary_conninfo;',
 	on_error_die => 1,
 	extra_params => [ '--dbname' => $connstr_db ]);
-ok( $ret == 0,
+is($ret, 0,
 	"SHOW with superuser-settable parameter, replication role and logical replication"
 );
 
@@ -312,7 +312,7 @@ my $slotname = 'test_read_replication_slot_physical';
 	'postgres',
 	'READ_REPLICATION_SLOT non_existent_slot;',
 	extra_params => [ '--dbname' => $connstr_rep ]);
-ok($ret == 0, "READ_REPLICATION_SLOT exit code 0 on success");
+is($ret, 0, "READ_REPLICATION_SLOT exit code 0 on success");
 like($stdout, qr/^\|\|$/,
 	"READ_REPLICATION_SLOT returns NULL values if slot does not exist");
 
@@ -325,7 +325,7 @@ $node_primary->psql(
 	'postgres',
 	"READ_REPLICATION_SLOT $slotname;",
 	extra_params => [ '--dbname' => $connstr_rep ]);
-ok($ret == 0, "READ_REPLICATION_SLOT success with existing slot");
+is($ret, 0, "READ_REPLICATION_SLOT success with existing slot");
 like($stdout, qr/^physical\|[^|]*\|1$/,
 	"READ_REPLICATION_SLOT returns tuple with slot information");
 
@@ -577,7 +577,7 @@ my $phys_restart_lsn_post = $node_primary->safe_psql('postgres',
 	"SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$phys_slot';"
 );
 chomp($phys_restart_lsn_post);
-ok( ($phys_restart_lsn_pre cmp $phys_restart_lsn_post) == 0,
+is($phys_restart_lsn_pre, $phys_restart_lsn_post,
 	"physical slot advance persists across restarts");
 
 # Check if the previous segment gets correctly recycled after the
diff --git a/src/test/recovery/t/003_recovery_targets.pl b/src/test/recovery/t/003_recovery_targets.pl
index f2109efa9b1..c539eac230a 100644
--- a/src/test/recovery/t/003_recovery_targets.pl
+++ b/src/test/recovery/t/003_recovery_targets.pl
@@ -155,7 +155,7 @@ my $res = run_log(
 ok(!$res, 'invalid recovery startup fails');
 
 my $logfile = slurp_file($node_standby->logfile());
-ok($logfile =~ qr/multiple recovery targets specified/,
+like($logfile, qr/multiple recovery targets specified/,
 	'multiple conflicting settings');
 
 # Check behavior when recovery ends before target is reached
@@ -183,7 +183,7 @@ foreach my $i (0 .. 10 * $PostgreSQL::Test::Utils::timeout_default)
 	usleep(100_000);
 }
 $logfile = slurp_file($node_standby->logfile());
-ok( $logfile =~
+like($logfile,
 	  qr/FATAL: .* recovery ended before configured recovery target was reached/,
 	'recovery end before target reached is a fatal error');
 
diff --git a/src/test/recovery/t/005_replay_delay.pl b/src/test/recovery/t/005_replay_delay.pl
index be05fcea8a7..00d20729ce9 100644
--- a/src/test/recovery/t/005_replay_delay.pl
+++ b/src/test/recovery/t/005_replay_delay.pl
@@ -53,10 +53,8 @@ $node_standby->poll_query_until('postgres',
 
 # This test is successful if and only if the LSN has been applied with at least
 # the configured apply delay.
-ok(time() - $primary_insert_time >= $delay,
-	"standby applies WAL only after replication delay");
-
-
+cmp_ok(time() - $primary_insert_time, '>=', $delay,
+    "standby applies WAL only after replication delay");
 # Check that recovery can be paused or resumed expectedly.
 my $node_standby2 = PostgreSQL::Test::Cluster->new('standby2');
 $node_standby2->init_from_backup($node_primary, $backup_name,
diff --git a/src/test/recovery/t/006_logical_decoding.pl b/src/test/recovery/t/006_logical_decoding.pl
index 2137c4e5e30..15ef646f901 100644
--- a/src/test/recovery/t/006_logical_decoding.pl
+++ b/src/test/recovery/t/006_logical_decoding.pl
@@ -35,8 +35,8 @@ my ($result, $stdout, $stderr) = $node_primary->psql(
 	'template1',
 	qq[START_REPLICATION SLOT test_slot LOGICAL 0/0],
 	replication => 'database');
-ok( $stderr =~
-	  m/replication slot "test_slot" was not created in this database/,
+like($stderr,
+	qr/replication slot "test_slot" was not created in this database/,
 	"Logical decoding correctly fails to start");
 
 ($result, $stdout, $stderr) = $node_primary->psql(
@@ -54,7 +54,7 @@ like(
 	'template1',
 	qq[START_REPLICATION SLOT s1 LOGICAL 0/1],
 	replication => 'true');
-ok($stderr =~ /ERROR:  logical decoding requires a database connection/,
+like($stderr, qr/ERROR:  logical decoding requires a database connection/,
 	"Logical decoding fails on non-database connection");
 
 $node_primary->safe_psql('postgres',
@@ -201,7 +201,7 @@ my $logical_restart_lsn_post = $node_primary->safe_psql('postgres',
 	"SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$logical_slot';"
 );
 chomp($logical_restart_lsn_post);
-ok(($logical_restart_lsn_pre cmp $logical_restart_lsn_post) == 0,
+is($logical_restart_lsn_pre, $logical_restart_lsn_post,
 	"logical slot advance persists across restarts");
 
 my $stats_test_slot1 = 'test_slot';
diff --git a/src/test/recovery/t/020_archive_status.pl b/src/test/recovery/t/020_archive_status.pl
index 5d1fd191243..14db4985bde 100644
--- a/src/test/recovery/t/020_archive_status.pl
+++ b/src/test/recovery/t/020_archive_status.pl
@@ -245,7 +245,7 @@ my $log_location = -s $standby2->logfile;
 
 $standby2->stop;
 my $logfile = slurp_file($standby2->logfile, $log_location);
-ok( $logfile =~ qr/archiver process shutting down/,
+like($logfile, qr/archiver process shutting down/,
 	'check shutdown callback of shell archive module');
 
 # Test that we can enter and leave backup mode without crashes
diff --git a/src/test/recovery/t/024_archive_recovery.pl b/src/test/recovery/t/024_archive_recovery.pl
index b4527ec0843..b80b60a4c0b 100644
--- a/src/test/recovery/t/024_archive_recovery.pl
+++ b/src/test/recovery/t/024_archive_recovery.pl
@@ -91,8 +91,8 @@ sub test_recovery_wal_level_minimal
 
 	# Confirm that the archive recovery fails with an expected error
 	my $logfile = slurp_file($recovery_node->logfile());
-	ok( $logfile =~
-		  qr/FATAL: .* WAL was generated with "wal_level=minimal", cannot continue recovering/,
+	like($logfile,
+		qr/FATAL: .* WAL was generated with "wal_level=minimal", cannot continue recovering/,
 		"$node_text ends with an error because it finds WAL generated with \"wal_level=minimal\""
 	);
 }
diff --git a/src/test/recovery/t/035_standby_logical_decoding.pl b/src/test/recovery/t/035_standby_logical_decoding.pl
index c9c182892cf..b74ed31c4e5 100644
--- a/src/test/recovery/t/035_standby_logical_decoding.pl
+++ b/src/test/recovery/t/035_standby_logical_decoding.pl
@@ -394,8 +394,8 @@ foreach my $i (0 .. 10 * $PostgreSQL::Test::Utils::timeout_default)
 
 # Confirm that the server startup fails with an expected error
 my $logfile = slurp_file($node_standby->logfile());
-ok( $logfile =~
-	  qr/FATAL: .* logical replication slot ".*" exists on the standby, but "hot_standby" = "off"/,
+like($logfile,
+	qr/FATAL: .* logical replication slot ".*" exists on the standby, but "hot_standby" = "off"/,
 	"the standby ends with an error during startup because hot_standby was disabled"
 );
 $node_standby->adjust_conf('postgresql.conf', 'hot_standby', 'on');
@@ -487,8 +487,8 @@ $node_primary->wait_for_replay_catchup($node_standby);
 ($result, $stdout, $stderr) = $node_standby->psql('otherdb',
 	"SELECT lsn FROM pg_logical_slot_peek_changes('behaves_ok_activeslot', NULL, NULL) ORDER BY lsn DESC LIMIT 1;"
 );
-ok( $stderr =~
-	  m/replication slot "behaves_ok_activeslot" was not created in this database/,
+like($stderr,
+	qr/replication slot "behaves_ok_activeslot" was not created in this database/,
 	"replaying logical slot from another database fails");
 
 ##################################################
@@ -620,9 +620,9 @@ check_pg_recvlogical_stderr($handle,
 	'postgres',
 	qq[select pg_copy_logical_replication_slot('vacuum_full_inactiveslot', 'vacuum_full_inactiveslot_copy');],
 	replication => 'database');
-ok( $stderr =~
-	  /ERROR:  cannot copy invalidated replication slot "vacuum_full_inactiveslot"/,
-	"invalidated slot cannot be copied");
+like($stderr,
+	 qr/ERROR:  cannot copy invalidated replication slot "vacuum_full_inactiveslot"/,
+	 "invalidated slot cannot be copied");
 
 # Set hot_standby_feedback to on
 change_hot_standby_feedback_and_wait_for_xmins(1, 1);
diff --git a/src/test/recovery/t/040_standby_failover_slots_sync.pl b/src/test/recovery/t/040_standby_failover_slots_sync.pl
index 2c61c51e914..ed093b456a1 100644
--- a/src/test/recovery/t/040_standby_failover_slots_sync.pl
+++ b/src/test/recovery/t/040_standby_failover_slots_sync.pl
@@ -100,9 +100,9 @@ $subscriber1->safe_psql('postgres',
 # Disable failover for enabled subscription
 my ($result, $stdout, $stderr) = $subscriber1->psql('postgres',
 	"ALTER SUBSCRIPTION regress_mysub1 SET (failover = false)");
-ok( $stderr =~
-	  /ERROR:  cannot set option "failover" for enabled subscription/,
-	"altering failover is not allowed for enabled subscription");
+like($stderr,
+	 qr/ERROR:  cannot set option "failover" for enabled subscription/,
+	 "altering failover is not allowed for enabled subscription");
 
 ##################################################
 # Test that pg_sync_replication_slots() cannot be executed on a non-standby server.
@@ -110,9 +110,9 @@ ok( $stderr =~
 
 ($result, $stdout, $stderr) =
   $publisher->psql('postgres', "SELECT pg_sync_replication_slots();");
-ok( $stderr =~
-	  /ERROR:  replication slots can only be synchronized to a standby server/,
-	"cannot sync slots on a non-standby server");
+like($stderr,
+	 qr/ERROR:  replication slots can only be synchronized to a standby server/,
+	 "cannot sync slots on a non-standby server");
 
 ##################################################
 # Test logical failover slots corresponding to different plugins can be
@@ -313,23 +313,25 @@ $standby1->reload;
 # Attempting to perform logical decoding on a synced slot should result in an error
 ($result, $stdout, $stderr) = $standby1->psql('postgres',
 	"select * from pg_logical_slot_get_changes('lsub1_slot', NULL, NULL);");
-ok( $stderr =~
-	  /ERROR:  cannot use replication slot "lsub1_slot" for logical decoding/,
-	"logical decoding is not allowed on synced slot");
+like($stderr,
+	 qr/ERROR:  cannot use replication slot "lsub1_slot" for logical decoding/,
+	 "logical decoding is not allowed on synced slot");
 
 # Attempting to alter a synced slot should result in an error
 ($result, $stdout, $stderr) = $standby1->psql(
 	'postgres',
 	qq[ALTER_REPLICATION_SLOT lsub1_slot (failover);],
 	replication => 'database');
-ok($stderr =~ /ERROR:  cannot alter replication slot "lsub1_slot"/,
-	"synced slot on standby cannot be altered");
+like($stderr,
+	qr/ERROR:  cannot alter replication slot "lsub1_slot"/,
+	"synced slot on standby cannot be altered");
 
 # Attempting to drop a synced slot should result in an error
 ($result, $stdout, $stderr) = $standby1->psql('postgres',
 	"SELECT pg_drop_replication_slot('lsub1_slot');");
-ok($stderr =~ /ERROR:  cannot drop replication slot "lsub1_slot"/,
-	"synced slot on standby cannot be dropped");
+like($stderr,
+	qr/ERROR:  cannot drop replication slot "lsub1_slot"/,
+	"synced slot on standby cannot be dropped");
 
 ##################################################
 # Test that we cannot synchronize slots if dbname is not specified in the
@@ -341,9 +343,9 @@ $standby1->reload;
 
 ($result, $stdout, $stderr) =
   $standby1->psql('postgres', "SELECT pg_sync_replication_slots();");
-ok( $stderr =~
-	  /ERROR:  replication slot synchronization requires "dbname" to be specified in "primary_conninfo"/,
-	"cannot sync slots if dbname is not specified in primary_conninfo");
+like($stderr,
+	qr/ERROR:  replication slot synchronization requires "dbname" to be specified in "primary_conninfo"/,
+	"cannot sync slots if dbname is not specified in primary_conninfo");
 
 # Add the dbname back to the primary_conninfo for further tests
 $standby1->append_conf('postgresql.conf',
@@ -379,9 +381,9 @@ $cascading_standby->start;
 
 ($result, $stdout, $stderr) =
   $cascading_standby->psql('postgres', "SELECT pg_sync_replication_slots();");
-ok( $stderr =~
-	  /ERROR:  cannot synchronize replication slots from a standby server/,
-	"cannot sync slots to a cascading standby server");
+like($stderr,
+	qr/ERROR:  cannot synchronize replication slots from a standby server/,
+	"cannot sync slots to a cascading standby server");
 
 $cascading_standby->stop;
 
diff --git a/src/test/recovery/t/042_low_level_backup.pl b/src/test/recovery/t/042_low_level_backup.pl
index 5749a1df533..0c8c51d52d8 100644
--- a/src/test/recovery/t/042_low_level_backup.pl
+++ b/src/test/recovery/t/042_low_level_backup.pl
@@ -105,8 +105,8 @@ copy(
 
 $node_replica->start;
 
-ok($node_replica->safe_psql('postgres', $canary_query) == 0,
-	'canary is missing');
+is($node_replica->safe_psql('postgres', $canary_query), 0,
+	'canary is missing');
 
 # Check log to ensure that crash recovery was used as there is no
 # backup_label.
@@ -134,7 +134,7 @@ $node_replica->init_from_backup($node_primary, $backup_name,
 	has_restoring => 1);
 $node_replica->start;
 
-ok($node_replica->safe_psql('postgres', $canary_query) == 1,
+is($node_replica->safe_psql('postgres', $canary_query), 1,
 	'canary is present');
 
 # Check log to ensure that backup_label was used for recovery.
diff --git a/src/test/recovery/t/044_invalidate_inactive_slots.pl b/src/test/recovery/t/044_invalidate_inactive_slots.pl
index ccace14b4dd..7b176ade3e3 100644
--- a/src/test/recovery/t/044_invalidate_inactive_slots.pl
+++ b/src/test/recovery/t/044_invalidate_inactive_slots.pl
@@ -94,7 +94,7 @@ my ($result, $stdout, $stderr);
 	'postgres', qq[
 		SELECT pg_replication_slot_advance('logical_slot', '0/1');
 ]);
-ok( $stderr =~ /can no longer access replication slot "logical_slot"/,
+like($stderr, qr/can no longer access replication slot "logical_slot"/,
 	"detected error upon trying to acquire invalidated slot on node")
   or die
   "could not detect error upon trying to acquire invalidated slot \"logical_slot\" on node";
diff --git a/src/test/ssl/t/001_ssltests.pl b/src/test/ssl/t/001_ssltests.pl
index b2eb18d3e81..dbc82e08ff0 100644
--- a/src/test/ssl/t/001_ssltests.pl
+++ b/src/test/ssl/t/001_ssltests.pl
@@ -124,7 +124,7 @@ $node->append_conf('sslconfig.conf', qq{ssl_groups='bad:value'});
 my $log_size = -s $node->logfile;
 $result = $node->restart(fail_ok => 1);
 is($result, 0, 'restart fails with incorrect groups');
-ok($node->log_contains(qr/no SSL error reported/) == 0,
+ok(!$node->log_contains(qr/no SSL error reported/),
 	'error message translated');
 $node->append_conf('ssl_config.conf', qq{ssl_groups='prime256v1'});
 $result = $node->restart(fail_ok => 1);
diff --git a/src/test/subscription/t/001_rep_changes.pl b/src/test/subscription/t/001_rep_changes.pl
index ca55d8df50d..e928dfe7fc9 100644
--- a/src/test/subscription/t/001_rep_changes.pl
+++ b/src/test/subscription/t/001_rep_changes.pl
@@ -364,14 +364,14 @@ $node_publisher->safe_psql('postgres', "DELETE FROM tab_full_pk WHERE a = 2");
 $node_publisher->wait_for_catchup('tap_sub');
 
 my $logfile = slurp_file($node_subscriber->logfile, $log_location);
-ok( $logfile =~
-	  qr/conflict detected on relation "public.tab_full_pk": conflict=update_missing.*\n.*DETAIL:.* Could not find the row to be updated.*\n.*Remote row \(1, quux\); replica identity \(a\)=\(1\)/m,
+like($logfile,
+	qr/conflict detected on relation "public.tab_full_pk": conflict=update_missing.*\n.*DETAIL:.* Could not find the row to be updated.*\n.*Remote row \(1, quux\); replica identity \(a\)=\(1\)/m,
 	'update target row is missing');
-ok( $logfile =~
-	  qr/conflict detected on relation "public.tab_full": conflict=update_missing.*\n.*DETAIL:.* Could not find the row to be updated.*\n.*Remote row \(26\); replica identity full \(25\)/m,
+like($logfile,
+	qr/conflict detected on relation "public.tab_full": conflict=update_missing.*\n.*DETAIL:.* Could not find the row to be updated.*\n.*Remote row \(26\); replica identity full \(25\)/m,
 	'update target row is missing');
-ok( $logfile =~
-	  qr/conflict detected on relation "public.tab_full_pk": conflict=delete_missing.*\n.*DETAIL:.* Could not find the row to be deleted.*\n.*Replica identity \(a\)=\(2\)/m,
+like($logfile,
+	qr/conflict detected on relation "public.tab_full_pk": conflict=delete_missing.*\n.*DETAIL:.* Could not find the row to be deleted.*\n.*Replica identity \(a\)=\(2\)/m,
 	'delete target row is missing');
 
 $node_subscriber->append_conf('postgresql.conf',
@@ -515,7 +515,7 @@ $node_publisher->safe_psql('postgres', "INSERT INTO tab_notrep VALUES (11)");
 $node_publisher->wait_for_catchup('tap_sub');
 
 $logfile = slurp_file($node_publisher->logfile, $log_location);
-ok($logfile =~ qr/skipped replication of an empty transaction with XID/,
+like($logfile, qr/skipped replication of an empty transaction with XID/,
 	'empty transaction is skipped');
 
 $result =
@@ -588,8 +588,8 @@ CREATE TABLE skip_wal();
 CREATE PUBLICATION tap_pub2 FOR TABLE skip_wal;
 ROLLBACK;
 });
-ok( $reterr =~
-	  m/WARNING:  "wal_level" is insufficient to publish logical changes/,
+like($reterr,
+	qr/WARNING:  "wal_level" is insufficient to publish logical changes/,
 	'CREATE PUBLICATION while "wal_level=minimal"');
 
 done_testing();
diff --git a/src/test/subscription/t/007_ddl.pl b/src/test/subscription/t/007_ddl.pl
index 2a45fb13739..66c659622a9 100644
--- a/src/test/subscription/t/007_ddl.pl
+++ b/src/test/subscription/t/007_ddl.pl
@@ -45,8 +45,8 @@ pass "subscription disable and drop in same transaction did not hang";
 my ($ret, $stdout, $stderr) = $node_subscriber->psql('postgres',
 	"CREATE SUBSCRIPTION mysub1 CONNECTION '$publisher_connstr' PUBLICATION mypub, non_existent_pub"
 );
-ok( $stderr =~
-	  m/WARNING:  publication "non_existent_pub" does not exist on the publisher/,
+like($stderr,
+	qr/WARNING:  publication "non_existent_pub" does not exist on the publisher/,
 	"Create subscription throws warning for non-existent publication");
 
 # Wait for initial table sync to finish.
@@ -56,16 +56,16 @@ $node_subscriber->wait_for_subscription_sync($node_publisher, 'mysub1');
 ($ret, $stdout, $stderr) = $node_subscriber->psql('postgres',
 	"ALTER SUBSCRIPTION mysub1 ADD PUBLICATION non_existent_pub1, non_existent_pub2"
 );
-ok( $stderr =~
-	  m/WARNING:  publications "non_existent_pub1", "non_existent_pub2" do not exist on the publisher/,
+like($stderr,
+	qr/WARNING:  publications "non_existent_pub1", "non_existent_pub2" do not exist on the publisher/,
 	"Alter subscription add publication throws warning for non-existent publications"
 );
 
 # Specifying non-existent publication along with set publication.
 ($ret, $stdout, $stderr) = $node_subscriber->psql('postgres',
 	"ALTER SUBSCRIPTION mysub1 SET PUBLICATION non_existent_pub");
-ok( $stderr =~
-	  m/WARNING:  publication "non_existent_pub" does not exist on the publisher/,
+like($stderr,
+	qr/WARNING:  publication "non_existent_pub" does not exist on the publisher/,
 	"Alter subscription set publication throws warning for non-existent publication"
 );
 
diff --git a/src/test/subscription/t/013_partition.pl b/src/test/subscription/t/013_partition.pl
index 763a91e75a3..ff6c0e9128c 100644
--- a/src/test/subscription/t/013_partition.pl
+++ b/src/test/subscription/t/013_partition.pl
@@ -367,16 +367,16 @@ $node_publisher->wait_for_catchup('sub1');
 $node_publisher->wait_for_catchup('sub2');
 
 my $logfile = slurp_file($node_subscriber1->logfile(), $log_location);
-ok( $logfile =~
+like( $logfile,
 	  qr/conflict detected on relation "public.tab1_2_2": conflict=update_missing.*\n.*DETAIL:.* Could not find the row to be updated.*\n.*Remote row \(null, 4, quux\); replica identity \(a\)=\(4\)/,
 	'update target row is missing in tab1_2_2');
-ok( $logfile =~
+like( $logfile,
 	  qr/conflict detected on relation "public.tab1_1": conflict=delete_missing.*\n.*DETAIL:.* Could not find the row to be deleted.*\n.*Replica identity \(a\)=\(1\)/,
 	'delete target row is missing in tab1_1');
-ok( $logfile =~
+like( $logfile,
 	  qr/conflict detected on relation "public.tab1_2_2": conflict=delete_missing.*\n.*DETAIL:.* Could not find the row to be deleted.*\n.*Replica identity \(a\)=\(4\)/,
 	'delete target row is missing in tab1_2_2');
-ok( $logfile =~
+like( $logfile,
 	  qr/conflict detected on relation "public.tab1_def": conflict=delete_missing.*\n.*DETAIL:.* Could not find the row to be deleted.*\n.*Replica identity \(a\)=\(10\)/,
 	'delete target row is missing in tab1_def');
 
@@ -780,10 +780,10 @@ $node_publisher->wait_for_catchup('sub_viaroot');
 $node_publisher->wait_for_catchup('sub2');
 
 $logfile = slurp_file($node_subscriber1->logfile(), $log_location);
-ok( $logfile =~
+like( $logfile,
 	  qr/conflict detected on relation "public.tab2_1": conflict=update_missing.*\n.*DETAIL:.* Could not find the row to be updated.*\n.*Remote row \(pub_tab2, quux, 5\); replica identity \(a\)=\(5\)/,
 	'update target row is missing in tab2_1');
-ok( $logfile =~
+like( $logfile,
 	  qr/conflict detected on relation "public.tab2_1": conflict=delete_missing.*\n.*DETAIL:.* Could not find the row to be deleted.*\n.*Replica identity \(a\)=\(1\)/,
 	'delete target row is missing in tab2_1');
 
@@ -801,7 +801,7 @@ $node_publisher->safe_psql('postgres',
 $node_publisher->wait_for_catchup('sub_viaroot');
 
 $logfile = slurp_file($node_subscriber1->logfile(), $log_location);
-ok( $logfile =~
+like( $logfile,
 	  qr/conflict detected on relation "public.tab2_1": conflict=update_origin_differs.*\n.*DETAIL:.* Updating the row that was modified locally in transaction [0-9]+ at .*\n.*Existing local row \(yyy, null, 3\); remote row \(pub_tab2, quux, 3\); replica identity \(a\)=\(3\)/,
 	'updating a row that was modified by a different origin');
 
diff --git a/src/test/subscription/t/027_nosuperuser.pl b/src/test/subscription/t/027_nosuperuser.pl
index 36af1c16e7f..2468e058b1f 100644
--- a/src/test/subscription/t/027_nosuperuser.pl
+++ b/src/test/subscription/t/027_nosuperuser.pl
@@ -399,8 +399,8 @@ SKIP:
 	isnt($ret, 0,
 		"non zero exit for subscription whose owner is a non-superuser must specify password parameter of the connection string"
 	);
-	ok( $stderr =~
-		  m/DETAIL:  Non-superusers must provide a password in the connection string./,
+	like($stderr,
+		qr/DETAIL:  Non-superusers must provide a password in the connection string./,
 		'subscription whose owner is a non-superuser must specify password parameter of the connection string'
 	);
 
diff --git a/src/test/subscription/t/031_column_list.pl b/src/test/subscription/t/031_column_list.pl
index e859bcdf4eb..3bbab7f6e83 100644
--- a/src/test/subscription/t/031_column_list.pl
+++ b/src/test/subscription/t/031_column_list.pl
@@ -1272,7 +1272,7 @@ my ($cmdret, $stdout, $stderr) = $node_subscriber->psql(
 	CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub_mix_1, pub_mix_2;
 ));
 
-ok( $stderr =~
+like( $stderr,
 	  qr/cannot use different column lists for table "public.test_mix_1" in different publications/,
 	'different column lists detected');
 
diff --git a/src/test/subscription/t/035_conflicts.pl b/src/test/subscription/t/035_conflicts.pl
index 880551fc69d..f879b18eac3 100644
--- a/src/test/subscription/t/035_conflicts.pl
+++ b/src/test/subscription/t/035_conflicts.pl
@@ -224,9 +224,9 @@ ok( $node_B->poll_query_until(
 # Alter retain_dead_tuples for enabled subscription
 my ($cmdret, $stdout, $stderr) = $node_A->psql('postgres',
 	"ALTER SUBSCRIPTION $subname_AB SET (retain_dead_tuples = true)");
-ok( $stderr =~
-	  /ERROR:  cannot set option \"retain_dead_tuples\" for enabled subscription/,
-	"altering retain_dead_tuples is not allowed for enabled subscription");
+like($stderr,
+	qr/ERROR:  cannot set option \"retain_dead_tuples\" for enabled subscription/,
+	"altering retain_dead_tuples is not allowed for enabled subscription");
 
 # Disable the subscription
 $node_A->psql('postgres', "ALTER SUBSCRIPTION $subname_AB DISABLE;");
@@ -239,9 +239,9 @@ $node_A->poll_query_until('postgres',
 # Enable retain_dead_tuples for disabled subscription
 ($cmdret, $stdout, $stderr) = $node_A->psql('postgres',
 	"ALTER SUBSCRIPTION $subname_AB SET (retain_dead_tuples = true);");
-ok( $stderr =~
-	  /NOTICE:  deleted rows to detect conflicts would not be removed until the subscription is enabled/,
-	"altering retain_dead_tuples is allowed for disabled subscription");
+like($stderr,
+	qr/NOTICE:  deleted rows to detect conflicts would not be removed until the subscription is enabled/,
+	"altering retain_dead_tuples is allowed for disabled subscription");
 
 # Re-enable the subscription
 $node_A->safe_psql('postgres', "ALTER SUBSCRIPTION $subname_AB ENABLE;");
@@ -262,9 +262,9 @@ ok( $node_A->poll_query_until(
 
 ($cmdret, $stdout, $stderr) = $node_A->psql('postgres',
 	"ALTER SUBSCRIPTION $subname_AB SET (origin = any);");
-ok( $stderr =~
-	  /WARNING:  subscription "tap_sub_a_b" enabled retain_dead_tuples but might not reliably detect conflicts for changes from different origins/,
-	"warn of the possibility of receiving changes from origins other than the publisher");
+like($stderr,
+	qr/WARNING:  subscription "tap_sub_a_b" enabled retain_dead_tuples but might not reliably detect conflicts for changes from different origins/,
+	"warn of the possibility of receiving changes from origins other than the publisher");
 
 # Reset the origin to none
 $node_A->psql('postgres',
@@ -302,16 +302,16 @@ $node_A->safe_psql('postgres', "DELETE FROM tab WHERE a = 1;");
 	'postgres', qq(VACUUM (verbose) public.tab;)
 );
 
-ok( $stderr =~
-	  qr/1 are dead but not yet removable/,
-	'the deleted column is non-removable');
+like($stderr,
+	qr/1 are dead but not yet removable/,
+	'the deleted column is non-removable');
 
 # Ensure the DELETE is replayed on Node B
 $node_A->wait_for_catchup($subname_BA);
 
 # Check the conflict detected on Node B
 my $logfile = slurp_file($node_B->logfile(), $log_location);
-ok( $logfile =~
+like($logfile,
 	  qr/conflict detected on relation "public.tab": conflict=delete_origin_differs.*
 .*DETAIL:.* Deleting the row that was modified locally in transaction [0-9]+ at .*
 .*Existing local row \(1, 3\); replica identity \(a\)=\(1\)/,
@@ -324,11 +324,11 @@ $node_A->safe_psql(
 $node_B->wait_for_catchup($subname_AB);
 
 $logfile = slurp_file($node_A->logfile(), $log_location);
-ok( $logfile =~
-	  qr/conflict detected on relation "public.tab": conflict=update_deleted.*
+like($logfile,
+	qr/conflict detected on relation "public.tab": conflict=update_deleted.*
 .*DETAIL:.* The row to be updated was deleted locally in transaction [0-9]+ at .*
 .*Remote row \(1, 3\); replica identity \(a\)=\(1\)/,
-	'update target row was deleted in tab');
+	'update target row was deleted in tab');
 
 # Remember the next transaction ID to be assigned
 my $next_xid = $node_A->safe_psql('postgres', "SELECT txid_current() + 1;");
@@ -347,7 +347,7 @@ ok( $node_A->poll_query_until(
 	'postgres', qq(VACUUM (verbose) public.tab;)
 );
 
-ok( $stderr =~
+like( $stderr,
 	  qr/1 removed, 1 remain, 0 are dead but not yet removable/,
 	'the deleted column is removed');
 
@@ -380,7 +380,7 @@ $node_A->safe_psql(
 $node_B->wait_for_catchup($subname_AB);
 
 $logfile = slurp_file($node_A->logfile(), $log_location);
-ok( $logfile =~
+like( $logfile,
 	  qr/conflict detected on relation "public.tab": conflict=update_deleted.*
 .*DETAIL:.* The row to be updated was deleted locally in transaction [0-9]+ at .*
 .*Remote row \(2, 4\); replica identity full \(2, 2\)/,
@@ -511,7 +511,7 @@ if ($injection_points_supported != 0)
 	($cmdret, $stdout, $stderr) =
 	  $node_A->psql('postgres', qq(VACUUM (verbose) public.tab;));
 
-	ok($stderr =~ qr/1 are dead but not yet removable/,
+	like($stderr, qr/1 are dead but not yet removable/,
 		'the deleted column is non-removable');
 
 	$log_location = -s $node_A->logfile;
@@ -536,7 +536,7 @@ if ($injection_points_supported != 0)
 	$node_B->wait_for_catchup($subname_AB);
 
 	$logfile = slurp_file($node_A->logfile(), $log_location);
-	ok( $logfile =~
+	like( $logfile,
 		  qr/conflict detected on relation "public.tab": conflict=update_deleted.*
 .*DETAIL:.* The row to be updated was deleted locally in transaction [0-9]+ at .*
 .*Remote row \(1, 2\); replica identity full \(1, 1\)/,
@@ -559,7 +559,7 @@ if ($injection_points_supported != 0)
 	($cmdret, $stdout, $stderr) =
 	  $node_A->psql('postgres', qq(VACUUM (verbose) public.tab;));
 
-	ok($stderr =~ qr/1 removed, 0 remain, 0 are dead but not yet removable/,
+	like($stderr, qr/1 removed, 0 remain, 0 are dead but not yet removable/,
 		'the deleted column is removed');
 
 	# Get the commit timestamp for the publisher's update
-- 
2.43.0

