use strict;
use warnings FATAL => 'all';
use PostgreSQL::Test::Cluster;
use PostgreSQL::Test::Utils;
use Test::More;
use File::Basename;

# Initialize primary node with streaming replication enabled.
my $node_primary = PostgreSQL::Test::Cluster->new('primary');
$node_primary->init(allows_streaming => 1);

# Listen on all interfaces so the standby/subscriber can connect remotely.
# (wal_level was previously set both here and in the block below; it is now
# set exactly once, in the benchmark settings block.)
$node_primary->append_conf('postgresql.conf',
	"listen_addresses = '*'");

# Benchmark-oriented settings: logical WAL for pg_subscriber, timeouts
# disabled, generous worker/slot limits, and autovacuum off so runs are
# reproducible.
$node_primary->append_conf('postgresql.conf', qq{
wal_level = logical
wal_sender_timeout = 0
wal_receiver_timeout = 0
shared_buffers = 40GB
max_worker_processes = 32
max_parallel_maintenance_workers = 24
max_parallel_workers = 32
#synchronous_commit = off
#checkpoint_timeout = 1d
max_wal_size = 24GB
min_wal_size = 15GB
autovacuum = off
max_logical_replication_workers = 15
max_sync_workers_per_subscription = 15
log_line_prefix = '%n [%p] '
max_wal_senders = 200
max_replication_slots = 200
});
# NOTE(review): trust authentication for 0.0.0.0/0 is acceptable only in a
# throwaway test/benchmark environment -- never in production.
$node_primary->append_conf('pg_hba.conf',
	"host    all             all                0.0.0.0/0               trust");
$node_primary->start;

# Number of rows loaded into each test table; also used later to verify
# the subscriber sees the same data.
my $count = 60000000;

# Create and populate ten identical test tables t1..t10 on the primary.
# ('..' is the list range operator; the original spelling '...' behaves the
# same in list context but is the flip-flop operator's spelling and is
# unidiomatic here.)
for my $i (1 .. 10)
{
	$node_primary->safe_psql('postgres', "CREATE TABLE t$i(a int)");
}

for my $i (1 .. 10)
{
	$node_primary->safe_psql('postgres',
		"INSERT INTO t$i VALUES(generate_series(1,$count))");
}

# Flush everything to disk and restart so the subsequent base backup starts
# from a clean checkpoint.
$node_primary->safe_psql('postgres', "CHECKPOINT");
$node_primary->restart;
$node_primary->safe_psql('postgres', "CHECKPOINT");

my $backup_name = 'my_backup';

# Actual test start.  Time::HiRes::time() provides sub-second resolution
# for the benchmark measurements below (use is compile-time, so the import
# takes effect for the whole script regardless of its position).
use Time::HiRes qw(time);
my $start = time();

# Take a base backup of the primary and report how long it took.
$node_primary->backup($backup_name);
my $elapsed = time() - $start;
printf("backup time %0.5f seconds\n", $elapsed);

# Create node 's' from the base backup and run it as a streaming standby of
# the primary; pg_subscriber will later convert it into a logical subscriber.
my $node_s = PostgreSQL::Test::Cluster->new('s');
$node_s->init_from_backup($node_primary, 'my_backup', has_streaming => 1);
$node_s->set_standby_mode();
# Mirror the primary's benchmark settings on the standby.
# NOTE(review): max_wal_size is 25GB here vs 24GB on the primary, and the
# commented-out checkpoint_timeout differs (10s vs 1d) -- confirm whether
# this asymmetry is intentional.
$node_s->append_conf('postgresql.conf', qq{
wal_level = logical
wal_sender_timeout = 0
wal_receiver_timeout = 0
shared_buffers = 40GB
max_worker_processes = 32
max_parallel_maintenance_workers = 24
max_parallel_workers = 32
#synchronous_commit = off
#checkpoint_timeout = 10s
max_wal_size = 25GB
min_wal_size = 15GB
autovacuum = off
max_logical_replication_workers = 15
max_sync_workers_per_subscription = 15
log_line_prefix = '%n [%p] '
max_wal_senders = 200
max_replication_slots = 200
});
$node_s->start;
# Block until the standby has replayed all WAL generated so far.
$node_primary->wait_for_replay_catchup($node_s);

my $result;

# Convert the standby into a logical subscriber with pg_subscriber, timing
# the conversion.  $start1 excludes the base-backup phase measured above.
my $start1 = time();
command_ok(
	[
		'pg_subscriber', '--verbose',
		'--pgdata', $node_s->data_dir,
		'--publisher-conninfo', $node_primary->connstr('postgres'),
		'--subscriber-conninfo', $node_s->connstr('postgres'),
		'--database', 'postgres'
	],
	# Description fixed: no --dry-run flag is passed above, so the test
	# name must not claim a dry run.
	'run pg_subscriber on node s');
# Actual test end

# Total elapsed time, including the base backup taken earlier.
$elapsed = time() - $start;
printf("total execution time %0.5f seconds\n", $elapsed);

# Elapsed time for the pg_subscriber conversion alone.
$elapsed = time() - $start1;
printf("Execution time excluding backup time %0.5f seconds\n", $elapsed);

# Restart node 's' so it comes up as a logical subscriber and applies any
# remaining changes.  (A vacuous self-test, is(0, 0, ...), and commented-out
# debugging statements were removed here; the row-count checks below are the
# real assertions.)
$node_s->restart;

# Verify every table was replicated with the expected row count.
for my $i (1 .. 10)
{
	$result = $node_s->safe_psql('postgres', "SELECT COUNT(*) FROM t$i");
	is($result, $count, "t$i has expected $count rows");
}

$node_primary->stop;
done_testing();
