Hi Kuroda-san, > > I have also modified the tests in 0001 patch. These changes are only > > related to syntax of writing tests. > > LGTM. I found small improvements, please find the attached.
I have applied the changes and updated the patch. Thanks & Regards, Shlok Kyal
From 07f94de76be177d0e39762cb2bd36a4bc04a7993 Mon Sep 17 00:00:00 2001 From: Shlok Kyal <shlok.kyal.oss@gmail.com> Date: Fri, 23 Aug 2024 14:02:20 +0530 Subject: [PATCH v14 1/2] Distribute invalidations if change in catalog tables Distribute invalidations to in-progress transactions if the current committed transaction changes any catalog table. --- .../replication/logical/reorderbuffer.c | 5 +- src/backend/replication/logical/snapbuild.c | 34 ++- src/include/replication/reorderbuffer.h | 4 + src/test/subscription/t/100_bugs.pl | 267 ++++++++++++++++++ 4 files changed, 296 insertions(+), 14 deletions(-) diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c index 22bcf171ff..c5dfc1ab06 100644 --- a/src/backend/replication/logical/reorderbuffer.c +++ b/src/backend/replication/logical/reorderbuffer.c @@ -221,9 +221,6 @@ int debug_logical_replication_streaming = DEBUG_LOGICAL_REP_STREAMING_BUFFERED */ static ReorderBufferTXN *ReorderBufferGetTXN(ReorderBuffer *rb); static void ReorderBufferReturnTXN(ReorderBuffer *rb, ReorderBufferTXN *txn); -static ReorderBufferTXN *ReorderBufferTXNByXid(ReorderBuffer *rb, - TransactionId xid, bool create, bool *is_new, - XLogRecPtr lsn, bool create_as_top); static void ReorderBufferTransferSnapToParent(ReorderBufferTXN *txn, ReorderBufferTXN *subtxn); @@ -622,7 +619,7 @@ ReorderBufferReturnRelids(ReorderBuffer *rb, Oid *relids) * (with the given LSN, and as top transaction if that's specified); * when this happens, is_new is set to true. 
*/ -static ReorderBufferTXN * +ReorderBufferTXN * ReorderBufferTXNByXid(ReorderBuffer *rb, TransactionId xid, bool create, bool *is_new, XLogRecPtr lsn, bool create_as_top) { diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c index 0450f94ba8..1f7c24cad0 100644 --- a/src/backend/replication/logical/snapbuild.c +++ b/src/backend/replication/logical/snapbuild.c @@ -300,7 +300,7 @@ static void SnapBuildFreeSnapshot(Snapshot snap); static void SnapBuildSnapIncRefcount(Snapshot snap); -static void SnapBuildDistributeNewCatalogSnapshot(SnapBuild *builder, XLogRecPtr lsn); +static void SnapBuildDistributeSnapshotAndInval(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid); static inline bool SnapBuildXidHasCatalogChanges(SnapBuild *builder, TransactionId xid, uint32 xinfo); @@ -859,18 +859,21 @@ SnapBuildProcessNewCid(SnapBuild *builder, TransactionId xid, } /* - * Add a new Snapshot to all transactions we're decoding that currently are - * in-progress so they can see new catalog contents made by the transaction - * that just committed. This is necessary because those in-progress - * transactions will use the new catalog's contents from here on (at the very - * least everything they do needs to be compatible with newer catalog - * contents). + * Add a new Snapshot and invalidation messages to all transactions we're + * decoding that currently are in-progress so they can see new catalog contents + * made by the transaction that just committed. This is necessary because those + * in-progress transactions will use the new catalog's contents from here on + * (at the very least everything they do needs to be compatible with newer + * catalog contents). 
*/ static void -SnapBuildDistributeNewCatalogSnapshot(SnapBuild *builder, XLogRecPtr lsn) +SnapBuildDistributeSnapshotAndInval(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid) { dlist_iter txn_i; ReorderBufferTXN *txn; + ReorderBufferTXN *curr_txn; + + curr_txn = ReorderBufferTXNByXid(builder->reorder, xid, false, NULL, InvalidXLogRecPtr, false); /* * Iterate through all toplevel transactions. This can include @@ -913,6 +916,14 @@ SnapBuildDistributeNewCatalogSnapshot(SnapBuild *builder, XLogRecPtr lsn) SnapBuildSnapIncRefcount(builder->snapshot); ReorderBufferAddSnapshot(builder->reorder, txn->xid, lsn, builder->snapshot); + + /* + * Add invalidation messages to the reorder buffer of inprogress + * transactions except the current committed transaction + */ + if (txn->xid != xid && curr_txn->ninvalidations > 0) + ReorderBufferAddInvalidations(builder->reorder, txn->xid, lsn, + curr_txn->ninvalidations, curr_txn->invalidations); } } @@ -1184,8 +1195,11 @@ SnapBuildCommitTxn(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid, /* refcount of the snapshot builder for the new snapshot */ SnapBuildSnapIncRefcount(builder->snapshot); - /* add a new catalog snapshot to all currently running transactions */ - SnapBuildDistributeNewCatalogSnapshot(builder, lsn); + /* + * add a new catalog snapshot and invalidations messages to all + * currently running transactions + */ + SnapBuildDistributeSnapshotAndInval(builder, lsn, xid); } } diff --git a/src/include/replication/reorderbuffer.h b/src/include/replication/reorderbuffer.h index e332635f70..093d21213a 100644 --- a/src/include/replication/reorderbuffer.h +++ b/src/include/replication/reorderbuffer.h @@ -743,6 +743,10 @@ extern TransactionId *ReorderBufferGetCatalogChangesXacts(ReorderBuffer *rb); extern void ReorderBufferSetRestartPoint(ReorderBuffer *rb, XLogRecPtr ptr); +extern ReorderBufferTXN *ReorderBufferTXNByXid(ReorderBuffer *rb, + TransactionId xid, bool create, bool *is_new, + XLogRecPtr lsn, bool 
create_as_top); + extern void StartupReorderBuffer(void); #endif diff --git a/src/test/subscription/t/100_bugs.pl b/src/test/subscription/t/100_bugs.pl index cb36ca7b16..72aaaae272 100644 --- a/src/test/subscription/t/100_bugs.pl +++ b/src/test/subscription/t/100_bugs.pl @@ -487,6 +487,273 @@ $result = is( $result, qq(2|f 3|t), 'check replicated update on subscriber'); +# Clean up +$node_publisher->safe_psql('postgres', "DROP PUBLICATION pub1"); +$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION sub1"); + +# The bug was that the incremental data synchronization was being skipped when +# a new table is added to the publication in presence of a concurrent active +# transaction performing the DML on the same table. + +# Initial setup. +$node_publisher->safe_psql( + 'postgres', qq( + CREATE TABLE tab_conc(a int); + CREATE SCHEMA sch3; + CREATE TABLE sch3.tab_conc(a int); + CREATE PUBLICATION regress_pub1; +)); + +$node_subscriber->safe_psql( + 'postgres', qq( + CREATE TABLE tab_conc(a int); + CREATE SCHEMA sch3; + CREATE TABLE sch3.tab_conc(a int); + CREATE SUBSCRIPTION regress_sub1 CONNECTION '$publisher_connstr' PUBLICATION regress_pub1; +)); + +# Bump the query timeout to avoid false negatives on slow test systems. +my $psql_timeout_secs = 4 * $PostgreSQL::Test::Utils::timeout_default; + +# Initiate 3 background sessions. +my $background_psql1 = $node_publisher->background_psql( + 'postgres', + on_error_stop => 0, + timeout => $psql_timeout_secs); +$background_psql1->set_query_timer_restart(); + +my $background_psql2 = $node_publisher->background_psql( + 'postgres', + on_error_stop => 0, + timeout => $psql_timeout_secs); + +$background_psql2->set_query_timer_restart(); + +my $background_psql3 = $node_publisher->background_psql( + 'postgres', + on_error_stop => 0, + timeout => $psql_timeout_secs); +$background_psql3->set_query_timer_restart(); + +# Maintain an active transaction with the table that will be added to the +# publication. 
+$background_psql1->query_safe( + qq( + BEGIN; + INSERT INTO tab_conc VALUES (1); +)); + +# Maintain an active transaction with a schema table that will be added to the +# publication. +$background_psql2->query_safe( + qq( + BEGIN; + INSERT INTO sch3.tab_conc VALUES (1); +)); + +# Add the table to the publication using background_psql, as the alter +# publication operation will distribute the invalidations to inprogress txns. +$background_psql3->query_safe( + "ALTER PUBLICATION regress_pub1 ADD TABLE tab_conc, TABLES IN SCHEMA sch3" +); + +# Complete the transaction on the tables. +$background_psql1->query_safe("COMMIT"); +$background_psql2->query_safe("COMMIT"); + +$node_publisher->safe_psql( + 'postgres', qq( + INSERT INTO tab_conc VALUES (2); + INSERT INTO sch3.tab_conc VALUES (2); +)); + +# Refresh the publication. +$node_subscriber->safe_psql('postgres', + "ALTER SUBSCRIPTION regress_sub1 REFRESH PUBLICATION"); + +$node_subscriber->wait_for_subscription_sync($node_publisher, 'regress_sub1'); + +$result = $node_subscriber->safe_psql('postgres', "SELECT * FROM tab_conc"); +is( $result, qq(1 +2), + 'Ensure that the data from the tab_conc table is synchronized to the subscriber after the subscription is refreshed' +); + +$result = + $node_subscriber->safe_psql('postgres', "SELECT * FROM sch3.tab_conc"); +is( $result, qq(1 +2), + 'Ensure that the data from the sch3.tab_conc table is synchronized to the subscriber after the subscription is refreshed' +); + +# Perform an insert. +$node_publisher->safe_psql( + 'postgres', qq( + INSERT INTO tab_conc VALUES (3); + INSERT INTO sch3.tab_conc VALUES (3); +)); +$node_publisher->wait_for_catchup('regress_sub1'); + +# Verify that the insert is replicated to the subscriber. 
+$result = $node_subscriber->safe_psql('postgres', "SELECT * FROM tab_conc"); +is( $result, qq(1 +2 +3), + 'Verify that the incremental data for table tab_conc added after table synchronization is replicated to the subscriber' +); + +$result = + $node_subscriber->safe_psql('postgres', "SELECT * FROM sch3.tab_conc"); +is( $result, qq(1 +2 +3), + 'Verify that the incremental data for table sch3.tab_conc added after table synchronization is replicated to the subscriber' +); + +# The bug was that the incremental data synchronization was happening even when +# tables are dropped from the publication in presence of a concurrent active +# transaction performing the DML on the same table. + +# Maintain an active transaction with the table that will be dropped from the +# publication. +$background_psql1->query_safe( + qq( + BEGIN; + INSERT INTO tab_conc VALUES (4); +)); + +# Maintain an active transaction with a schema table that will be dropped from the +# publication. +$background_psql2->query_safe( + qq( + BEGIN; + INSERT INTO sch3.tab_conc VALUES (4); +)); + +# Drop the table from the publication using background_psql, as the alter +# publication operation will distribute the invalidations to inprogress txns. +$background_psql3->query_safe( + "ALTER PUBLICATION regress_pub1 DROP TABLE tab_conc, TABLES IN SCHEMA sch3" +); + +# Complete the transaction on the tables. +$background_psql1->query_safe("COMMIT"); +$background_psql2->query_safe("COMMIT"); + +# Perform an insert. +$node_publisher->safe_psql( + 'postgres', qq( + INSERT INTO tab_conc VALUES (5); + INSERT INTO sch3.tab_conc VALUES (5); +)); + +$node_publisher->wait_for_catchup('regress_sub1'); + +# Verify that the insert is not replicated to the subscriber. 
+$result = $node_subscriber->safe_psql('postgres', "SELECT * FROM tab_conc"); +is( $result, qq(1 +2 +3 +4), + 'Verify that data for table tab_conc are not replicated to subscriber'); + +$result = + $node_subscriber->safe_psql('postgres', "SELECT * FROM sch3.tab_conc"); +is( $result, qq(1 +2 +3 +4), + 'Verify that the incremental data for table sch3.tab_conc are not replicated to subscriber' +); + +# The bug was that the incremental data synchronization was happening even after +# publication is dropped in a concurrent active transaction. + +# Add tables to the publication. +$background_psql3->query_safe( + "ALTER PUBLICATION regress_pub1 ADD TABLE tab_conc, TABLES IN SCHEMA sch3" +); + +# Maintain an active transaction with the table. +$background_psql1->query_safe( + qq( + BEGIN; + INSERT INTO tab_conc VALUES (6); +)); + +# Maintain an active transaction with a schema table. +$background_psql2->query_safe( + qq( + BEGIN; + INSERT INTO sch3.tab_conc VALUES (6); +)); + +# Drop publication. +$background_psql3->query_safe("DROP PUBLICATION regress_pub1"); + +# Perform an insert. +$background_psql1->query_safe("INSERT INTO tab_conc VALUES (7)"); +$background_psql2->query_safe("INSERT INTO sch3.tab_conc VALUES (7)"); + +# Complete the transaction on the tables. +$background_psql1->query_safe("COMMIT"); +$background_psql2->query_safe("COMMIT"); + +# ERROR should appear on subscriber. +my $offset = -s $node_subscriber->logfile; +$node_subscriber->wait_for_log( + qr/ERROR: publication "regress_pub1" does not exist/, $offset); + +$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION regress_sub1"); + +# The bug was that the incremental data synchronization was happening even after +# publication is renamed in a concurrent active transaction. + +# Create publication. +$background_psql3->query_safe( + "CREATE PUBLICATION regress_pub1 FOR TABLE tab_conc, TABLES IN SCHEMA sch3" +); + +# Create subscription. 
+$node_subscriber->safe_psql('postgres', + "CREATE SUBSCRIPTION regress_sub1 CONNECTION '$publisher_connstr' PUBLICATION regress_pub1" +); + +# Maintain an active transaction with the table. +$background_psql1->query_safe( + qq( + BEGIN; + INSERT INTO tab_conc VALUES (8); +)); + +# Maintain an active transaction with a schema table. +$background_psql2->query_safe( + qq( + BEGIN; + INSERT INTO sch3.tab_conc VALUES (8); +)); + +# Rename publication. +$background_psql3->query_safe( + "ALTER PUBLICATION regress_pub1 RENAME TO regress_pub1_rename"); + +# Perform an insert. +$background_psql1->query_safe("INSERT INTO tab_conc VALUES (9)"); +$background_psql2->query_safe("INSERT INTO sch3.tab_conc VALUES (9)"); + +# Complete the transaction on the tables. +$background_psql1->query_safe("COMMIT"); +$background_psql2->query_safe("COMMIT"); + +# ERROR should appear on subscriber. +$offset = -s $node_subscriber->logfile; +$node_subscriber->wait_for_log( + qr/ERROR: publication "regress_pub1" does not exist/, $offset); + +$background_psql1->quit; +$background_psql2->quit; +$background_psql3->quit; + $node_publisher->stop('fast'); $node_subscriber->stop('fast'); -- 2.34.1
From a134f762eec24dbacf1f9b94a8b777cfb58655c7 Mon Sep 17 00:00:00 2001 From: Shlok Kyal <shlok.kyal.oss@gmail.com> Date: Fri, 4 Oct 2024 12:25:31 +0530 Subject: [PATCH v14 2/2] Selective Invalidation of Cache When we alter a publication, or add/drop a namespace to/from a publication, the caches for all the tables are invalidated. With this patch, for the above operations we will invalidate the cache of only the desired tables. --- src/backend/commands/alter.c | 4 +- src/backend/commands/publicationcmds.c | 107 ++++++++++++++++++++ src/backend/parser/gram.y | 2 +- src/backend/replication/logical/snapbuild.c | 9 +- src/backend/replication/pgoutput/pgoutput.c | 18 ---- src/include/commands/publicationcmds.h | 1 + 6 files changed, 118 insertions(+), 23 deletions(-) diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c index 4f99ebb447..395fe530b3 100644 --- a/src/backend/commands/alter.c +++ b/src/backend/commands/alter.c @@ -399,6 +399,9 @@ ExecRenameStmt(RenameStmt *stmt) case OBJECT_TYPE: return RenameType(stmt); + case OBJECT_PUBLICATION: + return RenamePublication(stmt->subname, stmt->newname); + case OBJECT_AGGREGATE: case OBJECT_COLLATION: case OBJECT_CONVERSION: @@ -416,7 +419,6 @@ ExecRenameStmt(RenameStmt *stmt) case OBJECT_TSDICTIONARY: case OBJECT_TSPARSER: case OBJECT_TSTEMPLATE: - case OBJECT_PUBLICATION: case OBJECT_SUBSCRIPTION: { ObjectAddress address; diff --git a/src/backend/commands/publicationcmds.c b/src/backend/commands/publicationcmds.c index d6ffef374e..ab380c60be 100644 --- a/src/backend/commands/publicationcmds.c +++ b/src/backend/commands/publicationcmds.c @@ -433,6 +433,87 @@ pub_collist_contains_invalid_column(Oid pubid, Relation relation, List *ancestor return result; } +/* + * Execute ALTER PUBLICATION RENAME + */ +ObjectAddress +RenamePublication(const char *oldname, const char *newname) +{ + Relation rel; + HeapTuple tup; + ObjectAddress address; + Form_pg_publication pubform; + bool replaces[Natts_pg_publication]; + bool 
nulls[Natts_pg_publication]; + Datum values[Natts_pg_publication]; + + rel = table_open(PublicationRelationId, RowExclusiveLock); + + tup = SearchSysCacheCopy1(PUBLICATIONNAME, + CStringGetDatum(oldname)); + + if (!HeapTupleIsValid(tup)) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("publication \"%s\" does not exist", + oldname))); + + pubform = (Form_pg_publication) GETSTRUCT(tup); + + /* must be owner */ + if (!object_ownercheck(PublicationRelationId, pubform->oid, GetUserId())) + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_PUBLICATION, + NameStr(pubform->pubname)); + + /* Everything ok, form a new tuple. */ + memset(values, 0, sizeof(values)); + memset(nulls, false, sizeof(nulls)); + memset(replaces, false, sizeof(replaces)); + + /* Only update the pubname */ + values[Anum_pg_publication_pubname - 1] = + DirectFunctionCall1(namein, CStringGetDatum(newname)); + replaces[Anum_pg_publication_pubname - 1] = true; + + tup = heap_modify_tuple(tup, RelationGetDescr(rel), values, nulls, + replaces); + + /* Invalidate the relcache. */ + if (pubform->puballtables) + { + CacheInvalidateRelcacheAll(); + } + else + { + List *relids = NIL; + List *schemarelids = NIL; + + /* + * For partition table, when we insert data, get_rel_sync_entry is + * called and a hash entry is created for the corresponding leaf table. + * So invalidating the leaf nodes would be sufficient here. 
+ */ + relids = GetPublicationRelations(pubform->oid, + PUBLICATION_PART_LEAF); + schemarelids = GetAllSchemaPublicationRelations(pubform->oid, + PUBLICATION_PART_LEAF); + + relids = list_concat_unique_oid(relids, schemarelids); + + InvalidatePublicationRels(relids); + } + + CatalogTupleUpdate(rel, &tup->t_self, tup); + + ObjectAddressSet(address, PublicationRelationId, pubform->oid); + + heap_freetuple(tup); + + table_close(rel, RowExclusiveLock); + + return address; +} + /* check_functions_in_node callback */ static bool contain_mutable_or_user_functions_checker(Oid func_id, void *context) @@ -1920,6 +2001,32 @@ AlterPublicationOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId) } form->pubowner = newOwnerId; + + /* Invalidate the relcache. */ + if (form->puballtables) + { + CacheInvalidateRelcacheAll(); + } + else + { + List *relids = NIL; + List *schemarelids = NIL; + + /* + * For partition table, when we insert data, get_rel_sync_entry is + * called and a hash entry is created for the corresponding leaf table. + * So invalidating the leaf nodes would be sufficient here. 
+ */ + relids = GetPublicationRelations(form->oid, + PUBLICATION_PART_LEAF); + schemarelids = GetAllSchemaPublicationRelations(form->oid, + PUBLICATION_PART_LEAF); + + relids = list_concat_unique_oid(relids, schemarelids); + + InvalidatePublicationRels(relids); + } + CatalogTupleUpdate(rel, &tup->t_self, tup); /* Update owner dependency reference */ diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y index 4aa8646af7..ec10bfdd8c 100644 --- a/src/backend/parser/gram.y +++ b/src/backend/parser/gram.y @@ -9466,7 +9466,7 @@ RenameStmt: ALTER AGGREGATE aggregate_with_argtypes RENAME TO name RenameStmt *n = makeNode(RenameStmt); n->renameType = OBJECT_PUBLICATION; - n->object = (Node *) makeString($3); + n->subname = $3; n->newname = $6; n->missing_ok = false; $$ = (Node *) n; diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c index 1f7c24cad0..d0a5e7d026 100644 --- a/src/backend/replication/logical/snapbuild.c +++ b/src/backend/replication/logical/snapbuild.c @@ -867,13 +867,15 @@ SnapBuildProcessNewCid(SnapBuild *builder, TransactionId xid, * catalog contents). */ static void -SnapBuildDistributeSnapshotAndInval(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid) +SnapBuildDistributeSnapshotAndInval(SnapBuild *builder, XLogRecPtr lsn, + TransactionId xid) { dlist_iter txn_i; ReorderBufferTXN *txn; ReorderBufferTXN *curr_txn; - curr_txn = ReorderBufferTXNByXid(builder->reorder, xid, false, NULL, InvalidXLogRecPtr, false); + curr_txn = ReorderBufferTXNByXid(builder->reorder, xid, false, NULL, + InvalidXLogRecPtr, false); /* * Iterate through all toplevel transactions. 
This can include @@ -923,7 +925,8 @@ SnapBuildDistributeSnapshotAndInval(SnapBuild *builder, XLogRecPtr lsn, Transact */ if (txn->xid != xid && curr_txn->ninvalidations > 0) ReorderBufferAddInvalidations(builder->reorder, txn->xid, lsn, - curr_txn->ninvalidations, curr_txn->invalidations); + curr_txn->ninvalidations, + curr_txn->invalidations); } } diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c index 00e7024563..b8429be8cf 100644 --- a/src/backend/replication/pgoutput/pgoutput.c +++ b/src/backend/replication/pgoutput/pgoutput.c @@ -1739,12 +1739,6 @@ static void publication_invalidation_cb(Datum arg, int cacheid, uint32 hashvalue) { publications_valid = false; - - /* - * Also invalidate per-relation cache so that next time the filtering info - * is checked it will be updated with the new publication settings. - */ - rel_sync_cache_publication_cb(arg, cacheid, hashvalue); } /* @@ -1920,18 +1914,6 @@ init_rel_sync_cache(MemoryContext cachectx) rel_sync_cache_publication_cb, (Datum) 0); - /* - * Flush all cache entries after any publication changes. (We need no - * callback entry for pg_publication, because publication_invalidation_cb - * will take care of it.) 
- */ - CacheRegisterSyscacheCallback(PUBLICATIONRELMAP, - rel_sync_cache_publication_cb, - (Datum) 0); - CacheRegisterSyscacheCallback(PUBLICATIONNAMESPACEMAP, - rel_sync_cache_publication_cb, - (Datum) 0); - relation_callbacks_registered = true; } diff --git a/src/include/commands/publicationcmds.h b/src/include/commands/publicationcmds.h index 5487c571f6..b953193812 100644 --- a/src/include/commands/publicationcmds.h +++ b/src/include/commands/publicationcmds.h @@ -35,5 +35,6 @@ extern bool pub_rf_contains_invalid_column(Oid pubid, Relation relation, List *ancestors, bool pubviaroot); extern bool pub_collist_contains_invalid_column(Oid pubid, Relation relation, List *ancestors, bool pubviaroot); +extern ObjectAddress RenamePublication(const char *oldname, const char *newname); #endif /* PUBLICATIONCMDS_H */ -- 2.34.1