Hi,

Based on the investigation and (lack of) progress so far, I'll revert part
of the COPY FREEZE improvements shortly. I'll keep the initial 7db0cd2145
changes tweaking heap_multi_insert, and remove most of 39b66a91bd (except
for the heap_xlog_multi_insert bit).
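To make it concrete, the heap_insert() logic that goes away is essentially
the following. This is condensed from the attached patch, so it's only an
illustration of the removed code, not something that compiles on its own:

	/* reverted 39b66a91bd logic in heap_insert(), condensed */
	if (options & HEAP_INSERT_FROZEN)
	{
		page = BufferGetPage(buffer);

		/* inserting into an empty page, or one already marked all-frozen? */
		starting_with_empty_page = PageGetMaxOffsetNumber(page) == 0;

		if (visibilitymap_pin_ok(BufferGetBlockNumber(buffer), vmbuffer))
			vmstatus = visibilitymap_get_status(relation,
												BufferGetBlockNumber(buffer),
												&vmbuffer);

		if (starting_with_empty_page || (vmstatus & VISIBILITYMAP_ALL_FROZEN))
			all_frozen_set = true;
	}

	/*
	 * ... and later, after RelationPutHeapTuple(), instead of clearing the
	 * all-visible bit we keep it and set both VM bits right away (unless
	 * the VM already had all-frozen set) ...
	 */
	if (all_frozen_set)
	{
		PageSetAllVisible(page);
		visibilitymap_set(relation, BufferGetBlockNumber(buffer), buffer,
						  InvalidXLogRecPtr, vmbuffer,
						  InvalidTransactionId,
						  VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
	}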
This should address the small 5% regression in refresh matview. I have no
other ideas for how to fix that, short of adding a user-level option to the
REFRESH MATERIALIZED VIEW command so that users can opt in or out.

Attached is the revert patch - I'll get it committed in the next day or
two, once the tests complete (running with CCA, so it takes time).

regards


On 5/25/21 12:30 AM, Tomas Vondra wrote:
> On 5/24/21 8:21 PM, Andres Freund wrote:
>> Hi,
>>
>> On 2021-05-24 12:37:18 +0200, Tomas Vondra wrote:
>>> Another option might be changes in the binary layout - 5% change is well
>>> within the range that could be attributed to this, but it feels very
>>> hand-wavy and more like an excuse than real analysis.
>>
>> I don't think 5% is likely to be explained by binary layout unless you
>> look for an explicitly adverse layout.
>>
>
> Yeah, true. But I'm out of ideas what might be causing the regression
> and how to fix it :-(
>
>>
>>> Hmmm, thanks for reminding us that patch. Why did we reject that approach in
>>> favor of the current one?
>>
>> Don't know about others, but I think it's way too fragile.
>>
>
> Is it really that fragile? Any particular risks you have in mind? Maybe
> we could protect against that somehow ... Anyway, that change would
> certainly be for PG15.
>
> regards
>

-- 
Tomas Vondra
EnterpriseDB: http://www.enterprisedb.com
The Enterprise PostgreSQL Company
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index bd60129aeb..2433998f39 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -2063,12 +2063,8 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
 	TransactionId xid = GetCurrentTransactionId();
 	HeapTuple	heaptup;
 	Buffer		buffer;
-	Page		page = NULL;
 	Buffer		vmbuffer = InvalidBuffer;
-	bool		starting_with_empty_page;
 	bool		all_visible_cleared = false;
-	bool		all_frozen_set = false;
-	uint8		vmstatus = 0;
 
 	/* Cheap, simplistic check that the tuple matches the rel's rowtype. */
 	Assert(HeapTupleHeaderGetNatts(tup->t_data) <=
@@ -2085,36 +2081,11 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
 	/*
 	 * Find buffer to insert this tuple into.  If the page is all visible,
 	 * this will also pin the requisite visibility map page.
-	 *
-	 * Also pin visibility map page if COPY FREEZE inserts tuples into an
-	 * empty page.  See all_frozen_set below.
 	 */
 	buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
 									   InvalidBuffer, options, bistate,
 									   &vmbuffer, NULL);
-
-	/*
-	 * If we're inserting frozen entry into an empty page, set visibility map
-	 * bits and PageAllVisible() hint.
-	 *
-	 * If we're inserting frozen entry into already all_frozen page, preserve
-	 * this state.
-	 */
-	if (options & HEAP_INSERT_FROZEN)
-	{
-		page = BufferGetPage(buffer);
-
-		starting_with_empty_page = PageGetMaxOffsetNumber(page) == 0;
-
-		if (visibilitymap_pin_ok(BufferGetBlockNumber(buffer), vmbuffer))
-			vmstatus = visibilitymap_get_status(relation,
-												BufferGetBlockNumber(buffer), &vmbuffer);
-
-		if ((starting_with_empty_page || vmstatus & VISIBILITYMAP_ALL_FROZEN))
-			all_frozen_set = true;
-	}
-
 
 	/*
 	 * We're about to do the actual insert -- but check for conflict first, to
 	 * avoid possibly having to roll back work we've just done.
@@ -2138,14 +2109,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
 	RelationPutHeapTuple(relation, buffer, heaptup,
 						 (options & HEAP_INSERT_SPECULATIVE) != 0);
 
-	/*
-	 * If the page is all visible, need to clear that, unless we're only going
-	 * to add further frozen rows to it.
-	 *
-	 * If we're only adding already frozen rows to a page that was empty or
-	 * marked as all visible, mark it as all-visible.
-	 */
-	if (PageIsAllVisible(BufferGetPage(buffer)) && !(options & HEAP_INSERT_FROZEN))
+	if (PageIsAllVisible(BufferGetPage(buffer)))
 	{
 		all_visible_cleared = true;
 		PageClearAllVisible(BufferGetPage(buffer));
@@ -2153,13 +2117,6 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
 						ItemPointerGetBlockNumber(&(heaptup->t_self)),
 						vmbuffer, VISIBILITYMAP_VALID_BITS);
 	}
-	else if (all_frozen_set)
-	{
-		/* We only ever set all_frozen_set after reading the page. */
-		Assert(page);
-
-		PageSetAllVisible(page);
-	}
 
 	/*
 	 * XXX Should we set PageSetPrunable on this page ?
@@ -2207,8 +2164,6 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
 		xlrec.flags = 0;
 		if (all_visible_cleared)
 			xlrec.flags |= XLH_INSERT_ALL_VISIBLE_CLEARED;
-		if (all_frozen_set)
-			xlrec.flags = XLH_INSERT_ALL_FROZEN_SET;
 		if (options & HEAP_INSERT_SPECULATIVE)
 			xlrec.flags |= XLH_INSERT_IS_SPECULATIVE;
 		Assert(ItemPointerGetBlockNumber(&heaptup->t_self) == BufferGetBlockNumber(buffer));
@@ -2257,29 +2212,6 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
 
 	END_CRIT_SECTION();
 
-	/*
-	 * If we've frozen everything on the page, update the visibilitymap. We're
-	 * already holding pin on the vmbuffer.
-	 *
-	 * No need to update the visibilitymap if it had all_frozen bit set before
-	 * this insertion.
-	 */
-	if (all_frozen_set && ((vmstatus & VISIBILITYMAP_ALL_FROZEN) == 0))
-	{
-		Assert(PageIsAllVisible(page));
-		Assert(visibilitymap_pin_ok(BufferGetBlockNumber(buffer), vmbuffer));
-
-		/*
-		 * It's fine to use InvalidTransactionId here - this is only used when
-		 * HEAP_INSERT_FROZEN is specified, which intentionally violates
-		 * visibility rules.
-		 */
-		visibilitymap_set(relation, BufferGetBlockNumber(buffer), buffer,
-						  InvalidXLogRecPtr, vmbuffer,
-						  InvalidTransactionId,
-						  VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
-	}
-
 	UnlockReleaseBuffer(buffer);
 	if (vmbuffer != InvalidBuffer)
 		ReleaseBuffer(vmbuffer);
@@ -8946,10 +8878,6 @@ heap_xlog_insert(XLogReaderState *record)
 	ItemPointerSetBlockNumber(&target_tid, blkno);
 	ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);
 
-	/* check that the mutually exclusive flags are not both set */
-	Assert(!((xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED) &&
-			 (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)));
-
 	/*
 	 * The visibility map may need to be fixed even if the heap page is
 	 * already up-to-date.
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index d34edb4190..4097a7aa18 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -406,20 +406,20 @@ RelationGetBufferForTuple(Relation relation, Size len,
 		 * We have no cached target page, so ask the FSM for an initial
 		 * target.
 		 */
-		targetBlock = GetPageWithFreeSpace(relation, targetFreeSpace);
-	}
+		targetBlock = GetPageWithFreeSpace(relation, len + saveFreeSpace);
 
-	/*
-	 * If the FSM knows nothing of the rel, try the last page before we give
-	 * up and extend.  This avoids one-tuple-per-page syndrome during
-	 * bootstrapping or in a recently-started system.
-	 */
-	if (targetBlock == InvalidBlockNumber)
-	{
-		BlockNumber nblocks = RelationGetNumberOfBlocks(relation);
+		/*
+		 * If the FSM knows nothing of the rel, try the last page before we
+		 * give up and extend.  This avoids one-tuple-per-page syndrome during
+		 * bootstrapping or in a recently-started system.
+		 */
+		if (targetBlock == InvalidBlockNumber)
+		{
+			BlockNumber nblocks = RelationGetNumberOfBlocks(relation);
 
-		if (nblocks > 0)
-			targetBlock = nblocks - 1;
+			if (nblocks > 0)
+				targetBlock = nblocks - 1;
+		}
 	}
 
 loop:
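A note on the hio.c hunk, since it's not obvious why it belongs in a COPY
FREEZE revert: it just puts that part of RelationGetBufferForTuple() back
to its pre-39b66a91bd shape - the FSM request goes back to asking for
len + saveFreeSpace directly, and the "try the last page" fallback moves
back inside the branch where we have no cached target block. The resulting
flow is roughly this (paraphrasing the hunk above, so just a sketch):

	/* inside the branch where there is no cached target block */
	targetBlock = GetPageWithFreeSpace(relation, len + saveFreeSpace);

	/*
	 * If the FSM knows nothing of the rel, try the last page before we
	 * give up and extend.
	 */
	if (targetBlock == InvalidBlockNumber)
	{
		BlockNumber nblocks = RelationGetNumberOfBlocks(relation);

		if (nblocks > 0)
			targetBlock = nblocks - 1;
	}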