Hi,

On 2021-04-28 00:44:47 +0900, Masahiko Sawada wrote:
> On Wed, Apr 28, 2021 at 12:26 AM Masahiko Sawada <sawada.m...@gmail.com>
> wrote:
> > > What Andres is suggesting (I think) is to modify ExecInsert() to pass a
> > > valid bistate to table_tuple_insert, instead of just NULL, and store the
> > > vmbuffer in it.
> >
> > Understood. This approach keeps using the same vmbuffer until we need
> > another vm page corresponding to the target heap page, which seems
> > better.
>
> But how is ExecInsert() related to REFRESH MATERIALIZED VIEW?
I was thinking of the CONCURRENTLY path for REFRESH MATERIALIZED VIEW, I
think. Or something.

That actually makes it easier - we already pass in a bistate in the
relevant paths. So if we add a current_vmbuf to BulkInsertStateData, we
can avoid needing to pin so often. It seems that'd end up with a good
bit cleaner and less risky code than the
skip_vmbuffer_for_frozen_tuple_insertion_v3.patch approach.

The current RelationGetBufferForTuple() interface / how it's used in
heapam.c doesn't make this quite as trivial as it could be...

Attached is a quick hack implementing this. For me it reduces the
overhead noticeably:

REFRESH MATERIALIZED VIEW mv;

before:
Time: 26542.333 ms (00:26.542)
after:
Time: 23105.047 ms (00:23.105)

Greetings,

Andres Freund
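To see where the win comes from: before this change, the insertion path in
heap_insert() ended every call with (taken from the minus/context lines of
the diff below)

    UnlockReleaseBuffer(buffer);
    if (vmbuffer != InvalidBuffer)
        ReleaseBuffer(vmbuffer);    /* VM-page pin dropped after each tuple */

so in a HEAP_INSERT_FROZEN bulk load the next tuple had to re-pin the very
same visibility map page via visibilitymap_pin(). A single VM page covers
tens of thousands of heap pages, so that's an almost entirely redundant
pin/unpin cycle per tuple in this workload. Caching the pin in the bistate
means it only moves when the target heap page crosses a VM-page boundary.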
diff --git i/src/include/access/hio.h w/src/include/access/hio.h
index 1d611287c08..6d8f18152f1 100644
--- i/src/include/access/hio.h
+++ w/src/include/access/hio.h
@@ -30,6 +30,7 @@ typedef struct BulkInsertStateData
 {
 	BufferAccessStrategy strategy;	/* our BULKWRITE strategy object */
 	Buffer		current_buf;	/* current insertion target page */
+	Buffer		current_vmbuf;
 } BulkInsertStateData;
 
diff --git i/src/backend/access/heap/heapam.c w/src/backend/access/heap/heapam.c
index 13396eb7f2c..5a63efc4386 100644
--- i/src/backend/access/heap/heapam.c
+++ w/src/backend/access/heap/heapam.c
@@ -2011,6 +2011,7 @@ GetBulkInsertState(void)
 	bistate = (BulkInsertState) palloc(sizeof(BulkInsertStateData));
 	bistate->strategy = GetAccessStrategy(BAS_BULKWRITE);
 	bistate->current_buf = InvalidBuffer;
+	bistate->current_vmbuf = InvalidBuffer;
 	return bistate;
 }
 
@@ -2020,8 +2021,7 @@
 void
 FreeBulkInsertState(BulkInsertState bistate)
 {
-	if (bistate->current_buf != InvalidBuffer)
-		ReleaseBuffer(bistate->current_buf);
+	ReleaseBulkInsertStatePin(bistate);
 	FreeAccessStrategy(bistate->strategy);
 	pfree(bistate);
 }
 
@@ -2033,8 +2033,15 @@
 void
 ReleaseBulkInsertStatePin(BulkInsertState bistate)
 {
 	if (bistate->current_buf != InvalidBuffer)
+	{
 		ReleaseBuffer(bistate->current_buf);
-	bistate->current_buf = InvalidBuffer;
+		bistate->current_buf = InvalidBuffer;
+	}
+	if (bistate->current_vmbuf != InvalidBuffer)
+	{
+		ReleaseBuffer(bistate->current_vmbuf);
+		bistate->current_vmbuf = InvalidBuffer;
+	}
 }
 
@@ -2277,8 +2284,12 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
 	}
 
 	UnlockReleaseBuffer(buffer);
-	if (vmbuffer != InvalidBuffer)
+
+	if (vmbuffer != InvalidBuffer &&
+		(!bistate || bistate->current_vmbuf != vmbuffer))
+	{
 		ReleaseBuffer(vmbuffer);
+	}
 
 	/*
 	 * If tuple is cachable, mark it for invalidation from the caches in case
@@ -2655,8 +2666,11 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
 	}
 
 	/* We're done with inserting all tuples, so release the last vmbuffer. */
-	if (vmbuffer != InvalidBuffer)
+	if (vmbuffer != InvalidBuffer &&
+		(!bistate || bistate->current_vmbuf != vmbuffer))
+	{
 		ReleaseBuffer(vmbuffer);
+	}
 
 	/*
 	 * We're done with the actual inserts.  Check for conflicts again, to
diff --git i/src/backend/access/heap/hio.c w/src/backend/access/heap/hio.c
index ffc89685bff..a573322bb6d 100644
--- i/src/backend/access/heap/hio.c
+++ w/src/backend/access/heap/hio.c
@@ -136,7 +136,8 @@ ReadBufferBI(Relation relation, BlockNumber targetBlock,
  * be less than block2.
  */
 static void
-GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2,
+GetVisibilityMapPins(Relation relation, BulkInsertState bistate,
+					 Buffer buffer1, Buffer buffer2,
 					 BlockNumber block1, BlockNumber block2,
 					 Buffer *vmbuffer1, Buffer *vmbuffer2)
 {
@@ -157,6 +158,12 @@ GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2,
 		if (!need_to_pin_buffer1 && !need_to_pin_buffer2)
 			return;
 
+		if (bistate && bistate->current_vmbuf != InvalidBuffer)
+		{
+			ReleaseBuffer(bistate->current_vmbuf);
+			bistate->current_vmbuf = InvalidBuffer;
+		}
+
 		/* We must unlock both buffers before doing any I/O. */
 		LockBuffer(buffer1, BUFFER_LOCK_UNLOCK);
 		if (buffer2 != InvalidBuffer && buffer2 != buffer1)
@@ -269,6 +276,26 @@ RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
 	FreeSpaceMapVacuumRange(relation, firstBlock, blockNum + 1);
 }
 
+static void
+visibilitymap_pin_bi(Relation rel, BlockNumber targetBlock,
+					 BulkInsertState bistate, Buffer *vmbuffer)
+{
+	if (bistate != NULL)
+	{
+		if (*vmbuffer != InvalidBuffer &&
+			*vmbuffer != bistate->current_vmbuf)
+		{
+			ReleaseBuffer(*vmbuffer);
+		}
+
+		visibilitymap_pin(rel, targetBlock, &bistate->current_vmbuf);
+
+		*vmbuffer = bistate->current_vmbuf;
+	}
+	else
+		visibilitymap_pin(rel, targetBlock, vmbuffer);
+}
+
 /*
  * RelationGetBufferForTuple
  *
@@ -443,14 +470,14 @@ loop:
 		/* easy case */
 		buffer = ReadBufferBI(relation, targetBlock, RBM_NORMAL, bistate);
 		if (PageIsAllVisible(BufferGetPage(buffer)))
-			visibilitymap_pin(relation, targetBlock, vmbuffer);
+			visibilitymap_pin_bi(relation, targetBlock, bistate, vmbuffer);
 
 		/*
 		 * If the page is empty, pin vmbuffer to set all_frozen bit later.
 		 */
 		if ((options & HEAP_INSERT_FROZEN) &&
 			(PageGetMaxOffsetNumber(BufferGetPage(buffer)) == 0))
-			visibilitymap_pin(relation, targetBlock, vmbuffer);
+			visibilitymap_pin_bi(relation, targetBlock, bistate, vmbuffer);
 
 		LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 	}
@@ -459,7 +486,7 @@ loop:
 		/* also easy case */
 		buffer = otherBuffer;
 		if (PageIsAllVisible(BufferGetPage(buffer)))
-			visibilitymap_pin(relation, targetBlock, vmbuffer);
+			visibilitymap_pin_bi(relation, targetBlock, bistate, vmbuffer);
 		LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 	}
 	else if (otherBlock < targetBlock)
@@ -467,7 +494,7 @@ loop:
 		/* lock other buffer first */
 		buffer = ReadBuffer(relation, targetBlock);
 		if (PageIsAllVisible(BufferGetPage(buffer)))
-			visibilitymap_pin(relation, targetBlock, vmbuffer);
+			visibilitymap_pin_bi(relation, targetBlock, bistate, vmbuffer);
 		LockBuffer(otherBuffer, BUFFER_LOCK_EXCLUSIVE);
 		LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 	}
@@ -476,7 +503,7 @@ loop:
 		/* lock target buffer first */
 		buffer = ReadBuffer(relation, targetBlock);
 		if (PageIsAllVisible(BufferGetPage(buffer)))
-			visibilitymap_pin(relation, targetBlock, vmbuffer);
+			visibilitymap_pin_bi(relation, targetBlock, bistate, vmbuffer);
 		LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 		LockBuffer(otherBuffer, BUFFER_LOCK_EXCLUSIVE);
 	}
@@ -503,11 +530,13 @@ loop:
 		 * done.
 		 */
 		if (otherBuffer == InvalidBuffer || targetBlock <= otherBlock)
-			GetVisibilityMapPins(relation, buffer, otherBuffer,
+			GetVisibilityMapPins(relation, bistate,
+								 buffer, otherBuffer,
 								 targetBlock, otherBlock,
 								 vmbuffer, vmbuffer_other);
 		else
-			GetVisibilityMapPins(relation, otherBuffer, buffer,
+			GetVisibilityMapPins(relation, bistate,
+								 otherBuffer, buffer,
 								 otherBlock, targetBlock,
 								 vmbuffer_other, vmbuffer);
@@ -644,7 +673,7 @@ loop:
 	if (options & HEAP_INSERT_FROZEN)
 	{
 		Assert(PageGetMaxOffsetNumber(BufferGetPage(buffer)) == 0);
-		visibilitymap_pin(relation, BufferGetBlockNumber(buffer), vmbuffer);
+		visibilitymap_pin_bi(relation, BufferGetBlockNumber(buffer), bistate, vmbuffer);
 	}
 
 	/*
@@ -686,7 +715,8 @@ loop:
 		 * use GetVisibilityMapPins to deal with the first case. In the
 		 * second case, just retry from start.
 		 */
-		GetVisibilityMapPins(relation, otherBuffer, buffer,
+		GetVisibilityMapPins(relation, bistate,
+							 otherBuffer, buffer,
 							 otherBlock, targetBlock,
 							 vmbuffer_other, vmbuffer);
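For clarity, the resulting pin lifecycle from a caller's perspective would
look roughly like this (hypothetical simplified loop, not part of the patch;
relation, tuples, ntuples and cid stand in for the caller's state):

    /* both current_buf and current_vmbuf start out as InvalidBuffer */
    BulkInsertState bistate = GetBulkInsertState();

    for (int i = 0; i < ntuples; i++)
    {
        /*
         * The VM-page pin taken for the first tuple is cached in
         * bistate->current_vmbuf and reused until an insertion targets a
         * heap page covered by a different VM page.
         */
        heap_insert(relation, tuples[i], cid, HEAP_INSERT_FROZEN, bistate);
    }

    /* drops current_buf and current_vmbuf via ReleaseBulkInsertStatePin() */
    FreeBulkInsertState(bistate);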