I'd like to see fastupdate=on in the tests too; right now they only cover the
case without fastupdate. Please add them.
Here are a couple of tests for the pending list (fastupdate = on).
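
To make it easier to see what these cover: outside the isolation tester, the
new fu1 permutations boil down to roughly the following interleaving (setup
taken from the attached spec; the s1/s2 prefixes mark two concurrent psql
sessions, so this is a sketch of the schedule rather than a single runnable
script):

    -- setup, as in predicate-gin.spec
    create table gin_tbl(id int4, p int4[]);
    create index ginidx on gin_tbl using gin(p) with (fastupdate = off);
    insert into gin_tbl select g, array[g, g*2, g*3] from generate_series(1, 10000) g;
    insert into gin_tbl select g, array[4,5,6] from generate_series(10001, 20000) g;

    -- s1: alter index ginidx set (fastupdate = on);
    -- s1: begin isolation level serializable; set enable_seqscan = off;
    -- s2: begin isolation level serializable; set enable_seqscan = off;
    -- s1: select count(*) from gin_tbl where p @> array[4,5];
    -- s2: select count(*) from gin_tbl where p @> array[5,6];
    -- s2: insert into gin_tbl select g, array[4,5] from generate_series(20051, 20100) g;
    -- s1: insert into gin_tbl select g, array[5,6] from generate_series(20001, 20050) g;
    -- s1: commit;
    -- s2: commit;
    --     ERROR:  could not serialize access due to read/write dependencies among transactions

The second commit is expected to fail with the serialization error, which is
what the expected output shows for the new fu1 permutations.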
--
Dmitry Ivanov
Postgres Professional: http://www.postgrespro.com
The Russian Postgres Company
diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c
index 37070b3b72..095b1192cb 100644
--- a/src/backend/access/gin/ginbtree.c
+++ b/src/backend/access/gin/ginbtree.c
@@ -17,6 +17,7 @@
#include "access/gin_private.h"
#include "access/ginxlog.h"
#include "access/xloginsert.h"
+#include "storage/predicate.h"
#include "miscadmin.h"
#include "utils/memutils.h"
#include "utils/rel.h"
@@ -515,6 +516,19 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
btree->fillRoot(btree, newrootpg,
BufferGetBlockNumber(lbuffer), newlpage,
BufferGetBlockNumber(rbuffer), newrpage);
+
+ if (GinPageIsLeaf(BufferGetPage(stack->buffer)))
+ {
+ /* root split: copy predicate locks to both new child pages */
+ PredicateLockPageSplit(btree->index,
+ BufferGetBlockNumber(stack->buffer),
+ BufferGetBlockNumber(lbuffer));
+
+ PredicateLockPageSplit(btree->index,
+ BufferGetBlockNumber(stack->buffer),
+ BufferGetBlockNumber(rbuffer));
+ }
+
}
else
{
@@ -524,6 +538,14 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
GinPageGetOpaque(newrpage)->rightlink = savedRightLink;
GinPageGetOpaque(newlpage)->flags |= GIN_INCOMPLETE_SPLIT;
GinPageGetOpaque(newlpage)->rightlink = BufferGetBlockNumber(rbuffer);
+
+ if (GinPageIsLeaf(BufferGetPage(stack->buffer)))
+ {
+ /* copy predicate locks from the page being split to the new right page */
+ PredicateLockPageSplit(btree->index,
+ BufferGetBlockNumber(stack->buffer),
+ BufferGetBlockNumber(rbuffer));
+ }
}
/*
diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c
index f9daaba52e..3fb4fc8264 100644
--- a/src/backend/access/gin/gindatapage.c
+++ b/src/backend/access/gin/gindatapage.c
@@ -19,6 +19,7 @@
#include "access/xloginsert.h"
#include "lib/ilist.h"
#include "miscadmin.h"
+#include "storage/predicate.h"
#include "utils/rel.h"
/*
@@ -1423,7 +1424,7 @@ disassembleLeaf(Page page)
* Any segments that acquire new items are decoded, and the new items are
* merged with the old items.
*
- * Returns true if any new items were added. False means they were all
+ * Returns true if any new items were added. false means they were all
* duplicates of existing items on the page.
*/
static bool
@@ -1759,7 +1760,7 @@ leafRepackItems(disassembledLeaf *leaf, ItemPointer remaining)
*/
BlockNumber
createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
- GinStatsData *buildStats)
+ GinStatsData *buildStats, Buffer entrybuffer)
{
BlockNumber blkno;
Buffer buffer;
@@ -1810,6 +1811,12 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
page = BufferGetPage(buffer);
blkno = BufferGetBlockNumber(buffer);
+ /*
+ * Copy any predicate locks from the entry tree leaf page (which held the
+ * posting list) to the root page of the new posting tree.
+ */
+ PredicateLockPageSplit(index, BufferGetBlockNumber(entrybuffer), blkno);
+
START_CRIT_SECTION();
PageRestoreTempPage(tmppage, page);
@@ -1904,6 +1911,7 @@ ginInsertItemPointers(Relation index, BlockNumber rootBlkno,
btree.itemptr = insertdata.items[insertdata.curitem];
stack = ginFindLeafPage(&btree, false, NULL);
+ GinCheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
ginInsertValue(&btree, stack, &insertdata, buildStats);
}
}
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index 6fe67f346d..63603859bc 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -17,8 +17,10 @@
#include "access/gin_private.h"
#include "access/relscan.h"
#include "miscadmin.h"
+#include "storage/predicate.h"
#include "utils/datum.h"
#include "utils/memutils.h"
+#include "utils/rel.h"
/* GUC parameter */
int GinFuzzySearchLimit = 0;
@@ -33,11 +35,18 @@ typedef struct pendingPosition
} pendingPosition;
+static void
+GinPredicateLockPage(Relation index, BlockNumber blkno, Snapshot snapshot)
+{
+ if (!GinGetUseFastUpdate(index))
+ PredicateLockPage(index, blkno, snapshot);
+}
+
/*
* Goes to the next page if current offset is outside of bounds
*/
static bool
-moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack)
+moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack, Snapshot snapshot)
{
Page page = BufferGetPage(stack->buffer);
@@ -73,6 +82,9 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
/* Descend to the leftmost leaf page */
stack = ginScanBeginPostingTree(&btree, index, rootPostingTree, snapshot);
buffer = stack->buffer;
+
+ GinPredicateLockPage(index, BufferGetBlockNumber(buffer), snapshot);
+
IncrBufferRefCount(buffer); /* prevent unpin in freeGinBtreeStack */
freeGinBtreeStack(stack);
@@ -94,6 +106,8 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
break; /* no more pages */
buffer = ginStepRight(buffer, index, GIN_SHARE);
+
+ GinPredicateLockPage(index, BufferGetBlockNumber(buffer), snapshot);
}
UnlockReleaseBuffer(buffer);
@@ -131,6 +145,8 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
attnum = scanEntry->attnum;
attr = TupleDescAttr(btree->ginstate->origTupdesc, attnum - 1);
+ GinPredicateLockPage(btree->index, BufferGetBlockNumber(stack->buffer), snapshot);
+
for (;;)
{
Page page;
@@ -141,7 +157,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
/*
* stack->off points to the interested entry, buffer is already locked
*/
- if (moveRightIfItNeeded(btree, stack) == false)
+ if (moveRightIfItNeeded(btree, stack, snapshot) == false)
return true;
page = BufferGetPage(stack->buffer);
@@ -250,7 +266,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
Datum newDatum;
GinNullCategory newCategory;
- if (moveRightIfItNeeded(btree, stack) == false)
+ if (moveRightIfItNeeded(btree, stack, snapshot) == false)
elog(ERROR, "lost saved point in index"); /* must not happen !!! */
page = BufferGetPage(stack->buffer);
@@ -323,6 +339,15 @@ restartScanEntry:
ginstate);
stackEntry = ginFindLeafPage(&btreeEntry, true, snapshot);
page = BufferGetPage(stackEntry->buffer);
+
+ /*
+ * If fast update is enabled, we acquire a predicate lock on the entire
+ * relation, since fast update postpones the insertion of tuples into the
+ * index structure and thereby prevents us from detecting rw conflicts.
+ */
+ if (GinGetUseFastUpdate(ginstate->index))
+ PredicateLockRelation(ginstate->index, snapshot);
+
/* ginFindLeafPage() will have already checked snapshot age. */
needUnlock = true;
@@ -391,6 +416,8 @@ restartScanEntry:
rootPostingTree, snapshot);
entry->buffer = stack->buffer;
+ GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
+
/*
* We keep buffer pinned because we need to prevent deletion of
* page during scan. See GIN's vacuum implementation. RefCount is
@@ -414,6 +441,8 @@ restartScanEntry:
}
else if (GinGetNPosting(itup) > 0)
{
+ GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(stackEntry->buffer), snapshot);
+
entry->list = ginReadTuple(ginstate, entry->attnum, itup,
&entry->nlist);
entry->predictNumberResult = entry->nlist;
@@ -493,7 +522,7 @@ startScanKey(GinState *ginstate, GinScanOpaque so, GinScanKey key)
for (i = 0; i < key->nentries - 1; i++)
{
- /* Pass all entries <= i as FALSE, and the rest as MAYBE */
+ /* Pass all entries <= i as false, and the rest as MAYBE */
for (j = 0; j <= i; j++)
key->entryRes[entryIndexes[j]] = GIN_FALSE;
for (j = i + 1; j < key->nentries; j++)
@@ -633,6 +662,8 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
entry->btree.fullScan = false;
stack = ginFindLeafPage(&entry->btree, true, snapshot);
+ GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(stack->buffer), snapshot);
+
/* we don't need the stack, just the buffer. */
entry->buffer = stack->buffer;
IncrBufferRefCount(entry->buffer);
@@ -677,6 +708,10 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
entry->buffer = ginStepRight(entry->buffer,
ginstate->index,
GIN_SHARE);
+
+ /* lock the posting tree leaf page we just stepped right onto */
+ GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
+
page = BufferGetPage(entry->buffer);
}
stepright = true;
@@ -1038,8 +1073,8 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key,
* lossy page even when none of the other entries match.
*
* Our strategy is to call the tri-state consistent function, with the
- * lossy-page entries set to MAYBE, and all the other entries FALSE. If it
- * returns FALSE, none of the lossy items alone are enough for a match, so
+ * lossy-page entries set to MAYBE, and all the other entries false. If it
+ * returns false, none of the lossy items alone are enough for a match, so
* we don't need to return a lossy-page pointer. Otherwise, return a
* lossy-page pointer to indicate that the whole heap page must be
* checked. (On subsequent calls, we'll do nothing until minItem is past
@@ -1733,6 +1768,13 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
return;
}
+ /*
+ * If fast update is disabled, but some items still exist in the pending
+ * list, then a predicate lock on the entire relation is required.
+ */
+ if (GinGetPendingListCleanupSize(scan->indexRelation))
+ PredicateLockRelation(scan->indexRelation, scan->xs_snapshot);
+
pos.pendingBuffer = ReadBuffer(scan->indexRelation, blkno);
LockBuffer(pos.pendingBuffer, GIN_SHARE);
pos.firstOffset = FirstOffsetNumber;
diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c
index 23f7285547..3faaf8adf4 100644
--- a/src/backend/access/gin/gininsert.c
+++ b/src/backend/access/gin/gininsert.c
@@ -22,6 +22,7 @@
#include "storage/bufmgr.h"
#include "storage/smgr.h"
#include "storage/indexfsm.h"
+#include "storage/predicate.h"
#include "utils/memutils.h"
#include "utils/rel.h"
@@ -48,7 +49,7 @@ static IndexTuple
addItemPointersToLeafTuple(GinState *ginstate,
IndexTuple old,
ItemPointerData *items, uint32 nitem,
- GinStatsData *buildStats)
+ GinStatsData *buildStats, Buffer buffer)
{
OffsetNumber attnum;
Datum key;
@@ -99,7 +100,8 @@ addItemPointersToLeafTuple(GinState *ginstate,
postingRoot = createPostingTree(ginstate->index,
oldItems,
oldNPosting,
- buildStats);
+ buildStats,
+ buffer);
/* Now insert the TIDs-to-be-added into the posting tree */
ginInsertItemPointers(ginstate->index, postingRoot,
@@ -127,7 +129,7 @@ static IndexTuple
buildFreshLeafTuple(GinState *ginstate,
OffsetNumber attnum, Datum key, GinNullCategory category,
ItemPointerData *items, uint32 nitem,
- GinStatsData *buildStats)
+ GinStatsData *buildStats, Buffer buffer)
{
IndexTuple res = NULL;
GinPostingList *compressedList;
@@ -157,7 +159,7 @@ buildFreshLeafTuple(GinState *ginstate,
* Initialize a new posting tree with the TIDs.
*/
postingRoot = createPostingTree(ginstate->index, items, nitem,
- buildStats);
+ buildStats, buffer);
/* And save the root link in the result tuple */
GinSetPostingTree(res, postingRoot);
@@ -217,17 +219,19 @@ ginEntryInsert(GinState *ginstate,
return;
}
+ GinCheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
/* modify an existing leaf entry */
itup = addItemPointersToLeafTuple(ginstate, itup,
- items, nitem, buildStats);
+ items, nitem, buildStats, stack->buffer);
insertdata.isDelete = true;
}
else
{
+ GinCheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
/* no match, so construct a new leaf entry */
itup = buildFreshLeafTuple(ginstate, attnum, key, category,
- items, nitem, buildStats);
+ items, nitem, buildStats, stack->buffer);
}
/* Insert the new or modified leaf tuple */
@@ -513,6 +517,12 @@ gininsert(Relation index, Datum *values, bool *isnull,
memset(&collector, 0, sizeof(GinTupleCollector));
+ /*
+ * Do not use GinCheckForSerializableConflictIn() here, because it will
+ * do nothing (it does actual work only with fastupdate off).
+ */
+ CheckForSerializableConflictIn(index, NULL, InvalidBuffer);
+
for (i = 0; i < ginstate->origTupdesc->natts; i++)
ginHeapTupleFastCollect(ginstate, &collector,
(OffsetNumber) (i + 1),
diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c
index 7bac7a1252..5632cc5a77 100644
--- a/src/backend/access/gin/ginutil.c
+++ b/src/backend/access/gin/ginutil.c
@@ -23,6 +23,7 @@
#include "miscadmin.h"
#include "storage/indexfsm.h"
#include "storage/lmgr.h"
+#include "storage/predicate.h"
#include "utils/builtins.h"
#include "utils/index_selfuncs.h"
#include "utils/typcache.h"
@@ -49,7 +50,7 @@ ginhandler(PG_FUNCTION_ARGS)
amroutine->amsearchnulls = false;
amroutine->amstorage = true;
amroutine->amclusterable = false;
- amroutine->ampredlocks = false;
+ amroutine->ampredlocks = true;
amroutine->amcanparallel = false;
amroutine->amkeytype = InvalidOid;
@@ -716,3 +717,10 @@ ginUpdateStats(Relation index, const GinStatsData *stats)
END_CRIT_SECTION();
}
+
+void
+GinCheckForSerializableConflictIn(Relation relation, HeapTuple tuple, Buffer buffer)
+{
+ if (!GinGetUseFastUpdate(relation))
+ CheckForSerializableConflictIn(relation, tuple, buffer);
+}
diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c
index 398532d80b..6bce58942b 100644
--- a/src/backend/access/gin/ginvacuum.c
+++ b/src/backend/access/gin/ginvacuum.c
@@ -22,6 +22,7 @@
#include "postmaster/autovacuum.h"
#include "storage/indexfsm.h"
#include "storage/lmgr.h"
+#include "storage/predicate.h"
#include "utils/memutils.h"
struct GinVacuumState
@@ -153,11 +154,18 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
LockBuffer(lBuffer, GIN_EXCLUSIVE);
+ page = BufferGetPage(dBuffer);
+ rightlink = GinPageGetOpaque(page)->rightlink;
+
+ /*
+ * Any insert which would have gone on the leaf block will now go to its
+ * right sibling.
+ */
+ PredicateLockPageCombine(gvs->index, deleteBlkno, rightlink);
+
START_CRIT_SECTION();
/* Unlink the page by changing left sibling's rightlink */
- page = BufferGetPage(dBuffer);
- rightlink = GinPageGetOpaque(page)->rightlink;
page = BufferGetPage(lBuffer);
GinPageGetOpaque(page)->rightlink = rightlink;
diff --git a/src/backend/storage/lmgr/README-SSI b/src/backend/storage/lmgr/README-SSI
index e221241f96..7ea5f0b48e 100644
--- a/src/backend/storage/lmgr/README-SSI
+++ b/src/backend/storage/lmgr/README-SSI
@@ -380,6 +380,17 @@ then be trusted to ripple up to all levels and locations where
conflicting predicate locks may exist. In case there is a page split,
we need to copy predicate lock from an original page to all new pages.
+ * GIN searches acquire predicate locks only on the leaf pages of the
+entry tree and the posting trees. A predicate lock is taken on an entry
+tree leaf page only when the entry stores its posting list directly; if
+the entry instead points to a posting tree, we skip locking the entry
+tree leaf page and lock only the posting tree leaf pages. If, however,
+fast update is enabled, a predicate lock on the whole index relation is
+required, because fast update postpones the insertion of tuples into the
+index structure by temporarily storing them in the pending list, which
+prevents us from detecting all r-w conflicts at the page level. During
+a page split, predicate locks are copied from the original page to the new page.
+
* The effects of page splits, overflows, consolidations, and
removals must be carefully reviewed to ensure that predicate locks
aren't "lost" during those operations, or kept with pages which could
diff --git a/src/include/access/gin_private.h b/src/include/access/gin_private.h
index a709596a7a..d1df3033a6 100644
--- a/src/include/access/gin_private.h
+++ b/src/include/access/gin_private.h
@@ -103,6 +103,8 @@ extern Datum *ginExtractEntries(GinState *ginstate, OffsetNumber attnum,
extern OffsetNumber gintuple_get_attrnum(GinState *ginstate, IndexTuple tuple);
extern Datum gintuple_get_key(GinState *ginstate, IndexTuple tuple,
GinNullCategory *category);
+extern void GinCheckForSerializableConflictIn(Relation relation,
+ HeapTuple tuple, Buffer buffer);
/* gininsert.c */
extern IndexBuildResult *ginbuild(Relation heap, Relation index,
@@ -217,7 +219,7 @@ extern ItemPointer GinDataLeafPageGetItems(Page page, int *nitems, ItemPointerDa
extern int GinDataLeafPageGetItemsToTbm(Page page, TIDBitmap *tbm);
extern BlockNumber createPostingTree(Relation index,
ItemPointerData *items, uint32 nitems,
- GinStatsData *buildStats);
+ GinStatsData *buildStats, Buffer entrybuffer);
extern void GinDataPageAddPostingItem(Page page, PostingItem *data, OffsetNumber offset);
extern void GinPageDeletePostingItem(Page page, OffsetNumber offset);
extern void ginInsertItemPointers(Relation index, BlockNumber rootBlkno,
diff --git a/src/test/isolation/expected/predicate-gin.out b/src/test/isolation/expected/predicate-gin.out
new file mode 100644
index 0000000000..368cff395d
--- /dev/null
+++ b/src/test/isolation/expected/predicate-gin.out
@@ -0,0 +1,888 @@
+Parsed test spec with 2 sessions
+
+starting permutation: rxy1 wx1 c1 rxy2 wy2 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count
+
+10000
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+ (20001, 20050) g;
+step c1: commit;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count
+
+10050
+step wy2: insert into gin_tbl select g, array[4,5] from
+ generate_series(20051, 20100) g;
+step c2: commit;
+
+starting permutation: rxy2 wy2 c2 rxy1 wx1 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count
+
+10000
+step wy2: insert into gin_tbl select g, array[4,5] from
+ generate_series(20051, 20100) g;
+step c2: commit;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count
+
+10050
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+ (20001, 20050) g;
+step c1: commit;
+
+starting permutation: rxy3 wx3 c1 rxy4 wy4 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+ p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count
+
+4
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (1, 50) g;
+step c1: commit;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+ p @> array[5000,10000] or p @> array[6000,12000] or
+ p @> array[8000,16000];
+count
+
+4
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy4 wy4 c2 rxy3 wx3 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+ p @> array[5000,10000] or p @> array[6000,12000] or
+ p @> array[8000,16000];
+count
+
+4
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (10000, 10050) g;
+step c2: commit;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+ p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count
+
+4
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (1, 50) g;
+step c1: commit;
+
+starting permutation: rxy1 wx1 rxy2 c1 wy2 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count
+
+10000
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+ (20001, 20050) g;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count
+
+10000
+step c1: commit;
+step wy2: insert into gin_tbl select g, array[4,5] from
+ generate_series(20051, 20100) g;
+ERROR: could not serialize access due to read/write dependencies among transactions
+step c2: commit;
+
+starting permutation: rxy1 wx1 rxy2 wy2 c1 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count
+
+10000
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+ (20001, 20050) g;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count
+
+10000
+step wy2: insert into gin_tbl select g, array[4,5] from
+ generate_series(20051, 20100) g;
+step c1: commit;
+step c2: commit;
+ERROR: could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 wx1 rxy2 wy2 c2 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count
+
+10000
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+ (20001, 20050) g;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count
+
+10000
+step wy2: insert into gin_tbl select g, array[4,5] from
+ generate_series(20051, 20100) g;
+step c2: commit;
+step c1: commit;
+ERROR: could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wx1 c1 wy2 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count
+
+10000
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count
+
+10000
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+ (20001, 20050) g;
+step c1: commit;
+step wy2: insert into gin_tbl select g, array[4,5] from
+ generate_series(20051, 20100) g;
+ERROR: could not serialize access due to read/write dependencies among transactions
+step c2: commit;
+
+starting permutation: rxy1 rxy2 wx1 wy2 c1 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count
+
+10000
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count
+
+10000
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+ (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from
+ generate_series(20051, 20100) g;
+step c1: commit;
+step c2: commit;
+ERROR: could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wx1 wy2 c2 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count
+
+10000
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count
+
+10000
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+ (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from
+ generate_series(20051, 20100) g;
+step c2: commit;
+step c1: commit;
+ERROR: could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 wx1 c1 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count
+
+10000
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count
+
+10000
+step wy2: insert into gin_tbl select g, array[4,5] from
+ generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+ (20001, 20050) g;
+step c1: commit;
+step c2: commit;
+ERROR: could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 wx1 c2 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count
+
+10000
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count
+
+10000
+step wy2: insert into gin_tbl select g, array[4,5] from
+ generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+ (20001, 20050) g;
+step c2: commit;
+step c1: commit;
+ERROR: could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 c2 wx1 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count
+
+10000
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count
+
+10000
+step wy2: insert into gin_tbl select g, array[4,5] from
+ generate_series(20051, 20100) g;
+step c2: commit;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+ (20001, 20050) g;
+ERROR: could not serialize access due to read/write dependencies among transactions
+step c1: commit;
+
+starting permutation: rxy2 rxy1 wx1 c1 wy2 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count
+
+10000
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count
+
+10000
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+ (20001, 20050) g;
+step c1: commit;
+step wy2: insert into gin_tbl select g, array[4,5] from
+ generate_series(20051, 20100) g;
+ERROR: could not serialize access due to read/write dependencies among transactions
+step c2: commit;
+
+starting permutation: rxy2 rxy1 wx1 wy2 c1 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count
+
+10000
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count
+
+10000
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+ (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from
+ generate_series(20051, 20100) g;
+step c1: commit;
+step c2: commit;
+ERROR: could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wx1 wy2 c2 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count
+
+10000
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count
+
+10000
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+ (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from
+ generate_series(20051, 20100) g;
+step c2: commit;
+step c1: commit;
+ERROR: could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 wx1 c1 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count
+
+10000
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count
+
+10000
+step wy2: insert into gin_tbl select g, array[4,5] from
+ generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+ (20001, 20050) g;
+step c1: commit;
+step c2: commit;
+ERROR: could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 wx1 c2 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count
+
+10000
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count
+
+10000
+step wy2: insert into gin_tbl select g, array[4,5] from
+ generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+ (20001, 20050) g;
+step c2: commit;
+step c1: commit;
+ERROR: could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 c2 wx1 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count
+
+10000
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count
+
+10000
+step wy2: insert into gin_tbl select g, array[4,5] from
+ generate_series(20051, 20100) g;
+step c2: commit;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+ (20001, 20050) g;
+ERROR: could not serialize access due to read/write dependencies among transactions
+step c1: commit;
+
+starting permutation: rxy2 wy2 rxy1 wx1 c1 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count
+
+10000
+step wy2: insert into gin_tbl select g, array[4,5] from
+ generate_series(20051, 20100) g;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count
+
+10000
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+ (20001, 20050) g;
+step c1: commit;
+step c2: commit;
+ERROR: could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 wy2 rxy1 wx1 c2 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count
+
+10000
+step wy2: insert into gin_tbl select g, array[4,5] from
+ generate_series(20051, 20100) g;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count
+
+10000
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+ (20001, 20050) g;
+step c2: commit;
+step c1: commit;
+ERROR: could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 wy2 rxy1 c2 wx1 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count
+
+10000
+step wy2: insert into gin_tbl select g, array[4,5] from
+ generate_series(20051, 20100) g;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count
+
+10000
+step c2: commit;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+ (20001, 20050) g;
+ERROR: could not serialize access due to read/write dependencies among transactions
+step c1: commit;
+
+starting permutation: rxy3 wx3 rxy4 c1 wy4 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+ p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count
+
+4
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (1, 50) g;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+ p @> array[5000,10000] or p @> array[6000,12000] or
+ p @> array[8000,16000];
+count
+
+4
+step c1: commit;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy3 wx3 rxy4 wy4 c1 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+ p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count
+
+4
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (1, 50) g;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+ p @> array[5000,10000] or p @> array[6000,12000] or
+ p @> array[8000,16000];
+count
+
+4
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (10000, 10050) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy3 wx3 rxy4 wy4 c2 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+ p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count
+
+4
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (1, 50) g;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+ p @> array[5000,10000] or p @> array[6000,12000] or
+ p @> array[8000,16000];
+count
+
+4
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (10000, 10050) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy3 rxy4 wx3 c1 wy4 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+ p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count
+
+4
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+ p @> array[5000,10000] or p @> array[6000,12000] or
+ p @> array[8000,16000];
+count
+
+4
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (1, 50) g;
+step c1: commit;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy3 rxy4 wx3 wy4 c1 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+ p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count
+
+4
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+ p @> array[5000,10000] or p @> array[6000,12000] or
+ p @> array[8000,16000];
+count
+
+4
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (10000, 10050) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy3 rxy4 wx3 wy4 c2 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+ p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count
+
+4
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+ p @> array[5000,10000] or p @> array[6000,12000] or
+ p @> array[8000,16000];
+count
+
+4
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (10000, 10050) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy3 rxy4 wy4 wx3 c1 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+ p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count
+
+4
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+ p @> array[5000,10000] or p @> array[6000,12000] or
+ p @> array[8000,16000];
+count
+
+4
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (1, 50) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy3 rxy4 wy4 wx3 c2 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+ p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count
+
+4
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+ p @> array[5000,10000] or p @> array[6000,12000] or
+ p @> array[8000,16000];
+count
+
+4
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (1, 50) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy3 rxy4 wy4 c2 wx3 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+ p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count
+
+4
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+ p @> array[5000,10000] or p @> array[6000,12000] or
+ p @> array[8000,16000];
+count
+
+4
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (10000, 10050) g;
+step c2: commit;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (1, 50) g;
+step c1: commit;
+
+starting permutation: rxy4 rxy3 wx3 c1 wy4 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+ p @> array[5000,10000] or p @> array[6000,12000] or
+ p @> array[8000,16000];
+count
+
+4
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+ p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count
+
+4
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (1, 50) g;
+step c1: commit;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy4 rxy3 wx3 wy4 c1 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+ p @> array[5000,10000] or p @> array[6000,12000] or
+ p @> array[8000,16000];
+count
+
+4
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+ p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count
+
+4
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (10000, 10050) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy4 rxy3 wx3 wy4 c2 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+ p @> array[5000,10000] or p @> array[6000,12000] or
+ p @> array[8000,16000];
+count
+
+4
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+ p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count
+
+4
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (10000, 10050) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy4 rxy3 wy4 wx3 c1 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+ p @> array[5000,10000] or p @> array[6000,12000] or
+ p @> array[8000,16000];
+count
+
+4
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+ p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count
+
+4
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (1, 50) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy4 rxy3 wy4 wx3 c2 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+ p @> array[5000,10000] or p @> array[6000,12000] or
+ p @> array[8000,16000];
+count
+
+4
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+ p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count
+
+4
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (1, 50) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy4 rxy3 wy4 c2 wx3 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+ p @> array[5000,10000] or p @> array[6000,12000] or
+ p @> array[8000,16000];
+count
+
+4
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+ p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count
+
+4
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (10000, 10050) g;
+step c2: commit;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (1, 50) g;
+step c1: commit;
+
+starting permutation: rxy4 wy4 rxy3 wx3 c1 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+ p @> array[5000,10000] or p @> array[6000,12000] or
+ p @> array[8000,16000];
+count
+
+4
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (10000, 10050) g;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+ p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count
+
+4
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (1, 50) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy4 wy4 rxy3 wx3 c2 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+ p @> array[5000,10000] or p @> array[6000,12000] or
+ p @> array[8000,16000];
+count
+
+4
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (10000, 10050) g;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+ p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count
+
+4
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (1, 50) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy4 wy4 rxy3 c2 wx3 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+ p @> array[5000,10000] or p @> array[6000,12000] or
+ p @> array[8000,16000];
+count
+
+4
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (10000, 10050) g;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+ p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count
+
+4
+step c2: commit;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (1, 50) g;
+step c1: commit;
+
+starting permutation: fu1 rxy1 wx1 c1 rxy2 wy2 c2
+step fu1: alter index ginidx set (fastupdate = on);
+ commit;
+ begin isolation level serializable;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count
+
+10000
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+ (20001, 20050) g;
+step c1: commit;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count
+
+10050
+step wy2: insert into gin_tbl select g, array[4,5] from
+ generate_series(20051, 20100) g;
+step c2: commit;
+
+starting permutation: fu1 rxy3 wx3 c1 rxy4 wy4 c2
+step fu1: alter index ginidx set (fastupdate = on);
+ commit;
+ begin isolation level serializable;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+ p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count
+
+4
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (1, 50) g;
+step c1: commit;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+ p @> array[5000,10000] or p @> array[6000,12000] or
+ p @> array[8000,16000];
+count
+
+4
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (10000, 10050) g;
+step c2: commit;
+
+starting permutation: fu1 rxy1 wx1 rxy2 c1 wy2 c2
+step fu1: alter index ginidx set (fastupdate = on);
+ commit;
+ begin isolation level serializable;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count
+
+10000
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+ (20001, 20050) g;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count
+
+10000
+step c1: commit;
+step wy2: insert into gin_tbl select g, array[4,5] from
+ generate_series(20051, 20100) g;
+ERROR: could not serialize access due to read/write dependencies among transactions
+step c2: commit;
+
+starting permutation: fu1 rxy1 rxy2 wy2 wx1 c1 c2
+step fu1: alter index ginidx set (fastupdate = on);
+ commit;
+ begin isolation level serializable;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count
+
+10000
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count
+
+10000
+step wy2: insert into gin_tbl select g, array[4,5] from
+ generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+ (20001, 20050) g;
+step c1: commit;
+step c2: commit;
+ERROR: could not serialize access due to read/write dependencies among transactions
+
+starting permutation: fu1 rxy2 rxy1 wy2 c2 wx1 c1
+step fu1: alter index ginidx set (fastupdate = on);
+ commit;
+ begin isolation level serializable;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count
+
+10000
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count
+
+10000
+step wy2: insert into gin_tbl select g, array[4,5] from
+ generate_series(20051, 20100) g;
+step c2: commit;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+ (20001, 20050) g;
+ERROR: could not serialize access due to read/write dependencies among transactions
+step c1: commit;
+
+starting permutation: fu1 rxy3 wx3 rxy4 c1 wy4 c2
+step fu1: alter index ginidx set (fastupdate = on);
+ commit;
+ begin isolation level serializable;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+ p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count
+
+4
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (1, 50) g;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+ p @> array[5000,10000] or p @> array[6000,12000] or
+ p @> array[8000,16000];
+count
+
+4
+step c1: commit;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (10000, 10050) g;
+step c2: commit;
+
+starting permutation: fu1 rxy4 rxy3 wx3 c1 wy4 c2
+step fu1: alter index ginidx set (fastupdate = on);
+ commit;
+ begin isolation level serializable;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+ p @> array[5000,10000] or p @> array[6000,12000] or
+ p @> array[8000,16000];
+count
+
+4
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+ p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count
+
+4
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (1, 50) g;
+step c1: commit;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (10000, 10050) g;
+ERROR: could not serialize access due to read/write dependencies among transactions
+step c2: commit;
+
+starting permutation: fu1 rxy4 rxy3 wx3 wy4 c1 c2
+step fu1: alter index ginidx set (fastupdate = on);
+ commit;
+ begin isolation level serializable;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+ p @> array[5000,10000] or p @> array[6000,12000] or
+ p @> array[8000,16000];
+count
+
+4
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+ p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count
+
+4
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+ (10000, 10050) g;
+step c1: commit;
+step c2: commit;
+ERROR: could not serialize access due to read/write dependencies among transactions
diff --git a/src/test/isolation/isolation_schedule b/src/test/isolation/isolation_schedule
index 53e1f192b0..d3965fe73f 100644
--- a/src/test/isolation/isolation_schedule
+++ b/src/test/isolation/isolation_schedule
@@ -67,3 +67,4 @@ test: vacuum-reltuples
test: timeouts
test: vacuum-concurrent-drop
test: predicate-gist
+test: predicate-gin
diff --git a/src/test/isolation/specs/predicate-gin.spec b/src/test/isolation/specs/predicate-gin.spec
new file mode 100644
index 0000000000..40cb6ddf73
--- /dev/null
+++ b/src/test/isolation/specs/predicate-gin.spec
@@ -0,0 +1,154 @@
+# Test for page-level predicate locking in a GIN index
+#
+# Tests verify serialization failures and check for reduced false positives.
+#
+# To verify serialization failures, queries and permutations are written in
+# such a way that an index scan (from one transaction) and an index insert
+# (from another transaction) try to access the same part (sub-tree) of the
+# index; to check for reduced false positives, they access different parts
+# (sub-trees) of the index.
+
+
+setup
+{
+ create table gin_tbl(id int4, p int4[]);
+ create index ginidx on gin_tbl using gin(p) with (fastupdate = off);
+ insert into gin_tbl select g, array[g, g*2,g*3] from generate_series(1, 10000) g;
+ insert into gin_tbl select g, array[4,5,6] from generate_series(10001, 20000) g;
+}
+
+teardown
+{
+ drop table gin_tbl;
+}
+
+session "s1"
+setup
+{
+ begin isolation level serializable;
+ set enable_seqscan=off;
+}
+
+# enable pending list for a small subset of tests
+step "fu1" { alter index ginidx set (fastupdate = on);
+ commit;
+ begin isolation level serializable; }
+
+step "rxy1" { select count(*) from gin_tbl where p @> array[4,5]; }
+step "wx1" { insert into gin_tbl select g, array[5,6] from generate_series
+ (20001, 20050) g; }
+step "rxy3" { select count(*) from gin_tbl where p @> array[1,2] or
+ p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000]; }
+step "wx3" { insert into gin_tbl select g, array[g,g*2] from generate_series
+ (1, 50) g; }
+step "c1" { commit; }
+
+session "s2"
+setup
+{
+ begin isolation level serializable;
+ set enable_seqscan=off;
+}
+
+step "rxy2" { select count(*) from gin_tbl where p @> array[5,6]; }
+step "wy2" { insert into gin_tbl select g, array[4,5] from
+ generate_series(20051, 20100) g; }
+step "rxy4" { select count(*) from gin_tbl where p @> array[4000,8000] or
+ p @> array[5000,10000] or p @> array[6000,12000] or
+ p @> array[8000,16000]; }
+step "wy4" { insert into gin_tbl select g, array[g,g*2] from generate_series
+ (10000, 10050) g; }
+step "c2" { commit; }
+
+
+# An index scan (from one transaction) and an index insert (from another
+# transaction) try to access the same part of the index, but one transaction
+# commits before the other begins, so there is no r-w conflict.
+
+permutation "rxy1" "wx1" "c1" "rxy2" "wy2" "c2"
+permutation "rxy2" "wy2" "c2" "rxy1" "wx1" "c1"
+
+# An index scan (from one transaction) and an index insert (from another
+# transaction) try to access different parts of the index, and one transaction
+# commits before the other begins, so there is no r-w conflict.
+
+permutation "rxy3" "wx3" "c1" "rxy4" "wy4" "c2"
+permutation "rxy4" "wy4" "c2" "rxy3" "wx3" "c1"
+
+
+# An index scan (from one transaction) and an index insert (from another
+# transaction) try to access the same part of the index, and one transaction
+# begins before the other commits, so there is an r-w conflict.
+
+permutation "rxy1" "wx1" "rxy2" "c1" "wy2" "c2"
+permutation "rxy1" "wx1" "rxy2" "wy2" "c1" "c2"
+permutation "rxy1" "wx1" "rxy2" "wy2" "c2" "c1"
+permutation "rxy1" "rxy2" "wx1" "c1" "wy2" "c2"
+permutation "rxy1" "rxy2" "wx1" "wy2" "c1" "c2"
+permutation "rxy1" "rxy2" "wx1" "wy2" "c2" "c1"
+permutation "rxy1" "rxy2" "wy2" "wx1" "c1" "c2"
+permutation "rxy1" "rxy2" "wy2" "wx1" "c2" "c1"
+permutation "rxy1" "rxy2" "wy2" "c2" "wx1" "c1"
+permutation "rxy2" "rxy1" "wx1" "c1" "wy2" "c2"
+permutation "rxy2" "rxy1" "wx1" "wy2" "c1" "c2"
+permutation "rxy2" "rxy1" "wx1" "wy2" "c2" "c1"
+permutation "rxy2" "rxy1" "wy2" "wx1" "c1" "c2"
+permutation "rxy2" "rxy1" "wy2" "wx1" "c2" "c1"
+permutation "rxy2" "rxy1" "wy2" "c2" "wx1" "c1"
+permutation "rxy2" "wy2" "rxy1" "wx1" "c1" "c2"
+permutation "rxy2" "wy2" "rxy1" "wx1" "c2" "c1"
+permutation "rxy2" "wy2" "rxy1" "c2" "wx1" "c1"
+
+# An index scan (from one transaction) and an index insert (from another
+# transaction) try to access different parts of the index, so no r-w conflict.
+
+permutation "rxy3" "wx3" "rxy4" "c1" "wy4" "c2"
+permutation "rxy3" "wx3" "rxy4" "wy4" "c1" "c2"
+permutation "rxy3" "wx3" "rxy4" "wy4" "c2" "c1"
+permutation "rxy3" "rxy4" "wx3" "c1" "wy4" "c2"
+permutation "rxy3" "rxy4" "wx3" "wy4" "c1" "c2"
+permutation "rxy3" "rxy4" "wx3" "wy4" "c2" "c1"
+permutation "rxy3" "rxy4" "wy4" "wx3" "c1" "c2"
+permutation "rxy3" "rxy4" "wy4" "wx3" "c2" "c1"
+permutation "rxy3" "rxy4" "wy4" "c2" "wx3" "c1"
+permutation "rxy4" "rxy3" "wx3" "c1" "wy4" "c2"
+permutation "rxy4" "rxy3" "wx3" "wy4" "c1" "c2"
+permutation "rxy4" "rxy3" "wx3" "wy4" "c2" "c1"
+permutation "rxy4" "rxy3" "wy4" "wx3" "c1" "c2"
+permutation "rxy4" "rxy3" "wy4" "wx3" "c2" "c1"
+permutation "rxy4" "rxy3" "wy4" "c2" "wx3" "c1"
+permutation "rxy4" "wy4" "rxy3" "wx3" "c1" "c2"
+permutation "rxy4" "wy4" "rxy3" "wx3" "c2" "c1"
+permutation "rxy4" "wy4" "rxy3" "c2" "wx3" "c1"
+
+
+# Finally, a small subset of tests for the pending list (fastupdate = on)
+
+
+# An index scan (from one transaction) and an index insert (from another
+# transaction) try to access the same part of the index, but one transaction
+# commits before the other begins, so there is no r-w conflict.
+
+permutation "fu1" "rxy1" "wx1" "c1" "rxy2" "wy2" "c2"
+
+# An index scan (from one transaction) and an index insert (from another
+# transaction) try to access different parts of the index, and one transaction
+# commits before the other begins, so there is no r-w conflict.
+
+permutation "fu1" "rxy3" "wx3" "c1" "rxy4" "wy4" "c2"
+
+# An index scan (from one transaction) and an index insert (from another
+# transaction) try to access the same part of the index, and one transaction
+# begins before the other commits, so there is an r-w conflict.
+
+permutation "fu1" "rxy1" "wx1" "rxy2" "c1" "wy2" "c2"
+permutation "fu1" "rxy1" "rxy2" "wy2" "wx1" "c1" "c2"
+permutation "fu1" "rxy2" "rxy1" "wy2" "c2" "wx1" "c1"
+
+# An index scan (from one transaction) and an index insert (from another
+# transaction) try to access different parts of the index, but the pending
+# list is enabled, so an r-w conflict may still be reported.
+
+permutation "fu1" "rxy3" "wx3" "rxy4" "c1" "wy4" "c2"
+permutation "fu1" "rxy4" "rxy3" "wx3" "c1" "wy4" "c2"
+permutation "fu1" "rxy4" "rxy3" "wx3" "wy4" "c1" "c2"