On 25.10.2019 20:00, Pavel Stehule wrote:
>
>> So except for the limitation mentioned above (which I do not
>> consider critical) there is only one problem which was not
>> addressed: maintaining statistics for GTT.
>> If all of the following conditions are true:
>>
>> 1) GTT are used in joins
>> 2) There are indexes defined for GTT
>> 3) Size and histogram of GTT in different backends can
>> significantly vary.
>> 4) ANALYZE was explicitly called for GTT
>>
>> then a query execution plan built in one backend will also be
>> used by other backends, where it can be inefficient.
>> I also do not consider this problem a "show stopper" for
>> adding GTT to Postgres.
> I think that's *definitely* a show stopper.
Well, if both you and Pavel think that it is really a "show stopper",
then this problem really has to be addressed.
I am slightly confused by this opinion, because Pavel has told me
himself that 99% of users never create indexes on temp tables
or run "analyze" on them. And without those, this problem is not
a problem at all.
Users don't run ANALYZE on temp tables in 99% of cases. It's true. But
the second fact is that users have a lot of problems because of it.
It's very similar to wrong statistics on persistent tables. When the
data are small, it is not a problem for users, although from my
perspective it's not optimal. When the data are not small, the problem
can be brutal. Temporary tables are not an exception. And users and
developers are people - we hear only about the fatal problems. There
are lots of unoptimized queries, but because the problem is not fatal,
it never gets reported. And lots of people have no idea how fast a
database can be. The knowledge of users and app developers is a sad
book.
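
For illustration, a hypothetical example of this kind of problem
(table and numbers are invented):

    create temp table big_tmp(id int, category int);
    insert into big_tmp select g, g % 10 from generate_series(1,1000000) g;
    -- no column statistics yet: the planner falls back to default
    -- selectivity for the predicate and estimates ~5000 rows
    explain select * from big_tmp where category = 1;
    analyze big_tmp;
    -- now the estimate is realistic (~100000 rows) and the plan changes
    explain select * from big_tmp where category = 1;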
Pavel
It seems to me that I have found a quite elegant solution for per-backend
statistics for GTT: I just insert them into the backend's catalog cache,
but not into the pg_statistic table itself.
To do it I had to add InsertSysCache/InsertCatCache functions which
insert a pinned entry into the corresponding cache.
I wonder if there are some pitfalls to such an approach?
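
If it works as intended, each backend should plan with its own
statistics after a local ANALYZE, roughly like this (a sketch of the
expected behaviour, not verified output of the patch):

    -- backend 1:
    create global temp table gtt(x int, y int);
    create index on gtt(y);
    insert into gtt select g, g from generate_series(1,1000000) g;
    analyze gtt; -- stored only in this backend's catalog cache

    -- backend 2:
    insert into gtt values (1, 1);
    analyze gtt; -- sees only its own single row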
New patch for GTT is attached.
--
Konstantin Knizhnik
Postgres Professional: http://www.postgrespro.com
The Russian Postgres Company
diff --git a/src/backend/access/brin/brin_revmap.c b/src/backend/access/brin/brin_revmap.c
index 647350c..ca5f22d 100644
--- a/src/backend/access/brin/brin_revmap.c
+++ b/src/backend/access/brin/brin_revmap.c
@@ -25,6 +25,7 @@
#include "access/brin_revmap.h"
#include "access/brin_tuple.h"
#include "access/brin_xlog.h"
+#include "access/brin.h"
#include "access/rmgr.h"
#include "access/xloginsert.h"
#include "miscadmin.h"
@@ -79,6 +80,11 @@ brinRevmapInitialize(Relation idxrel, BlockNumber *pagesPerRange,
meta = ReadBuffer(idxrel, BRIN_METAPAGE_BLKNO);
LockBuffer(meta, BUFFER_LOCK_SHARE);
page = BufferGetPage(meta);
+
+ if (GlobalTempRelationPageIsNotInitialized(idxrel, page))
+ brin_metapage_init(page, BrinGetPagesPerRange(idxrel),
+ BRIN_CURRENT_VERSION);
+
TestForOldSnapshot(snapshot, idxrel, page);
metadata = (BrinMetaPageData *) PageGetContents(page);
diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c
index 439a91b..8a6ac71 100644
--- a/src/backend/access/gin/ginfast.c
+++ b/src/backend/access/gin/ginfast.c
@@ -241,6 +241,16 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector)
metabuffer = ReadBuffer(index, GIN_METAPAGE_BLKNO);
metapage = BufferGetPage(metabuffer);
+ if (GlobalTempRelationPageIsNotInitialized(index, metapage))
+ {
+ Buffer rootbuffer = ReadBuffer(index, GIN_ROOT_BLKNO);
+ LockBuffer(rootbuffer, BUFFER_LOCK_EXCLUSIVE);
+ GinInitMetabuffer(metabuffer);
+ GinInitBuffer(rootbuffer, GIN_LEAF);
+ MarkBufferDirty(rootbuffer);
+ UnlockReleaseBuffer(rootbuffer);
+ }
+
/*
* An insertion to the pending list could logically belong anywhere in the
* tree, so it conflicts with all serializable scans. All scans acquire a
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index b18ae2b..41bab5d 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -1750,7 +1750,7 @@ collectMatchesForHeapRow(IndexScanDesc scan, pendingPosition *pos)
/*
* Collect all matched rows from pending list into bitmap.
*/
-static void
+static bool
scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
{
GinScanOpaque so = (GinScanOpaque) scan->opaque;
@@ -1774,6 +1774,12 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
LockBuffer(metabuffer, GIN_SHARE);
page = BufferGetPage(metabuffer);
TestForOldSnapshot(scan->xs_snapshot, scan->indexRelation, page);
+
+ if (GlobalTempRelationPageIsNotInitialized(scan->indexRelation, page))
+ {
+ UnlockReleaseBuffer(metabuffer);
+ return false;
+ }
blkno = GinPageGetMeta(page)->head;
/*
@@ -1784,7 +1790,7 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
{
/* No pending list, so proceed with normal scan */
UnlockReleaseBuffer(metabuffer);
- return;
+ return true;
}
pos.pendingBuffer = ReadBuffer(scan->indexRelation, blkno);
@@ -1840,6 +1846,7 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
}
pfree(pos.hasMatchKey);
+ return true;
}
@@ -1875,7 +1882,8 @@ gingetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
* to scan the main index before the pending list, since concurrent
* cleanup could then make us miss entries entirely.
*/
- scanPendingInsert(scan, tbm, &ntids);
+ if (!scanPendingInsert(scan, tbm, &ntids))
+ return 0;
/*
* Now scan the main index.
diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c
index 0cc8791..3215b6f 100644
--- a/src/backend/access/gist/gist.c
+++ b/src/backend/access/gist/gist.c
@@ -677,7 +677,10 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace,
if (!xlocked)
{
LockBuffer(stack->buffer, GIST_SHARE);
- gistcheckpage(state.r, stack->buffer);
+ if (stack->blkno == GIST_ROOT_BLKNO && GlobalTempRelationPageIsNotInitialized(state.r, BufferGetPage(stack->buffer)))
+ GISTInitBuffer(stack->buffer, F_LEAF);
+ else
+ gistcheckpage(state.r, stack->buffer);
}
stack->page = (Page) BufferGetPage(stack->buffer);
diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c
index 22d790d..4c52dbe 100644
--- a/src/backend/access/gist/gistget.c
+++ b/src/backend/access/gist/gistget.c
@@ -344,7 +344,10 @@ gistScanPage(IndexScanDesc scan, GISTSearchItem *pageItem,
buffer = ReadBuffer(scan->indexRelation, pageItem->blkno);
LockBuffer(buffer, GIST_SHARE);
PredicateLockPage(r, BufferGetBlockNumber(buffer), scan->xs_snapshot);
- gistcheckpage(scan->indexRelation, buffer);
+ if (pageItem->blkno == GIST_ROOT_BLKNO && GlobalTempRelationPageIsNotInitialized(r, BufferGetPage(buffer)))
+ GISTInitBuffer(buffer, F_LEAF);
+ else
+ gistcheckpage(scan->indexRelation, buffer);
page = BufferGetPage(buffer);
TestForOldSnapshot(scan->xs_snapshot, r, page);
opaque = GistPageGetOpaque(page);
diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c
index 45804d7..50b306a 100644
--- a/src/backend/access/gist/gistutil.c
+++ b/src/backend/access/gist/gistutil.c
@@ -1028,7 +1028,7 @@ gistGetFakeLSN(Relation rel)
{
static XLogRecPtr counter = FirstNormalUnloggedLSN;
- if (rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP)
+ if (RelationHasSessionScope(rel))
{
/*
* Temporary relations are only accessible in our session, so a simple
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index 838ee68..4f794b3 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -75,13 +75,20 @@ _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
buf = ReadBuffer(rel, blkno);
- if (access != HASH_NOLOCK)
- LockBuffer(buf, access);
-
/* ref count and lock type are correct */
- _hash_checkpage(rel, buf, flags);
-
+ if (blkno == HASH_METAPAGE && GlobalTempRelationPageIsNotInitialized(rel, BufferGetPage(buf)))
+ {
+ _hash_init(rel, 0, MAIN_FORKNUM);
+ if (access != HASH_NOLOCK)
+ LockBuffer(buf, access);
+ }
+ else
+ {
+ if (access != HASH_NOLOCK)
+ LockBuffer(buf, access);
+ _hash_checkpage(rel, buf, flags);
+ }
return buf;
}
@@ -339,7 +346,7 @@ _hash_init(Relation rel, double num_tuples, ForkNumber forkNum)
bool use_wal;
/* safety check */
- if (RelationGetNumberOfBlocksInFork(rel, forkNum) != 0)
+ if (rel->rd_rel->relpersistence != RELPERSISTENCE_SESSION && RelationGetNumberOfBlocksInFork(rel, forkNum) != 0)
elog(ERROR, "cannot initialize non-empty hash index \"%s\"",
RelationGetRelationName(rel));
diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c
index 2dd8821..92df373 100644
--- a/src/backend/access/heap/heapam_handler.c
+++ b/src/backend/access/heap/heapam_handler.c
@@ -673,6 +673,7 @@ heapam_relation_copy_data(Relation rel, const RelFileNode *newrnode)
* init fork of an unlogged relation.
*/
if (rel->rd_rel->relpersistence == RELPERSISTENCE_PERMANENT ||
+ rel->rd_rel->relpersistence == RELPERSISTENCE_SESSION ||
(rel->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED &&
forkNum == INIT_FORKNUM))
log_smgrcreate(newrnode, forkNum);
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 268f869..ed3ab70 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -763,7 +763,11 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
/* Read an existing block of the relation */
buf = ReadBuffer(rel, blkno);
LockBuffer(buf, access);
- _bt_checkpage(rel, buf);
+ /* Session temporary relation may not yet be initialized for this backend. */
+ if (blkno == BTREE_METAPAGE && GlobalTempRelationPageIsNotInitialized(rel, BufferGetPage(buf)))
+ _bt_initmetapage(BufferGetPage(buf), P_NONE, 0);
+ else
+ _bt_checkpage(rel, buf);
}
else
{
diff --git a/src/backend/access/spgist/spgutils.c b/src/backend/access/spgist/spgutils.c
index 45472db..a8497a2 100644
--- a/src/backend/access/spgist/spgutils.c
+++ b/src/backend/access/spgist/spgutils.c
@@ -106,6 +106,7 @@ spgGetCache(Relation index)
spgConfigIn in;
FmgrInfo *procinfo;
Buffer metabuffer;
+ Page metapage;
SpGistMetaPageData *metadata;
cache = MemoryContextAllocZero(index->rd_indexcxt,
@@ -155,12 +156,32 @@ spgGetCache(Relation index)
metabuffer = ReadBuffer(index, SPGIST_METAPAGE_BLKNO);
LockBuffer(metabuffer, BUFFER_LOCK_SHARE);
- metadata = SpGistPageGetMeta(BufferGetPage(metabuffer));
+ metapage = BufferGetPage(metabuffer);
+ metadata = SpGistPageGetMeta(metapage);
if (metadata->magicNumber != SPGIST_MAGIC_NUMBER)
- elog(ERROR, "index \"%s\" is not an SP-GiST index",
- RelationGetRelationName(index));
+ {
+ if (GlobalTempRelationPageIsNotInitialized(index, metapage))
+ {
+ Buffer rootbuffer = ReadBuffer(index, SPGIST_ROOT_BLKNO);
+ Buffer nullbuffer = ReadBuffer(index, SPGIST_NULL_BLKNO);
+
+ SpGistInitMetapage(metapage);
+
+ LockBuffer(rootbuffer, BUFFER_LOCK_EXCLUSIVE);
+ SpGistInitPage(BufferGetPage(rootbuffer), SPGIST_LEAF);
+ MarkBufferDirty(rootbuffer);
+ UnlockReleaseBuffer(rootbuffer);
+ LockBuffer(nullbuffer, BUFFER_LOCK_EXCLUSIVE);
+ SpGistInitPage(BufferGetPage(nullbuffer), SPGIST_LEAF | SPGIST_NULLS);
+ MarkBufferDirty(nullbuffer);
+ UnlockReleaseBuffer(nullbuffer);
+ }
+ else
+ elog(ERROR, "index \"%s\" is not an SP-GiST index",
+ RelationGetRelationName(index));
+ }
cache->lastUsedPages = metadata->lastUsedPages;
UnlockReleaseBuffer(metabuffer);
diff --git a/src/backend/catalog/catalog.c b/src/backend/catalog/catalog.c
index 1af31c2..e60bdb7 100644
--- a/src/backend/catalog/catalog.c
+++ b/src/backend/catalog/catalog.c
@@ -402,6 +402,9 @@ GetNewRelFileNode(Oid reltablespace, Relation pg_class, char relpersistence)
case RELPERSISTENCE_TEMP:
backend = BackendIdForTempRelations();
break;
+ case RELPERSISTENCE_SESSION:
+ backend = BackendIdForSessionRelations();
+ break;
case RELPERSISTENCE_UNLOGGED:
case RELPERSISTENCE_PERMANENT:
backend = InvalidBackendId;
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index f6c31cc..d943b57 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -3652,7 +3652,7 @@ reindex_relation(Oid relid, int flags, int options)
if (flags & REINDEX_REL_FORCE_INDEXES_UNLOGGED)
persistence = RELPERSISTENCE_UNLOGGED;
else if (flags & REINDEX_REL_FORCE_INDEXES_PERMANENT)
- persistence = RELPERSISTENCE_PERMANENT;
+ persistence = rel->rd_rel->relpersistence == RELPERSISTENCE_SESSION ? RELPERSISTENCE_SESSION : RELPERSISTENCE_PERMANENT;
else
persistence = rel->rd_rel->relpersistence;
diff --git a/src/backend/catalog/storage.c b/src/backend/catalog/storage.c
index 625af8d..1e192fa 100644
--- a/src/backend/catalog/storage.c
+++ b/src/backend/catalog/storage.c
@@ -93,6 +93,10 @@ RelationCreateStorage(RelFileNode rnode, char relpersistence)
backend = InvalidBackendId;
needs_wal = false;
break;
+ case RELPERSISTENCE_SESSION:
+ backend = BackendIdForSessionRelations();
+ needs_wal = false;
+ break;
case RELPERSISTENCE_PERMANENT:
backend = InvalidBackendId;
needs_wal = true;
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index 7accb95..9f2ea48 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -102,7 +102,7 @@ static int acquire_inherited_sample_rows(Relation onerel, int elevel,
HeapTuple *rows, int targrows,
double *totalrows, double *totaldeadrows);
static void update_attstats(Oid relid, bool inh,
- int natts, VacAttrStats **vacattrstats);
+ int natts, VacAttrStats **vacattrstats, bool is_global_temp);
static Datum std_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull);
static Datum ind_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull);
@@ -318,6 +318,7 @@ do_analyze_rel(Relation onerel, VacuumParams *params,
Oid save_userid;
int save_sec_context;
int save_nestlevel;
+ bool is_global_temp = onerel->rd_rel->relpersistence == RELPERSISTENCE_SESSION;
if (inh)
ereport(elevel,
@@ -575,14 +576,14 @@ do_analyze_rel(Relation onerel, VacuumParams *params,
* pg_statistic for columns we didn't process, we leave them alone.)
*/
update_attstats(RelationGetRelid(onerel), inh,
- attr_cnt, vacattrstats);
+ attr_cnt, vacattrstats, is_global_temp);
for (ind = 0; ind < nindexes; ind++)
{
AnlIndexData *thisdata = &indexdata[ind];
update_attstats(RelationGetRelid(Irel[ind]), false,
- thisdata->attr_cnt, thisdata->vacattrstats);
+ thisdata->attr_cnt, thisdata->vacattrstats, is_global_temp);
}
/*
@@ -1425,7 +1426,7 @@ acquire_inherited_sample_rows(Relation onerel, int elevel,
* by taking a self-exclusive lock on the relation in analyze_rel().
*/
static void
-update_attstats(Oid relid, bool inh, int natts, VacAttrStats **vacattrstats)
+update_attstats(Oid relid, bool inh, int natts, VacAttrStats **vacattrstats, bool is_global_temp)
{
Relation sd;
int attno;
@@ -1527,30 +1528,42 @@ update_attstats(Oid relid, bool inh, int natts, VacAttrStats **vacattrstats)
}
}
- /* Is there already a pg_statistic tuple for this attribute? */
- oldtup = SearchSysCache3(STATRELATTINH,
- ObjectIdGetDatum(relid),
- Int16GetDatum(stats->attr->attnum),
- BoolGetDatum(inh));
-
- if (HeapTupleIsValid(oldtup))
+ if (is_global_temp)
{
- /* Yes, replace it */
- stup = heap_modify_tuple(oldtup,
- RelationGetDescr(sd),
- values,
- nulls,
- replaces);
- ReleaseSysCache(oldtup);
- CatalogTupleUpdate(sd, &stup->t_self, stup);
+ stup = heap_form_tuple(RelationGetDescr(sd), values, nulls);
+ InsertSysCache(STATRELATTINH,
+ ObjectIdGetDatum(relid),
+ Int16GetDatum(stats->attr->attnum),
+ BoolGetDatum(inh),
+ 0,
+ stup);
}
else
{
- /* No, insert new tuple */
- stup = heap_form_tuple(RelationGetDescr(sd), values, nulls);
- CatalogTupleInsert(sd, stup);
- }
+ /* Is there already a pg_statistic tuple for this attribute? */
+ oldtup = SearchSysCache3(STATRELATTINH,
+ ObjectIdGetDatum(relid),
+ Int16GetDatum(stats->attr->attnum),
+ BoolGetDatum(inh));
+ if (HeapTupleIsValid(oldtup))
+ {
+ /* Yes, replace it */
+ stup = heap_modify_tuple(oldtup,
+ RelationGetDescr(sd),
+ values,
+ nulls,
+ replaces);
+ ReleaseSysCache(oldtup);
+ CatalogTupleUpdate(sd, &stup->t_self, stup);
+ }
+ else
+ {
+ /* No, insert new tuple */
+ stup = heap_form_tuple(RelationGetDescr(sd), values, nulls);
+ CatalogTupleInsert(sd, stup);
+ }
+ }
heap_freetuple(stup);
}
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
index a23128d..5d131a7 100644
--- a/src/backend/commands/cluster.c
+++ b/src/backend/commands/cluster.c
@@ -1400,7 +1400,7 @@ finish_heap_swap(Oid OIDOldHeap, Oid OIDNewHeap,
*/
if (newrelpersistence == RELPERSISTENCE_UNLOGGED)
reindex_flags |= REINDEX_REL_FORCE_INDEXES_UNLOGGED;
- else if (newrelpersistence == RELPERSISTENCE_PERMANENT)
+ else if (newrelpersistence != RELPERSISTENCE_TEMP)
reindex_flags |= REINDEX_REL_FORCE_INDEXES_PERMANENT;
/* Report that we are now reindexing relations */
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index a13322b..be661a4 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -94,7 +94,7 @@ static HTAB *seqhashtab = NULL; /* hash table for SeqTable items */
*/
static SeqTableData *last_used_seq = NULL;
-static void fill_seq_with_data(Relation rel, HeapTuple tuple);
+static void fill_seq_with_data(Relation rel, HeapTuple tuple, Buffer buf);
static Relation lock_and_open_sequence(SeqTable seq);
static void create_seq_hashtable(void);
static void init_sequence(Oid relid, SeqTable *p_elm, Relation *p_rel);
@@ -222,7 +222,7 @@ DefineSequence(ParseState *pstate, CreateSeqStmt *seq)
/* now initialize the sequence's data */
tuple = heap_form_tuple(tupDesc, value, null);
- fill_seq_with_data(rel, tuple);
+ fill_seq_with_data(rel, tuple, InvalidBuffer);
/* process OWNED BY if given */
if (owned_by)
@@ -327,7 +327,7 @@ ResetSequence(Oid seq_relid)
/*
* Insert the modified tuple into the new storage file.
*/
- fill_seq_with_data(seq_rel, tuple);
+ fill_seq_with_data(seq_rel, tuple, InvalidBuffer);
/* Clear local cache so that we don't think we have cached numbers */
/* Note that we do not change the currval() state */
@@ -340,18 +340,21 @@ ResetSequence(Oid seq_relid)
* Initialize a sequence's relation with the specified tuple as content
*/
static void
-fill_seq_with_data(Relation rel, HeapTuple tuple)
+fill_seq_with_data(Relation rel, HeapTuple tuple, Buffer buf)
{
- Buffer buf;
Page page;
sequence_magic *sm;
OffsetNumber offnum;
+ bool lockBuffer = false;
/* Initialize first page of relation with special magic number */
- buf = ReadBuffer(rel, P_NEW);
- Assert(BufferGetBlockNumber(buf) == 0);
-
+ if (buf == InvalidBuffer)
+ {
+ buf = ReadBuffer(rel, P_NEW);
+ Assert(BufferGetBlockNumber(buf) == 0);
+ lockBuffer = true;
+ }
page = BufferGetPage(buf);
PageInit(page, BufferGetPageSize(buf), sizeof(sequence_magic));
@@ -360,7 +363,8 @@ fill_seq_with_data(Relation rel, HeapTuple tuple)
/* Now insert sequence tuple */
- LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
+ if (lockBuffer)
+ LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
/*
* Since VACUUM does not process sequences, we have to force the tuple to
@@ -410,7 +414,8 @@ fill_seq_with_data(Relation rel, HeapTuple tuple)
END_CRIT_SECTION();
- UnlockReleaseBuffer(buf);
+ if (lockBuffer)
+ UnlockReleaseBuffer(buf);
}
/*
@@ -502,7 +507,7 @@ AlterSequence(ParseState *pstate, AlterSeqStmt *stmt)
/*
* Insert the modified tuple into the new storage file.
*/
- fill_seq_with_data(seqrel, newdatatuple);
+ fill_seq_with_data(seqrel, newdatatuple, InvalidBuffer);
}
/* process OWNED BY if given */
@@ -1178,6 +1183,17 @@ read_seq_tuple(Relation rel, Buffer *buf, HeapTuple seqdatatuple)
LockBuffer(*buf, BUFFER_LOCK_EXCLUSIVE);
page = BufferGetPage(*buf);
+ if (GlobalTempRelationPageIsNotInitialized(rel, page))
+ {
+ /* Initialize sequence for global temporary tables */
+ Datum value[SEQ_COL_LASTCOL] = {0};
+ bool null[SEQ_COL_LASTCOL] = {false};
+ HeapTuple tuple;
+ value[SEQ_COL_LASTVAL-1] = Int64GetDatumFast(1); /* start sequence with 1 */
+ tuple = heap_form_tuple(RelationGetDescr(rel), value, null);
+ fill_seq_with_data(rel, tuple, *buf);
+ }
+
sm = (sequence_magic *) PageGetSpecialPointer(page);
if (sm->magic != SEQ_MAGIC)
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 8d25d14..50d0402 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -587,7 +587,7 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId,
* Check consistency of arguments
*/
if (stmt->oncommit != ONCOMMIT_NOOP
- && stmt->relation->relpersistence != RELPERSISTENCE_TEMP)
+ && !IsLocalRelpersistence(stmt->relation->relpersistence))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
errmsg("ON COMMIT can only be used on temporary tables")));
@@ -1772,7 +1772,8 @@ ExecuteTruncateGuts(List *explicit_rels, List *relids, List *relids_logged,
* table or the current physical file to be thrown away anyway.
*/
if (rel->rd_createSubid == mySubid ||
- rel->rd_newRelfilenodeSubid == mySubid)
+ rel->rd_newRelfilenodeSubid == mySubid ||
+ rel->rd_rel->relpersistence == RELPERSISTENCE_SESSION)
{
/* Immediate, non-rollbackable truncation is OK */
heap_truncate_one_rel(rel);
@@ -7708,6 +7709,12 @@ ATAddForeignKeyConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
errmsg("constraints on unlogged tables may reference only permanent or unlogged tables")));
break;
+ case RELPERSISTENCE_SESSION:
+ if (pkrel->rd_rel->relpersistence != RELPERSISTENCE_SESSION)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
+ errmsg("constraints on session tables may reference only session tables")));
+ break;
case RELPERSISTENCE_TEMP:
if (pkrel->rd_rel->relpersistence != RELPERSISTENCE_TEMP)
ereport(ERROR,
@@ -14140,6 +14147,13 @@ ATPrepChangePersistence(Relation rel, bool toLogged)
RelationGetRelationName(rel)),
errtable(rel)));
break;
+ case RELPERSISTENCE_SESSION:
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
+ errmsg("cannot change logged status of session table \"%s\"",
+ RelationGetRelationName(rel)),
+ errtable(rel)));
+ break;
case RELPERSISTENCE_PERMANENT:
if (toLogged)
/* nothing to do */
@@ -14627,14 +14641,7 @@ PreCommit_on_commit_actions(void)
/* Do nothing (there shouldn't be such entries, actually) */
break;
case ONCOMMIT_DELETE_ROWS:
-
- /*
- * If this transaction hasn't accessed any temporary
- * relations, we can skip truncating ON COMMIT DELETE ROWS
- * tables, as they must still be empty.
- */
- if ((MyXactFlags & XACT_FLAGS_ACCESSEDTEMPNAMESPACE))
- oids_to_truncate = lappend_oid(oids_to_truncate, oc->relid);
+ oids_to_truncate = lappend_oid(oids_to_truncate, oc->relid);
break;
case ONCOMMIT_DROP:
oids_to_drop = lappend_oid(oids_to_drop, oc->relid);
diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y
index 3f67aaf..565c868 100644
--- a/src/backend/parser/gram.y
+++ b/src/backend/parser/gram.y
@@ -3266,20 +3266,11 @@ OptTemp: TEMPORARY { $$ = RELPERSISTENCE_TEMP; }
| TEMP { $$ = RELPERSISTENCE_TEMP; }
| LOCAL TEMPORARY { $$ = RELPERSISTENCE_TEMP; }
| LOCAL TEMP { $$ = RELPERSISTENCE_TEMP; }
- | GLOBAL TEMPORARY
- {
- ereport(WARNING,
- (errmsg("GLOBAL is deprecated in temporary table creation"),
- parser_errposition(@1)));
- $$ = RELPERSISTENCE_TEMP;
- }
- | GLOBAL TEMP
- {
- ereport(WARNING,
- (errmsg("GLOBAL is deprecated in temporary table creation"),
- parser_errposition(@1)));
- $$ = RELPERSISTENCE_TEMP;
- }
+ | GLOBAL TEMPORARY { $$ = RELPERSISTENCE_SESSION; }
+ | GLOBAL TEMP { $$ = RELPERSISTENCE_SESSION; }
+ | SESSION { $$ = RELPERSISTENCE_SESSION; }
+ | SESSION TEMPORARY { $$ = RELPERSISTENCE_SESSION; }
+ | SESSION TEMP { $$ = RELPERSISTENCE_SESSION; }
| UNLOGGED { $$ = RELPERSISTENCE_UNLOGGED; }
| /*EMPTY*/ { $$ = RELPERSISTENCE_PERMANENT; }
;
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index ee47547..ea7fe4c 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -437,6 +437,14 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column,
seqstmt->options = seqoptions;
/*
+ * Why not always use the persistence of the parent table?
+ * Because although unlogged sequences are prohibited,
+ * unlogged tables with SERIAL fields are accepted!
+ */
+ if (cxt->relation->relpersistence != RELPERSISTENCE_UNLOGGED)
+ seqstmt->sequence->relpersistence = cxt->relation->relpersistence;
+
+ /*
* If a sequence data type was specified, add it to the options. Prepend
* to the list rather than append; in case a user supplied their own AS
* clause, the "redundant options" error will point to their occurrence,
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index c1dd816..dcfc134 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -2157,7 +2157,7 @@ do_autovacuum(void)
/*
* We cannot safely process other backends' temp tables, so skip 'em.
*/
- if (classForm->relpersistence == RELPERSISTENCE_TEMP)
+ if (IsLocalRelpersistence(classForm->relpersistence))
continue;
relid = classForm->oid;
diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c
index 07f3c93..5db79ec 100644
--- a/src/backend/storage/smgr/md.c
+++ b/src/backend/storage/smgr/md.c
@@ -33,6 +33,7 @@
#include "postmaster/bgwriter.h"
#include "storage/fd.h"
#include "storage/bufmgr.h"
+#include "storage/ipc.h"
#include "storage/md.h"
#include "storage/relfilenode.h"
#include "storage/smgr.h"
@@ -87,6 +88,18 @@ typedef struct _MdfdVec
static MemoryContext MdCxt; /* context for all MdfdVec objects */
+/*
+ * Structure used to collect information about session relations created by this backend.
+ * Data of these relations should be deleted on backend exit.
+ */
+typedef struct SessionRelation
+{
+ RelFileNodeBackend rnode;
+ struct SessionRelation* next;
+} SessionRelation;
+
+
+static SessionRelation* SessionRelations;
/* Populate a file tag describing an md.c segment file. */
#define INIT_MD_FILETAG(a,xx_rnode,xx_forknum,xx_segno) \
@@ -152,6 +165,45 @@ mdinit(void)
ALLOCSET_DEFAULT_SIZES);
}
+
+/*
+ * Delete all data of session relations and remove their pages from shared buffers.
+ * This function is called on backend exit.
+ */
+static void
+TruncateSessionRelations(int code, Datum arg)
+{
+ SessionRelation* rel;
+ for (rel = SessionRelations; rel != NULL; rel = rel->next)
+ {
+ /* Delete relation files */
+ mdunlink(rel->rnode, InvalidForkNumber, false);
+ }
+}
+
+/*
+ * Maintain information about session relations accessed by this backend.
+ * This list is needed to perform cleanup on backend exit.
+ * A session relation is linked into this list when it is created, or when it is opened and its file doesn't exist yet.
+ * This procedure guarantees that each relation is linked into the list only once.
+ */
+static void
+RegisterSessionRelation(SMgrRelation reln)
+{
+ SessionRelation* rel = (SessionRelation*)MemoryContextAlloc(TopMemoryContext, sizeof(SessionRelation));
+
+ /*
+ * Perform session relation cleanup on backend exit. We are using a shared-memory exit hook, because
+ * cleanup should be performed before the backend is disconnected from shared memory.
+ */
+ if (SessionRelations == NULL)
+ on_shmem_exit(TruncateSessionRelations, 0);
+
+ rel->rnode = reln->smgr_rnode;
+ rel->next = SessionRelations;
+ SessionRelations = rel;
+}
+
/*
* mdexists() -- Does the physical file exist?
*
@@ -218,6 +270,8 @@ mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
errmsg("could not create file \"%s\": %m", path)));
}
}
+ if (RelFileNodeBackendIsGlobalTemp(reln->smgr_rnode))
+ RegisterSessionRelation(reln);
pfree(path);
@@ -465,6 +519,19 @@ mdopenfork(SMgrRelation reln, ForkNumber forknum, int behavior)
if (fd < 0)
{
+ /*
+ * When a session relation is accessed, this backend may have no files for it yet.
+ * If so, create the file and register the session relation for cleanup on backend exit.
+ */
+ if (RelFileNodeBackendIsGlobalTemp(reln->smgr_rnode))
+ {
+ fd = PathNameOpenFile(path, O_RDWR | PG_BINARY | O_CREAT);
+ if (fd >= 0)
+ {
+ RegisterSessionRelation(reln);
+ goto NewSegment;
+ }
+ }
if ((behavior & EXTENSION_RETURN_NULL) &&
FILE_POSSIBLY_DELETED(errno))
{
@@ -476,6 +543,7 @@ mdopenfork(SMgrRelation reln, ForkNumber forknum, int behavior)
errmsg("could not open file \"%s\": %m", path)));
}
+ NewSegment:
pfree(path);
_fdvec_resize(reln, forknum, 1);
@@ -652,8 +720,13 @@ mdread(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
* complaining. This allows, for example, the case of trying to
* update a block that was later truncated away.
*/
- if (zero_damaged_pages || InRecovery)
+ if (zero_damaged_pages || InRecovery || RelFileNodeBackendIsGlobalTemp(reln->smgr_rnode))
+ {
MemSet(buffer, 0, BLCKSZ);
+ /* For a session relation we need to write the zeroed page so that a subsequent mdnblocks returns the correct result */
+ if (RelFileNodeBackendIsGlobalTemp(reln->smgr_rnode))
+ mdwrite(reln, forknum, blocknum, buffer, true);
+ }
else
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
@@ -738,12 +811,18 @@ mdwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
BlockNumber
mdnblocks(SMgrRelation reln, ForkNumber forknum)
{
- MdfdVec *v = mdopenfork(reln, forknum, EXTENSION_FAIL);
+ /*
+ * If we access a session relation, this backend may have no files for it yet.
+ * Pass EXTENSION_RETURN_NULL to make mdopenfork return NULL in this case instead of reporting an error.
+ */
+ MdfdVec *v = mdopenfork(reln, forknum, RelFileNodeBackendIsGlobalTemp(reln->smgr_rnode)
+ ? EXTENSION_RETURN_NULL : EXTENSION_FAIL);
BlockNumber nblocks;
BlockNumber segno = 0;
/* mdopen has opened the first segment */
- Assert(reln->md_num_open_segs[forknum] > 0);
+ if (reln->md_num_open_segs[forknum] == 0)
+ return 0;
/*
* Start from the last open segments, to avoid redundant seeks. We have
diff --git a/src/backend/utils/adt/dbsize.c b/src/backend/utils/adt/dbsize.c
index a87e721..2401361 100644
--- a/src/backend/utils/adt/dbsize.c
+++ b/src/backend/utils/adt/dbsize.c
@@ -994,6 +994,9 @@ pg_relation_filepath(PG_FUNCTION_ARGS)
/* Determine owning backend. */
switch (relform->relpersistence)
{
+ case RELPERSISTENCE_SESSION:
+ backend = BackendIdForSessionRelations();
+ break;
case RELPERSISTENCE_UNLOGGED:
case RELPERSISTENCE_PERMANENT:
backend = InvalidBackendId;
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index c3e7d94..6d86c28 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -1191,6 +1191,111 @@ SearchCatCache4(CatCache *cache,
return SearchCatCacheInternal(cache, 4, v1, v2, v3, v4);
}
+
+void InsertCatCache(CatCache *cache,
+ Datum v1, Datum v2, Datum v3, Datum v4,
+ HeapTuple tuple)
+{
+ Datum arguments[CATCACHE_MAXKEYS];
+ uint32 hashValue;
+ Index hashIndex;
+ CatCTup *ct;
+ dlist_iter iter;
+ dlist_head *bucket;
+ int nkeys = cache->cc_nkeys;
+ MemoryContext oldcxt;
+ int i;
+
+ /*
+ * one-time startup overhead for each cache
+ */
+ if (unlikely(cache->cc_tupdesc == NULL))
+ CatalogCacheInitializeCache(cache);
+
+ /* Initialize local parameter array */
+ arguments[0] = v1;
+ arguments[1] = v2;
+ arguments[2] = v3;
+ arguments[3] = v4;
+ /*
+ * find the hash bucket in which to look for the tuple
+ */
+ hashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
+ hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
+
+ /*
+ * scan the hash bucket until we find a match or exhaust our tuples
+ *
+ * Note: it's okay to use dlist_foreach here, even though we modify the
+ * dlist within the loop, because we don't continue the loop afterwards.
+ */
+ bucket = &cache->cc_bucket[hashIndex];
+ dlist_foreach(iter, bucket)
+ {
+ ct = dlist_container(CatCTup, cache_elem, iter.cur);
+
+ if (ct->dead)
+ continue; /* ignore dead entries */
+
+ if (ct->hash_value != hashValue)
+ continue; /* quickly skip entry if wrong hash val */
+
+ if (!CatalogCacheCompareTuple(cache, nkeys, ct->keys, arguments))
+ continue;
+
+ /*
+ * An entry with the same keys already exists. If the new tuple has
+ * the same length, overwrite the cached copy in place and return;
+ * otherwise remove the old entry and insert a fresh one below.
+ */
+ if (ct->tuple.t_len == tuple->t_len)
+ {
+ memcpy((char *) ct->tuple.t_data,
+ (const char *) tuple->t_data,
+ tuple->t_len);
+ return;
+ }
+ dlist_delete(&ct->cache_elem);
+ pfree(ct);
+ cache->cc_ntup -= 1;
+ CacheHdr->ch_ntup -= 1;
+ break;
+ }
+ /* Allocate memory for CatCTup and the cached tuple in one go */
+ oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+
+ ct = (CatCTup *) palloc(sizeof(CatCTup) +
+ MAXIMUM_ALIGNOF + tuple->t_len);
+ ct->tuple.t_len = tuple->t_len;
+ ct->tuple.t_self = tuple->t_self;
+ ct->tuple.t_tableOid = tuple->t_tableOid;
+ ct->tuple.t_data = (HeapTupleHeader)
+ MAXALIGN(((char *) ct) + sizeof(CatCTup));
+ /* copy tuple contents */
+ memcpy((char *) ct->tuple.t_data,
+ (const char *) tuple->t_data,
+ tuple->t_len);
+ ct->ct_magic = CT_MAGIC;
+ ct->my_cache = cache;
+ ct->c_list = NULL;
+ ct->refcount = 1; /* pinned */
+ ct->dead = false;
+ ct->negative = false;
+ ct->hash_value = hashValue;
+ dlist_push_head(&cache->cc_bucket[hashIndex], &ct->cache_elem);
+ memcpy(ct->keys, arguments, nkeys*sizeof(Datum));
+
+ cache->cc_ntup++;
+ CacheHdr->ch_ntup++;
+ MemoryContextSwitchTo(oldcxt);
+
+ /*
+ * If the hash table has become too full, enlarge the buckets array. Quite
+ * arbitrarily, we enlarge when fill factor > 2.
+ */
+ if (cache->cc_ntup > cache->cc_nbuckets * 2)
+ RehashCatCache(cache);
+}
+
/*
* Work-horse for SearchCatCache/SearchCatCacheN.
*/
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 585dcee..ce8852c 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -1098,6 +1098,10 @@ RelationBuildDesc(Oid targetRelId, bool insertIt)
relation->rd_newRelfilenodeSubid = InvalidSubTransactionId;
switch (relation->rd_rel->relpersistence)
{
+ case RELPERSISTENCE_SESSION:
+ relation->rd_backend = BackendIdForSessionRelations();
+ relation->rd_islocaltemp = false;
+ break;
case RELPERSISTENCE_UNLOGGED:
case RELPERSISTENCE_PERMANENT:
relation->rd_backend = InvalidBackendId;
@@ -3301,6 +3305,10 @@ RelationBuildLocalRelation(const char *relname,
rel->rd_rel->relpersistence = relpersistence;
switch (relpersistence)
{
+ case RELPERSISTENCE_SESSION:
+ rel->rd_backend = BackendIdForSessionRelations();
+ rel->rd_islocaltemp = false;
+ break;
case RELPERSISTENCE_UNLOGGED:
case RELPERSISTENCE_PERMANENT:
rel->rd_backend = InvalidBackendId;
diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c
index 16297a5..e7a4d3c 100644
--- a/src/backend/utils/cache/syscache.c
+++ b/src/backend/utils/cache/syscache.c
@@ -1164,6 +1164,16 @@ SearchSysCache4(int cacheId,
return SearchCatCache4(SysCache[cacheId], key1, key2, key3, key4);
}
+void
+InsertSysCache(int cacheId,
+ Datum key1, Datum key2, Datum key3, Datum key4,
+ HeapTuple value)
+{
+ Assert(cacheId >= 0 && cacheId < SysCacheSize &&
+ PointerIsValid(SysCache[cacheId]));
+ InsertCatCache(SysCache[cacheId], key1, key2, key3, key4, value);
+}
+
/*
* ReleaseSysCache
* Release previously grabbed reference count on a tuple
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index bf69adc..fa7479c 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -15637,8 +15637,8 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
tbinfo->dobj.catId.oid, false);
appendPQExpBuffer(q, "CREATE %s%s %s",
- tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED ?
- "UNLOGGED " : "",
+ tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED ? "UNLOGGED "
+ : tbinfo->relpersistence == RELPERSISTENCE_SESSION ? "SESSION " : "",
reltypename,
qualrelname);
diff --git a/src/common/relpath.c b/src/common/relpath.c
index 62b9553..cef99d2 100644
--- a/src/common/relpath.c
+++ b/src/common/relpath.c
@@ -166,7 +166,18 @@ GetRelationPath(Oid dbNode, Oid spcNode, Oid relNode,
}
else
{
- if (forkNumber != MAIN_FORKNUM)
+ /*
+ * Session relations are distinguished from local temp relations by adding
+ * SessionRelFirstBackendId offset to backendId.
+ * There is no need to separate them at the file system level, so just subtract SessionRelFirstBackendId
+ * to avoid overly long file names.
+ * Segments of session relations have the same prefix (t%d_) as local temporary relations
+ * to make it possible to clean them up in the same way as local temporary relation files.
+ */
+ if (backendId >= SessionRelFirstBackendId)
+ backendId -= SessionRelFirstBackendId;
+
+ if (forkNumber != MAIN_FORKNUM)
path = psprintf("base/%u/t%d_%u_%s",
dbNode, backendId, relNode,
forkNames[forkNumber]);
diff --git a/src/include/catalog/pg_class.h b/src/include/catalog/pg_class.h
index 090b6ba..6a39663 100644
--- a/src/include/catalog/pg_class.h
+++ b/src/include/catalog/pg_class.h
@@ -165,6 +165,7 @@ typedef FormData_pg_class *Form_pg_class;
#define RELPERSISTENCE_PERMANENT 'p' /* regular table */
#define RELPERSISTENCE_UNLOGGED 'u' /* unlogged permanent table */
#define RELPERSISTENCE_TEMP 't' /* temporary table */
+#define RELPERSISTENCE_SESSION 's' /* session table */
/* default selection for replica identity (primary key or nothing) */
#define REPLICA_IDENTITY_DEFAULT 'd'
diff --git a/src/include/storage/backendid.h b/src/include/storage/backendid.h
index 70ef8eb..f226e7c 100644
--- a/src/include/storage/backendid.h
+++ b/src/include/storage/backendid.h
@@ -22,6 +22,13 @@ typedef int BackendId; /* unique currently active backend identifier */
#define InvalidBackendId (-1)
+/*
+ * We need to distinguish local and global temporary relations by RelFileNodeBackend.
+ * The least invasive change is to add a special bias value to the backend id (since
+ * the maximal number of backends is limited by MaxBackends).
+ */
+#define SessionRelFirstBackendId (0x40000000)
+
extern PGDLLIMPORT BackendId MyBackendId; /* backend id of this backend */
/* backend id of our parallel session leader, or InvalidBackendId if none */
@@ -34,4 +41,10 @@ extern PGDLLIMPORT BackendId ParallelMasterBackendId;
#define BackendIdForTempRelations() \
(ParallelMasterBackendId == InvalidBackendId ? MyBackendId : ParallelMasterBackendId)
+
+#define BackendIdForSessionRelations() \
+ (BackendIdForTempRelations() + SessionRelFirstBackendId)
+
+#define IsSessionRelationBackendId(id) ((id) >= SessionRelFirstBackendId)
+
#endif /* BACKENDID_H */
diff --git a/src/include/storage/bufpage.h b/src/include/storage/bufpage.h
index 4ef6d8d..bac7a31 100644
--- a/src/include/storage/bufpage.h
+++ b/src/include/storage/bufpage.h
@@ -229,6 +229,13 @@ typedef PageHeaderData *PageHeader;
#define PageIsNew(page) (((PageHeader) (page))->pd_upper == 0)
/*
+ * Check whether a page of a global temporary relation has not yet been initialized in this backend
+ */
+#define GlobalTempRelationPageIsNotInitialized(rel, page) \
+ ((rel)->rd_rel->relpersistence == RELPERSISTENCE_SESSION && PageIsNew(page))
+
+
+/*
* PageGetItemId
* Returns an item identifier of a page.
*/
diff --git a/src/include/storage/relfilenode.h b/src/include/storage/relfilenode.h
index 586500a..20aec72 100644
--- a/src/include/storage/relfilenode.h
+++ b/src/include/storage/relfilenode.h
@@ -75,10 +75,25 @@ typedef struct RelFileNodeBackend
BackendId backend;
} RelFileNodeBackend;
+/*
+ * Check whether this is a local or global temporary relation, whose data belongs to only one backend.
+ */
#define RelFileNodeBackendIsTemp(rnode) \
((rnode).backend != InvalidBackendId)
/*
+ * Check whether this is a global temporary relation, whose metadata is shared by all sessions
+ * but whose data is private to the current session.
+ */
+#define RelFileNodeBackendIsGlobalTemp(rnode) IsSessionRelationBackendId((rnode).backend)
+
+/*
+ * Check whether this is a local temporary relation, which exists only in this backend.
+ */
+#define RelFileNodeBackendIsLocalTemp(rnode) \
+ (RelFileNodeBackendIsTemp(rnode) && !RelFileNodeBackendIsGlobalTemp(rnode))
+
+/*
* Note: RelFileNodeEquals and RelFileNodeBackendEquals compare relNode first
* since that is most likely to be different in two unequal RelFileNodes. It
* is probably redundant to compare spcNode if the other fields are found equal,
diff --git a/src/include/utils/catcache.h b/src/include/utils/catcache.h
index ff1faba..31f615d 100644
--- a/src/include/utils/catcache.h
+++ b/src/include/utils/catcache.h
@@ -228,4 +228,8 @@ extern void PrepareToInvalidateCacheTuple(Relation relation,
extern void PrintCatCacheLeakWarning(HeapTuple tuple);
extern void PrintCatCacheListLeakWarning(CatCList *list);
+extern void InsertCatCache(CatCache *cache,
+ Datum v1, Datum v2, Datum v3, Datum v4,
+ HeapTuple tuple);
+
#endif /* CATCACHE_H */
diff --git a/src/include/utils/rel.h b/src/include/utils/rel.h
index a5cf804..d42830f 100644
--- a/src/include/utils/rel.h
+++ b/src/include/utils/rel.h
@@ -327,6 +327,18 @@ typedef struct StdRdOptions
((relation)->rd_options ? \
((StdRdOptions *) (relation)->rd_options)->parallel_workers : (defaultpw))
+/*
+ * Relation persistence is either TEMP or SESSION
+ */
+#define IsLocalRelpersistence(relpersistence) \
+ ((relpersistence) == RELPERSISTENCE_TEMP || (relpersistence) == RELPERSISTENCE_SESSION)
+
+/*
+ * Relation is either a global or a local temp table
+ */
+#define RelationHasSessionScope(relation) \
+ IsLocalRelpersistence(((relation)->rd_rel->relpersistence))
+
/* ViewOptions->check_option values */
typedef enum ViewOptCheckOption
{
@@ -335,6 +347,7 @@ typedef enum ViewOptCheckOption
VIEW_OPTION_CHECK_OPTION_CASCADED
} ViewOptCheckOption;
+
/*
* ViewOptions
* Contents of rd_options for views
@@ -526,7 +539,7 @@ typedef struct ViewOptions
* True if relation's pages are stored in local buffers.
*/
#define RelationUsesLocalBuffers(relation) \
- ((relation)->rd_rel->relpersistence == RELPERSISTENCE_TEMP)
+ RelationHasSessionScope(relation)
/*
* RELATION_IS_LOCAL
diff --git a/src/include/utils/syscache.h b/src/include/utils/syscache.h
index 918765c..5b1598b 100644
--- a/src/include/utils/syscache.h
+++ b/src/include/utils/syscache.h
@@ -216,4 +216,8 @@ extern bool RelationSupportsSysCache(Oid relid);
#define ReleaseSysCacheList(x) ReleaseCatCacheList(x)
+
+extern void InsertSysCache(int cacheId,
+ Datum v1, Datum v2, Datum v3, Datum v4,
+ HeapTuple tuple);
#endif /* SYSCACHE_H */
diff --git a/src/test/isolation/expected/inherit-global-temp.out b/src/test/isolation/expected/inherit-global-temp.out
new file mode 100644
index 0000000..6114f8c
--- /dev/null
+++ b/src/test/isolation/expected/inherit-global-temp.out
@@ -0,0 +1,218 @@
+Parsed test spec with 2 sessions
+
+starting permutation: s1_insert_p s1_insert_c s2_insert_c s1_select_p s1_select_c s2_select_p s2_select_c
+step s1_insert_p: INSERT INTO inh_global_parent VALUES (1), (2);
+step s1_insert_c: INSERT INTO inh_global_temp_child_s1 VALUES (3), (4);
+step s2_insert_c: INSERT INTO inh_global_temp_child_s2 VALUES (5), (6);
+step s1_select_p: SELECT a FROM inh_global_parent;
+a
+
+1
+2
+3
+4
+step s1_select_c: SELECT a FROM inh_global_temp_child_s1;
+a
+
+3
+4
+step s2_select_p: SELECT a FROM inh_global_parent;
+a
+
+1
+2
+5
+6
+step s2_select_c: SELECT a FROM inh_global_temp_child_s2;
+a
+
+5
+6
+
+starting permutation: s1_insert_p s1_insert_c s2_insert_c s1_update_p s1_update_c s1_select_p s1_select_c s2_select_p s2_select_c
+step s1_insert_p: INSERT INTO inh_global_parent VALUES (1), (2);
+step s1_insert_c: INSERT INTO inh_global_temp_child_s1 VALUES (3), (4);
+step s2_insert_c: INSERT INTO inh_global_temp_child_s2 VALUES (5), (6);
+step s1_update_p: UPDATE inh_global_parent SET a = 11 WHERE a = 1;
+step s1_update_c: UPDATE inh_global_parent SET a = 13 WHERE a IN (3, 5);
+step s1_select_p: SELECT a FROM inh_global_parent;
+a
+
+2
+11
+4
+13
+step s1_select_c: SELECT a FROM inh_global_temp_child_s1;
+a
+
+4
+13
+step s2_select_p: SELECT a FROM inh_global_parent;
+a
+
+2
+11
+5
+6
+step s2_select_c: SELECT a FROM inh_global_temp_child_s2;
+a
+
+5
+6
+
+starting permutation: s1_insert_p s1_insert_c s2_insert_c s2_update_c s1_select_p s1_select_c s2_select_p s2_select_c
+step s1_insert_p: INSERT INTO inh_global_parent VALUES (1), (2);
+step s1_insert_c: INSERT INTO inh_global_temp_child_s1 VALUES (3), (4);
+step s2_insert_c: INSERT INTO inh_global_temp_child_s2 VALUES (5), (6);
+step s2_update_c: UPDATE inh_global_parent SET a = 15 WHERE a IN (3, 5);
+step s1_select_p: SELECT a FROM inh_global_parent;
+a
+
+1
+2
+3
+4
+step s1_select_c: SELECT a FROM inh_global_temp_child_s1;
+a
+
+3
+4
+step s2_select_p: SELECT a FROM inh_global_parent;
+a
+
+1
+2
+6
+15
+step s2_select_c: SELECT a FROM inh_global_temp_child_s2;
+a
+
+6
+15
+
+starting permutation: s1_insert_p s1_insert_c s2_insert_c s1_delete_p s1_delete_c s1_select_p s1_select_c s2_select_p s2_select_c
+step s1_insert_p: INSERT INTO inh_global_parent VALUES (1), (2);
+step s1_insert_c: INSERT INTO inh_global_temp_child_s1 VALUES (3), (4);
+step s2_insert_c: INSERT INTO inh_global_temp_child_s2 VALUES (5), (6);
+step s1_delete_p: DELETE FROM inh_global_parent WHERE a = 2;
+step s1_delete_c: DELETE FROM inh_global_parent WHERE a IN (4, 6);
+step s1_select_p: SELECT a FROM inh_global_parent;
+a
+
+1
+3
+step s1_select_c: SELECT a FROM inh_global_temp_child_s1;
+a
+
+3
+step s2_select_p: SELECT a FROM inh_global_parent;
+a
+
+1
+5
+6
+step s2_select_c: SELECT a FROM inh_global_temp_child_s2;
+a
+
+5
+6
+
+starting permutation: s1_insert_p s1_insert_c s2_insert_c s2_delete_c s1_select_p s1_select_c s2_select_p s2_select_c
+step s1_insert_p: INSERT INTO inh_global_parent VALUES (1), (2);
+step s1_insert_c: INSERT INTO inh_global_temp_child_s1 VALUES (3), (4);
+step s2_insert_c: INSERT INTO inh_global_temp_child_s2 VALUES (5), (6);
+step s2_delete_c: DELETE FROM inh_global_parent WHERE a IN (4, 6);
+step s1_select_p: SELECT a FROM inh_global_parent;
+a
+
+1
+2
+3
+4
+step s1_select_c: SELECT a FROM inh_global_temp_child_s1;
+a
+
+3
+4
+step s2_select_p: SELECT a FROM inh_global_parent;
+a
+
+1
+2
+5
+step s2_select_c: SELECT a FROM inh_global_temp_child_s2;
+a
+
+5
+
+starting permutation: s1_insert_p s1_insert_c s2_insert_c s1_truncate_p s1_select_p s1_select_c s2_select_p s2_select_c
+step s1_insert_p: INSERT INTO inh_global_parent VALUES (1), (2);
+step s1_insert_c: INSERT INTO inh_global_temp_child_s1 VALUES (3), (4);
+step s2_insert_c: INSERT INTO inh_global_temp_child_s2 VALUES (5), (6);
+step s1_truncate_p: TRUNCATE inh_global_parent;
+step s1_select_p: SELECT a FROM inh_global_parent;
+a
+
+step s1_select_c: SELECT a FROM inh_global_temp_child_s1;
+a
+
+step s2_select_p: SELECT a FROM inh_global_parent;
+a
+
+5
+6
+step s2_select_c: SELECT a FROM inh_global_temp_child_s2;
+a
+
+5
+6
+
+starting permutation: s1_insert_p s1_insert_c s2_insert_c s2_truncate_p s1_select_p s1_select_c s2_select_p s2_select_c
+step s1_insert_p: INSERT INTO inh_global_parent VALUES (1), (2);
+step s1_insert_c: INSERT INTO inh_global_temp_child_s1 VALUES (3), (4);
+step s2_insert_c: INSERT INTO inh_global_temp_child_s2 VALUES (5), (6);
+step s2_truncate_p: TRUNCATE inh_global_parent;
+step s1_select_p: SELECT a FROM inh_global_parent;
+a
+
+3
+4
+step s1_select_c: SELECT a FROM inh_global_temp_child_s1;
+a
+
+3
+4
+step s2_select_p: SELECT a FROM inh_global_parent;
+a
+
+step s2_select_c: SELECT a FROM inh_global_temp_child_s2;
+a
+
+
+starting permutation: s1_insert_p s1_insert_c s2_insert_c s1_begin s1_truncate_p s2_select_p s1_commit
+step s1_insert_p: INSERT INTO inh_global_parent VALUES (1), (2);
+step s1_insert_c: INSERT INTO inh_global_temp_child_s1 VALUES (3), (4);
+step s2_insert_c: INSERT INTO inh_global_temp_child_s2 VALUES (5), (6);
+step s1_begin: BEGIN;
+step s1_truncate_p: TRUNCATE inh_global_parent;
+step s2_select_p: SELECT a FROM inh_global_parent; <waiting ...>
+step s1_commit: COMMIT;
+step s2_select_p: <... completed>
+a
+
+5
+6
+
+starting permutation: s1_insert_p s1_insert_c s2_insert_c s1_begin s1_truncate_p s2_select_c s1_commit
+step s1_insert_p: INSERT INTO inh_global_parent VALUES (1), (2);
+step s1_insert_c: INSERT INTO inh_global_temp_child_s1 VALUES (3), (4);
+step s2_insert_c: INSERT INTO inh_global_temp_child_s2 VALUES (5), (6);
+step s1_begin: BEGIN;
+step s1_truncate_p: TRUNCATE inh_global_parent;
+step s2_select_c: SELECT a FROM inh_global_temp_child_s2; <waiting ...>
+step s1_commit: COMMIT;
+step s2_select_c: <... completed>
+a
+
+5
+6
diff --git a/src/test/isolation/isolation_schedule b/src/test/isolation/isolation_schedule
index a2fa192..ef7aa85 100644
--- a/src/test/isolation/isolation_schedule
+++ b/src/test/isolation/isolation_schedule
@@ -88,3 +88,4 @@ test: plpgsql-toast
test: truncate-conflict
test: serializable-parallel
test: serializable-parallel-2
+test: inherit-global-temp
diff --git a/src/test/isolation/specs/inherit-global-temp.spec b/src/test/isolation/specs/inherit-global-temp.spec
new file mode 100644
index 0000000..5e95dd6
--- /dev/null
+++ b/src/test/isolation/specs/inherit-global-temp.spec
@@ -0,0 +1,73 @@
+# This is a copy of the inherit-temp test with small changes for global temporary tables.
+#
+
+setup
+{
+ CREATE TABLE inh_global_parent (a int);
+}
+
+teardown
+{
+ DROP TABLE inh_global_parent;
+}
+
+# Session 1 executes actions which act directly on both the parent and
+# its child. Abbreviation "c" is used for queries working on the child
+# and "p" on the parent.
+session "s1"
+setup
+{
+ CREATE GLOBAL TEMPORARY TABLE inh_global_temp_child_s1 () INHERITS (inh_global_parent);
+}
+step "s1_begin" { BEGIN; }
+step "s1_truncate_p" { TRUNCATE inh_global_parent; }
+step "s1_select_p" { SELECT a FROM inh_global_parent; }
+step "s1_select_c" { SELECT a FROM inh_global_temp_child_s1; }
+step "s1_insert_p" { INSERT INTO inh_global_parent VALUES (1), (2); }
+step "s1_insert_c" { INSERT INTO inh_global_temp_child_s1 VALUES (3), (4); }
+step "s1_update_p" { UPDATE inh_global_parent SET a = 11 WHERE a = 1; }
+step "s1_update_c" { UPDATE inh_global_parent SET a = 13 WHERE a IN (3, 5); }
+step "s1_delete_p" { DELETE FROM inh_global_parent WHERE a = 2; }
+step "s1_delete_c" { DELETE FROM inh_global_parent WHERE a IN (4, 6); }
+step "s1_commit" { COMMIT; }
+teardown
+{
+ DROP TABLE inh_global_temp_child_s1;
+}
+
+# Session 2 executes actions on the parent which act only on the child.
+session "s2"
+setup
+{
+ CREATE GLOBAL TEMPORARY TABLE inh_global_temp_child_s2 () INHERITS (inh_global_parent);
+}
+step "s2_truncate_p" { TRUNCATE inh_global_parent; }
+step "s2_select_p" { SELECT a FROM inh_global_parent; }
+step "s2_select_c" { SELECT a FROM inh_global_temp_child_s2; }
+step "s2_insert_c" { INSERT INTO inh_global_temp_child_s2 VALUES (5), (6); }
+step "s2_update_c" { UPDATE inh_global_parent SET a = 15 WHERE a IN (3, 5); }
+step "s2_delete_c" { DELETE FROM inh_global_parent WHERE a IN (4, 6); }
+teardown
+{
+ DROP TABLE inh_global_temp_child_s2;
+}
+
+# Check INSERT behavior across sessions
+permutation "s1_insert_p" "s1_insert_c" "s2_insert_c" "s1_select_p" "s1_select_c" "s2_select_p" "s2_select_c"
+
+# Check UPDATE behavior across sessions
+permutation "s1_insert_p" "s1_insert_c" "s2_insert_c" "s1_update_p" "s1_update_c" "s1_select_p" "s1_select_c" "s2_select_p" "s2_select_c"
+permutation "s1_insert_p" "s1_insert_c" "s2_insert_c" "s2_update_c" "s1_select_p" "s1_select_c" "s2_select_p" "s2_select_c"
+
+# Check DELETE behavior across sessions
+permutation "s1_insert_p" "s1_insert_c" "s2_insert_c" "s1_delete_p" "s1_delete_c" "s1_select_p" "s1_select_c" "s2_select_p" "s2_select_c"
+permutation "s1_insert_p" "s1_insert_c" "s2_insert_c" "s2_delete_c" "s1_select_p" "s1_select_c" "s2_select_p" "s2_select_c"
+
+# Check TRUNCATE behavior across sessions
+permutation "s1_insert_p" "s1_insert_c" "s2_insert_c" "s1_truncate_p" "s1_select_p" "s1_select_c" "s2_select_p" "s2_select_c"
+permutation "s1_insert_p" "s1_insert_c" "s2_insert_c" "s2_truncate_p" "s1_select_p" "s1_select_c" "s2_select_p" "s2_select_c"
+
+# TRUNCATE on a parent tree blocks another session's scans of both the
+# parent and its global temporary child relation.
+permutation "s1_insert_p" "s1_insert_c" "s2_insert_c" "s1_begin" "s1_truncate_p" "s2_select_p" "s1_commit"
+permutation "s1_insert_p" "s1_insert_c" "s2_insert_c" "s1_begin" "s1_truncate_p" "s2_select_c" "s1_commit"
diff --git a/src/test/regress/expected/global_temp.out b/src/test/regress/expected/global_temp.out
new file mode 100644
index 0000000..ae1adb6
--- /dev/null
+++ b/src/test/regress/expected/global_temp.out
@@ -0,0 +1,247 @@
+--
+-- GLOBAL TEMP
+-- Test global temp relations
+--
+-- Test ON COMMIT DELETE ROWS
+CREATE GLOBAL TEMP TABLE global_temptest(col int) ON COMMIT DELETE ROWS;
+BEGIN;
+INSERT INTO global_temptest VALUES (1);
+INSERT INTO global_temptest VALUES (2);
+SELECT * FROM global_temptest;
+ col
+-----
+ 1
+ 2
+(2 rows)
+
+COMMIT;
+SELECT * FROM global_temptest;
+ col
+-----
+(0 rows)
+
+DROP TABLE global_temptest;
+BEGIN;
+CREATE GLOBAL TEMP TABLE global_temptest(col) ON COMMIT DELETE ROWS AS SELECT 1;
+SELECT * FROM global_temptest;
+ col
+-----
+ 1
+(1 row)
+
+COMMIT;
+SELECT * FROM global_temptest;
+ col
+-----
+(0 rows)
+
+DROP TABLE global_temptest;
+-- Test foreign keys
+BEGIN;
+CREATE GLOBAL TEMP TABLE global_temptest1(col int PRIMARY KEY);
+CREATE GLOBAL TEMP TABLE global_temptest2(col int REFERENCES global_temptest1)
+ ON COMMIT DELETE ROWS;
+INSERT INTO global_temptest1 VALUES (1);
+INSERT INTO global_temptest2 VALUES (1);
+COMMIT;
+SELECT * FROM global_temptest1;
+ col
+-----
+ 1
+(1 row)
+
+SELECT * FROM global_temptest2;
+ col
+-----
+(0 rows)
+
+BEGIN;
+CREATE GLOBAL TEMP TABLE global_temptest3(col int PRIMARY KEY) ON COMMIT DELETE ROWS;
+CREATE GLOBAL TEMP TABLE global_temptest4(col int REFERENCES global_temptest3);
+COMMIT;
+ERROR: unsupported ON COMMIT and foreign key combination
+DETAIL: Table "global_temptest4" references "global_temptest3", but they do not have the same ON COMMIT setting.
+-- For partitioned temp tables, ON COMMIT actions ignore storage-less
+-- partitioned tables.
+BEGIN;
+CREATE GLOBAL TEMP TABLE temp_parted_oncommit (a int)
+ PARTITION BY LIST (a) ON COMMIT DELETE ROWS;
+CREATE GLOBAL TEMP TABLE temp_parted_oncommit_1
+ PARTITION OF temp_parted_oncommit
+ FOR VALUES IN (1) ON COMMIT DELETE ROWS;
+INSERT INTO temp_parted_oncommit VALUES (1);
+COMMIT;
+-- partitions are emptied by the previous commit
+SELECT * FROM temp_parted_oncommit;
+ a
+---
+(0 rows)
+
+DROP TABLE temp_parted_oncommit;
+-- Using ON COMMIT DELETE on a partitioned table does not remove
+-- all rows if partitions preserve their data.
+BEGIN;
+CREATE GLOBAL TEMP TABLE global_temp_parted_oncommit_test (a int)
+ PARTITION BY LIST (a) ON COMMIT DELETE ROWS;
+CREATE GLOBAL TEMP TABLE global_temp_parted_oncommit_test1
+ PARTITION OF global_temp_parted_oncommit_test
+ FOR VALUES IN (1) ON COMMIT PRESERVE ROWS;
+INSERT INTO global_temp_parted_oncommit_test VALUES (1);
+COMMIT;
+-- Data from the remaining partition is still here as its rows are
+-- preserved.
+SELECT * FROM global_temp_parted_oncommit_test;
+ a
+---
+ 1
+(1 row)
+
+-- two relations remain in this case.
+SELECT relname FROM pg_class WHERE relname LIKE 'global_temp_parted_oncommit_test%';
+ relname
+-----------------------------------
+ global_temp_parted_oncommit_test
+ global_temp_parted_oncommit_test1
+(2 rows)
+
+DROP TABLE global_temp_parted_oncommit_test;
+-- Check dependencies between ON COMMIT actions with inheritance trees.
+-- Data on the parent is removed, and the child goes away.
+BEGIN;
+CREATE GLOBAL TEMP TABLE global_temp_inh_oncommit_test (a int) ON COMMIT DELETE ROWS;
+CREATE GLOBAL TEMP TABLE global_temp_inh_oncommit_test1 ()
+ INHERITS(global_temp_inh_oncommit_test) ON COMMIT PRESERVE ROWS;
+INSERT INTO global_temp_inh_oncommit_test1 VALUES (1);
+INSERT INTO global_temp_inh_oncommit_test VALUES (1);
+COMMIT;
+SELECT * FROM global_temp_inh_oncommit_test;
+ a
+---
+ 1
+(1 row)
+
+-- two relations remain
+SELECT relname FROM pg_class WHERE relname LIKE 'global_temp_inh_oncommit_test%';
+ relname
+--------------------------------
+ global_temp_inh_oncommit_test
+ global_temp_inh_oncommit_test1
+(2 rows)
+
+DROP TABLE global_temp_inh_oncommit_test1;
+DROP TABLE global_temp_inh_oncommit_test;
+-- Global temp table cannot inherit from temporary relation
+BEGIN;
+CREATE TEMP TABLE global_temp_table (a int) ON COMMIT DELETE ROWS;
+CREATE GLOBAL TEMP TABLE global_temp_table1 ()
+ INHERITS(global_temp_table) ON COMMIT PRESERVE ROWS;
+ERROR: cannot inherit from temporary relation "global_temp_table"
+ROLLBACK;
+-- Temp table can inherit from global temporary relation
+BEGIN;
+CREATE GLOBAL TEMP TABLE global_temp_table (a int) ON COMMIT DELETE ROWS;
+CREATE TEMP TABLE temp_table1 ()
+ INHERITS(global_temp_table) ON COMMIT PRESERVE ROWS;
+CREATE TEMP TABLE temp_table2 ()
+ INHERITS(global_temp_table) ON COMMIT DELETE ROWS;
+INSERT INTO temp_table2 VALUES (2);
+INSERT INTO temp_table1 VALUES (1);
+INSERT INTO global_temp_table VALUES (0);
+SELECT * FROM global_temp_table;
+ a
+---
+ 0
+ 1
+ 2
+(3 rows)
+
+COMMIT;
+SELECT * FROM global_temp_table;
+ a
+---
+ 1
+(1 row)
+
+DROP TABLE temp_table2;
+DROP TABLE temp_table1;
+DROP TABLE global_temp_table;
+-- Global temp table can inherit from normal relation
+BEGIN;
+CREATE TABLE normal_table (a int);
+CREATE GLOBAL TEMP TABLE temp_table1 ()
+ INHERITS(normal_table) ON COMMIT PRESERVE ROWS;
+CREATE GLOBAL TEMP TABLE temp_table2 ()
+ INHERITS(normal_table) ON COMMIT DELETE ROWS;
+INSERT INTO temp_table2 VALUES (2);
+INSERT INTO temp_table1 VALUES (1);
+INSERT INTO normal_table VALUES (0);
+SELECT * FROM normal_table;
+ a
+---
+ 0
+ 1
+ 2
+(3 rows)
+
+COMMIT;
+SELECT * FROM normal_table;
+ a
+---
+ 0
+ 1
+(2 rows)
+
+DROP TABLE temp_table2;
+DROP TABLE temp_table1;
+DROP TABLE normal_table;
+-- Check SERIAL and BIGSERIAL pseudo-types
+CREATE GLOBAL TEMP TABLE global_temp_table ( aid BIGSERIAL, bid SERIAL );
+CREATE SEQUENCE test_sequence;
+INSERT INTO global_temp_table DEFAULT VALUES;
+INSERT INTO global_temp_table DEFAULT VALUES;
+INSERT INTO global_temp_table DEFAULT VALUES;
+SELECT * FROM global_temp_table;
+ aid | bid
+-----+-----
+ 1 | 1
+ 2 | 2
+ 3 | 3
+(3 rows)
+
+SELECT NEXTVAL( 'test_sequence' );
+ nextval
+---------
+ 1
+(1 row)
+
+\c
+SELECT * FROM global_temp_table;
+ aid | bid
+-----+-----
+(0 rows)
+
+SELECT NEXTVAL( 'test_sequence' );
+ nextval
+---------
+ 2
+(1 row)
+
+INSERT INTO global_temp_table DEFAULT VALUES;
+INSERT INTO global_temp_table DEFAULT VALUES;
+INSERT INTO global_temp_table DEFAULT VALUES;
+SELECT * FROM global_temp_table;
+ aid | bid
+-----+-----
+ 1 | 1
+ 2 | 2
+ 3 | 3
+(3 rows)
+
+SELECT NEXTVAL( 'test_sequence' );
+ nextval
+---------
+ 3
+(1 row)
+
+DROP TABLE global_temp_table;
+DROP SEQUENCE test_sequence;
diff --git a/src/test/regress/expected/session_table.out b/src/test/regress/expected/session_table.out
new file mode 100644
index 0000000..1b9b3f4
--- /dev/null
+++ b/src/test/regress/expected/session_table.out
@@ -0,0 +1,72 @@
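+--
+-- SESSION TABLE
+-- Test session tables: the definition is shared across backends,
+-- but the data is private to each session
+--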
+create session table my_private_table(x integer primary key, y integer);
+insert into my_private_table values (generate_series(1,10000), generate_series(1,10000));
+select count(*) from my_private_table;
+ count
+-------
+ 10000
+(1 row)
+
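+-- after reconnect the table itself survives, but its contents do not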
+\c
+select count(*) from my_private_table;
+ count
+-------
+ 0
+(1 row)
+
+select * from my_private_table where x=10001;
+ x | y
+---+---
+(0 rows)
+
+insert into my_private_table values (generate_series(1,100000), generate_series(1,100000));
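+-- an index built here covers only this session's private data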
+create index on my_private_table(y);
+select * from my_private_table where x=10001;
+ x | y
+-------+-------
+ 10001 | 10001
+(1 row)
+
+select * from my_private_table where y=10001;
+ x | y
+-------+-------
+ 10001 | 10001
+(1 row)
+
+select count(*) from my_private_table;
+ count
+--------
+ 100000
+(1 row)
+
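+-- reconnect again: both the table and its index start out empty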
+\c
+select * from my_private_table where x=100001;
+ x | y
+---+---
+(0 rows)
+
+select * from my_private_table order by y desc limit 1;
+ x | y
+---+---
+(0 rows)
+
+insert into my_private_table values (generate_series(1,100000), generate_series(1,100000));
+select * from my_private_table where x=100001;
+ x | y
+---+---
+(0 rows)
+
+select * from my_private_table order by y desc limit 1;
+ x | y
+--------+--------
+ 100000 | 100000
+(1 row)
+
+drop table my_private_table;
diff --git a/src/test/regress/parallel_schedule b/src/test/regress/parallel_schedule
index fc0f141..507cf7d 100644
--- a/src/test/regress/parallel_schedule
+++ b/src/test/regress/parallel_schedule
@@ -107,7 +107,13 @@ test: json jsonb json_encoding jsonpath jsonpath_encoding jsonb_jsonpath
# NB: temp.sql does a reconnect which transiently uses 2 connections,
# so keep this parallel group to at most 19 tests
# ----------
 test: plancache limit plpgsql copy2 temp domain rangefuncs prepare conversion truncate alter_table sequence polymorphism rowtypes returning largeobject with xml
+
+# ----------
+# global_temp and session_table also do a reconnect, so run them in their
+# own parallel group instead of pushing the group above past 19 tests
+# ----------
+test: global_temp session_table
 
# ----------
# Another group of parallel tests
diff --git a/src/test/regress/serial_schedule b/src/test/regress/serial_schedule
index 68ac56a..3890777 100644
--- a/src/test/regress/serial_schedule
+++ b/src/test/regress/serial_schedule
@@ -172,6 +172,8 @@ test: limit
test: plpgsql
test: copy2
test: temp
+test: global_temp
+test: session_table
test: domain
test: rangefuncs
test: prepare
diff --git a/src/test/regress/sql/global_temp.sql b/src/test/regress/sql/global_temp.sql
new file mode 100644
index 0000000..3058b9b
--- /dev/null
+++ b/src/test/regress/sql/global_temp.sql
@@ -0,0 +1,151 @@
+--
+-- GLOBAL TEMP
+-- Test global temp relations
+--
+
+-- Test ON COMMIT DELETE ROWS
+
+CREATE GLOBAL TEMP TABLE global_temptest(col int) ON COMMIT DELETE ROWS;
+
+BEGIN;
+INSERT INTO global_temptest VALUES (1);
+INSERT INTO global_temptest VALUES (2);
+
+SELECT * FROM global_temptest;
+COMMIT;
+
+SELECT * FROM global_temptest;
+
+DROP TABLE global_temptest;
+
+BEGIN;
+CREATE GLOBAL TEMP TABLE global_temptest(col) ON COMMIT DELETE ROWS AS SELECT 1;
+
+SELECT * FROM global_temptest;
+COMMIT;
+
+SELECT * FROM global_temptest;
+
+DROP TABLE global_temptest;
+
+-- Test foreign keys
+BEGIN;
+CREATE GLOBAL TEMP TABLE global_temptest1(col int PRIMARY KEY);
+CREATE GLOBAL TEMP TABLE global_temptest2(col int REFERENCES global_temptest1)
+ ON COMMIT DELETE ROWS;
+INSERT INTO global_temptest1 VALUES (1);
+INSERT INTO global_temptest2 VALUES (1);
+COMMIT;
+SELECT * FROM global_temptest1;
+SELECT * FROM global_temptest2;
+
+BEGIN;
+CREATE GLOBAL TEMP TABLE global_temptest3(col int PRIMARY KEY) ON COMMIT DELETE ROWS;
+CREATE GLOBAL TEMP TABLE global_temptest4(col int REFERENCES global_temptest3);
+COMMIT;
+
+-- For partitioned temp tables, ON COMMIT actions ignore storage-less
+-- partitioned tables.
+BEGIN;
+CREATE GLOBAL TEMP TABLE temp_parted_oncommit (a int)
+ PARTITION BY LIST (a) ON COMMIT DELETE ROWS;
+CREATE GLOBAL TEMP TABLE temp_parted_oncommit_1
+ PARTITION OF temp_parted_oncommit
+ FOR VALUES IN (1) ON COMMIT DELETE ROWS;
+INSERT INTO temp_parted_oncommit VALUES (1);
+COMMIT;
+-- partitions are emptied by the previous commit
+SELECT * FROM temp_parted_oncommit;
+DROP TABLE temp_parted_oncommit;
+
+-- Using ON COMMIT DELETE on a partitioned table does not remove
+-- all rows if partitions preserve their data.
+BEGIN;
+CREATE GLOBAL TEMP TABLE global_temp_parted_oncommit_test (a int)
+ PARTITION BY LIST (a) ON COMMIT DELETE ROWS;
+CREATE GLOBAL TEMP TABLE global_temp_parted_oncommit_test1
+ PARTITION OF global_temp_parted_oncommit_test
+ FOR VALUES IN (1) ON COMMIT PRESERVE ROWS;
+INSERT INTO global_temp_parted_oncommit_test VALUES (1);
+COMMIT;
+-- Data from the partition is still here as its rows are
+-- preserved.
+SELECT * FROM global_temp_parted_oncommit_test;
+-- two relations remain in this case.
+SELECT relname FROM pg_class WHERE relname LIKE 'global_temp_parted_oncommit_test%';
+DROP TABLE global_temp_parted_oncommit_test;
+
+-- Check dependencies between ON COMMIT actions with inheritance trees.
+-- Data on the parent is removed at commit, but the child keeps its rows.
+BEGIN;
+CREATE GLOBAL TEMP TABLE global_temp_inh_oncommit_test (a int) ON COMMIT DELETE ROWS;
+CREATE GLOBAL TEMP TABLE global_temp_inh_oncommit_test1 ()
+ INHERITS(global_temp_inh_oncommit_test) ON COMMIT PRESERVE ROWS;
+INSERT INTO global_temp_inh_oncommit_test1 VALUES (1);
+INSERT INTO global_temp_inh_oncommit_test VALUES (1);
+COMMIT;
+SELECT * FROM global_temp_inh_oncommit_test;
+-- two relations remain
+SELECT relname FROM pg_class WHERE relname LIKE 'global_temp_inh_oncommit_test%';
+DROP TABLE global_temp_inh_oncommit_test1;
+DROP TABLE global_temp_inh_oncommit_test;
+
+-- Global temp table cannot inherit from temporary relation
+BEGIN;
+CREATE TEMP TABLE global_temp_table (a int) ON COMMIT DELETE ROWS;
+CREATE GLOBAL TEMP TABLE global_temp_table1 ()
+ INHERITS(global_temp_table) ON COMMIT PRESERVE ROWS;
+ROLLBACK;
+
+-- Temp table can inherit from global temporary relation
+BEGIN;
+CREATE GLOBAL TEMP TABLE global_temp_table (a int) ON COMMIT DELETE ROWS;
+CREATE TEMP TABLE temp_table1 ()
+ INHERITS(global_temp_table) ON COMMIT PRESERVE ROWS;
+CREATE TEMP TABLE temp_table2 ()
+ INHERITS(global_temp_table) ON COMMIT DELETE ROWS;
+INSERT INTO temp_table2 VALUES (2);
+INSERT INTO temp_table1 VALUES (1);
+INSERT INTO global_temp_table VALUES (0);
+SELECT * FROM global_temp_table;
+COMMIT;
+SELECT * FROM global_temp_table;
+DROP TABLE temp_table2;
+DROP TABLE temp_table1;
+DROP TABLE global_temp_table;
+
+-- Global temp table can inherit from normal relation
+BEGIN;
+CREATE TABLE normal_table (a int);
+CREATE GLOBAL TEMP TABLE temp_table1 ()
+ INHERITS(normal_table) ON COMMIT PRESERVE ROWS;
+CREATE GLOBAL TEMP TABLE temp_table2 ()
+ INHERITS(normal_table) ON COMMIT DELETE ROWS;
+INSERT INTO temp_table2 VALUES (2);
+INSERT INTO temp_table1 VALUES (1);
+INSERT INTO normal_table VALUES (0);
+SELECT * FROM normal_table;
+COMMIT;
+SELECT * FROM normal_table;
+DROP TABLE temp_table2;
+DROP TABLE temp_table1;
+DROP TABLE normal_table;
+
+-- Check SERIAL and BIGSERIAL pseudo-types
+CREATE GLOBAL TEMP TABLE global_temp_table ( aid BIGSERIAL, bid SERIAL );
+CREATE SEQUENCE test_sequence;
+INSERT INTO global_temp_table DEFAULT VALUES;
+INSERT INTO global_temp_table DEFAULT VALUES;
+INSERT INTO global_temp_table DEFAULT VALUES;
+SELECT * FROM global_temp_table;
+SELECT NEXTVAL( 'test_sequence' );
+\c
+SELECT * FROM global_temp_table;
+SELECT NEXTVAL( 'test_sequence' );
+INSERT INTO global_temp_table DEFAULT VALUES;
+INSERT INTO global_temp_table DEFAULT VALUES;
+INSERT INTO global_temp_table DEFAULT VALUES;
+SELECT * FROM global_temp_table;
+SELECT NEXTVAL( 'test_sequence' );
+DROP TABLE global_temp_table;
+DROP SEQUENCE test_sequence;
diff --git a/src/test/regress/sql/session_table.sql b/src/test/regress/sql/session_table.sql
new file mode 100644
index 0000000..c6663dc
--- /dev/null
+++ b/src/test/regress/sql/session_table.sql
@@ -0,0 +1,26 @@
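+--
+-- SESSION TABLE
+-- Test session tables: the definition is shared across backends,
+-- but the data is private to each session
+--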
+create session table my_private_table(x integer primary key, y integer);
+insert into my_private_table values (generate_series(1,10000), generate_series(1,10000));
+select count(*) from my_private_table;
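+-- after reconnect the table itself survives, but its contents do not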
+\c
+select count(*) from my_private_table;
+select * from my_private_table where x=10001;
+insert into my_private_table values (generate_series(1,100000), generate_series(1,100000));
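+-- an index built here covers only this session's private data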
+create index on my_private_table(y);
+select * from my_private_table where x=10001;
+select * from my_private_table where y=10001;
+select count(*) from my_private_table;
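+-- reconnect again: both the table and its index start out empty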
+\c
+select * from my_private_table where x=100001;
+select * from my_private_table order by y desc limit 1;
+insert into my_private_table values (generate_series(1,100000), generate_series(1,100000));
+select * from my_private_table where x=100001;
+select * from my_private_table order by y desc limit 1;
+drop table my_private_table;