Hi,

Some time ago I noticed that every buffer table entry is quite large at 40 bytes (+8): 16 bytes of HASHELEMENT header (of which the last 4 bytes are padding), 20 bytes of BufferTag, and 4 bytes for the offset into the shared buffers array, with generally 8 more bytes used for the bucket pointers. (On 32-bit systems: 32 (+4) bytes.)
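For reference, this is roughly where those bytes go on a 64-bit build; the struct layouts are paraphrased from hsearch.h and buf_table.c, and the per-field byte counts and comments are mine:

typedef struct HASHELEMENT
{
    struct HASHELEMENT *link;   /* 8 bytes: next element in this bucket */
    uint32      hashvalue;      /* 4 bytes: stored hash of the key */
    /* 4 bytes of trailing padding, since the struct is pointer-aligned */
} HASHELEMENT;                  /* 16 bytes */

typedef struct
{
    BufferTag   key;            /* 20 bytes: tag of the disk page */
    int         id;             /* 4 bytes: offset into the shared buffers array */
} BufferLookupEnt;              /* 24 bytes of MAXALIGNed entry space */

That gives 16 + 24 = 40 bytes per element, plus one 8-byte HASHBUCKET pointer per bucket.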
Does anyone know why we must have the buffer tag in the buffer table? It seems to me we can follow the offset pointer into the shared BufferDesc array whenever we find out we need to compare the tags (as opposed to just the hash, which is stored and present in HASHELEMENT). If we decide to just follow the pointer, we can immediately shave 16 bytes (40%) off the lookup table's per-element size, or 24 bytes if we also pack the 4-byte shared buffer offset into the unused bytes in HASHELEMENT, reducing the memory usage of that hash table by ~50%: we'd have 16 bytes for every ELEMENT + shared_buffer_offset, plus 8 bytes for every bucket pointer (of which there are approximately as many as there are elements), resulting in 24 bytes per element. (A rough sketch of that layout follows after the footnote.)

(This was also discussed on Discord in the Hackers Mentoring server, over at [0].)

Together that results in the following prototype patchset. 0001 adds the ability for dynahash users to opt in to using the 4-byte alignment hole in HASHELEMENT (by providing size and alignment info that dynahash uses to partially move the entry into the alignment hole). 0002 uses that feature to get the per-element size of the buffer lookup hash table down to 16 bytes (+8B for bucket pointers), or 12 (+4) on 32-bit systems.

An alternative approach to current patch 1 (which introduces an "element data offset" to determine where to start looking for the key) would be to add an option that allows "0-length" keys/entries when there is alignment space, and make the hash/compare functions handle the writing and reading of key data (thus removing the new data dependencies in the hash lookup function). I'm not sure that's a winning idea, though, as it requires the user of the API to have knowledge about the internals of dynahash, rather than dynahash internally optimizing its layout based on a clearer picture of what the hash entry needs.

Does anyone have an idea on how to best benchmark this kind of patch, apart from "running pgbench"? Any other ideas on how to improve this, or specific concerns?

Kind regards,

Matthias van de Meent

[0] https://discord.com/channels/1258108670710124574/1318997914777026580
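To make that concrete, here is a sketch of the element layout the patches below effectively produce, plus the gist of the tag comparison. The combined struct and the names entry_id and lookup_tag are made up for illustration only; the real layout is assembled inside dynahash by patch 0001, and the comparison lives in BufTableIdToBufTag()/BufTableHashCompare() in patch 0002.

/* Conceptual element layout on 64-bit after both patches: 16 bytes */
struct packed_buftable_element
{
    struct HASHELEMENT *link;   /* 8 bytes: next element in this bucket */
    uint32      hashvalue;      /* 4 bytes: stored hash of the BufferTag */
    int         id;             /* 4 bytes: offset into BufferDescriptors,
                                 * stored in the former padding hole */
};                              /* +8 bytes of bucket pointer = 24 bytes total */

/* On a hash match, the tag is read from the descriptor, not the entry */
BufferDesc *buf = GetBufferDescriptor(entry_id);
bool        tags_match = BufferTagsEqual(&buf->tag, lookup_tag);

(The patch itself compares the tags with a plain memcmp(); BufferTagsEqual() from buf_internals.h is shown here only for readability.)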
From be1923e29a0ca53adae8853f7ca7b4534fa58258 Mon Sep 17 00:00:00 2001
From: Matthias van de Meent <boekewurm+postgres@gmail.com>
Date: Fri, 20 Dec 2024 18:38:52 +0100
Subject: [PATCH v0 2/2] Buftable: Reduce size of buffer table entries by 60%

By using the stored id as identifier of which BufferDesc we point to, and
thus which BufferTag we need to compare against, we can remove that
BufferTag from the BufferLookupEnt.

On 32-bit systems, that reduces the per-element size from 32 bytes to 12
bytes, a 62% reduction.

On 64-bit systems we additionally fit the now 4-byte entry data into the
empty space of the hash table element, thus reducing the per-element size
down to 16 bytes from 40 bytes; a 60% reduction.
---
 src/include/storage/buf_internals.h    |   2 +-
 src/backend/storage/buffer/buf_table.c | 112 ++++++++++++++++++++++---
 src/backend/storage/buffer/bufmgr.c    |   4 +-
 3 files changed, 105 insertions(+), 13 deletions(-)

diff --git a/src/include/storage/buf_internals.h b/src/include/storage/buf_internals.h
index 1a65342177d..db752bf8368 100644
--- a/src/include/storage/buf_internals.h
+++ b/src/include/storage/buf_internals.h
@@ -439,7 +439,7 @@ extern void InitBufTable(int size);
 extern uint32 BufTableHashCode(BufferTag *tagPtr);
 extern int BufTableLookup(BufferTag *tagPtr, uint32 hashcode);
 extern int BufTableInsert(BufferTag *tagPtr, uint32 hashcode, int buf_id);
-extern void BufTableDelete(BufferTag *tagPtr, uint32 hashcode);
+extern void BufTableDelete(int buffer, uint32 hashcode);
 
 /* localbuf.c */
 extern bool PinLocalBuffer(BufferDesc *buf_hdr, bool adjust_usagecount);
diff --git a/src/backend/storage/buffer/buf_table.c b/src/backend/storage/buffer/buf_table.c
index a50955d5286..0cda2652807 100644
--- a/src/backend/storage/buffer/buf_table.c
+++ b/src/backend/storage/buffer/buf_table.c
@@ -22,16 +22,90 @@
 #include "postgres.h"
 
 #include "storage/buf_internals.h"
+#include "common/hashfn.h"
 
 /* entry for buffer lookup hashtable */
 typedef struct
 {
-    BufferTag   key;            /* Tag of a disk page */
-    int         id;             /* Associated buffer ID */
+    int         id;             /* Buffer offset into BufferDescriptors */
 } BufferLookupEnt;
 
+/*
+ * We don't store Buffers in BufTable, but offsets into BufferDescriptors,
+ * so we must use our own InvalidBuffer equivalent.
+ */
+#define SurrogateBuffer (-1)
+
 static HTAB *SharedBufHash;
+static BufferTag *MyBackendBufLookup;
+
+static inline BufferTag *
+BufTableIdToBufTag(int id)
+{
+    if (id == SurrogateBuffer)
+    {
+        Assert(PointerIsValid(MyBackendBufLookup));
+        return MyBackendBufLookup;
+    }
+    else
+    {
+        BufferDesc *buf = GetBufferDescriptor(id);
+
+        /* can't be in the buffer table if the tag isn't valid */
+        Assert(pg_atomic_read_u32(&buf->state) & BM_TAG_VALID);
+        return &buf->tag;
+    }
+}
+
+static uint32
+BufTableHashValue(const void *key, Size keysize)
+{
+    int         id = *(int *) key;
+
+    /*
+     * Key hashes are stored, and are provided during search/insert/delete
+     * operations, so the only opportunity for this to happen is the hash
+     * table doing hash operations (which we don't want to happen). Therefore,
+     * assert we don't get called, but correctly handle other operations.
+     */
+    Assert(false);
+
+    if (id == SurrogateBuffer)
+    {
+        if (MyBackendBufLookup == NULL)
+        {
+            elog(ERROR, "Surrogate buffer ID should have a lookup buffer tag set aside");
+        }
+
+        return tag_hash(MyBackendBufLookup, sizeof(BufferTag));
+    }
+
+    return tag_hash(BufTableIdToBufTag(id), sizeof(BufferTag));
+}
+
+static int
+BufTableHashCompare(const void *key1, const void *key2, Size keysize)
+{
+    int         id1 = *(int *) key1;
+    int         id2 = *(int *) key2;
+
+    /*
+     * Hash table searches always provide existing entries as argument 1;
+     * and we can't have a missing ID as argument.
+     */
+    Assert(id1 != SurrogateBuffer);
+
+    if (id1 == id2)
+        return 0;
+
+    /* we're looking for exactly this buffer ID, and this entry isn't it */
+    if (id2 != SurrogateBuffer)
+        return 1;
+
+    return memcmp(BufTableIdToBufTag(id1),
+                  BufTableIdToBufTag(id2),
+                  sizeof(BufferTag));
+}
 
 /*
  * Estimate space needed for mapping hashtable
@@ -40,7 +114,9 @@ static HTAB *SharedBufHash;
 Size
 BufTableShmemSize(int size)
 {
-    return hash_estimate_size(size, sizeof(BufferLookupEnt));
+    return hash_estimate_size_aligned(size,
+                                      sizeof(BufferLookupEnt),
+                                      ALIGNOF_INT);
 }
 
 /*
@@ -55,14 +131,18 @@ InitBufTable(int size)
     /* assume no locking is needed yet */
 
     /* BufferTag maps to Buffer */
-    info.keysize = sizeof(BufferTag);
+    info.keysize = sizeof(BufferLookupEnt);
     info.entrysize = sizeof(BufferLookupEnt);
+    info.entryalign = ALIGNOF_INT;
     info.num_partitions = NUM_BUFFER_PARTITIONS;
+    info.hash = BufTableHashValue;
+    info.match = BufTableHashCompare;
 
     SharedBufHash = ShmemInitHash("Shared Buffer Lookup Table",
                                   size, size,
                                   &info,
-                                  HASH_ELEM | HASH_BLOBS | HASH_PARTITION);
+                                  HASH_ELEM | HASH_PARTITION | HASH_FUNCTION
+                                  | HASH_COMPARE | HASH_ALIGN);
 }
 
 /*
@@ -77,7 +157,7 @@ InitBufTable(int size)
 uint32
 BufTableHashCode(BufferTag *tagPtr)
 {
-    return get_hash_value(SharedBufHash, tagPtr);
+    return tag_hash(tagPtr, sizeof(BufferTag));
 }
 
 /*
@@ -89,15 +169,19 @@ BufTableHashCode(BufferTag *tagPtr)
 int
 BufTableLookup(BufferTag *tagPtr, uint32 hashcode)
 {
+    const int   surrogateid = SurrogateBuffer;
     BufferLookupEnt *result;
 
+    MyBackendBufLookup = tagPtr;
+
     result = (BufferLookupEnt *)
         hash_search_with_hash_value(SharedBufHash,
-                                    tagPtr,
+                                    &surrogateid,
                                     hashcode,
                                     HASH_FIND,
                                     NULL);
+    MyBackendBufLookup = NULL;
 
     if (!result)
         return -1;
 
@@ -117,21 +201,29 @@ BufTableLookup(BufferTag *tagPtr, uint32 hashcode)
 int
 BufTableInsert(BufferTag *tagPtr, uint32 hashcode, int buf_id)
 {
+    const int   surrogateid = SurrogateBuffer;
     BufferLookupEnt *result;
    bool        found;
 
     Assert(buf_id >= 0);        /* -1 is reserved for not-in-table */
     Assert(tagPtr->blockNum != P_NEW);  /* invalid tag */
 
+    MyBackendBufLookup = tagPtr;
+
     result = (BufferLookupEnt *)
         hash_search_with_hash_value(SharedBufHash,
-                                    tagPtr,
+                                    &surrogateid,
                                     hashcode,
                                     HASH_ENTER,
                                     &found);
+    MyBackendBufLookup = NULL;
 
     if (found)                  /* found something already in the table */
+    {
+        Assert(result->id != SurrogateBuffer);
         return result->id;
+    }
 
     result->id = buf_id;
 
@@ -145,13 +237,13 @@ BufTableInsert(BufferTag *tagPtr, uint32 hashcode, int buf_id)
  * Caller must hold exclusive lock on BufMappingLock for tag's partition
  */
 void
-BufTableDelete(BufferTag *tagPtr, uint32 hashcode)
+BufTableDelete(int buffer, uint32 hashcode)
 {
     BufferLookupEnt *result;
 
     result = (BufferLookupEnt *)
         hash_search_with_hash_value(SharedBufHash,
-                                    tagPtr,
+                                    &buffer,
                                     hashcode,
                                     HASH_REMOVE,
                                     NULL);
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 5008641baff..25398467a2b 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -1856,7 +1856,7 @@ retry:
      * Remove the buffer from the lookup hashtable, if it was in there.
      */
     if (oldFlags & BM_TAG_VALID)
-        BufTableDelete(&oldTag, oldHash);
+        BufTableDelete(buf->buf_id, oldHash);
 
     /*
      * Done with mapping lock.
@@ -1935,7 +1935,7 @@ InvalidateVictimBuffer(BufferDesc *buf_hdr)
     Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
 
     /* finally delete buffer from the buffer mapping table */
-    BufTableDelete(&tag, hash);
+    BufTableDelete(buf_hdr->buf_id, hash);
 
     LWLockRelease(partition_lock);
-- 
2.45.2
From 3f4cd31478d3e725ab3900eac8c9e7f8982727c5 Mon Sep 17 00:00:00 2001
From: Matthias van de Meent <boekewurm+postgres@gmail.com>
Date: Wed, 18 Dec 2024 16:43:27 +0100
Subject: [PATCH v0 1/2] Dynahash: Allow improved packing of hash elements

On 64-bit builds, there is a 4-byte gap in each element, which is wasteful
when the entries only need 4-byte alignment, too.

This adds APIs with which users can indicate that their entries align to
4 bytes, reducing the space wastage by up to 8 bytes per entry (when the
alignment works out).

This will be used in future commits to reduce the size of each buf_table
element by 24 bytes (from 40 down to 16 bytes), a reduction of 60%.
---
 src/include/utils/hsearch.h       | 10 +++-
 src/backend/utils/hash/dynahash.c | 90 ++++++++++++++++++++++++-------
 2 files changed, 79 insertions(+), 21 deletions(-)

diff --git a/src/include/utils/hsearch.h b/src/include/utils/hsearch.h
index 932cc4f34d9..0856e1209f3 100644
--- a/src/include/utils/hsearch.h
+++ b/src/include/utils/hsearch.h
@@ -74,6 +74,9 @@ typedef struct HASHCTL
     /* Used if HASH_ELEM flag is set (which is now required): */
     Size        keysize;        /* hash key length in bytes */
     Size        entrysize;      /* total user element size in bytes */
+    /* Used if HASH_ALIGN flag is set: */
+    int         entryalign;     /* alignment of entries; either ALIGNOF_INT
+                                 * or MAXIMUM_ALIGNOF */
     /* Used if HASH_FUNCTION flag is set: */
     HashValueFunc hash;         /* hash function */
     /* Used if HASH_COMPARE flag is set: */
@@ -103,6 +106,7 @@ typedef struct HASHCTL
 #define HASH_SHARED_MEM 0x0800  /* Hashtable is in shared memory */
 #define HASH_ATTACH     0x1000  /* Do not initialize hctl */
 #define HASH_FIXED_SIZE 0x2000  /* Initial size is a hard limit */
+#define HASH_ALIGN      0x4000  /* Pack-align elements */
 
 /* max_dsize value to indicate expansible directory */
 #define NO_MAX_DSIZE (-1)
@@ -149,7 +153,11 @@ extern void hash_seq_init_with_hash_value(HASH_SEQ_STATUS *status,
 extern void *hash_seq_search(HASH_SEQ_STATUS *status);
 extern void hash_seq_term(HASH_SEQ_STATUS *status);
 extern void hash_freeze(HTAB *hashp);
-extern Size hash_estimate_size(long num_entries, Size entrysize);
+
+#define hash_estimate_size(num_entries, size) \
+    hash_estimate_size_aligned(num_entries, size, MAXIMUM_ALIGNOF)
+extern Size hash_estimate_size_aligned(long num_entries, Size entrysize,
+                                       Size entryalign);
 extern long hash_select_dirsize(long num_entries);
 extern Size hash_get_shared_size(HASHCTL *info, int flags);
 extern void AtEOXact_HashTables(bool isCommit);
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index cd5a00132fc..f74c3ba0682 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -190,6 +190,10 @@ struct HASHHDR
     /* These fields are fixed at hashtable creation */
     Size        keysize;        /* hash key length in bytes */
     Size        entrysize;      /* total user element size in bytes */
+    int         entryalign;     /* alignment of user entries; ALIGNOF_INT or
+                                 * MAXIMUM_ALIGNOF */
+    int         entryoffset;    /* offset of entry into element, may point
+                                 * into the alignment gap in HASHELEMENT */
     long        num_partitions; /* # partitions (must be power of 2), or 0 */
     long        max_dsize;      /* 'dsize' limit if directory is fixed size */
     long        ssize;          /* segment size --- must be power of 2 */
@@ -234,6 +238,7 @@ struct HTAB
 
     /* We keep local copies of these fixed values to reduce contention */
     Size        keysize;        /* hash key length in bytes */
+    Size        keyoff;         /* key start offset off the base entry pointer */
     long        ssize;          /* segment size --- must be power of 2 */
     int         sshift;         /* segment shift = log2(ssize) */
 };
@@ -241,7 +246,7 @@ struct HTAB
 /*
  * Key (also entry) part of a HASHELEMENT
  */
-#define ELEMENTKEY(helem)  (((char *)(helem)) + MAXALIGN(sizeof(HASHELEMENT)))
+#define ELEMENTKEY(helem, off)  (((char *)(helem)) + off)
 
 /*
  * Obtain element pointer given pointer to key
@@ -270,7 +275,7 @@ static bool dir_realloc(HTAB *hashp);
 static bool expand_table(HTAB *hashp);
 static HASHBUCKET get_hash_entry(HTAB *hashp, int freelist_idx);
 static void hdefault(HTAB *hashp);
-static int  choose_nelem_alloc(Size entrysize);
+static int  choose_nelem_alloc(Size entrysize, Size entryalign);
 static bool init_htab(HTAB *hashp, long nelem);
 static void hash_corrupted(HTAB *hashp) pg_attribute_noreturn();
 static uint32 hash_initial_lookup(HTAB *hashp, uint32 hashvalue,
@@ -361,6 +366,8 @@ hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
     Assert(flags & HASH_ELEM);
     Assert(info->keysize > 0);
     Assert(info->entrysize >= info->keysize);
+    Assert(!(flags & HASH_ALIGN) || info->entryalign == ALIGNOF_INT ||
+           info->entryalign == MAXIMUM_ALIGNOF);
 
     /*
      * For shared hash tables, we have a local hash header (HTAB struct) that
@@ -558,8 +565,37 @@ hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
     hctl->keysize = info->keysize;
     hctl->entrysize = info->entrysize;
 
+    if (flags & HASH_ALIGN)
+    {
+        if (info->entryalign != MAXIMUM_ALIGNOF &&
+            info->entryalign != ALIGNOF_INT)
+        {
+            elog(ERROR, "invalid hash table entry alignment %d",
+                 info->entryalign);
+        }
+
+        hctl->entryalign = info->entryalign;
+
+        if (info->entryalign == MAXIMUM_ALIGNOF)
+            hctl->entryoffset = sizeof(HASHELEMENT);
+        else if (info->entryalign == ALIGNOF_INT)
+        {
+            const Size  startOfGap = offsetof(HASHELEMENT, hashvalue) + sizeof(uint32);
+            Size        aligned = TYPEALIGN(ALIGNOF_INT, startOfGap);
+
+            hctl->entryoffset = aligned;
+        }
+    }
+    else
+    {
+        hctl->entryalign = MAXIMUM_ALIGNOF;
+        hctl->entryoffset = sizeof(HASHELEMENT);
+    }
+
+    Assert(TYPEALIGN(hctl->entryalign, hctl->entryoffset) == hctl->entryoffset);
+
     /* make local copies of heavily-used constant fields */
     hashp->keysize = hctl->keysize;
+    hashp->keyoff = hctl->entryoffset;
     hashp->ssize = hctl->ssize;
     hashp->sshift = hctl->sshift;
@@ -653,15 +689,19 @@ hdefault(HTAB *hashp)
  * elements to add to the hash table when we need more.
  */
 static int
-choose_nelem_alloc(Size entrysize)
+choose_nelem_alloc(Size entrysize, Size entryalign)
 {
     int         nelem_alloc;
     Size        elementSize;
     Size        allocSize;
 
+    Assert(entryalign == 4 || entryalign == MAXIMUM_ALIGNOF);
+
     /* Each element has a HASHELEMENT header plus user data. */
     /* NB: this had better match element_alloc() */
-    elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(entrysize);
+    elementSize = offsetof(HASHELEMENT, hashvalue) + sizeof(uint32);
+    elementSize = TYPEALIGN(entryalign, elementSize);
+
+    elementSize = MAXALIGN(elementSize + entrysize);
 
     /*
      * The idea here is to choose nelem_alloc at least 32, but round up so
@@ -756,7 +796,7 @@ init_htab(HTAB *hashp, long nelem)
     }
 
     /* Choose number of entries to allocate at a time */
-    hctl->nelem_alloc = choose_nelem_alloc(hctl->entrysize);
+    hctl->nelem_alloc = choose_nelem_alloc(hctl->entrysize, hctl->entryalign);
 
 #ifdef HASH_DEBUG
     fprintf(stderr, "init_htab:\n%s%p\n%s%ld\n%s%ld\n%s%d\n%s%ld\n%s%u\n%s%x\n%s%x\n%s%ld\n",
@@ -772,6 +812,8 @@ init_htab(HTAB *hashp, long nelem)
     return true;
 }
 
+
+
 /*
  * Estimate the space needed for a hashtable containing the given number
  * of entries of given size.
@@ -780,7 +822,7 @@ init_htab(HTAB *hashp, long nelem)
  * NB: assumes that all hash structure parameters have default values!
  */
 Size
-hash_estimate_size(long num_entries, Size entrysize)
+hash_estimate_size_aligned(long num_entries, Size entrysize, Size entryalign)
 {
     Size        size;
     long        nBuckets,
@@ -807,9 +849,12 @@ hash_estimate_size(long num_entries, Size entrysize)
     size = add_size(size, mul_size(nSegments,
                                    MAXALIGN(DEF_SEGSIZE * sizeof(HASHBUCKET))));
 
     /* elements --- allocated in groups of choose_nelem_alloc() entries */
-    elementAllocCnt = choose_nelem_alloc(entrysize);
+    elementAllocCnt = choose_nelem_alloc(entrysize, entryalign);
     nElementAllocs = (num_entries - 1) / elementAllocCnt + 1;
-    elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(entrysize);
+
+    elementSize = offsetof(HASHELEMENT, hashvalue) + sizeof(uint32);
+    elementSize = TYPEALIGN(entryalign, elementSize);
+    elementSize = MAXALIGN(elementSize + entrysize);
     size = add_size(size,
                     mul_size(nElementAllocs,
                              mul_size(elementAllocCnt, elementSize)));
@@ -974,6 +1019,7 @@ hash_search_with_hash_value(HTAB *hashp,
     HASHHDR    *hctl = hashp->hctl;
     int         freelist_idx = FREELIST_IDX(hctl, hashvalue);
     Size        keysize;
+    Size        keyoff;
     HASHBUCKET  currBucket;
     HASHBUCKET *prevBucketPtr;
     HashCompareFunc match;
@@ -1014,11 +1060,12 @@ hash_search_with_hash_value(HTAB *hashp,
      */
     match = hashp->match;       /* save one fetch in inner loop */
     keysize = hashp->keysize;   /* ditto */
+    keyoff = hashp->keyoff;     /* ditto */
 
     while (currBucket != NULL)
     {
         if (currBucket->hashvalue == hashvalue &&
-            match(ELEMENTKEY(currBucket), keyPtr, keysize) == 0)
+            match(ELEMENTKEY(currBucket, keyoff), keyPtr, keysize) == 0)
             break;
         prevBucketPtr = &(currBucket->link);
         currBucket = *prevBucketPtr;
@@ -1038,7 +1085,7 @@ hash_search_with_hash_value(HTAB *hashp,
     {
         case HASH_FIND:
             if (currBucket != NULL)
-                return ELEMENTKEY(currBucket);
+                return ELEMENTKEY(currBucket, keyoff);
             return NULL;
 
         case HASH_REMOVE:
@@ -1067,7 +1114,7 @@ hash_search_with_hash_value(HTAB *hashp,
                  * element, because someone else is going to reuse it the next
                  * time something is added to the table
                  */
-                return ELEMENTKEY(currBucket);
+                return ELEMENTKEY(currBucket, keyoff);
             }
             return NULL;
 
@@ -1075,7 +1122,7 @@ hash_search_with_hash_value(HTAB *hashp,
         case HASH_ENTER_NULL:
             /* Return existing element if found, else create one */
             if (currBucket != NULL)
-                return ELEMENTKEY(currBucket);
+                return ELEMENTKEY(currBucket, keyoff);
 
             /* disallow inserts if frozen */
             if (hashp->frozen)
@@ -1105,7 +1152,7 @@ hash_search_with_hash_value(HTAB *hashp,
 
             /* copy key into record */
             currBucket->hashvalue = hashvalue;
-            hashp->keycopy(ELEMENTKEY(currBucket), keyPtr, keysize);
+            hashp->keycopy(ELEMENTKEY(currBucket, keyoff), keyPtr, keysize);
 
             /*
              * Caller is expected to fill the data field on return. DO NOT
             * forget to fill the data field, as its value could confuse the
             * caller's data structure.
              */
-            return ELEMENTKEY(currBucket);
+            return ELEMENTKEY(currBucket, keyoff);
     }
 
     elog(ERROR, "unrecognized hash action code: %d", (int) action);
 
@@ -1149,6 +1196,7 @@ hash_update_hash_key(HTAB *hashp,
     HASHELEMENT *existingElement = ELEMENT_FROM_KEY(existingEntry);
     uint32      newhashvalue;
     Size        keysize;
+    Size        keyoff;
     uint32      bucket;
     uint32      newbucket;
     HASHBUCKET  currBucket;
@@ -1202,11 +1250,12 @@ hash_update_hash_key(HTAB *hashp,
      */
     match = hashp->match;       /* save one fetch in inner loop */
     keysize = hashp->keysize;   /* ditto */
+    keyoff = hashp->keyoff;     /* ditto */
 
     while (currBucket != NULL)
     {
         if (currBucket->hashvalue == newhashvalue &&
-            match(ELEMENTKEY(currBucket), newKeyPtr, keysize) == 0)
+            match(ELEMENTKEY(currBucket, keyoff), newKeyPtr, keysize) == 0)
             break;
         prevBucketPtr = &(currBucket->link);
         currBucket = *prevBucketPtr;
@@ -1240,7 +1289,7 @@ hash_update_hash_key(HTAB *hashp,
 
     /* copy new key into record */
     currBucket->hashvalue = newhashvalue;
-    hashp->keycopy(ELEMENTKEY(currBucket), newKeyPtr, keysize);
+    hashp->keycopy(ELEMENTKEY(currBucket, keyoff), newKeyPtr, keysize);
 
     /* rest of record is untouched */
 
@@ -1440,7 +1489,7 @@ hash_seq_search(HASH_SEQ_STATUS *status)
             status->curEntry = curElem->link;
             if (status->hashvalue != curElem->hashvalue)
                 continue;
-            return (void *) ELEMENTKEY(curElem);
+            return (void *) ELEMENTKEY(curElem, status->hashp->keyoff);
         }
 
         hash_seq_term(status);
@@ -1453,7 +1502,7 @@ hash_seq_search(HASH_SEQ_STATUS *status)
         status->curEntry = curElem->link;
         if (status->curEntry == NULL)   /* end of this bucket */
             ++status->curBucket;
-        return ELEMENTKEY(curElem);
+        return ELEMENTKEY(curElem, status->hashp->keyoff);
     }
 
     /*
@@ -1507,7 +1556,7 @@ hash_seq_search(HASH_SEQ_STATUS *status)
     if (status->curEntry == NULL)   /* end of this bucket */
         ++curBucket;
     status->curBucket = curBucket;
-    return ELEMENTKEY(curElem);
+    return ELEMENTKEY(curElem, status->hashp->keyoff);
 }
 
 void
@@ -1716,7 +1765,8 @@ element_alloc(HTAB *hashp, int nelem, int freelist_idx)
         return false;
 
     /* Each element has a HASHELEMENT header plus user data. */
-    elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(hctl->entrysize);
+    Assert(hctl->entryoffset >= offsetof(HASHELEMENT, hashvalue) + sizeof(uint32));
+    elementSize = MAXALIGN(hctl->entryoffset + hctl->entrysize);
 
     CurrentDynaHashCxt = hashp->hcxt;
     firstElement = (HASHELEMENT *) hashp->alloc(nelem * elementSize);
-- 
2.45.2