diff --git a/src/backend/access/common/tidstore.c b/src/backend/access/common/tidstore.c
index 745393806d..be7a5c9c00 100644
--- a/src/backend/access/common/tidstore.c
+++ b/src/backend/access/common/tidstore.c
@@ -119,7 +119,7 @@ static void tidstore_iter_extract_tids(TidStoreIter *iter, BlockNumber blkno,
  * The returned object is allocated in backend-local memory.
  */
 TidStore *
-TidStoreCreate(size_t max_bytes, dsa_area *area, int tranche_id)
+TidStoreCreate(size_t max_bytes, bool shared, int tranche_id)
 {
 	TidStore   *ts;
 	size_t		initBlockSize = ALLOCSET_DEFAULT_INITSIZE;
@@ -143,8 +143,13 @@ TidStoreCreate(size_t max_bytes, dsa_area *area, int tranche_id)
 										   initBlockSize,
 										   maxBlockSize);
 
-	if (area != NULL)
+	if (shared)
 	{
+		dsa_area *area;
+
+		/* XXX: set initial and max segment sizes */
+		area = dsa_create(tranche_id);
+
 		ts->tree.shared = shared_ts_create(ts->rt_context, area,
 										   tranche_id);
 		ts->area = area;
@@ -160,16 +165,19 @@ TidStoreCreate(size_t max_bytes, dsa_area *area, int tranche_id)
  * is allocated in backend-local memory using the CurrentMemoryContext.
  */
 TidStore *
-TidStoreAttach(dsa_area *area, dsa_pointer handle)
+TidStoreAttach(dsa_handle area_handle, dsa_pointer handle)
 {
 	TidStore   *ts;
+	dsa_area *area;
 
-	Assert(area != NULL);
+	Assert(area_handle != DSA_HANDLE_INVALID);
 	Assert(DsaPointerIsValid(handle));
 
 	/* create per-backend state */
 	ts = palloc0(sizeof(TidStore));
 
+	area = dsa_attach(area_handle);
+
-	/* Find the shared the shared radix tree */
+	/* Find the shared radix tree */
 	ts->tree.shared = shared_ts_attach(area, handle);
 	ts->area = area;
@@ -189,6 +197,8 @@ TidStoreDetach(TidStore *ts)
 	Assert(TidStoreIsShared(ts));
 
 	shared_ts_detach(ts->tree.shared);
+	dsa_detach(ts->area);
+
 	pfree(ts);
 }
 
@@ -234,7 +244,10 @@ TidStoreDestroy(TidStore *ts)
 {
 	/* Destroy underlying radix tree */
 	if (TidStoreIsShared(ts))
+	{
 		shared_ts_free(ts->tree.shared);
+		dsa_detach(ts->area);
+	}
 	else
 		local_ts_free(ts->tree.local);
 
@@ -420,6 +433,17 @@ TidStoreMemoryUsage(TidStore *ts)
 		return local_ts_memory_usage(ts->tree.local);
 }
 
+/*
+ * Return the DSA area where the TidStore lives.
+ */
+dsa_area *
+TidStoreGetDSA(TidStore *ts)
+{
+	Assert(TidStoreIsShared(ts));
+
+	return ts->area;
+}
+
 dsa_pointer
 TidStoreGetHandle(TidStore *ts)
 {
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 6c025b609c..f72a68aa52 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -3155,7 +3155,7 @@ dead_items_alloc(LVRelState *vacrel, int nworkers)
 	}
 
 	/* Serial VACUUM case */
-	vacrel->dead_items = TidStoreCreate(vac_work_mem, NULL, 0);
+	vacrel->dead_items = TidStoreCreate(vac_work_mem, false, 0);
 
 	dead_items_info = (VacDeadItemsInfo *) palloc(sizeof(VacDeadItemsInfo));
 	dead_items_info->max_bytes = vac_work_mem;
@@ -3197,7 +3197,7 @@ dead_items_reset(LVRelState *vacrel)
 	/* Recreate the tidstore with the same max_bytes limitation */
 	TidStoreDestroy(dead_items);
 	vacrel->dead_items = TidStoreCreate(vacrel->dead_items_info->max_bytes,
-										NULL, 0);
+										false, 0);
 
 	/* Reset the counter */
 	vacrel->dead_items_info->num_items = 0;
diff --git a/src/backend/commands/vacuumparallel.c b/src/backend/commands/vacuumparallel.c
index 23a4fc6c58..2b7667c7ef 100644
--- a/src/backend/commands/vacuumparallel.c
+++ b/src/backend/commands/vacuumparallel.c
@@ -45,7 +45,7 @@
  * use small integers.
  */
 #define PARALLEL_VACUUM_KEY_SHARED			1
-#define PARALLEL_VACUUM_KEY_DEAD_ITEMS		2
+#define PARALLEL_VACUUM_KEY_DEAD_ITEMS		2 /* XXX: TOC space still reserved/looked up, but no longer used for an in-place DSA */
 #define PARALLEL_VACUUM_KEY_QUERY_TEXT		3
 #define PARALLEL_VACUUM_KEY_BUFFER_USAGE	4
 #define PARALLEL_VACUUM_KEY_WAL_USAGE		5
@@ -111,6 +111,9 @@ typedef struct PVShared
 	/* Counter for vacuuming and cleanup */
 	pg_atomic_uint32 idx;
 
+	/* DSA handle where the TidStore lives */
+	dsa_handle	dead_items_dsa_handle;
+
 	/* DSA pointer to the shared TidStore */
 	dsa_pointer dead_items_handle;
 
@@ -183,7 +186,6 @@ struct ParallelVacuumState
 
 	/* Shared dead items space among parallel vacuum workers */
 	TidStore   *dead_items;
-	dsa_area   *dead_items_area;
 
 	/* Points to buffer usage area in DSM */
 	BufferUsage *buffer_usage;
@@ -250,7 +252,6 @@ parallel_vacuum_init(Relation rel, Relation *indrels, int nindexes,
 	BufferUsage *buffer_usage;
 	WalUsage   *wal_usage;
 	void	   *area_space;
-	dsa_area   *dead_items_dsa;
 	bool	   *will_parallel_vacuum;
 	Size		est_indstats_len;
 	Size		est_shared_len;
@@ -373,13 +374,9 @@ parallel_vacuum_init(Relation rel, Relation *indrels, int nindexes,
 	/* Prepare DSA space for dead items */
 	area_space = shm_toc_allocate(pcxt->toc, dsa_minsize);
 	shm_toc_insert(pcxt->toc, PARALLEL_VACUUM_KEY_DEAD_ITEMS, area_space);
-	dead_items_dsa = dsa_create_in_place(area_space, dsa_minsize,
-										 LWTRANCHE_PARALLEL_VACUUM_DSA,
-										 pcxt->seg);
-	dead_items = TidStoreCreate(vac_work_mem, dead_items_dsa,
+	dead_items = TidStoreCreate(vac_work_mem, true,
 								LWTRANCHE_PARALLEL_VACUUM_DSA);
 	pvs->dead_items = dead_items;
-	pvs->dead_items_area = dead_items_dsa;
 
 	/* Prepare shared information */
 	shared = (PVShared *) shm_toc_allocate(pcxt->toc, est_shared_len);
@@ -390,6 +387,7 @@ parallel_vacuum_init(Relation rel, Relation *indrels, int nindexes,
 		(nindexes_mwm > 0) ?
 		maintenance_work_mem / Min(parallel_workers, nindexes_mwm) :
 		maintenance_work_mem;
+	shared->dead_items_dsa_handle = dsa_get_handle(TidStoreGetDSA(dead_items));
 	shared->dead_items_handle = TidStoreGetHandle(dead_items);
 	shared->dead_items_info.max_bytes = vac_work_mem;
 
@@ -461,7 +459,6 @@ parallel_vacuum_end(ParallelVacuumState *pvs, IndexBulkDeleteResult **istats)
 	}
 
 	TidStoreDestroy(pvs->dead_items);
-	dsa_detach(pvs->dead_items_area);
 
 	DestroyParallelContext(pvs->pcxt);
 	ExitParallelMode();
@@ -493,11 +490,12 @@ parallel_vacuum_reset_dead_items(ParallelVacuumState *pvs)
 	 * limitation we just used.
 	 */
 	TidStoreDestroy(dead_items);
-	dsa_trim(pvs->dead_items_area);
-	pvs->dead_items = TidStoreCreate(dead_items_info->max_bytes, pvs->dead_items_area,
+	pvs->dead_items = TidStoreCreate(dead_items_info->max_bytes,
+									 true,
 									 LWTRANCHE_PARALLEL_VACUUM_DSA);
 
 	/* Update the DSA pointer for dead_items to the new one */
-	pvs->shared->dead_items_handle = TidStoreGetHandle(dead_items);
+	pvs->shared->dead_items_dsa_handle = dsa_get_handle(TidStoreGetDSA(pvs->dead_items));
+	pvs->shared->dead_items_handle = TidStoreGetHandle(pvs->dead_items);
 
 	/* Reset the counter */
@@ -1005,8 +1003,7 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
 	PVIndStats *indstats;
 	PVShared   *shared;
 	TidStore   *dead_items;
-	void	   *area_space;
-	dsa_area   *dead_items_area;
+	void	   *area_space; /* XXX: set but no longer used */
 	BufferUsage *buffer_usage;
 	WalUsage   *wal_usage;
 	int			nindexes;
@@ -1052,8 +1049,8 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
 
 	/* Set dead items */
 	area_space = shm_toc_lookup(toc, PARALLEL_VACUUM_KEY_DEAD_ITEMS, false);
-	dead_items_area = dsa_attach_in_place(area_space, seg);
-	dead_items = TidStoreAttach(dead_items_area, shared->dead_items_handle);
+	dead_items = TidStoreAttach(shared->dead_items_dsa_handle,
+								shared->dead_items_handle);
 
 	/* Set cost-based vacuum delay */
 	VacuumUpdateCosts();
@@ -1102,7 +1099,6 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
 						  &wal_usage[ParallelWorkerNumber]);
 
 	TidStoreDetach(dead_items);
-	dsa_detach(dead_items_area);
 
 	/* Pop the error context stack */
 	error_context_stack = errcallback.previous;
diff --git a/src/include/access/tidstore.h b/src/include/access/tidstore.h
index 8cf4e94f12..d91898d60f 100644
--- a/src/include/access/tidstore.h
+++ b/src/include/access/tidstore.h
@@ -29,9 +29,9 @@ typedef struct TidStoreIterResult
 	OffsetNumber *offsets;
 } TidStoreIterResult;
 
-extern TidStore *TidStoreCreate(size_t max_bytes, dsa_area *dsa,
+extern TidStore *TidStoreCreate(size_t max_bytes, bool shared,
 								int tranche_id);
-extern TidStore *TidStoreAttach(dsa_area *dsa, dsa_pointer rt_dp);
+extern TidStore *TidStoreAttach(dsa_handle area_handle, dsa_pointer rt_dp);
 extern void TidStoreDetach(TidStore *ts);
 extern void TidStoreLockExclusive(TidStore *ts);
 extern void TidStoreLockShare(TidStore *ts);
@@ -45,5 +46,6 @@ extern TidStoreIterResult *TidStoreIterateNext(TidStoreIter *iter);
 extern void TidStoreEndIterate(TidStoreIter *iter);
 extern size_t TidStoreMemoryUsage(TidStore *ts);
 extern dsa_pointer TidStoreGetHandle(TidStore *ts);
+extern dsa_area *TidStoreGetDSA(TidStore *ts);
 
 #endif							/* TIDSTORE_H */
diff --git a/src/test/modules/test_tidstore/test_tidstore.c b/src/test/modules/test_tidstore/test_tidstore.c
index c74ad2cf8b..8a15f2646b 100644
--- a/src/test/modules/test_tidstore/test_tidstore.c
+++ b/src/test/modules/test_tidstore/test_tidstore.c
@@ -34,7 +34,6 @@ PG_FUNCTION_INFO_V1(test_is_full);
 PG_FUNCTION_INFO_V1(test_destroy);
 
 static TidStore *tidstore = NULL;
-static dsa_area *dsa = NULL;
 static size_t tidstore_empty_size;
 
 /* array for verification of some tests */
@@ -94,7 +93,6 @@ test_create(PG_FUNCTION_ARGS)
 	size_t		array_init_size = 1024;
 
 	Assert(tidstore == NULL);
-	Assert(dsa == NULL);
 
 	/*
 	 * Create the TidStore on TopMemoryContext so that the same process use it
@@ -109,15 +107,13 @@ test_create(PG_FUNCTION_ARGS)
 		tranche_id = LWLockNewTrancheId();
 		LWLockRegisterTranche(tranche_id, "test_tidstore");
 
-		dsa = dsa_create(tranche_id);
+		tidstore = TidStoreCreate(tidstore_max_size, true, tranche_id);
 
 		/*
 		 * Remain attached until end of backend or explicitly detached so that
 		 * the same process use the tidstore for subsequent tests.
 		 */
-		dsa_pin_mapping(dsa);
-
-		tidstore = TidStoreCreate(tidstore_max_size, dsa, tranche_id);
+		dsa_pin_mapping(TidStoreGetDSA(tidstore));
 	}
 	else
-		tidstore = TidStoreCreate(tidstore_max_size, NULL, 0);
+		tidstore = TidStoreCreate(tidstore_max_size, false, 0);
@@ -309,9 +305,5 @@ test_destroy(PG_FUNCTION_ARGS)
 	pfree(items.lookup_tids);
 	pfree(items.iter_tids);
 
-	if (dsa)
-		dsa_detach(dsa);
-	dsa = NULL;
-
 	PG_RETURN_VOID();
 }
