diff -cpr HEAD/src/backend/commands/vacuumlazy.c aggressive_freeze/src/backend/commands/vacuumlazy.c
*** HEAD/src/backend/commands/vacuumlazy.c	Mon Feb 26 09:46:04 2007
--- aggressive_freeze/src/backend/commands/vacuumlazy.c	Mon Mar  5 19:11:09 2007
*************** lazy_scan_heap(Relation onerel, LVRelSta
*** 220,226 ****
  {
  	BlockNumber nblocks,
  				blkno;
- 	HeapTupleData tuple;
  	char	   *relname;
  	BlockNumber empty_pages,
  				vacuumed_pages;
--- 220,225 ----
*************** lazy_scan_heap(Relation onerel, LVRelSta
*** 260,268 ****
  					maxoff;
  		bool		tupgone,
  					hastup;
! 		int			prev_dead_count;
! 		OffsetNumber frozen[MaxOffsetNumber];
! 		int			nfrozen;
  
  		vacuum_delay_point();
  
--- 259,267 ----
  					maxoff;
  		bool		tupgone,
  					hastup;
! 		int				ndead,
! 						nlive;
! 		OffsetNumber	live_tuples[MaxHeapTuplesPerPage];
  
  		vacuum_delay_point();
  
*************** lazy_scan_heap(Relation onerel, LVRelSta
*** 342,356 ****
  			continue;
  		}
  
- 		nfrozen = 0;
  		hastup = false;
! 		prev_dead_count = vacrelstats->num_dead_tuples;
  		maxoff = PageGetMaxOffsetNumber(page);
  		for (offnum = FirstOffsetNumber;
  			 offnum <= maxoff;
  			 offnum = OffsetNumberNext(offnum))
  		{
! 			ItemId		itemid;
  
  			itemid = PageGetItemId(page, offnum);
  
--- 341,355 ----
  			continue;
  		}
  
  		hastup = false;
! 		ndead = nlive = 0;
  		maxoff = PageGetMaxOffsetNumber(page);
  		for (offnum = FirstOffsetNumber;
  			 offnum <= maxoff;
  			 offnum = OffsetNumberNext(offnum))
  		{
! 			ItemId			itemid;
! 			HeapTupleData	tuple;
  
  			itemid = PageGetItemId(page, offnum);
  
*************** lazy_scan_heap(Relation onerel, LVRelSta
*** 401,406 ****
--- 400,406 ----
  			{
  				lazy_record_dead_tuple(vacrelstats, &(tuple.t_self));
  				tups_vacuumed += 1;
+ 				ndead += 1;
  			}
  			else
  			{
*************** lazy_scan_heap(Relation onerel, LVRelSta
*** 408,458 ****
  				hastup = true;
  
  				/*
! 				 * Each non-removable tuple must be checked to see if it
! 				 * needs freezing.  If we already froze anything, then
! 				 * we've already switched the buffer lock to exclusive.
  				 */
! 				if (heap_freeze_tuple(tuple.t_data, FreezeLimit,
! 									  (nfrozen > 0) ? InvalidBuffer : buf))
! 					frozen[nfrozen++] = offnum;
  			}
  		}						/* scan along page */
  
! 		/*
! 		 * If we froze any tuples, mark the buffer dirty, and write a WAL
! 		 * record recording the changes.  We must log the changes to be
! 		 * crash-safe against future truncation of CLOG.
! 		 */
! 		if (nfrozen > 0)
  		{
! 			MarkBufferDirty(buf);
! 			/* no XLOG for temp tables, though */
! 			if (!onerel->rd_istemp)
  			{
! 				XLogRecPtr	recptr;
  
! 				recptr = log_heap_freeze(onerel, buf, FreezeLimit,
! 										 frozen, nfrozen);
! 				PageSetLSN(page, recptr);
! 				PageSetTLI(page, ThisTimeLineID);
  			}
  		}
! 
! 		/*
! 		 * If there are no indexes then we can vacuum the page right now
! 		 * instead of doing a second scan.
! 		 */
! 		if (nindexes == 0 &&
! 			vacrelstats->num_dead_tuples > 0)
  		{
! 			/* Trade in buffer share lock for super-exclusive lock */
! 			LockBuffer(buf, BUFFER_LOCK_UNLOCK);
! 			LockBufferForCleanup(buf);
! 			/* Remove tuples from heap */
! 			lazy_vacuum_page(onerel, blkno, buf, 0, vacrelstats);
! 			/* Forget the now-vacuumed tuples, and press on */
! 			vacrelstats->num_dead_tuples = 0;
! 			vacuumed_pages++;
  		}
  
  		/*
--- 408,497 ----
  				hastup = true;
  
  				/*
! 				 * We don't freeze tuples here.  If the page contains any dead
! 				 * tuples, we postpone freezing until lazy_vacuum_heap so that
! 				 * we don't dirty buffers solely for freezing.  If there are no
! 				 * dead tuples, we freeze the live tuples just below.
  				 */
! 				live_tuples[nlive++] = offnum;
  			}
  		}						/* scan along page */
  
! 		if (ndead > 0)
  		{
! 			/*
! 			 * If there are no indexes then we can vacuum the page right now
! 			 * instead of doing a second scan.
! 			 */
! 			if (nindexes == 0)
  			{
! 				Assert(vacrelstats->num_dead_tuples == ndead);
  
! 				/* Trade in buffer share lock for super-exclusive lock */
! 				LockBuffer(buf, BUFFER_LOCK_UNLOCK);
! 				LockBufferForCleanup(buf);
! 				/* Remove tuples from heap */
! 				lazy_vacuum_page(onerel, blkno, buf, 0, vacrelstats);
! 				/* Forget the now-vacuumed tuples, and press on */
! 				vacrelstats->num_dead_tuples = ndead = 0;
! 				vacuumed_pages++;
  			}
  		}
! 		else if (nlive > 0)
  		{
! 			int				nfrozen;
! 			OffsetNumber	frozen[MaxHeapTuplesPerPage];
! 			TransactionId	limit;
! 
! 			/* If the page is already dirty, we freeze tuples aggressively. */
! 			limit = (BufferIsDirty(buf) ? OldestXmin : FreezeLimit);
! 
! 			nfrozen = 0;
! 			for (i = 0; i < nlive; i++)
! 			{
! 				ItemId			itemid;
! 				HeapTupleHeader	tuple;
! 
! 				itemid = PageGetItemId(page, live_tuples[i]);
! 				tuple = (HeapTupleHeader) PageGetItem(page, itemid);
! 
! 				/*
! 				 * Each non-removable tuple must be checked to see if it
! 				 * needs freezing.  If we already froze anything, then
! 				 * we've already switched the buffer lock to exclusive.
! 				 */
! 				if (heap_freeze_tuple(tuple, limit,
! 									  nfrozen > 0 ? InvalidBuffer : buf))
! 				{
! 					/*
! 					 * Once any tuple on this page needs freezing, freeze the
! 					 * remaining tuples aggressively as well; the page must be
! 					 * dirtied and WAL-logged anyway, so it costs nothing extra.
! 					 */
! 					limit = OldestXmin;
! 					frozen[nfrozen++] = live_tuples[i];
! 				}
! 			}
! 
! 			/*
! 			 * If we froze any tuples, mark the buffer dirty, and write a WAL
! 			 * record recording the changes.  We must log the changes to be
! 			 * crash-safe against future truncation of CLOG.
! 			 */
! 			if (nfrozen > 0)
! 			{
! 				MarkBufferDirty(buf);
! 				/* no XLOG for temp tables, though */
! 				if (!onerel->rd_istemp)
! 				{
! 					XLogRecPtr	recptr;
! 
! 					recptr = log_heap_freeze(onerel, buf, FreezeLimit,
! 											 frozen, nfrozen);
! 					PageSetLSN(page, recptr);
! 					PageSetTLI(page, ThisTimeLineID);
! 				}
! 			}
  		}
  
  		/*
*************** lazy_scan_heap(Relation onerel, LVRelSta
*** 462,468 ****
  		 * page, so remember its free space as-is.	(This path will always be
  		 * taken if there are no indexes.)
  		 */
! 		if (vacrelstats->num_dead_tuples == prev_dead_count)
  		{
  			lazy_record_free_space(vacrelstats, blkno,
  								   PageGetFreeSpace(page));
--- 501,507 ----
  		 * page, so remember its free space as-is.	(This path will always be
  		 * taken if there are no indexes.)
  		 */
! 		if (ndead == 0)
  		{
  			lazy_record_free_space(vacrelstats, blkno,
  								   PageGetFreeSpace(page));
*************** lazy_vacuum_heap(Relation onerel, LVRelS
*** 571,577 ****
  }
  
  /*
!  *	lazy_vacuum_page() -- free dead tuples on a page
   *					 and repair its fragmentation.
   *
   * Caller must hold pin and lock on the buffer.
--- 610,616 ----
  }
  
  /*
!  *	lazy_vacuum_page() -- free dead tuples and freeze live tuples on a page
   *					 and repair its fragmentation.
   *
   * Caller must hold pin and lock on the buffer.
*************** lazy_vacuum_page(Relation onerel, BlockN
*** 587,607 ****
  	OffsetNumber unused[MaxOffsetNumber];
  	int			uncnt;
  	Page		page = BufferGetPage(buffer);
! 	ItemId		itemid;
  
  	START_CRIT_SECTION();
  
! 	for (; tupindex < vacrelstats->num_dead_tuples; tupindex++)
  	{
! 		BlockNumber tblk;
! 		OffsetNumber toff;
  
! 		tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
! 		if (tblk != blkno)
! 			break;				/* past end of tuples for this block */
! 		toff = ItemPointerGetOffsetNumber(&vacrelstats->dead_tuples[tupindex]);
! 		itemid = PageGetItemId(page, toff);
! 		itemid->lp_flags &= ~LP_USED;
  	}
  
  	uncnt = PageRepairFragmentation(page, unused);
--- 626,683 ----
  	OffsetNumber unused[MaxOffsetNumber];
  	int			uncnt;
  	Page		page = BufferGetPage(buffer);
! 	OffsetNumber	offdead;
! 	OffsetNumber	offnum,
! 					maxoff;
! 	OffsetNumber	frozen[MaxHeapTuplesPerPage];
! 	int				nfrozen;
! 
! 	Assert(tupindex < vacrelstats->num_dead_tuples);
! 	Assert(blkno == ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]));
! 
! 	offdead = ItemPointerGetOffsetNumber(&vacrelstats->dead_tuples[tupindex]);
! 	maxoff = PageGetMaxOffsetNumber(page);
! 	nfrozen = 0;
  
  	START_CRIT_SECTION();
  
! 	for (offnum = FirstOffsetNumber;
! 		 offnum <= maxoff;
! 		 offnum = OffsetNumberNext(offnum))
  	{
! 		ItemId	itemid = PageGetItemId(page, offnum);
  
! 		if (offnum == offdead)
! 		{
! 			itemid->lp_flags &= ~LP_USED;
! 
! 			tupindex++;
! 			if (tupindex < vacrelstats->num_dead_tuples &&
! 				blkno == ItemPointerGetBlockNumber(
! 							&vacrelstats->dead_tuples[tupindex]))
! 			{
! 				offdead = ItemPointerGetOffsetNumber(
! 					&vacrelstats->dead_tuples[tupindex]);
! 			}
! 			else
! 			{
! 				/* past end of dead tuples for this block */
! 				offdead = InvalidOffsetNumber;
! 			}
! 		}
! 		else if (ItemIdIsUsed(itemid))
! 		{
! 			HeapTupleHeader tuple;
! 
! 			tuple = (HeapTupleHeader) PageGetItem(page, itemid);
! 
! 			/*
! 			 * Do an aggressive freeze: use OldestXmin rather than FreezeLimit
! 			 * as the freeze threshold here.
! 			 */
! 			if (heap_freeze_tuple(tuple, OldestXmin, InvalidBuffer))
! 				frozen[nfrozen++] = offnum;
! 		}
  	}
  
  	uncnt = PageRepairFragmentation(page, unused);
*************** lazy_vacuum_page(Relation onerel, BlockN
*** 613,618 ****
--- 689,696 ----
  	{
  		XLogRecPtr	recptr;
  
+ 		if (nfrozen > 0)
+ 			log_heap_freeze(onerel, buffer, OldestXmin, frozen, nfrozen);
  		recptr = log_heap_clean(onerel, buffer, unused, uncnt);
  		PageSetLSN(page, recptr);
  		PageSetTLI(page, ThisTimeLineID);
diff -cpr HEAD/src/backend/storage/buffer/bufmgr.c aggressive_freeze/src/backend/storage/buffer/bufmgr.c
*** HEAD/src/backend/storage/buffer/bufmgr.c	Mon Feb  5 10:35:59 2007
--- aggressive_freeze/src/backend/storage/buffer/bufmgr.c	Mon Mar  5 19:11:09 2007
*************** buffer_write_error_callback(void *arg)
*** 2149,2151 ****
--- 2149,2163 ----
  				   bufHdr->tag.rnode.dbNode,
  				   bufHdr->tag.rnode.relNode);
  }
+ 
+ /*
+  * BufferIsDirty -- return whether the buffer is currently marked dirty
+  */
+ bool
+ BufferIsDirty(Buffer buffer)
+ {
+ 	volatile BufferDesc *bufHdr;
+ 
+ 	bufHdr = &BufferDescriptors[buffer - 1];
+ 	return (bufHdr->flags & BM_DIRTY) != 0;
+ }
diff -cpr HEAD/src/include/storage/bufmgr.h aggressive_freeze/src/include/storage/bufmgr.h
*** HEAD/src/include/storage/bufmgr.h	Thu Jan 11 14:20:57 2007
--- aggressive_freeze/src/include/storage/bufmgr.h	Mon Mar  5 19:11:09 2007
*************** extern Size BufferShmemSize(void);
*** 141,146 ****
--- 141,147 ----
  extern RelFileNode BufferGetFileNode(Buffer buffer);
  
  extern void SetBufferCommitInfoNeedsSave(Buffer buffer);
+ extern bool BufferIsDirty(Buffer buffer);
  
  extern void UnlockBuffers(void);
  extern void LockBuffer(Buffer buffer, int mode);
