*** a/src/backend/access/transam/xlog.c
--- b/src/backend/access/transam/xlog.c
***************
*** 352,358 **** typedef struct XLogCtlInsert
  	XLogPageHeader currpage;	/* points to header of block in cache */
  	char	   *currpos;		/* current insertion point in cache */
  	XLogRecPtr	RedoRecPtr;		/* current redo point for insertions */
! 	bool		forcePageWrites;	/* forcing full-page writes for PITR? */
  
  	/*
  	 * exclusiveBackup is true if a backup started with pg_start_backup() is
--- 352,379 ----
  	XLogPageHeader currpage;	/* points to header of block in cache */
  	char	   *currpos;		/* current insertion point in cache */
  	XLogRecPtr	RedoRecPtr;		/* current redo point for insertions */
! } XLogCtlInsert;
! 
! /*
!  * Shared state data for XLogWrite/XLogFlush.
!  */
! typedef struct XLogCtlWrite
! {
! 	XLogwrtResult LogwrtResult; /* current value of LogwrtResult */
! 	int			curridx;		/* cache index of next block to write */
! 	pg_time_t	lastSegSwitchTime;		/* time of last xlog segment switch */
! } XLogCtlWrite;
! 
! /*
!  * Shared state data for backup details.
!  * Protected by WALInsertLock but frequently accessed without lock.
!  */
! typedef struct XLogCtlBackup
! {
! 	/*
! 	 * True while a base backup is in progress; forces full-page writes.
! 	 */
! 	bool		forcePageWrites;
  
  	/*
  	 * exclusiveBackup is true if a backup started with pg_start_backup() is
***************
*** 364,380 **** typedef struct XLogCtlInsert
  	bool		exclusiveBackup;
  	int			nonExclusiveBackups;
  	XLogRecPtr	lastBackupStart;
! } XLogCtlInsert;
  
  /*
!  * Shared state data for XLogWrite/XLogFlush.
   */
! typedef struct XLogCtlWrite
! {
! 	XLogwrtResult LogwrtResult; /* current value of LogwrtResult */
! 	int			curridx;		/* cache index of next block to write */
! 	pg_time_t	lastSegSwitchTime;		/* time of last xlog segment switch */
! } XLogCtlWrite;
  
  /*
   * Total shared-memory state for XLOG.
--- 385,397 ----
  	bool		exclusiveBackup;
  	int			nonExclusiveBackups;
  	XLogRecPtr	lastBackupStart;
! } XLogCtlBackup;
  
  /*
!  * Set the cacheline size big enough for heavyweight iron; a lower setting
!  * saves only a few bytes.  Each padded struct below must stay smaller.
   */
! #define CACHELINE_SIZE	256
  
  /*
   * Total shared-memory state for XLOG.
***************
*** 384,401 **** typedef struct XLogCtlData
  	/* Protected by WALInsertLock: */
  	XLogCtlInsert Insert;
  
! 	/* Protected by info_lck: */
! 	XLogwrtRqst LogwrtRqst;
! 	XLogwrtResult LogwrtResult;
! 	uint32		ckptXidEpoch;	/* nextXID & epoch of latest checkpoint */
! 	TransactionId ckptXid;
! 	XLogRecPtr	asyncXactLSN;	/* LSN of newest async commit/abort */
! 	uint32		lastRemovedLog; /* latest removed/recycled XLOG segment */
! 	uint32		lastRemovedSeg;
  
  	/* Protected by WALWriteLock: */
  	XLogCtlWrite Write;
  
  	/*
  	 * These values do not change after startup, although the pointed-to pages
  	 * and xlblocks values certainly do.  Permission to read/write the pages
--- 401,417 ----
  	/* Protected by WALInsertLock: */
  	XLogCtlInsert Insert;
  
! 	char		WALInsertLock_padding[CACHELINE_SIZE - sizeof(XLogCtlInsert)];
! 
! 	XLogCtlBackup Backup;
! 
! 	char		backup_padding[CACHELINE_SIZE - sizeof(XLogCtlBackup)];
  
  	/* Protected by WALWriteLock: */
  	XLogCtlWrite Write;
  
+ 	char		WALWriteLock_padding[CACHELINE_SIZE - sizeof(XLogCtlWrite)];
+ 
  	/*
  	 * These values do not change after startup, although the pointed-to pages
  	 * and xlblocks values certainly do.  Permission to read/write the pages
***************
*** 407,412 **** typedef struct XLogCtlData
--- 423,439 ----
  	TimeLineID	ThisTimeLineID;
  	TimeLineID	RecoveryTargetTLI;
  
+ 	char		info_lck_padding[CACHELINE_SIZE];
+ 
+ 	/* Protected by info_lck: */
+ 	XLogwrtRqst LogwrtRqst;
+ 	XLogwrtResult LogwrtResult;
+ 	uint32		ckptXidEpoch;	/* nextXID & epoch of latest checkpoint */
+ 	TransactionId ckptXid;
+ 	XLogRecPtr	asyncXactLSN;	/* LSN of newest async commit/abort */
+ 	uint32		lastRemovedLog; /* latest removed/recycled XLOG segment */
+ 	uint32		lastRemovedSeg;
+ 
  	/*
  	 * archiveCleanupCommand is read from recovery.conf but needs to be in
  	 * shared memory so that the checkpointer process can access it.
***************
*** 750,758 **** begin:;
  	 * Decide if we need to do full-page writes in this XLOG record: true if
  	 * full_page_writes is on or we have a PITR request for it.  Since we
  	 * don't yet have the insert lock, forcePageWrites could change under us,
! 	 * but we'll recheck it once we have the lock.
  	 */
! 	doPageWrites = fullPageWrites || Insert->forcePageWrites;
  
  	INIT_CRC32(rdata_crc);
  	len = 0;
--- 777,786 ----
  	 * Decide if we need to do full-page writes in this XLOG record: true if
  	 * full_page_writes is on or we have a PITR request for it.  Since we
  	 * don't yet have the insert lock, forcePageWrites could change under us,
! 	 * but we'll recheck it once we have the lock. Be careful not to touch
! 	 * the cachelines being used by processes holding WALInsertLock.
  	 */
! 	doPageWrites = fullPageWrites || XLogCtl->Backup.forcePageWrites;
  
  	INIT_CRC32(rdata_crc);
  	len = 0;
***************
*** 898,904 **** begin:;
  	 * just turned off, we could recompute the record without full pages, but
  	 * we choose not to bother.)
  	 */
! 	if (Insert->forcePageWrites && !doPageWrites)
  	{
  		/* Oops, must redo it with full-page data */
  		LWLockRelease(WALInsertLock);
--- 926,932 ----
  	 * just turned off, we could recompute the record without full pages, but
  	 * we choose not to bother.)
  	 */
! 	if (XLogCtl->Backup.forcePageWrites && !doPageWrites)
  	{
  		/* Oops, must redo it with full-page data */
  		LWLockRelease(WALInsertLock);
***************
*** 974,980 **** begin:;
  	 * defining it like this leaves the info bit free for some potential other
  	 * use in records without any backup blocks.
  	 */
! 	if ((info & XLR_BKP_BLOCK_MASK) && !Insert->forcePageWrites)
  		info |= XLR_BKP_REMOVABLE;
  
  	/*
--- 1002,1008 ----
  	 * defining it like this leaves the info bit free for some potential other
  	 * use in records without any backup blocks.
  	 */
! 	if ((info & XLR_BKP_BLOCK_MASK) && !XLogCtl->Backup.forcePageWrites)
  		info |= XLR_BKP_REMOVABLE;
  
  	/*
***************
*** 8018,8024 **** CreateRestartPoint(int flags)
  	}
  
  	/*
! 	 * Update the shared RedoRecPtr so that the startup process can calculate
  	 * the number of segments replayed since last restartpoint, and request a
  	 * restartpoint if it exceeds checkpoint_segments.
  	 *
--- 8046,8052 ----
  	}
  
  	/*
! 	 * Update both copies of RedoRecPtr so that the startup process can calculate
  	 * the number of segments replayed since last restartpoint, and request a
  	 * restartpoint if it exceeds checkpoint_segments.
  	 *
***************
*** 8853,8859 **** do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
  	LWLockAcquire(WALInsertLock, LW_EXCLUSIVE);
  	if (exclusive)
  	{
! 		if (XLogCtl->Insert.exclusiveBackup)
  		{
  			LWLockRelease(WALInsertLock);
  			ereport(ERROR,
--- 8881,8887 ----
  	LWLockAcquire(WALInsertLock, LW_EXCLUSIVE);
  	if (exclusive)
  	{
! 		if (XLogCtl->Backup.exclusiveBackup)
  		{
  			LWLockRelease(WALInsertLock);
  			ereport(ERROR,
***************
*** 8861,8871 **** do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
  					 errmsg("a backup is already in progress"),
  					 errhint("Run pg_stop_backup() and try again.")));
  		}
! 		XLogCtl->Insert.exclusiveBackup = true;
  	}
  	else
! 		XLogCtl->Insert.nonExclusiveBackups++;
! 	XLogCtl->Insert.forcePageWrites = true;
  	LWLockRelease(WALInsertLock);
  
  	/* Ensure we release forcePageWrites if fail below */
--- 8889,8899 ----
  					 errmsg("a backup is already in progress"),
  					 errhint("Run pg_stop_backup() and try again.")));
  		}
! 		XLogCtl->Backup.exclusiveBackup = true;
  	}
  	else
! 		XLogCtl->Backup.nonExclusiveBackups++;
! 	XLogCtl->Backup.forcePageWrites = true;
  	LWLockRelease(WALInsertLock);
  
  	/* Ensure we release forcePageWrites if fail below */
***************
*** 8910,8918 **** do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
  			 * either because only few buffers have been dirtied yet.
  			 */
  			LWLockAcquire(WALInsertLock, LW_SHARED);
! 			if (XLByteLT(XLogCtl->Insert.lastBackupStart, startpoint))
  			{
! 				XLogCtl->Insert.lastBackupStart = startpoint;
  				gotUniqueStartpoint = true;
  			}
  			LWLockRelease(WALInsertLock);
--- 8938,8946 ----
  			 * either because only few buffers have been dirtied yet.
  			 */
  			LWLockAcquire(WALInsertLock, LW_SHARED);
! 			if (XLByteLT(XLogCtl->Backup.lastBackupStart, startpoint))
  			{
! 				XLogCtl->Backup.lastBackupStart = startpoint;
  				gotUniqueStartpoint = true;
  			}
  			LWLockRelease(WALInsertLock);
***************
*** 9003,9021 **** pg_start_backup_callback(int code, Datum arg)
  	LWLockAcquire(WALInsertLock, LW_EXCLUSIVE);
  	if (exclusive)
  	{
! 		Assert(XLogCtl->Insert.exclusiveBackup);
! 		XLogCtl->Insert.exclusiveBackup = false;
  	}
  	else
  	{
! 		Assert(XLogCtl->Insert.nonExclusiveBackups > 0);
! 		XLogCtl->Insert.nonExclusiveBackups--;
  	}
  
! 	if (!XLogCtl->Insert.exclusiveBackup &&
! 		XLogCtl->Insert.nonExclusiveBackups == 0)
  	{
! 		XLogCtl->Insert.forcePageWrites = false;
  	}
  	LWLockRelease(WALInsertLock);
  }
--- 9031,9049 ----
  	LWLockAcquire(WALInsertLock, LW_EXCLUSIVE);
  	if (exclusive)
  	{
! 		Assert(XLogCtl->Backup.exclusiveBackup);
! 		XLogCtl->Backup.exclusiveBackup = false;
  	}
  	else
  	{
! 		Assert(XLogCtl->Backup.nonExclusiveBackups > 0);
! 		XLogCtl->Backup.nonExclusiveBackups--;
  	}
  
! 	if (!XLogCtl->Backup.exclusiveBackup &&
! 		XLogCtl->Backup.nonExclusiveBackups == 0)
  	{
! 		XLogCtl->Backup.forcePageWrites = false;
  	}
  	LWLockRelease(WALInsertLock);
  }
***************
*** 9073,9079 **** do_pg_stop_backup(char *labelfile, bool waitforarchive)
  	 */
  	LWLockAcquire(WALInsertLock, LW_EXCLUSIVE);
  	if (exclusive)
! 		XLogCtl->Insert.exclusiveBackup = false;
  	else
  	{
  		/*
--- 9101,9107 ----
  	 */
  	LWLockAcquire(WALInsertLock, LW_EXCLUSIVE);
  	if (exclusive)
! 		XLogCtl->Backup.exclusiveBackup = false;
  	else
  	{
  		/*
***************
*** 9082,9095 **** do_pg_stop_backup(char *labelfile, bool waitforarchive)
  		 * backups, it is expected that each do_pg_start_backup() call is
  		 * matched by exactly one do_pg_stop_backup() call.
  		 */
! 		Assert(XLogCtl->Insert.nonExclusiveBackups > 0);
! 		XLogCtl->Insert.nonExclusiveBackups--;
  	}
  
! 	if (!XLogCtl->Insert.exclusiveBackup &&
! 		XLogCtl->Insert.nonExclusiveBackups == 0)
  	{
! 		XLogCtl->Insert.forcePageWrites = false;
  	}
  	LWLockRelease(WALInsertLock);
  
--- 9110,9123 ----
  		 * backups, it is expected that each do_pg_start_backup() call is
  		 * matched by exactly one do_pg_stop_backup() call.
  		 */
! 		Assert(XLogCtl->Backup.nonExclusiveBackups > 0);
! 		XLogCtl->Backup.nonExclusiveBackups--;
  	}
  
! 	if (!XLogCtl->Backup.exclusiveBackup &&
! 		XLogCtl->Backup.nonExclusiveBackups == 0)
  	{
! 		XLogCtl->Backup.forcePageWrites = false;
  	}
  	LWLockRelease(WALInsertLock);
  
***************
*** 9294,9306 **** void
  do_pg_abort_backup(void)
  {
  	LWLockAcquire(WALInsertLock, LW_EXCLUSIVE);
! 	Assert(XLogCtl->Insert.nonExclusiveBackups > 0);
! 	XLogCtl->Insert.nonExclusiveBackups--;
  
! 	if (!XLogCtl->Insert.exclusiveBackup &&
! 		XLogCtl->Insert.nonExclusiveBackups == 0)
  	{
! 		XLogCtl->Insert.forcePageWrites = false;
  	}
  	LWLockRelease(WALInsertLock);
  }
--- 9322,9334 ----
  do_pg_abort_backup(void)
  {
  	LWLockAcquire(WALInsertLock, LW_EXCLUSIVE);
! 	Assert(XLogCtl->Backup.nonExclusiveBackups > 0);
! 	XLogCtl->Backup.nonExclusiveBackups--;
  
! 	if (!XLogCtl->Backup.exclusiveBackup &&
! 		XLogCtl->Backup.nonExclusiveBackups == 0)
  	{
! 		XLogCtl->Backup.forcePageWrites = false;
  	}
  	LWLockRelease(WALInsertLock);
  }
