diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c
index 189f290..d21d187 100644
--- a/contrib/postgres_fdw/connection.c
+++ b/contrib/postgres_fdw/connection.c
@@ -47,6 +47,7 @@ typedef struct ConnCacheEntry
 								 * one level of subxact open, etc */
 	bool		have_prep_stmt; /* have we prepared any stmts in this xact? */
 	bool		have_error;		/* have any subxacts aborted in this xact? */
+	PgFdwConnState state;		/* extra per-connection state */
 } ConnCacheEntry;
 
 /*
@@ -92,7 +93,7 @@ static void pgfdw_subxact_callback(SubXactEvent event,
  * mid-transaction anyway.
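+ *
+ * If state is not NULL, *state is set to point to the connection's extra
+ * per-connection state (see PgFdwConnState), which the caller may inspect
+ * and modify.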
  */
 PGconn *
-GetConnection(UserMapping *user, bool will_prep_stmt)
+GetConnection(UserMapping *user, bool will_prep_stmt, PgFdwConnState **state)
 {
 	bool		found;
 	ConnCacheEntry *entry;
@@ -137,6 +138,7 @@ GetConnection(UserMapping *user, bool will_prep_stmt)
 		entry->xact_depth = 0;
 		entry->have_prep_stmt = false;
 		entry->have_error = false;
+		memset(&entry->state, 0, sizeof(entry->state));
 	}
 
 	/*
@@ -171,6 +173,10 @@ GetConnection(UserMapping *user, bool will_prep_stmt)
 	/* Remember if caller will prepare statements */
 	entry->have_prep_stmt |= will_prep_stmt;
 
+	/* If caller needs access to the per-connection state, return it. */
+	if (state)
+		*state = &entry->state;
+
 	return entry->conn;
 }
 
diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c
index d6db834..61e91bd 100644
--- a/contrib/postgres_fdw/postgres_fdw.c
+++ b/contrib/postgres_fdw/postgres_fdw.c
@@ -32,6 +32,7 @@
 #include "optimizer/var.h"
 #include "optimizer/tlist.h"
 #include "parser/parsetree.h"
+#include "utils/asynchrony.h"
 #include "utils/builtins.h"
 #include "utils/guc.h"
 #include "utils/lsyscache.h"
@@ -157,6 +158,9 @@ typedef struct PgFdwScanState
 	MemoryContext temp_cxt;		/* context for per-tuple temporary data */
 
 	int			fetch_size;		/* number of tuples per fetch */
+
+	/* Pointer to shared state for this scan's connection (see connection.c) */
+	PgFdwConnState *conn_state;
 } PgFdwScanState;
 
 /*
@@ -346,6 +350,8 @@ static void postgresGetForeignJoinPaths(PlannerInfo *root,
 static bool postgresRecheckForeignScan(ForeignScanState *node,
 						   TupleTableSlot *slot);
 
+static int postgresReady(ForeignScanState *node);
+
 /*
  * Helper functions
  */
@@ -365,6 +371,7 @@ static bool ec_member_matches_foreign(PlannerInfo *root, RelOptInfo *rel,
 						  EquivalenceClass *ec, EquivalenceMember *em,
 						  void *arg);
 static void create_cursor(ForeignScanState *node);
+static void fetch_more_data_begin(ForeignScanState *node);
 static void fetch_more_data(ForeignScanState *node);
 static void close_cursor(PGconn *conn, unsigned int cursor_number);
 static void prepare_foreign_modify(PgFdwModifyState *fmstate);
@@ -457,6 +464,9 @@ postgres_fdw_handler(PG_FUNCTION_ARGS)
 	/* Support functions for join push-down */
 	routine->GetForeignJoinPaths = postgresGetForeignJoinPaths;
 
+	/* Support for asynchrony */
+	routine->Ready = postgresReady;
+
 	PG_RETURN_POINTER(routine);
 }
 
@@ -1288,7 +1298,7 @@ postgresBeginForeignScan(ForeignScanState *node, int eflags)
 	 * Get connection to the foreign server.  Connection manager will
 	 * establish new connection if necessary.
 	 */
-	fsstate->conn = GetConnection(user, false);
+	fsstate->conn = GetConnection(user, false, &fsstate->conn_state);
 
 	/* Assign a unique ID for my cursor */
 	fsstate->cursor_number = GetCursorNumber(fsstate->conn);
@@ -1337,6 +1347,7 @@ postgresBeginForeignScan(ForeignScanState *node, int eflags)
 							 &fsstate->param_flinfo,
 							 &fsstate->param_exprs,
 							 &fsstate->param_values);
+	/* Initially, no asynchronous query is in flight on this connection. */
+	fsstate->conn_state->async_query_sent = false;
 }
 
 /*
@@ -1663,7 +1674,7 @@ postgresBeginForeignModify(ModifyTableState *mtstate,
 	user = GetUserMapping(userid, table->serverid);
 
 	/* Open connection; report that we'll create a prepared statement. */
-	fmstate->conn = GetConnection(user, true);
+	fmstate->conn = GetConnection(user, true, NULL);
 	fmstate->p_name = NULL;		/* prepared statement not made yet */
 
 	/* Deconstruct fdw_private data. */
@@ -2238,7 +2249,7 @@ postgresBeginDirectModify(ForeignScanState *node, int eflags)
 	 * Get connection to the foreign server.  Connection manager will
 	 * establish new connection if necessary.
 	 */
-	dmstate->conn = GetConnection(user, false);
+	dmstate->conn = GetConnection(user, false, NULL);
 
 	/* Initialize state variable */
 	dmstate->num_tuples = -1;		/* -1 means not set yet */
@@ -2500,7 +2511,7 @@ estimate_path_cost_size(PlannerInfo *root,
 								NULL);
 
 		/* Get the remote estimate */
-		conn = GetConnection(fpinfo->user, false);
+		conn = GetConnection(fpinfo->user, false, NULL);
 		get_remote_estimate(sql.data, conn, &rows, &width,
 							&startup_cost, &total_cost);
 		ReleaseConnection(conn);
@@ -2864,13 +2875,28 @@ fetch_more_data(ForeignScanState *node)
 		int			numrows;
 		int			i;
 
-		snprintf(sql, sizeof(sql), "FETCH %d FROM c%u",
-				 fsstate->fetch_size, fsstate->cursor_number);
+		if (!fsstate->conn_state->async_query_sent)
+		{
+			/* This is a regular synchronous fetch. */
+			snprintf(sql, sizeof(sql), "FETCH %d FROM c%u",
+					 fsstate->fetch_size, fsstate->cursor_number);
 
-		res = PQexec(conn, sql);
-		/* On error, report the original query, not the FETCH. */
-		if (PQresultStatus(res) != PGRES_TUPLES_OK)
-			pgfdw_report_error(ERROR, res, conn, false, fsstate->query);
+			res = PQexec(conn, sql);
+			/* On error, report the original query, not the FETCH. */
+			if (PQresultStatus(res) != PGRES_TUPLES_OK)
+				pgfdw_report_error(ERROR, res, conn, false, fsstate->query);
+		}
+		else
+		{
+			/*
+			 * The query was already sent by an earlier call to
+			 * fetch_more_data_begin.  So now we just fetch the result.
+			 */
+			res = PQgetResult(conn);
+			/* On error, report the original query, not the FETCH. */
+			if (PQresultStatus(res) != PGRES_TUPLES_OK)
+				pgfdw_report_error(ERROR, res, conn, false, fsstate->query);
+		}
 
 		/* Convert the data into HeapTuples */
 		numrows = PQntuples(res);
@@ -2899,6 +2925,15 @@ fetch_more_data(ForeignScanState *node)
 		fsstate->eof_reached = (numrows < fsstate->fetch_size);
 
 		PQclear(res);
+
+		/* If this was an async request, we must now drain remaining results. */
+		if (fsstate->conn_state->async_query_sent)
+		{
+			/* There should be no further results; clear any we get anyway. */
+			while ((res = PQgetResult(conn)) != NULL)
+				PQclear(res);
+			fsstate->conn_state->async_query_sent = false;
+		}
 		res = NULL;
 	}
 	PG_CATCH();
@@ -2913,6 +2948,35 @@ fetch_more_data(ForeignScanState *node)
 }
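+
+/*
+ * A sketch of how the asynchronous fetch pieces fit together (for clarity;
+ * not part of the FDW API):
+ *
+ *		fetch_more_data_begin(node);	-- send the FETCH, don't wait
+ *		... wait until PQsocket(fsstate->conn) is readable ...
+ *		fetch_more_data(node);			-- collect the fetched rows
+ *
+ * postgresReady performs the first step and hands the socket back to the
+ * executor to wait on; the next ExecProcNode call on the foreign scan then
+ * reaches fetch_more_data via postgresIterateForeignScan.
+ */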
 
 /*
+ * Begin an asynchronous data fetch.  fetch_more_data must be called later
+ * to collect the results.
+ */
+static void
+fetch_more_data_begin(ForeignScanState *node)
+{
+	PgFdwScanState *fsstate = (PgFdwScanState *) node->fdw_state;
+	PGconn	   *conn = fsstate->conn;
+	char		sql[64];
+
+	Assert(!fsstate->conn_state->async_query_sent);
+
+	/*
+	 * Create the cursor synchronously.  (With more state-machine support we
+	 * could do this asynchronously too.)
+	 */
+	if (!fsstate->cursor_exists)
+		create_cursor(node);
+
+	/* We will send this query, but not wait for the response. */
+	snprintf(sql, sizeof(sql), "FETCH %d FROM c%u",
+			 fsstate->fetch_size, fsstate->cursor_number);
+
+	if (!PQsendQuery(conn, sql))
+		pgfdw_report_error(ERROR, NULL, conn, false, fsstate->query);
+	fsstate->conn_state->async_query_sent = true;
+}
+
+/*
  * Force assorted GUC parameters to settings that ensure that we'll output
  * data values in a form that is unambiguous to the remote server.
  *
@@ -3342,7 +3406,7 @@ postgresAnalyzeForeignTable(Relation relation,
 	 */
 	table = GetForeignTable(RelationGetRelid(relation));
 	user = GetUserMapping(relation->rd_rel->relowner, table->serverid);
-	conn = GetConnection(user, false);
+	conn = GetConnection(user, false, NULL);
 
 	/*
 	 * Construct command to get page count for relation.
@@ -3434,7 +3498,7 @@ postgresAcquireSampleRowsFunc(Relation relation, int elevel,
 	table = GetForeignTable(RelationGetRelid(relation));
 	server = GetForeignServer(table->serverid);
 	user = GetUserMapping(relation->rd_rel->relowner, table->serverid);
-	conn = GetConnection(user, false);
+	conn = GetConnection(user, false, NULL);
 
 	/*
 	 * Construct cursor that retrieves whole rows from remote.
@@ -3657,7 +3721,7 @@ postgresImportForeignSchema(ImportForeignSchemaStmt *stmt, Oid serverOid)
 	 */
 	server = GetForeignServer(serverOid);
 	mapping = GetUserMapping(GetUserId(), server->serverid);
-	conn = GetConnection(mapping, false);
+	conn = GetConnection(mapping, false, NULL);
 
 	/* Don't attempt to import collation if remote server hasn't got it */
 	if (PQserverVersion(conn) < 90100)
@@ -4264,6 +4328,41 @@ postgresGetForeignJoinPaths(PlannerInfo *root,
 	/* XXX Consider parameterized paths for the join relation */
 }
 
+static int
+postgresReady(ForeignScanState *node)
+{
+	PgFdwScanState *fsstate = (PgFdwScanState *) node->fdw_state;
+
+	if (fsstate->conn_state->async_query_sent)
+	{
+		/*
+		 * We have already started a query, for some other executor node.  We
+		 * currently can't handle two at the same time (we'd have to create
+		 * more connections for that).
+		 */
+		return ASYNC_READY_BUSY;
+	}
+	else if (fsstate->next_tuple < fsstate->num_tuples)
+	{
+		/* We already have buffered tuples. */
+		return ASYNC_READY_MORE;
+	}
+	else if (fsstate->eof_reached)
+	{
+		/* We have already hit the end of the scan. */
+		return ASYNC_READY_EOF;
+	}
+	else
+	{
+		/*
+		 * We will start a query now, and tell the caller to wait until the
+		 * file descriptor says we're ready and then call ExecProcNode.
+		 */
+		fetch_more_data_begin(node);
+		return PQsocket(fsstate->conn);
+	}
+}
+
 /*
  * Create a tuple from the specified row of the PGresult.
  *
diff --git a/contrib/postgres_fdw/postgres_fdw.h b/contrib/postgres_fdw/postgres_fdw.h
index 3a11d99..25bd426 100644
--- a/contrib/postgres_fdw/postgres_fdw.h
+++ b/contrib/postgres_fdw/postgres_fdw.h
@@ -21,6 +21,15 @@
 #include "libpq-fe.h"
 
 /*
+ * Extra control information relating to a connection.  This is kept in the
+ * connection cache entry (see connection.c), so it is shared by all scans
+ * that use the same connection.
+ */
+typedef struct PgFdwConnState
+{
+	/* Has an asynchronous query been sent? */
+	bool async_query_sent;
+} PgFdwConnState;
+
+/*
  * FDW-specific planner information kept in RelOptInfo.fdw_private for a
  * foreign table.  This information is collected by postgresGetForeignRelSize.
  */
@@ -99,7 +108,8 @@ extern int	set_transmission_modes(void);
 extern void reset_transmission_modes(int nestlevel);
 
 /* in connection.c */
-extern PGconn *GetConnection(UserMapping *user, bool will_prep_stmt);
+extern PGconn *GetConnection(UserMapping *user, bool will_prep_stmt,
+							 PgFdwConnState **state);
 extern void ReleaseConnection(PGconn *conn);
 extern unsigned int GetCursorNumber(PGconn *conn);
 extern unsigned int GetPrepStmtNumber(PGconn *conn);
diff --git a/src/backend/executor/execProcnode.c b/src/backend/executor/execProcnode.c
index a31dbc9..9565f35 100644
--- a/src/backend/executor/execProcnode.c
+++ b/src/backend/executor/execProcnode.c
@@ -116,6 +116,7 @@
 #include "executor/nodeWorktablescan.h"
 #include "nodes/nodeFuncs.h"
 #include "miscadmin.h"
+#include "utils/asynchrony.h"
 
 
 /* ------------------------------------------------------------------------
@@ -786,6 +787,30 @@ ExecEndNode(PlanState *node)
 }
 
 /*
+ * ExecReady
+ *
+ * Check whether the node would be able to produce a new tuple without
+ * blocking.  ASYNC_READY_MORE means a tuple can be returned by ExecProcNode
+ * immediately without waiting.  ASYNC_READY_EOF means there are no further
+ * tuples to consume.  ASYNC_READY_UNSUPPORTED means that this node doesn't
+ * support asynchronous interaction.  ASYNC_READY_BUSY means that this node
+ * currently can't provide asynchronous service.  Any other value is a file
+ * descriptor which can be used to wait until the node is ready to produce a
+ * tuple.
+ */
+int
+ExecReady(PlanState *node)
+{
+	switch (nodeTag(node))
+	{
+		case T_ForeignScanState:
+			return ExecForeignScanReady((ForeignScanState *) node);
+		default:
+			return ASYNC_READY_UNSUPPORTED;
+	}
+}
+
+/*
  * ExecShutdownNode
  *
  * Give execution nodes a chance to stop asynchronous resource consumption
diff --git a/src/backend/executor/nodeAppend.c b/src/backend/executor/nodeAppend.c
index a26bd63..7501483 100644
--- a/src/backend/executor/nodeAppend.c
+++ b/src/backend/executor/nodeAppend.c
@@ -59,6 +59,8 @@
 
 #include "executor/execdebug.h"
 #include "executor/nodeAppend.h"
+#include "storage/latch.h"
+#include "utils/asynchrony.h"
 
 static bool exec_append_initialize_next(AppendState *appendstate);
 
@@ -181,9 +183,207 @@ ExecInitAppend(Append *node, EState *estate, int eflags)
 	appendstate->as_whichplan = 0;
 	exec_append_initialize_next(appendstate);
 
+	/*
+	 * Initially we consider all subplans to be potentially asynchronous.  A
+	 * zero entry in asyncfds[] means we don't yet have a file descriptor to
+	 * wait on for that subplan.
+	 */
+	appendstate->asyncplans = (PlanState **) palloc(nplans * sizeof(PlanState *));
+	appendstate->asyncfds = (int *) palloc0(nplans * sizeof(int));
+	appendstate->nasyncplans = nplans;
+	memcpy(appendstate->asyncplans, appendstate->appendplans, nplans * sizeof(PlanState *));
+	appendstate->lastreadyplan = 0;
+
 	return appendstate;
 }
 
+/*
+ * Forget about an asynchronous subplan, given an async subplan index.  Return
+ * the index of the next subplan.
+ */
+static int
+forget_async_subplan(AppendState *node, int i)
+{
+	int last = node->nasyncplans - 1;
+
+	if (i == last)
+	{
+		/* This was the last subplan, forget it and move to first. */
+		i = 0;
+		if (node->lastreadyplan == last)
+			node->lastreadyplan = 0;
+	}
+	else
+	{
+		/*
+		 * Move the last one into this slot (cheaper than memmove'ing the
+		 * whole array down, and we don't care about the order).
+		 */
+		node->asyncplans[i] = node->asyncplans[last];
+		node->asyncfds[i] = node->asyncfds[last];
+	}
+	--node->nasyncplans;
+
+	return i;
+}
+
+/*
+ * Wait for the first asynchronous subplan's file descriptor to be ready to
+ * read or error, and then ask it for a tuple.
+ *
+ * This is called by append_next_async when every async subplan has provided a
+ * file descriptor to wait on, so we must begin waiting.
+ */
+static TupleTableSlot *
+append_next_async_wait(AppendState *node)
+{
+	while (node->nasyncplans > 0)
+	{
+		WaitEventSet *set;
+		WaitEvent event;
+		int i;
+
+		/*
+		 * For now there is no facility to remove fds from WaitEventSets when
+		 * they are no longer interesting, so we allocate, populate, and free
+		 * one every time, a la select().  If we had RemoveWaitEventFromSet, we
+		 * could use the same WaitEventSet object for the life of the append
+		 * node, and add/remove as we go, a la epoll/kqueue.
+		 *
+		 * Note: We could make a single call to WaitEventSetWait and have a
+		 * big enough output event buffer to learn about readiness on all
+		 * interesting sockets and loop over those, but one implementation can
+		 * only tell us about a single socket at a time, so we need to be
+		 * prepared to call WaitEventSetWait repeatedly.
+		 */
+		set = CreateWaitEventSet(CurrentMemoryContext, node->nasyncplans + 1);
+		AddWaitEventToSet(set, WL_POSTMASTER_DEATH, PGINVALID_SOCKET, NULL);
+		for (i = 0; i < node->nasyncplans; ++i)
+		{
+			Assert(node->asyncfds[i] > 0);
+			AddWaitEventToSet(set, WL_SOCKET_READABLE, node->asyncfds[i], NULL);
+		}
+		i = WaitEventSetWait(set, -1, &event, 1);
+		Assert(i > 0);
+		FreeWaitEventSet(set);
+
+		if (event.events & WL_POSTMASTER_DEATH)
+			exit(1);
+		if (event.events & WL_SOCKET_READABLE)
+		{
+			/* Linear search for the node that told us to wait for this fd. */
+			for (i = 0; i < node->nasyncplans; ++i)
+			{
+				if (event.fd == node->asyncfds[i])
+				{
+					TupleTableSlot *result;
+
+					/*
+					 * We assume that because the fd is ready, it can produce
+					 * a tuple now, which is not perfect.  An improvement
+					 * would be if it could say 'not yet, I'm still not
+					 * ready', so eg postgres_fdw could PQconsumeInput and
+					 * then say 'I need more input'.
+					 */
+					result = ExecProcNode(node->asyncplans[i]);
+					if (!TupIsNull(result))
+					{
+						/*
+						 * Remember this plan so that append_next_async will
+						 * keep trying this subplan first until it stops
+						 * feeding us buffered tuples.
+						 */
+						node->lastreadyplan = i;
+						/* We can stop waiting for this fd. */
+						node->asyncfds[i] = 0;
+						return result;
+					}
+					else
+					{
+						/*
+						 * This subplan has reached EOF.  We'll go back and
+						 * wait for another one.
+						 */
+						forget_async_subplan(node, i);
+						break;
+					}
+				}
+			}
+		}
+	}
+	/*
+	 * We visited every ready subplan, tried to pull a tuple, and they all
+	 * reported EOF.  There is no more async data available.
+	 */
+	return NULL;
+}
+
+/*
+ * Fetch the next tuple available from any asynchronous subplan.  If none can
+ * provide a tuple immediately, wait for the first one that is ready to
+ * provide a tuple.  Return NULL when there are no more tuples available.
+ */
+static TupleTableSlot *
+append_next_async(AppendState *node)
+{
+	int count;
+	int i;
+
+	/*
+	 * We'll start our scan of subplans at the last one that was able to give
+	 * us a tuple, if there was one.  It may be able to give us a new tuple
+	 * straight away so we can leave early.
+	 */
+	i = node->lastreadyplan;
+
+	/* Loop until we've visited each potentially async subplan. */
+	for (count = node->nasyncplans; count > 0; --count)
+	{
+		/*
+		 * If we don't already have a file descriptor to wait on for this
+		 * subplan, see if it is ready.
+		 */
+		if (node->asyncfds[i] == 0)
+		{
+			int ready = ExecReady(node->asyncplans[i]);
+
+			switch (ready)
+			{
+			case ASYNC_READY_MORE:
+				/* The node has a buffered tuple for us. */
+				return ExecProcNode(node->asyncplans[i]);
+
+			case ASYNC_READY_UNSUPPORTED:
+			case ASYNC_READY_EOF:
+			case ASYNC_READY_BUSY:
+				/* This subplan can't give us anything asynchronously. */
+				i = forget_async_subplan(node, i);
+				continue;
+
+			default:
+				/* We have a new file descriptor to wait for. */
+				Assert(ready > 0);
+				node->asyncfds[i] = ready;
+				node->lastreadyplan = 0;
+				break;
+			}
+		}
+
+		/* Move on to the next plan (circular). */
+		i = (i + 1) % node->nasyncplans;
+	}
+
+	/* We might have removed all subplans; if so we can leave now. */
+	if (node->nasyncplans == 0)
+		return NULL;
+
+	/*
+	 * If we reached here, then all remaining async subplans have given us a
+	 * file descriptor to wait for.  So do that, and pull a tuple as soon as
+	 * one is ready.
+	 */
+	return append_next_async_wait(node);
+}
+
+
 /* ----------------------------------------------------------------
  *	   ExecAppend
  *
@@ -193,6 +393,17 @@ ExecInitAppend(Append *node, EState *estate, int eflags)
 TupleTableSlot *
 ExecAppend(AppendState *node)
 {
+	/* First, drain all asynchronous subplans as they become ready. */
+	if (node->nasyncplans > 0)
+	{
+		TupleTableSlot *result = append_next_async(node);
+
+		if (!TupIsNull(result))
+			return result;
+	}
+	Assert(node->nasyncplans == 0);
+
+	/* Next, process the regular synchronous subplans sequentially. */
 	for (;;)
 	{
 		PlanState  *subnode;
diff --git a/src/backend/executor/nodeForeignscan.c b/src/backend/executor/nodeForeignscan.c
index 300f947..70796d1 100644
--- a/src/backend/executor/nodeForeignscan.c
+++ b/src/backend/executor/nodeForeignscan.c
@@ -25,6 +25,7 @@
 #include "executor/executor.h"
 #include "executor/nodeForeignscan.h"
 #include "foreign/fdwapi.h"
+#include "utils/asynchrony.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
 
@@ -355,3 +356,14 @@ ExecForeignScanInitializeWorker(ForeignScanState *node, shm_toc *toc)
 		fdwroutine->InitializeWorkerForeignScan(node, toc, coordinate);
 	}
 }
+
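+/* ----------------------------------------------------------------
+ *		ExecForeignScanReady
+ *
+ *		Ask the FDW whether this scan could return a tuple without
+ *		blocking; see ExecReady for the meaning of the return value.
+ * ----------------------------------------------------------------
+ */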
+int
+ExecForeignScanReady(ForeignScanState *node)
+{
+	FdwRoutine *fdwroutine = node->fdwroutine;
+
+	if (fdwroutine->Ready)
+		return fdwroutine->Ready(node);
+	else
+		return ASYNC_READY_UNSUPPORTED;
+}
diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h
index 44fac27..e364a8d 100644
--- a/src/include/executor/executor.h
+++ b/src/include/executor/executor.h
@@ -224,6 +224,7 @@ extern void EvalPlanQualEnd(EPQState *epqstate);
 extern PlanState *ExecInitNode(Plan *node, EState *estate, int eflags);
 extern TupleTableSlot *ExecProcNode(PlanState *node);
 extern Node *MultiExecProcNode(PlanState *node);
+extern int ExecReady(PlanState *node);
 extern void ExecEndNode(PlanState *node);
 extern bool ExecShutdownNode(PlanState *node);
 
diff --git a/src/include/executor/nodeForeignscan.h b/src/include/executor/nodeForeignscan.h
index c255329..b1f3168 100644
--- a/src/include/executor/nodeForeignscan.h
+++ b/src/include/executor/nodeForeignscan.h
@@ -29,4 +29,6 @@ extern void ExecForeignScanInitializeDSM(ForeignScanState *node,
 extern void ExecForeignScanInitializeWorker(ForeignScanState *node,
 											shm_toc *toc);
 
+extern int ExecForeignScanReady(ForeignScanState *node);
+
 #endif   /* NODEFOREIGNSCAN_H */
diff --git a/src/include/foreign/fdwapi.h b/src/include/foreign/fdwapi.h
index 096a9c4..06aada7 100644
--- a/src/include/foreign/fdwapi.h
+++ b/src/include/foreign/fdwapi.h
@@ -153,6 +153,8 @@ typedef bool (*IsForeignScanParallelSafe_function) (PlannerInfo *root,
 															 RelOptInfo *rel,
 														 RangeTblEntry *rte);
 
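+/*
+ * Check whether a foreign scan is ready to return a tuple without blocking;
+ * returns an ASYNC_READY_* code from utils/asynchrony.h or a file descriptor
+ * to wait on.
+ */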
+typedef int (*Ready_function) (ForeignScanState *node);
+
 /*
  * FdwRoutine is the struct returned by a foreign-data wrapper's handler
  * function.  It provides pointers to the callback functions needed by the
@@ -222,6 +224,9 @@ typedef struct FdwRoutine
 	EstimateDSMForeignScan_function EstimateDSMForeignScan;
 	InitializeDSMForeignScan_function InitializeDSMForeignScan;
 	InitializeWorkerForeignScan_function InitializeWorkerForeignScan;
+
+	/* Support functions for asynchronous processing */
+	Ready_function Ready;
 } FdwRoutine;
 
 
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index 0113e5c..7d2881a 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -1151,6 +1151,11 @@ typedef struct AppendState
 	PlanState **appendplans;	/* array of PlanStates for my inputs */
 	int			as_nplans;
 	int			as_whichplan;
+
+	PlanState **asyncplans;		/* async-capable subplans not yet exhausted */
+	int		   *asyncfds;		/* fds to wait on, parallel to asyncplans (0 = none) */
+	int			nasyncplans;	/* number of entries remaining in asyncplans */
+	int			lastreadyplan;	/* index of the async subplan to try first */
 } AppendState;
 
 /* ----------------
diff --git a/src/include/utils/asynchrony.h b/src/include/utils/asynchrony.h
new file mode 100644
index 0000000..c3165e9
--- /dev/null
+++ b/src/include/utils/asynchrony.h
@@ -0,0 +1,36 @@
+/*-------------------------------------------------------------------------
+ *
+ * asynchrony.h
+ *		  Asynchrony-related types and interfaces.
+ *
+ * Portions Copyright (c) 2016, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ *		  src/include/utils/asynchrony.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef ASYNCHRONY_H
+#define ASYNCHRONY_H
+
+/*
+ * Special values used by the FDW interface and the executor, for dealing with
+ * asynchronous tuple iteration.
+ */
+
+/*
+ * Asynchronous processing is not currently available (because an asynchronous
+ * request is already in progress).
+ */
+#define ASYNC_READY_BUSY -3
+
+/* There are no more tuples. */
+#define ASYNC_READY_EOF -2
+
+/* This FDW or executor node does not support asynchronous processing. */
+#define ASYNC_READY_UNSUPPORTED -1
+
+/* More tuples are available immediately without waiting. */
+#define ASYNC_READY_MORE 0
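+
+/*
+ * Any return value greater than zero from ExecReady or an FDW's Ready
+ * callback is a file descriptor that the caller can wait on for readiness.
+ */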
+
+#endif   /* ASYNCHRONY_H */
