On 30.11.2017 04:59, Michael Paquier wrote:
On Wed, Sep 13, 2017 at 2:11 AM, Konstantin Knizhnik
<k.knizh...@postgrespro.ru> wrote:
One more patch passing all regression tests with autoprepare_threshold=1.
I still do not think that it should be switched on by default...
This patch does not apply, and did not get any reviews. So I am moving
it to next CF with waiting on author as status. Please provide a
rebased version. Tsunakawa-san, you are listed as a reviewer of this
patch. If you are not planning to look at it anymore, you may want to
remove your name from the related CF entry
https://commitfest.postgresql.org/16/1150/.
An updated version of the patch is attached.
--
Konstantin Knizhnik
Postgres Professional: http://www.postgrespro.com
The Russian Postgres Company
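
For anyone who wants to try the patch, a minimal session along these lines should exercise the feature (only the GUC names autoprepare_threshold and autoprepare_limit come from the patch; the table and literal values are just illustrative):

    SET autoprepare_threshold = 1;   -- prepare a query after it has been executed this many times
    SET autoprepare_limit = 113;     -- cap the number of cached generalized plans (0 = unlimited)

    -- Both statements generalize to the same parameterized form, so the
    -- second one should be executed through the implicitly cached plan.
    SELECT relname FROM pg_class WHERE relpages > 10;
    SELECT relname FROM pg_class WHERE relpages > 100;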
diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c
index c2a93b2..0e6cc89 100644
--- a/src/backend/nodes/nodeFuncs.c
+++ b/src/backend/nodes/nodeFuncs.c
@@ -3688,6 +3688,454 @@ raw_expression_tree_walker(Node *node,
}
/*
+ * raw_expression_tree_mutator --- transform a raw parse tree.
+ *
+ * This function takes a slightly different approach to tree updates than
+ * expression_tree_mutator(): the callback is given a pointer to the pointer
+ * to the current node and can update that field in place instead of
+ * returning a reference to a new node.  This makes it possible to remember
+ * the changes and easily revert them without an extra traversal of the tree.
+ *
+ * This function does not need the QTW_DONT_COPY_QUERY flag: it never
+ * implicitly copies tree nodes, it updates them in place.
+ *
+ * Like raw_expression_tree_walker, there is no special rule about query
+ * boundaries: we descend to everything that's possibly interesting.
+ *
+ * Currently, the node type coverage here extends only to DML statements
+ * (SELECT/INSERT/UPDATE/DELETE) and nodes that can appear in them, because
+ * this is used mainly during analysis of CTEs, and only DML statements can
+ * appear in CTEs.  If some other node is visited, traversal stops
+ * immediately and true is returned.
+bool
+raw_expression_tree_mutator(Node *node,
+ bool (*mutator) (),
+ void *context)
+{
+ ListCell *temp;
+
+ /*
+ * The mutator has already been applied to the current node, and so we need
+ * only recurse into any sub-nodes it has.
+ */
+ if (node == NULL)
+ return false;
+
+ /* Guard against stack overflow due to overly complex expressions */
+ check_stack_depth();
+
+ switch (nodeTag(node))
+ {
+ case T_SetToDefault:
+ case T_CurrentOfExpr:
+ case T_Integer:
+ case T_Float:
+ case T_String:
+ case T_BitString:
+ case T_Null:
+ case T_ParamRef:
+ case T_A_Const:
+ case T_A_Star:
+ /* primitive node types with no subnodes */
+ break;
+ case T_Alias:
+ /* we assume the colnames list isn't interesting */
+ break;
+ case T_RangeVar:
+ return mutator(&((RangeVar *) node)->alias, context);
+ case T_GroupingFunc:
+ return mutator(&((GroupingFunc *) node)->args, context);
+ case T_SubLink:
+ {
+ SubLink *sublink = (SubLink *) node;
+
+ if (mutator(&sublink->testexpr, context))
+ return true;
+ /* we assume the operName is not interesting */
+ if (mutator(&sublink->subselect, context))
+ return true;
+ }
+ break;
+ case T_CaseExpr:
+ {
+ CaseExpr *caseexpr = (CaseExpr *) node;
+
+ if (mutator(&caseexpr->arg, context))
+ return true;
+ /* we assume the mutator doesn't care about CaseWhens, either */
+ foreach(temp, caseexpr->args)
+ {
+ CaseWhen *when = (CaseWhen *) lfirst(temp);
+
+ Assert(IsA(when, CaseWhen));
+ if (mutator(&when->expr, context))
+ return true;
+ if (mutator(&when->result, context))
+ return true;
+ }
+ if (mutator(&caseexpr->defresult, context))
+ return true;
+ }
+ break;
+ case T_RowExpr:
+ /* Assume colnames isn't interesting */
+ return mutator(&((RowExpr *) node)->args, context);
+ case T_CoalesceExpr:
+ return mutator(&((CoalesceExpr *) node)->args, context);
+ case T_MinMaxExpr:
+ return mutator(&((MinMaxExpr *) node)->args, context);
+ case T_XmlExpr:
+ {
+ XmlExpr *xexpr = (XmlExpr *) node;
+
+ if (mutator(&xexpr->named_args, context))
+ return true;
+ /* we assume the mutator doesn't care about arg_names */
+ if (mutator(&xexpr->args, context))
+ return true;
+ }
+ break;
+ case T_NullTest:
+ return mutator(&((NullTest *) node)->arg, context);
+ case T_BooleanTest:
+ return mutator(&((BooleanTest *) node)->arg, context);
+ case T_JoinExpr:
+ {
+ JoinExpr *join = (JoinExpr *) node;
+
+ if (mutator(&join->larg, context))
+ return true;
+ if (mutator(&join->rarg, context))
+ return true;
+ if (mutator(&join->quals, context))
+ return true;
+ if (mutator(&join->alias, context))
+ return true;
+ /* using list is deemed uninteresting */
+ }
+ break;
+ case T_IntoClause:
+ {
+ IntoClause *into = (IntoClause *) node;
+
+ if (mutator(&into->rel, context))
+ return true;
+ /* colNames, options are deemed uninteresting */
+ /* viewQuery should be null in raw parsetree, but check it */
+ if (mutator(&into->viewQuery, context))
+ return true;
+ }
+ break;
+ case T_List:
+ foreach(temp, (List *) node)
+ {
+ if (mutator(&lfirst(temp), context))
+ return true;
+ }
+ break;
+ case T_InsertStmt:
+ {
+ InsertStmt *stmt = (InsertStmt *) node;
+
+ if (mutator(&stmt->relation, context))
+ return true;
+ if (mutator(&stmt->cols, context))
+ return true;
+ if (mutator(&stmt->selectStmt, context))
+ return true;
+ if (mutator(&stmt->onConflictClause, context))
+ return true;
+ if (mutator(&stmt->returningList, context))
+ return true;
+ if (mutator(&stmt->withClause, context))
+ return true;
+ }
+ break;
+ case T_DeleteStmt:
+ {
+ DeleteStmt *stmt = (DeleteStmt *) node;
+
+ if (mutator(&stmt->relation, context))
+ return true;
+ if (mutator(&stmt->usingClause, context))
+ return true;
+ if (mutator(&stmt->whereClause, context))
+ return true;
+ if (mutator(&stmt->returningList, context))
+ return true;
+ if (mutator(&stmt->withClause, context))
+ return true;
+ }
+ break;
+ case T_UpdateStmt:
+ {
+ UpdateStmt *stmt = (UpdateStmt *) node;
+
+ if (mutator(&stmt->relation, context))
+ return true;
+ if (mutator(&stmt->targetList, context))
+ return true;
+ if (mutator(&stmt->whereClause, context))
+ return true;
+ if (mutator(&stmt->fromClause, context))
+ return true;
+ if (mutator(&stmt->returningList, context))
+ return true;
+ if (mutator(&stmt->withClause, context))
+ return true;
+ }
+ break;
+ case T_SelectStmt:
+ {
+ SelectStmt *stmt = (SelectStmt *) node;
+
+ if (mutator(&stmt->distinctClause, context))
+ return true;
+ if (mutator(&stmt->intoClause, context))
+ return true;
+ if (mutator(&stmt->targetList, context))
+ return true;
+ if (mutator(&stmt->fromClause, context))
+ return true;
+ if (mutator(&stmt->whereClause, context))
+ return true;
+ if (mutator(&stmt->groupClause, context))
+ return true;
+ if (mutator(&stmt->havingClause, context))
+ return true;
+ if (mutator(&stmt->windowClause, context))
+ return true;
+ if (mutator(&stmt->valuesLists, context))
+ return true;
+ if (mutator(&stmt->sortClause, context))
+ return true;
+ if (mutator(&stmt->limitOffset, context))
+ return true;
+ if (mutator(&stmt->limitCount, context))
+ return true;
+ if (mutator(&stmt->lockingClause, context))
+ return true;
+ if (mutator(&stmt->withClause, context))
+ return true;
+ if (mutator(&stmt->larg, context))
+ return true;
+ if (mutator(&stmt->rarg, context))
+ return true;
+ }
+ break;
+ case T_A_Expr:
+ {
+ A_Expr *expr = (A_Expr *) node;
+
+ if (mutator(&expr->lexpr, context))
+ return true;
+ if (mutator(&expr->rexpr, context))
+ return true;
+ /* operator name is deemed uninteresting */
+ }
+ break;
+ case T_BoolExpr:
+ {
+ BoolExpr *expr = (BoolExpr *) node;
+
+ if (mutator(&expr->args, context))
+ return true;
+ }
+ break;
+ case T_ColumnRef:
+ /* we assume the fields contain nothing interesting */
+ break;
+ case T_FuncCall:
+ {
+ FuncCall *fcall = (FuncCall *) node;
+
+ if (mutator(&fcall->args, context))
+ return true;
+ if (mutator(&fcall->agg_order, context))
+ return true;
+ if (mutator(&fcall->agg_filter, context))
+ return true;
+ if (mutator(&fcall->over, context))
+ return true;
+ /* function name is deemed uninteresting */
+ }
+ break;
+ case T_NamedArgExpr:
+ return mutator(&((NamedArgExpr *) node)->arg, context);
+ case T_A_Indices:
+ {
+ A_Indices *indices = (A_Indices *) node;
+
+ if (mutator(&indices->lidx, context))
+ return true;
+ if (mutator(&indices->uidx, context))
+ return true;
+ }
+ break;
+ case T_A_Indirection:
+ {
+ A_Indirection *indir = (A_Indirection *) node;
+
+ if (mutator(&indir->arg, context))
+ return true;
+ if (mutator(&indir->indirection, context))
+ return true;
+ }
+ break;
+ case T_A_ArrayExpr:
+ return mutator(&((A_ArrayExpr *) node)->elements, context);
+ case T_ResTarget:
+ {
+ ResTarget *rt = (ResTarget *) node;
+
+ if (mutator(&rt->indirection, context))
+ return true;
+ if (mutator(&rt->val, context))
+ return true;
+ }
+ break;
+ case T_MultiAssignRef:
+ return mutator(&((MultiAssignRef *) node)->source, context);
+ case T_TypeCast:
+ {
+ TypeCast *tc = (TypeCast *) node;
+
+ if (mutator(&tc->arg, context))
+ return true;
+ if (mutator(&tc->typeName, context))
+ return true;
+ }
+ break;
+ case T_CollateClause:
+ return mutator(&((CollateClause *) node)->arg, context);
+ case T_SortBy:
+ return mutator(&((SortBy *) node)->node, context);
+ case T_WindowDef:
+ {
+ WindowDef *wd = (WindowDef *) node;
+
+ if (mutator(&wd->partitionClause, context))
+ return true;
+ if (mutator(&wd->orderClause, context))
+ return true;
+ if (mutator(&wd->startOffset, context))
+ return true;
+ if (mutator(&wd->endOffset, context))
+ return true;
+ }
+ break;
+ case T_RangeSubselect:
+ {
+ RangeSubselect *rs = (RangeSubselect *) node;
+
+ if (mutator(&rs->subquery, context))
+ return true;
+ if (mutator(&rs->alias, context))
+ return true;
+ }
+ break;
+ case T_RangeFunction:
+ {
+ RangeFunction *rf = (RangeFunction *) node;
+
+ if (mutator(&rf->functions, context))
+ return true;
+ if (mutator(&rf->alias, context))
+ return true;
+ if (mutator(&rf->coldeflist, context))
+ return true;
+ }
+ break;
+ case T_RangeTableSample:
+ {
+ RangeTableSample *rts = (RangeTableSample *) node;
+
+ if (mutator(&rts->relation, context))
+ return true;
+ /* method name is deemed uninteresting */
+ if (mutator(&rts->args, context))
+ return true;
+ if (mutator(&rts->repeatable, context))
+ return true;
+ }
+ break;
+ case T_TypeName:
+ {
+ TypeName *tn = (TypeName *) node;
+
+ if (mutator(&tn->typmods, context))
+ return true;
+ if (mutator(&tn->arrayBounds, context))
+ return true;
+ /* type name itself is deemed uninteresting */
+ }
+ break;
+ case T_ColumnDef:
+ {
+ ColumnDef *coldef = (ColumnDef *) node;
+
+ if (mutator(&coldef->typeName, context))
+ return true;
+ if (mutator(&coldef->raw_default, context))
+ return true;
+ if (mutator(&coldef->collClause, context))
+ return true;
+ /* for now, constraints are ignored */
+ }
+ break;
+ case T_IndexElem:
+ {
+ IndexElem *indelem = (IndexElem *) node;
+
+ if (mutator(&indelem->expr, context))
+ return true;
+ /* collation and opclass names are deemed uninteresting */
+ }
+ break;
+ case T_GroupingSet:
+ return mutator(&((GroupingSet *) node)->content, context);
+ case T_LockingClause:
+ return mutator(&((LockingClause *) node)->lockedRels, context);
+ case T_XmlSerialize:
+ {
+ XmlSerialize *xs = (XmlSerialize *) node;
+
+ if (mutator(&xs->expr, context))
+ return true;
+ if (mutator(&xs->typeName, context))
+ return true;
+ }
+ break;
+ case T_WithClause:
+ return mutator(&((WithClause *) node)->ctes, context);
+ case T_InferClause:
+ {
+ InferClause *stmt = (InferClause *) node;
+
+ if (mutator(&stmt->indexElems, context))
+ return true;
+ if (mutator(&stmt->whereClause, context))
+ return true;
+ }
+ break;
+ case T_OnConflictClause:
+ {
+ OnConflictClause *stmt = (OnConflictClause *) node;
+
+ if (mutator(&stmt->infer, context))
+ return true;
+ if (mutator(&stmt->targetList, context))
+ return true;
+ if (mutator(&stmt->whereClause, context))
+ return true;
+ }
+ break;
+ case T_CommonTableExpr:
+ return mutator(&((CommonTableExpr *) node)->ctequery, context);
+ default:
+ return true;
+ }
+ return false;
+}
+
+/*
* planstate_tree_walker --- walk plan state trees
*
* The walker has already visited the current node, and so we need only
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index 1ae9ac2..7fa8310 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -42,11 +42,13 @@
#include "catalog/pg_type.h"
#include "commands/async.h"
#include "commands/prepare.h"
+#include "commands/defrem.h"
#include "libpq/libpq.h"
#include "libpq/pqformat.h"
#include "libpq/pqsignal.h"
#include "miscadmin.h"
#include "nodes/print.h"
+#include "nodes/nodeFuncs.h"
#include "optimizer/planner.h"
#include "pgstat.h"
#include "pg_trace.h"
@@ -75,6 +77,7 @@
#include "utils/snapmgr.h"
#include "utils/timeout.h"
#include "utils/timestamp.h"
+#include "utils/int8.h"
#include "mb/pg_wchar.h"
@@ -191,10 +194,11 @@ static bool IsTransactionExitStmtList(List *pstmts);
static bool IsTransactionStmtList(List *pstmts);
static void drop_unnamed_stmt(void);
static void log_disconnections(int code, Datum arg);
+static bool exec_cached_query(const char* query, List *parsetree_list);
+static void exec_prepared_plan(Portal portal, const char *portal_name, long max_rows, CommandDest dest);
static void enable_statement_timeout(void);
static void disable_statement_timeout(void);
-
/* ----------------------------------------------------------------
* routines to obtain user input
* ----------------------------------------------------------------
@@ -967,6 +971,16 @@ exec_simple_query(const char *query_string)
use_implicit_block = (list_length(parsetree_list) > 1);
/*
+ * Try to find cached plan.
+ */
+ if (autoprepare_threshold != 0
+ && list_length(parsetree_list) == 1 /* we can prepare only single statement commands */
+ && exec_cached_query(query_string, parsetree_list))
+ {
+ return;
+ }
+
+ /*
* Run through the raw parsetree(s) and process each one.
*/
foreach(parsetree_item, parsetree_list)
@@ -1865,9 +1879,28 @@ exec_bind_message(StringInfo input_message)
static void
exec_execute_message(const char *portal_name, long max_rows)
{
- CommandDest dest;
+ Portal portal = GetPortalByName(portal_name);
+ CommandDest dest = whereToSendOutput;
+
+ /* Adjust destination to tell printtup.c what to do */
+ if (dest == DestRemote)
+ dest = DestRemoteExecute;
+
+ if (!PortalIsValid(portal))
+ ereport(ERROR,
+ (errcode(ERRCODE_UNDEFINED_CURSOR),
+ errmsg("portal \"%s\" does not exist", portal_name)));
+
+ exec_prepared_plan(portal, portal_name, max_rows, dest);
+}
+
+/*
+ * Execute prepared plan.
+ */
+static void
+exec_prepared_plan(Portal portal, const char *portal_name, long max_rows, CommandDest dest)
+{
DestReceiver *receiver;
- Portal portal;
bool completed;
char completionTag[COMPLETION_TAG_BUFSIZE];
const char *sourceText;
@@ -1879,17 +1912,6 @@ exec_execute_message(const char *portal_name, long max_rows)
bool was_logged = false;
char msec_str[32];
- /* Adjust destination to tell printtup.c what to do */
- dest = whereToSendOutput;
- if (dest == DestRemote)
- dest = DestRemoteExecute;
-
- portal = GetPortalByName(portal_name);
- if (!PortalIsValid(portal))
- ereport(ERROR,
- (errcode(ERRCODE_UNDEFINED_CURSOR),
- errmsg("portal \"%s\" does not exist", portal_name)));
-
/*
* If the original query was a null string, just return
* EmptyQueryResponse.
@@ -1952,7 +1974,7 @@ exec_execute_message(const char *portal_name, long max_rows)
* context, because that may get deleted if portal contains VACUUM).
*/
receiver = CreateDestReceiver(dest);
- if (dest == DestRemoteExecute)
+ if (dest == DestRemoteExecute || dest == DestRemote)
SetRemoteDestReceiverParams(receiver, portal);
/*
@@ -4572,6 +4594,786 @@ log_disconnections(int code, Datum arg)
}
/*
+ * Autoprepare implementation.
+ * Autoprepare consists of a raw parse tree mutator, a hash table of cached plans, and the exec_cached_query
+ * function, which combines exec_parse_message + exec_bind_message + exec_execute_message.
+ */
+
+/*
+ * Mapping between parameters and replaced literals
+ */
+typedef struct ParamBinding
+{
+ A_Const* literal; /* Original literal */
+ ParamRef* paramref;/* Constructed parameter reference */
+ Param* param; /* Constructed parameter */
+ Node** ref; /* Pointer to pointer to literal node (used to revert raw parse tree update) */
+ Oid raw_type;/* Parameter raw type */
+ Oid type; /* Parameter type after analysis */
+ struct ParamBinding* next; /* Singly linked list of query parameter bindings */
+} ParamBinding;
+
+/*
+ * Plan cache entry
+ */
+typedef struct
+{
+ Node* parse_tree; /* tree is used as hash key */
+ dlist_node lru; /* doubly linked list node to implement LRU */
+ int64 exec_count; /* counter of execution of this query */
+ CachedPlanSource* plan;
+ uint32 hash; /* hash calculated for this parsed tree */
+ Oid* param_types;/* types of parameters */
+ int n_params; /* number of parameters extracted for this query */
+ int16 format; /* portal output format */
+ bool disable_autoprepare; /* disable preparing of this query */
+} plan_cache_entry;
+
+static uint32 plan_cache_hash_fn(const void *key, Size keysize)
+{
+ return ((plan_cache_entry*)key)->hash;
+}
+
+static int plan_cache_match_fn(const void *key1, const void *key2, Size keysize)
+{
+ plan_cache_entry* e1 = (plan_cache_entry*)key1;
+ plan_cache_entry* e2 = (plan_cache_entry*)key2;
+
+ return equal(e1->parse_tree, e2->parse_tree)
+ && memcmp(e1->param_types, e2->param_types, sizeof(Oid)*e1->n_params) == 0 ? 0 : 1;
+}
+
+static void* plan_cache_keycopy_fn(void *dest, const void *src, Size keysize)
+{
+ plan_cache_entry* dst_entry = (plan_cache_entry*)dest;
+ plan_cache_entry* src_entry = (plan_cache_entry*)src;
+ dst_entry->parse_tree = copyObject(src_entry->parse_tree);
+ dst_entry->param_types = palloc(src_entry->n_params*sizeof(Oid));
+ dst_entry->n_params = src_entry->n_params;
+ memcpy(dst_entry->param_types, src_entry->param_types, src_entry->n_params*sizeof(Oid));
+ dst_entry->hash = src_entry->hash;
+ return dest;
+}
+
+#define PLAN_CACHE_SIZE 113
+
+/*
+ * Plan cache access statistics
+ */
+size_t autoprepare_hits;
+size_t autoprepare_misses;
+size_t autoprepare_cached_plans;
+
+/*
+ * Context for raw_expression_tree_mutator
+ */
+typedef struct {
+ int n_params; /* Number of extracted parameters */
+ uint32 hash; /* We calculate hash for parse tree during plan traversal */
+ ParamBinding** param_list_tail; /* address of the last element's "next" field, used to construct a singly linked list of parameters */
+} GeneralizerCtx;
+
+
+static HTAB* plan_cache_hash; /* hash table for plan cache */
+static dlist_head plan_cache_lru; /* LRU list of cached queries */
+static MemoryContext plan_cache_context; /* memory context used for plan cache */
+
+/*
+ * Check whether an expression is constant (used to avoid substituting literals with parameters in such expressions)
+ */
+static bool is_constant_expression(Node* node)
+{
+ return node != NULL
+ && (IsA(node, A_Const)
+ || (IsA(node, A_Expr)
+ && is_constant_expression(((A_Expr*)node)->lexpr)
+ && is_constant_expression(((A_Expr*)node)->rexpr)));
+}
+
+/*
+ * Infer type of literal expression. Null literals should not be replaced with parameters.
+ */
+static Oid get_literal_type(Value* val)
+{
+ int64 val64;
+ switch (val->type)
+ {
+ case T_Integer:
+ return INT4OID;
+ case T_Float:
+ /* could be an oversize integer as well as a float ... */
+ if (scanint8(strVal(val), true, &val64))
+ {
+ /*
+ * It might actually fit in int32. Probably only INT_MIN can
+ * occur, but we'll code the test generally just to be sure.
+ */
+ int32 val32 = (int32) val64;
+ return (val64 == (int64)val32) ? INT4OID : INT8OID;
+ }
+ else
+ {
+ return NUMERICOID;
+ }
+ case T_BitString:
+ return BITOID;
+ case T_String:
+ return UNKNOWNOID;
+ default:
+ Assert(false);
+ return InvalidOid;
+ }
+}
+
+static Datum get_param_value(Oid type, Value* val)
+{
+ if (val->type == T_Integer && type == INT4OID)
+ {
+ /*
+ * Integer constant
+ */
+ return Int32GetDatum((int32)val->val.ival);
+ }
+ else
+ {
+ /*
+ * Convert from string literal
+ */
+ Oid typinput;
+ Oid typioparam;
+
+ getTypeInputInfo(type, &typinput, &typioparam);
+ return OidInputFunctionCall(typinput, val->val.str, typioparam, -1);
+ }
+}
+
+
+/*
+ * Callback for raw_expression_tree_mutator performing substitution of literals with parameters
+ */
+static bool
+raw_parse_tree_generalizer(Node** ref, void *context)
+{
+ Node* node = *ref;
+ GeneralizerCtx* ctx = (GeneralizerCtx*)context;
+ if (node == NULL)
+ {
+ return false;
+ }
+ /*
+ * Calculate a hash for the parse tree.  We consider only node tags here; precise comparison of trees is done with the equal() function.
+ * The hash is calculated for the original (unpatched) tree, without ParamRef nodes.
+ * That does not matter, because the hash calculation ignores the types and values of Const nodes, so the same generalized queries
+ * still get the same hash value.  There are about 1000 different node tags, which is why we rotate the hash by 10 bits.
+ */
+ ctx->hash = (ctx->hash << 10) ^ (ctx->hash >> 22) ^ nodeTag(node);
+
+ switch (nodeTag(node))
+ {
+ case T_A_Expr:
+ {
+ /*
+ * Do not substitute literals in a constant expression (which is likely to be the same for all queries and to be constant-folded)
+ */
+ if (!is_constant_expression(node))
+ {
+ A_Expr *expr = (A_Expr *) node;
+ if (raw_parse_tree_generalizer((Node**)&expr->lexpr, context))
+ return true;
+ if (raw_parse_tree_generalizer((Node**)&expr->rexpr, context))
+ return true;
+ }
+ break;
+ }
+ case T_A_Const:
+ {
+ /*
+ * Do substitution of literals with parameters here
+ */
+ A_Const* literal = (A_Const*)node;
+ if (literal->val.type != T_Null)
+ {
+ /*
+ * Do not substitute null literals with parameters
+ */
+ ParamBinding* cp = palloc0(sizeof(ParamBinding));
+ ParamRef* param = makeNode(ParamRef);
+ param->number = ++ctx->n_params;
+ param->location = literal->location;
+ cp->ref = ref;
+ cp->paramref = param;
+ cp->literal = literal;
+ cp->raw_type = get_literal_type(&literal->val);
+ *ctx->param_list_tail = cp;
+ ctx->param_list_tail = &cp->next;
+ *ref = (Node*)param;
+ }
+ break;
+ }
+ case T_SelectStmt:
+ {
+ /*
+ * Substitute literals only in the target list, WHERE, VALUES and WITH clauses and the set-operation arms,
+ * skipping the other clauses (such as FROM), which are unlikely to contain parameterizable values
+ */
+ SelectStmt *stmt = (SelectStmt *) node;
+ if (stmt->intoClause)
+ return true; /* Utility statement can not be prepared */
+ if (raw_parse_tree_generalizer((Node**)&stmt->targetList, context))
+ return true;
+ if (raw_parse_tree_generalizer((Node**)&stmt->whereClause, context))
+ return true;
+ if (raw_parse_tree_generalizer((Node**)&stmt->valuesLists, context))
+ return true;
+ if (raw_parse_tree_generalizer((Node**)&stmt->withClause, context))
+ return true;
+ if (raw_parse_tree_generalizer((Node**)&stmt->larg, context))
+ return true;
+ if (raw_parse_tree_generalizer((Node**)&stmt->rarg, context))
+ return true;
+ break;
+ }
+ case T_TypeName:
+ case T_SortGroupClause:
+ case T_SortBy:
+ case T_A_ArrayExpr:
+ case T_TypeCast:
+ /*
+ * Literals in these clauses should not be replaced with parameters
+ */
+ break;
+ default:
+ /*
+ * Default traversal. raw_expression_tree_mutator returns true for all unrecognized nodes; for example, right now
+ * all transaction control statements are not covered by raw_expression_tree_mutator and so will not be autoprepared.
+ * My experiments show that the effect of not preparing start/commit transaction statements is positive.
+ */
+ return raw_expression_tree_mutator(node, raw_parse_tree_generalizer, context);
+ }
+ return false;
+}
+
+static Node*
+parse_tree_generalizer(Node *node, void *context)
+{
+ ParamBinding* binding;
+ ParamBinding* binding_list = (ParamBinding*)context;
+ if (node == NULL)
+ {
+ return NULL;
+ }
+ if (IsA(node, Query))
+ {
+ return (Node*)query_tree_mutator((Query*)node,
+ parse_tree_generalizer,
+ context,
+ QTW_DONT_COPY_QUERY);
+ }
+ if (IsA(node, Const))
+ {
+ Const* c = (Const*)node;
+ int paramno = 1;
+ for (binding = binding_list; binding != NULL && binding->literal->location != c->location; binding = binding->next, paramno++);
+ if (binding != NULL)
+ {
+ if (binding->param != NULL)
+ {
+ /* Parameter can be used only once */
+ binding->type = UNKNOWNOID;
+ //return (Node*)binding->param;
+ }
+ else
+ {
+ Param* param = makeNode(Param);
+ param->paramkind = PARAM_EXTERN;
+ param->paramid = paramno;
+ param->paramtype = c->consttype;
+ param->paramtypmod = c->consttypmod;
+ param->paramcollid = c->constcollid;
+ param->location = c->location;
+ binding->type = c->consttype;
+ binding->param = param;
+ return (Node*)param;
+ }
+ }
+ return node;
+ }
+ return expression_tree_mutator(node, parse_tree_generalizer, context);
+}
+
+/*
+ * Restore the original parse tree, replacing all ParamRef nodes back with the original literal nodes.
+ * Such an undo operation seems to be more efficient than copying the whole parse tree with raw_expression_tree_mutator
+ */
+static void undo_query_plan_changes(ParamBinding* cp)
+{
+ while (cp != NULL) {
+ *cp->ref = (Node*)cp->literal;
+ cp = cp->next;
+ }
+}
+
+/*
+ * Callback for raw_expression_tree_walker that frees parse tree nodes
+ */
+static bool drop_tree_node(Node* node, void* context)
+{
+ if (node) {
+ raw_expression_tree_walker(node, drop_tree_node, NULL);
+ pfree(node);
+ }
+ return false;
+}
+
+/*
+ * Location of converted literal in query.
+ * Used for precise error reporting (line number)
+ */
+static int param_location;
+
+/*
+ * Error callback adding information about error location
+ */
+static void
+prepare_error_callback(void *arg)
+{
+ CachedPlanSource *psrc = (CachedPlanSource*)arg;
+ /* And pass it to the ereport mechanism */
+ if (geterrcode() != ERRCODE_QUERY_CANCELED) {
+ int pos = pg_mbstrlen_with_len(psrc->query_string, param_location) + 1;
+ (void)errposition(pos);
+ }
+}
+/*
+ * Try to generalize query, find cached plan for it and execute
+ */
+static bool exec_cached_query(const char *query_string, List *parsetree_list)
+{
+ int n_params;
+ plan_cache_entry *entry;
+ bool found;
+ MemoryContext old_context;
+ CachedPlanSource *psrc;
+ ParamListInfo params;
+ int paramno;
+ CachedPlan *cplan;
+ Portal portal;
+ bool snapshot_set = false;
+ GeneralizerCtx ctx;
+ ParamBinding* binding;
+ ParamBinding* binding_list;
+ plan_cache_entry pattern;
+ Oid* param_types;
+ RawStmt *raw_parse_tree;
+
+ raw_parse_tree = castNode(RawStmt, linitial(parsetree_list));
+
+ /*
+ * Substitute literals with parameters and calculate hash for raw parse tree
+ */
+ ctx.param_list_tail = &binding_list;
+ ctx.n_params = 0;
+ ctx.hash = 0;
+ if (raw_parse_tree_generalizer(&raw_parse_tree->stmt, &ctx))
+ {
+ *ctx.param_list_tail = NULL;
+ undo_query_plan_changes(binding_list);
+ autoprepare_misses += 1;
+ return false;
+ }
+ *ctx.param_list_tail = NULL;
+ n_params = ctx.n_params;
+
+ /*
+ * Extract an array of parameter types: it is needed for the cached plan lookup
+ */
+ param_types = (Oid*)palloc(sizeof(Oid)*n_params);
+ for (paramno = 0, binding = binding_list; paramno < n_params; paramno++, binding = binding->next)
+ {
+ param_types[paramno] = binding->raw_type;
+ }
+
+ /*
+ * Construct plan cache context if not constructed yet.
+ */
+ if (plan_cache_context == NULL)
+ {
+ plan_cache_context = AllocSetContextCreate(TopMemoryContext,
+ "plan cache context",
+ ALLOCSET_DEFAULT_SIZES);
+ }
+ /* Manipulations with hash table are performed in plan_cache_context memory context */
+ old_context = MemoryContextSwitchTo(plan_cache_context);
+
+ /*
+ * Initialize hash table if not initialized yet
+ */
+ if (plan_cache_hash == NULL)
+ {
+ static HASHCTL info;
+ info.keysize = sizeof(plan_cache_entry);
+ info.entrysize = sizeof(plan_cache_entry);
+ info.hash = plan_cache_hash_fn;
+ info.match = plan_cache_match_fn;
+ info.keycopy = plan_cache_keycopy_fn;
+ plan_cache_hash = hash_create("plan_cache", autoprepare_limit != 0 ? autoprepare_limit : PLAN_CACHE_SIZE,
+ &info, HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_KEYCOPY);
+ dlist_init(&plan_cache_lru);
+ }
+
+ /*
+ * Lookup generalized query
+ */
+ pattern.parse_tree = raw_parse_tree->stmt;
+ pattern.hash = ctx.hash;
+ pattern.n_params = n_params;
+ pattern.param_types = param_types;
+ entry = (plan_cache_entry*)hash_search(plan_cache_hash, &pattern, HASH_ENTER, &found);
+ if (!found)
+ {
+ /* Check number of cached queries */
+ if (++autoprepare_cached_plans > autoprepare_limit && autoprepare_limit != 0)
+ {
+ /* Drop least recently accessed query */
+ plan_cache_entry* victim = dlist_container(plan_cache_entry, lru, plan_cache_lru.head.prev);
+ Node* dropped_tree = victim->parse_tree;
+ dlist_delete(&victim->lru);
+ if (victim->plan)
+ {
+ DropCachedPlan(victim->plan);
+ }
+ pfree(victim->param_types);
+ hash_search(plan_cache_hash, victim, HASH_REMOVE, NULL);
+ raw_expression_tree_walker(dropped_tree, drop_tree_node, NULL);
+ autoprepare_cached_plans -= 1;
+ }
+ entry->exec_count = 0;
+ entry->plan = NULL;
+ entry->disable_autoprepare = false;
+ }
+ else
+ {
+ dlist_delete(&entry->lru); /* accessed entry will be moved to the head of LRU list */
+ if (entry->plan != NULL && !entry->plan->is_valid)
+ {
+ /* Drop invalidated plan: it will be reconstructed later */
+ DropCachedPlan(entry->plan);
+ entry->plan = NULL;
+ }
+ }
+ dlist_insert_after(&plan_cache_lru.head, &entry->lru); /* prepend entry to the head of LRU list */
+ MemoryContextSwitchTo(old_context); /* Done with plan_cache_context memory context */
+
+
+ /*
+ * Prepare query only when it is executed more than autoprepare_threshold times
+ */
+ if (entry->disable_autoprepare || entry->exec_count++ < autoprepare_threshold)
+ {
+ undo_query_plan_changes(binding_list);
+ autoprepare_misses += 1;
+ return false;
+ }
+
+ if (entry->plan == NULL)
+ {
+ bool snapshot_set = false;
+ const char *commandTag;
+ List *querytree_list;
+
+ /*
+ * Switch to appropriate context for preparing plan.
+ */
+ old_context = MemoryContextSwitchTo(MessageContext);
+
+ /*
+ * Get the command name for use in status display (it also becomes the
+ * default completion tag, down inside PortalRun). Set ps_status and
+ * do any special start-of-SQL-command processing needed by the
+ * destination.
+ */
+ commandTag = CreateCommandTag(raw_parse_tree->stmt);
+
+ /*
+ * If we are in an aborted transaction, reject all commands except
+ * COMMIT/ABORT. It is important that this test occur before we try
+ * to do parse analysis, rewrite, or planning, since all those phases
+ * try to do database accesses, which may fail in abort state. (It
+ * might be safe to allow some additional utility commands in this
+ * state, but not many...)
+ */
+ if (IsAbortedTransactionBlockState() &&
+ !IsTransactionExitStmt(raw_parse_tree->stmt))
+ ereport(ERROR,
+ (errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
+ errmsg("current transaction is aborted, "
+ "commands ignored until end of transaction block"),
+ errdetail_abort()));
+
+ /*
+ * Create the CachedPlanSource before we do parse analysis, since it
+ * needs to see the unmodified raw parse tree.
+ */
+ psrc = CreateCachedPlan(raw_parse_tree, query_string, commandTag);
+
+ /*
+ * Revert the raw parse tree to use literals
+ */
+ undo_query_plan_changes(binding_list);
+
+ /*
+ * Set up a snapshot if parse analysis/planning will need one.
+ */
+ if (analyze_requires_snapshot(raw_parse_tree))
+ {
+ PushActiveSnapshot(GetTransactionSnapshot());
+ snapshot_set = true;
+ }
+
+ querytree_list = pg_analyze_and_rewrite(raw_parse_tree, query_string,
+ NULL, 0, NULL);
+ /*
+ * Replace Const with Param nodes
+ */
+ (void)query_tree_mutator((Query*)linitial(querytree_list),
+ parse_tree_generalizer,
+ binding_list,
+ QTW_DONT_COPY_QUERY);
+
+ /* Done with the snapshot used for parsing/planning */
+ if (snapshot_set)
+ PopActiveSnapshot();
+
+ param_types = (Oid*)palloc(sizeof(Oid)*n_params);
+ psrc->param_types = param_types;
+ for (paramno = 0, binding = binding_list; paramno < n_params; paramno++, binding = binding->next)
+ {
+ if (binding->param == NULL || binding->type == UNKNOWNOID)
+ {
+ /* Failed to resolve parameter type */
+ entry->disable_autoprepare = true;
+ autoprepare_misses += 1;
+ MemoryContextSwitchTo(old_context);
+ return false;
+ }
+ param_types[paramno] = binding->type;
+ }
+
+ /* Finish filling in the CachedPlanSource */
+ CompleteCachedPlan(psrc,
+ querytree_list,
+ NULL,
+ param_types,
+ n_params,
+ NULL,
+ NULL,
+ CURSOR_OPT_PARALLEL_OK, /* allow parallel mode */
+ true); /* fixed result */
+
+ /* If we got a cancel signal during analysis, quit */
+ CHECK_FOR_INTERRUPTS();
+
+ SaveCachedPlan(psrc);
+
+ /*
+ * We do NOT close the open transaction command here; that only happens
+ * when the client sends Sync. Instead, do CommandCounterIncrement just
+ * in case something happened during parse/plan.
+ */
+ CommandCounterIncrement();
+
+ MemoryContextSwitchTo(old_context); /* Done with MessageContext memory context */
+
+ entry->plan = psrc;
+
+ /*
+ * Determine output format
+ */
+ entry->format = 0; /* TEXT is default */
+ if (IsA(raw_parse_tree->stmt, FetchStmt))
+ {
+ FetchStmt *stmt = (FetchStmt *)raw_parse_tree->stmt;
+
+ if (!stmt->ismove)
+ {
+ Portal fportal = GetPortalByName(stmt->portalname);
+
+ if (PortalIsValid(fportal) &&
+ (fportal->cursorOptions & CURSOR_OPT_BINARY))
+ entry->format = 1; /* BINARY */
+ }
+ }
+ }
+ else
+ {
+ /* Plan found */
+ psrc = entry->plan;
+ Assert(n_params == entry->n_params);
+ }
+
+ /*
+ * If we are in aborted transaction state, the only portals we can
+ * actually run are those containing COMMIT or ROLLBACK commands. We
+ * disallow binding anything else to avoid problems with infrastructure
+ * that expects to run inside a valid transaction. We also disallow
+ * binding any parameters, since we can't risk calling user-defined I/O
+ * functions.
+ */
+ if (IsAbortedTransactionBlockState() &&
+ (!IsTransactionExitStmt(psrc->raw_parse_tree->stmt) ||
+ n_params != 0))
+ ereport(ERROR,
+ (errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
+ errmsg("current transaction is aborted, "
+ "commands ignored until end of transaction block"),
+ errdetail_abort()));
+
+ /*
+ * Create unnamed portal to run the query or queries in. If there
+ * already is one, silently drop it.
+ */
+ portal = CreatePortal("", true, true);
+ /* Don't display the portal in pg_cursors */
+ portal->visible = false;
+
+ /*
+ * Prepare to copy stuff into the portal's memory context. We do all this
+ * copying first, because it could possibly fail (out-of-memory) and we
+ * don't want a failure to occur between GetCachedPlan and
+ * PortalDefineQuery; that would result in leaking our plancache refcount.
+ */
+ old_context = MemoryContextSwitchTo(PortalGetHeapMemory(portal));
+
+ /* Copy the plan's query string into the portal */
+ query_string = pstrdup(psrc->query_string);
+
+ /*
+ * Set a snapshot if we have parameters to fetch (since the input
+ * functions might need it) or the query isn't a utility command (and
+ * hence could require redoing parse analysis and planning). We keep the
+ * snapshot active till we're done, so that plancache.c doesn't have to
+ * take new ones.
+ */
+ if (n_params > 0 ||
+ (psrc->raw_parse_tree &&
+ analyze_requires_snapshot(psrc->raw_parse_tree)))
+ {
+ PushActiveSnapshot(GetTransactionSnapshot());
+ snapshot_set = true;
+ }
+
+ /*
+ * Fetch parameters, if any, and store in the portal's memory context.
+ */
+ if (n_params > 0)
+ {
+ ErrorContextCallback errcallback;
+
+ params = (ParamListInfo) palloc0(offsetof(ParamListInfoData, params) +
+ n_params * sizeof(ParamExternData));
+ params->numParams = n_params;
+
+ /*
+ * Register an error callback to report the error location precisely in case of a conversion error while storing a parameter value.
+ */
+ errcallback.callback = prepare_error_callback;
+ errcallback.arg = (void *) psrc;
+ errcallback.previous = error_context_stack;
+ error_context_stack = &errcallback;
+
+ for (paramno = 0, binding = binding_list;
+ paramno < n_params;
+ paramno++, binding = binding->next)
+ {
+ Oid ptype = psrc->param_types[paramno];
+
+ param_location = binding->literal->location;
+
+ params->params[paramno].isnull = false;
+ params->params[paramno].value = get_param_value(ptype, &binding->literal->val);
+ /*
+ * We mark the params as CONST. This ensures that any custom plan
+ * makes full use of the parameter values.
+ */
+ params->params[paramno].pflags = PARAM_FLAG_CONST;
+ params->params[paramno].ptype = ptype;
+ }
+ error_context_stack = errcallback.previous;
+ }
+ else
+ {
+ params = NULL;
+ }
+
+ /* Done storing stuff in portal's context */
+ MemoryContextSwitchTo(old_context);
+
+ /*
+ * Obtain a plan from the CachedPlanSource. Any cruft from (re)planning
+ * will be generated in MessageContext. The plan refcount will be
+ * assigned to the Portal, so it will be released at portal destruction.
+ */
+ cplan = GetCachedPlan(psrc, params, false, NULL);
+
+ /*
+ * Now we can define the portal.
+ *
+ * DO NOT put any code that could possibly throw an error between the
+ * above GetCachedPlan call and here.
+ */
+ PortalDefineQuery(portal,
+ NULL,
+ query_string,
+ psrc->commandTag,
+ cplan->stmt_list,
+ cplan);
+
+ /* Done with the snapshot used for parameter I/O and parsing/planning */
+ if (snapshot_set)
+ {
+ PopActiveSnapshot();
+ }
+
+ /*
+ * And we're ready to start portal execution.
+ */
+ PortalStart(portal, params, 0, InvalidSnapshot);
+
+ /*
+ * Apply the result format requests to the portal.
+ */
+ PortalSetResultFormat(portal, 1, &entry->format);
+
+ /*
+ * Finally execute prepared statement
+ */
+ exec_prepared_plan(portal, "", FETCH_ALL, whereToSendOutput);
+
+ /*
+ * Close down transaction statement, if one is open.
+ */
+ finish_xact_command();
+
+ autoprepare_hits += 1;
+
+ return true;
+}
+
+
+void ResetAutoprepareCache(void)
+{
+ if (plan_cache_hash != NULL)
+ {
+ hash_destroy(plan_cache_hash);
+ MemoryContextReset(plan_cache_context);
+ dlist_init(&plan_cache_lru);
+ autoprepare_cached_plans = 0;
+ plan_cache_hash = 0;
+ }
+}
+
+
+/*
* Start statement timeout timer, if enabled.
*
* If there's already a timeout running, don't restart the timer. That
diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c
index 4da1f8f..a64d2b1 100644
--- a/src/backend/tcop/utility.c
+++ b/src/backend/tcop/utility.c
@@ -688,10 +688,12 @@ standard_ProcessUtility(PlannedStmt *pstmt,
case T_AlterSystemStmt:
PreventTransactionChain(isTopLevel, "ALTER SYSTEM");
AlterSystemSetConfigFile((AlterSystemStmt *) parsetree);
+ ResetAutoprepareCache();
break;
case T_VariableSetStmt:
ExecSetVariableStmt((VariableSetStmt *) parsetree, isTopLevel);
+ ResetAutoprepareCache();
break;
case T_VariableShowStmt:
@@ -964,6 +966,8 @@ ProcessUtilitySlow(ParseState *pstate,
/* All event trigger calls are done only when isCompleteQuery is true */
needCleanup = isCompleteQuery && EventTriggerBeginCompleteQuery();
+ ResetAutoprepareCache();
+
/* PG_TRY block is to ensure we call EventTriggerEndCompleteQuery */
PG_TRY();
{
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index 0e61b4b..4980da7 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -111,6 +111,7 @@
#include "utils/relmapper.h"
#include "utils/snapmgr.h"
#include "utils/syscache.h"
+#include "tcop/pquery.h"
/*
@@ -644,6 +645,7 @@ InvalidateSystemCaches(void)
InvalidateCatalogSnapshot();
ResetCatalogCaches();
+ ResetAutoprepareCache();
RelationCacheInvalidate(); /* gets smgr and relmap too */
for (i = 0; i < syscache_callback_count; i++)
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 6dcd738..0481c24 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -473,6 +473,10 @@ int tcp_keepalives_idle;
int tcp_keepalives_interval;
int tcp_keepalives_count;
+
+int autoprepare_threshold;
+int autoprepare_limit;
+
/*
* SSL renegotiation was been removed in PostgreSQL 9.5, but we tolerate it
* being set to zero (meaning never renegotiate) for backward compatibility.
@@ -1968,6 +1972,28 @@ static struct config_int ConfigureNamesInt[] =
check_max_stack_depth, assign_max_stack_depth, NULL
},
+ /*
+ * Threshold for implicit preparation of frequently executed queries
+ */
+ {
+ {"autoprepare_threshold", PGC_USERSET, QUERY_TUNING_OTHER,
+ gettext_noop("Threshold for autopreparing query."),
+ gettext_noop("0 value disables autoprepare.")
+ },
+ &autoprepare_threshold,
+ 0, 0, INT_MAX,
+ NULL, NULL, NULL
+ },
+ {
+ {"autoprepare_limit", PGC_USERSET, QUERY_TUNING_OTHER,
+ gettext_noop("Maximal number of autoprepared queries."),
+ gettext_noop("0 means unlimited number of autoprepared queries. Too large number of prepared queries can cause backend memory overflow and slowdown execution speed (because of increased lookup time)")
+ },
+ &autoprepare_limit,
+ 113, 0, INT_MAX,
+ NULL, NULL, NULL
+ },
+
{
{"temp_file_limit", PGC_SUSET, RESOURCES_DISK,
gettext_noop("Limits the total size of all temporary files used by each process."),
diff --git a/src/include/nodes/nodeFuncs.h b/src/include/nodes/nodeFuncs.h
index 3366983..263584d 100644
--- a/src/include/nodes/nodeFuncs.h
+++ b/src/include/nodes/nodeFuncs.h
@@ -73,6 +73,9 @@ extern Node *query_or_expression_tree_mutator(Node *node, Node *(*mutator) (),
extern bool raw_expression_tree_walker(Node *node, bool (*walker) (),
void *context);
+extern bool raw_expression_tree_mutator(Node *node, bool (*mutator) (),
+ void *context);
+
struct PlanState;
extern bool planstate_tree_walker(struct PlanState *planstate, bool (*walker) (),
void *context);
diff --git a/src/include/tcop/pquery.h b/src/include/tcop/pquery.h
index 6abfe7b..d25e744 100644
--- a/src/include/tcop/pquery.h
+++ b/src/include/tcop/pquery.h
@@ -42,4 +42,6 @@ extern uint64 PortalRunFetch(Portal portal,
long count,
DestReceiver *dest);
+extern void ResetAutoprepareCache(void);
+
#endif /* PQUERY_H */
diff --git a/src/include/utils/guc.h b/src/include/utils/guc.h
index 41335b8..85964e0 100644
--- a/src/include/utils/guc.h
+++ b/src/include/utils/guc.h
@@ -253,6 +253,9 @@ extern int client_min_messages;
extern int log_min_duration_statement;
extern int log_temp_files;
+extern int autoprepare_threshold;
+extern int autoprepare_limit;
+
extern int temp_file_limit;
extern int num_temp_buffers;
diff --git a/src/test/regress/expected/date_1.out b/src/test/regress/expected/date_1.out
new file mode 100644
index 0000000..b6101c7
--- /dev/null
+++ b/src/test/regress/expected/date_1.out
@@ -0,0 +1,1477 @@
+--
+-- DATE
+--
+CREATE TABLE DATE_TBL (f1 date);
+INSERT INTO DATE_TBL VALUES ('1957-04-09');
+INSERT INTO DATE_TBL VALUES ('1957-06-13');
+INSERT INTO DATE_TBL VALUES ('1996-02-28');
+INSERT INTO DATE_TBL VALUES ('1996-02-29');
+INSERT INTO DATE_TBL VALUES ('1996-03-01');
+INSERT INTO DATE_TBL VALUES ('1996-03-02');
+INSERT INTO DATE_TBL VALUES ('1997-02-28');
+INSERT INTO DATE_TBL VALUES ('1997-02-29');
+ERROR: date/time field value out of range: "1997-02-29"
+LINE 1: INSERT INTO DATE_TBL VALUES ('1997-02-29');
+ ^
+INSERT INTO DATE_TBL VALUES ('1997-03-01');
+INSERT INTO DATE_TBL VALUES ('1997-03-02');
+INSERT INTO DATE_TBL VALUES ('2000-04-01');
+INSERT INTO DATE_TBL VALUES ('2000-04-02');
+INSERT INTO DATE_TBL VALUES ('2000-04-03');
+INSERT INTO DATE_TBL VALUES ('2038-04-08');
+INSERT INTO DATE_TBL VALUES ('2039-04-09');
+INSERT INTO DATE_TBL VALUES ('2040-04-10');
+SELECT f1 AS "Fifteen" FROM DATE_TBL;
+ Fifteen
+------------
+ 04-09-1957
+ 06-13-1957
+ 02-28-1996
+ 02-29-1996
+ 03-01-1996
+ 03-02-1996
+ 02-28-1997
+ 03-01-1997
+ 03-02-1997
+ 04-01-2000
+ 04-02-2000
+ 04-03-2000
+ 04-08-2038
+ 04-09-2039
+ 04-10-2040
+(15 rows)
+
+SELECT f1 AS "Nine" FROM DATE_TBL WHERE f1 < '2000-01-01';
+ Nine
+------------
+ 04-09-1957
+ 06-13-1957
+ 02-28-1996
+ 02-29-1996
+ 03-01-1996
+ 03-02-1996
+ 02-28-1997
+ 03-01-1997
+ 03-02-1997
+(9 rows)
+
+SELECT f1 AS "Three" FROM DATE_TBL
+ WHERE f1 BETWEEN '2000-01-01' AND '2001-01-01';
+ Three
+------------
+ 04-01-2000
+ 04-02-2000
+ 04-03-2000
+(3 rows)
+
+--
+-- Check all the documented input formats
+--
+SET datestyle TO iso; -- display results in ISO
+SET datestyle TO ymd;
+SELECT date 'January 8, 1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '1999-01-08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '1999-01-18';
+ date
+------------
+ 1999-01-18
+(1 row)
+
+SELECT date '1/8/1999';
+ERROR: date/time field value out of range: "1/8/1999"
+LINE 1: SELECT date '1/8/1999';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '1/18/1999';
+ERROR: date/time field value out of range: "1/18/1999"
+LINE 1: SELECT date '1/18/1999';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '18/1/1999';
+ERROR: date/time field value out of range: "18/1/1999"
+LINE 1: SELECT date '18/1/1999';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '01/02/03';
+ date
+------------
+ 2001-02-03
+(1 row)
+
+SELECT date '19990108';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '990108';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '1999.008';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date 'J2451187';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date 'January 8, 99 BC';
+ERROR: date/time field value out of range: "January 8, 99 BC"
+LINE 1: SELECT date 'January 8, 99 BC';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '99-Jan-08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '1999-Jan-08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08-Jan-99';
+ERROR: date/time field value out of range: "08-Jan-99"
+LINE 1: SELECT date '08-Jan-99';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '08-Jan-1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date 'Jan-08-99';
+ERROR: date/time field value out of range: "Jan-08-99"
+LINE 1: SELECT date 'Jan-08-99';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date 'Jan-08-1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '99-08-Jan';
+ERROR: invalid input syntax for type date: "99-08-Jan"
+LINE 1: SELECT date '99-08-Jan';
+ ^
+SELECT date '1999-08-Jan';
+ERROR: invalid input syntax for type date: "1999-08-Jan"
+LINE 1: SELECT date '1999-08-Jan';
+ ^
+SELECT date '99 Jan 08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '1999 Jan 08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08 Jan 99';
+ERROR: date/time field value out of range: "08 Jan 99"
+LINE 1: SELECT date '08 Jan 99';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '08 Jan 1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date 'Jan 08 99';
+ERROR: date/time field value out of range: "Jan 08 99"
+LINE 1: SELECT date 'Jan 08 99';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date 'Jan 08 1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '99 08 Jan';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '1999 08 Jan';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '99-01-08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '1999-01-08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08-01-99';
+ERROR: date/time field value out of range: "08-01-99"
+LINE 1: SELECT date '08-01-99';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '08-01-1999';
+ERROR: date/time field value out of range: "08-01-1999"
+LINE 1: SELECT date '08-01-1999';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '01-08-99';
+ERROR: date/time field value out of range: "01-08-99"
+LINE 1: SELECT date '01-08-99';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '01-08-1999';
+ERROR: date/time field value out of range: "01-08-1999"
+LINE 1: SELECT date '01-08-1999';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '99-08-01';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+SELECT date '1999-08-01';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+SELECT date '99 01 08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '1999 01 08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08 01 99';
+ERROR: date/time field value out of range: "08 01 99"
+LINE 1: SELECT date '08 01 99';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '08 01 1999';
+ERROR: date/time field value out of range: "08 01 1999"
+LINE 1: SELECT date '08 01 1999';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '01 08 99';
+ERROR: date/time field value out of range: "01 08 99"
+LINE 1: SELECT date '01 08 99';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '01 08 1999';
+ERROR: date/time field value out of range: "01 08 1999"
+LINE 1: SELECT date '01 08 1999';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '99 08 01';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+SELECT date '1999 08 01';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+SET datestyle TO dmy;
+SELECT date 'January 8, 1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '1999-01-08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '1999-01-18';
+ date
+------------
+ 1999-01-18
+(1 row)
+
+SELECT date '1/8/1999';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+SELECT date '1/18/1999';
+ERROR: date/time field value out of range: "1/18/1999"
+LINE 1: SELECT date '1/18/1999';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '18/1/1999';
+ date
+------------
+ 1999-01-18
+(1 row)
+
+SELECT date '01/02/03';
+ date
+------------
+ 2003-02-01
+(1 row)
+
+SELECT date '19990108';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '990108';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '1999.008';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date 'J2451187';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date 'January 8, 99 BC';
+ date
+---------------
+ 0099-01-08 BC
+(1 row)
+
+SELECT date '99-Jan-08';
+ERROR: date/time field value out of range: "99-Jan-08"
+LINE 1: SELECT date '99-Jan-08';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '1999-Jan-08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08-Jan-99';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08-Jan-1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date 'Jan-08-99';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date 'Jan-08-1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '99-08-Jan';
+ERROR: invalid input syntax for type date: "99-08-Jan"
+LINE 1: SELECT date '99-08-Jan';
+ ^
+SELECT date '1999-08-Jan';
+ERROR: invalid input syntax for type date: "1999-08-Jan"
+LINE 1: SELECT date '1999-08-Jan';
+ ^
+SELECT date '99 Jan 08';
+ERROR: date/time field value out of range: "99 Jan 08"
+LINE 1: SELECT date '99 Jan 08';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '1999 Jan 08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08 Jan 99';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08 Jan 1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date 'Jan 08 99';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date 'Jan 08 1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '99 08 Jan';
+ERROR: invalid input syntax for type date: "99 08 Jan"
+LINE 1: SELECT date '99 08 Jan';
+ ^
+SELECT date '1999 08 Jan';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '99-01-08';
+ERROR: date/time field value out of range: "99-01-08"
+LINE 1: SELECT date '99-01-08';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '1999-01-08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08-01-99';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08-01-1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '01-08-99';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+SELECT date '01-08-1999';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+SELECT date '99-08-01';
+ERROR: date/time field value out of range: "99-08-01"
+LINE 1: SELECT date '99-08-01';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '1999-08-01';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+SELECT date '99 01 08';
+ERROR: date/time field value out of range: "99 01 08"
+LINE 1: SELECT date '99 01 08';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '1999 01 08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08 01 99';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08 01 1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '01 08 99';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+SELECT date '01 08 1999';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+SELECT date '99 08 01';
+ERROR: date/time field value out of range: "99 08 01"
+LINE 1: SELECT date '99 08 01';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '1999 08 01';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+SET datestyle TO mdy;
+SELECT date 'January 8, 1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '1999-01-08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '1999-01-18';
+ date
+------------
+ 1999-01-18
+(1 row)
+
+SELECT date '1/8/1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '1/18/1999';
+ date
+------------
+ 1999-01-18
+(1 row)
+
+SELECT date '18/1/1999';
+ERROR: date/time field value out of range: "18/1/1999"
+LINE 1: SELECT date '18/1/1999';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '01/02/03';
+ date
+------------
+ 2003-01-02
+(1 row)
+
+SELECT date '19990108';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '990108';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '1999.008';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date 'J2451187';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date 'January 8, 99 BC';
+ date
+---------------
+ 0099-01-08 BC
+(1 row)
+
+SELECT date '99-Jan-08';
+ERROR: date/time field value out of range: "99-Jan-08"
+LINE 1: SELECT date '99-Jan-08';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '1999-Jan-08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08-Jan-99';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08-Jan-1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date 'Jan-08-99';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date 'Jan-08-1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '99-08-Jan';
+ERROR: invalid input syntax for type date: "99-08-Jan"
+LINE 1: SELECT date '99-08-Jan';
+ ^
+SELECT date '1999-08-Jan';
+ERROR: invalid input syntax for type date: "1999-08-Jan"
+LINE 1: SELECT date '1999-08-Jan';
+ ^
+SELECT date '99 Jan 08';
+ERROR: invalid input syntax for type date: "99 Jan 08"
+LINE 1: SELECT date '99 Jan 08';
+ ^
+SELECT date '1999 Jan 08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08 Jan 99';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08 Jan 1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date 'Jan 08 99';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date 'Jan 08 1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '99 08 Jan';
+ERROR: invalid input syntax for type date: "99 08 Jan"
+LINE 1: SELECT date '99 08 Jan';
+ ^
+SELECT date '1999 08 Jan';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '99-01-08';
+ERROR: date/time field value out of range: "99-01-08"
+LINE 1: SELECT date '99-01-08';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '1999-01-08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08-01-99';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+SELECT date '08-01-1999';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+SELECT date '01-08-99';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '01-08-1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '99-08-01';
+ERROR: date/time field value out of range: "99-08-01"
+LINE 1: SELECT date '99-08-01';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '1999-08-01';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+SELECT date '99 01 08';
+ERROR: date/time field value out of range: "99 01 08"
+LINE 1: SELECT date '99 01 08';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '1999 01 08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08 01 99';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+SELECT date '08 01 1999';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+SELECT date '01 08 99';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '01 08 1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '99 08 01';
+ERROR: date/time field value out of range: "99 08 01"
+LINE 1: SELECT date '99 08 01';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '1999 08 01';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+-- Check upper and lower limits of date range
+SELECT date '4714-11-24 BC';
+ date
+---------------
+ 4714-11-24 BC
+(1 row)
+
+SELECT date '4714-11-23 BC'; -- out of range
+ERROR: date out of range: "4714-11-23 BC"
+LINE 1: SELECT date '4714-11-23 BC';
+ ^
+SELECT date '5874897-12-31';
+ date
+---------------
+ 5874897-12-31
+(1 row)
+
+SELECT date '5874898-01-01'; -- out of range
+ERROR: date out of range: "5874898-01-01"
+LINE 1: SELECT date '5874898-01-01';
+ ^
+RESET datestyle;
+--
+-- Simple math
+-- Leave most of it for the horology tests
+--
+SELECT f1 - date '2000-01-01' AS "Days From 2K" FROM DATE_TBL;
+ Days From 2K
+--------------
+ -15607
+ -15542
+ -1403
+ -1402
+ -1401
+ -1400
+ -1037
+ -1036
+ -1035
+ 91
+ 92
+ 93
+ 13977
+ 14343
+ 14710
+(15 rows)
+
+SELECT f1 - date 'epoch' AS "Days From Epoch" FROM DATE_TBL;
+ Days From Epoch
+-----------------
+ -4650
+ -4585
+ 9554
+ 9555
+ 9556
+ 9557
+ 9920
+ 9921
+ 9922
+ 11048
+ 11049
+ 11050
+ 24934
+ 25300
+ 25667
+(15 rows)
+
+SELECT date 'yesterday' - date 'today' AS "One day";
+ One day
+---------
+ -1
+(1 row)
+
+SELECT date 'today' - date 'tomorrow' AS "One day";
+ One day
+---------
+ -1
+(1 row)
+
+SELECT date 'yesterday' - date 'tomorrow' AS "Two days";
+ Two days
+----------
+ -2
+(1 row)
+
+SELECT date 'tomorrow' - date 'today' AS "One day";
+ One day
+---------
+ 1
+(1 row)
+
+SELECT date 'today' - date 'yesterday' AS "One day";
+ One day
+---------
+ 1
+(1 row)
+
+SELECT date 'tomorrow' - date 'yesterday' AS "Two days";
+ Two days
+----------
+ 2
+(1 row)
+
+--
+-- test extract!
+--
+-- epoch
+--
+SELECT EXTRACT(EPOCH FROM DATE '1970-01-01'); -- 0
+ date_part
+-----------
+ 0
+(1 row)
+
+SELECT EXTRACT(EPOCH FROM TIMESTAMP '1970-01-01'); -- 0
+ date_part
+-----------
+ 0
+(1 row)
+
+SELECT EXTRACT(EPOCH FROM TIMESTAMPTZ '1970-01-01+00'); -- 0
+ date_part
+-----------
+ 0
+(1 row)
+
+--
+-- century
+--
+SELECT EXTRACT(CENTURY FROM DATE '0101-12-31 BC'); -- -2
+ date_part
+-----------
+ -2
+(1 row)
+
+SELECT EXTRACT(CENTURY FROM DATE '0100-12-31 BC'); -- -1
+ date_part
+-----------
+ -1
+(1 row)
+
+SELECT EXTRACT(CENTURY FROM DATE '0001-12-31 BC'); -- -1
+ date_part
+-----------
+ -1
+(1 row)
+
+SELECT EXTRACT(CENTURY FROM DATE '0001-01-01'); -- 1
+ date_part
+-----------
+ 1
+(1 row)
+
+SELECT EXTRACT(CENTURY FROM DATE '0001-01-01 AD'); -- 1
+ date_part
+-----------
+ 1
+(1 row)
+
+SELECT EXTRACT(CENTURY FROM DATE '1900-12-31'); -- 19
+ date_part
+-----------
+ 19
+(1 row)
+
+SELECT EXTRACT(CENTURY FROM DATE '1901-01-01'); -- 20
+ date_part
+-----------
+ 20
+(1 row)
+
+SELECT EXTRACT(CENTURY FROM DATE '2000-12-31'); -- 20
+ date_part
+-----------
+ 20
+(1 row)
+
+SELECT EXTRACT(CENTURY FROM DATE '2001-01-01'); -- 21
+ date_part
+-----------
+ 21
+(1 row)
+
+SELECT EXTRACT(CENTURY FROM CURRENT_DATE)>=21 AS True; -- true
+ true
+------
+ t
+(1 row)
+
+--
+-- millennium
+--
+SELECT EXTRACT(MILLENNIUM FROM DATE '0001-12-31 BC'); -- -1
+ date_part
+-----------
+ -1
+(1 row)
+
+SELECT EXTRACT(MILLENNIUM FROM DATE '0001-01-01 AD'); -- 1
+ date_part
+-----------
+ 1
+(1 row)
+
+SELECT EXTRACT(MILLENNIUM FROM DATE '1000-12-31'); -- 1
+ date_part
+-----------
+ 1
+(1 row)
+
+SELECT EXTRACT(MILLENNIUM FROM DATE '1001-01-01'); -- 2
+ date_part
+-----------
+ 2
+(1 row)
+
+SELECT EXTRACT(MILLENNIUM FROM DATE '2000-12-31'); -- 2
+ date_part
+-----------
+ 2
+(1 row)
+
+SELECT EXTRACT(MILLENNIUM FROM DATE '2001-01-01'); -- 3
+ date_part
+-----------
+ 3
+(1 row)
+
+-- next test to be fixed on the turn of the next millennium;-)
+SELECT EXTRACT(MILLENNIUM FROM CURRENT_DATE); -- 3
+ date_part
+-----------
+ 3
+(1 row)
+
+--
+-- decade
+--
+SELECT EXTRACT(DECADE FROM DATE '1994-12-25'); -- 199
+ date_part
+-----------
+ 199
+(1 row)
+
+SELECT EXTRACT(DECADE FROM DATE '0010-01-01'); -- 1
+ date_part
+-----------
+ 1
+(1 row)
+
+SELECT EXTRACT(DECADE FROM DATE '0009-12-31'); -- 0
+ date_part
+-----------
+ 0
+(1 row)
+
+SELECT EXTRACT(DECADE FROM DATE '0001-01-01 BC'); -- 0
+ date_part
+-----------
+ 0
+(1 row)
+
+SELECT EXTRACT(DECADE FROM DATE '0002-12-31 BC'); -- -1
+ date_part
+-----------
+ -1
+(1 row)
+
+SELECT EXTRACT(DECADE FROM DATE '0011-01-01 BC'); -- -1
+ date_part
+-----------
+ -1
+(1 row)
+
+SELECT EXTRACT(DECADE FROM DATE '0012-12-31 BC'); -- -2
+ date_part
+-----------
+ -2
+(1 row)
+
+--
+-- some other types:
+--
+-- on a timestamp.
+SELECT EXTRACT(CENTURY FROM NOW())>=21 AS True; -- true
+ true
+------
+ t
+(1 row)
+
+SELECT EXTRACT(CENTURY FROM TIMESTAMP '1970-03-20 04:30:00.00000'); -- 20
+ date_part
+-----------
+ 20
+(1 row)
+
+-- on an interval
+SELECT EXTRACT(CENTURY FROM INTERVAL '100 y'); -- 1
+ date_part
+-----------
+ 1
+(1 row)
+
+SELECT EXTRACT(CENTURY FROM INTERVAL '99 y'); -- 0
+ date_part
+-----------
+ 0
+(1 row)
+
+SELECT EXTRACT(CENTURY FROM INTERVAL '-99 y'); -- 0
+ date_part
+-----------
+ 0
+(1 row)
+
+SELECT EXTRACT(CENTURY FROM INTERVAL '-100 y'); -- -1
+ date_part
+-----------
+ -1
+(1 row)
+
+--
+-- test trunc function!
+--
+SELECT DATE_TRUNC('MILLENNIUM', TIMESTAMP '1970-03-20 04:30:00.00000'); -- 1001
+ date_trunc
+--------------------------
+ Thu Jan 01 00:00:00 1001
+(1 row)
+
+SELECT DATE_TRUNC('MILLENNIUM', DATE '1970-03-20'); -- 1001-01-01
+ date_trunc
+------------------------------
+ Thu Jan 01 00:00:00 1001 PST
+(1 row)
+
+SELECT DATE_TRUNC('CENTURY', TIMESTAMP '1970-03-20 04:30:00.00000'); -- 1901
+ date_trunc
+--------------------------
+ Tue Jan 01 00:00:00 1901
+(1 row)
+
+SELECT DATE_TRUNC('CENTURY', DATE '1970-03-20'); -- 1901
+ date_trunc
+------------------------------
+ Tue Jan 01 00:00:00 1901 PST
+(1 row)
+
+SELECT DATE_TRUNC('CENTURY', DATE '2004-08-10'); -- 2001-01-01
+ date_trunc
+------------------------------
+ Mon Jan 01 00:00:00 2001 PST
+(1 row)
+
+SELECT DATE_TRUNC('CENTURY', DATE '0002-02-04'); -- 0001-01-01
+ date_trunc
+------------------------------
+ Mon Jan 01 00:00:00 0001 PST
+(1 row)
+
+SELECT DATE_TRUNC('CENTURY', DATE '0055-08-10 BC'); -- 0100-01-01 BC
+ date_trunc
+---------------------------------
+ Tue Jan 01 00:00:00 0100 PST BC
+(1 row)
+
+SELECT DATE_TRUNC('DECADE', DATE '1993-12-25'); -- 1990-01-01
+ date_trunc
+------------------------------
+ Mon Jan 01 00:00:00 1990 PST
+(1 row)
+
+SELECT DATE_TRUNC('DECADE', DATE '0004-12-25'); -- 0001-01-01 BC
+ date_trunc
+---------------------------------
+ Sat Jan 01 00:00:00 0001 PST BC
+(1 row)
+
+SELECT DATE_TRUNC('DECADE', DATE '0002-12-31 BC'); -- 0011-01-01 BC
+ date_trunc
+---------------------------------
+ Mon Jan 01 00:00:00 0011 PST BC
+(1 row)
+
+--
+-- test infinity
+--
+select 'infinity'::date, '-infinity'::date;
+ date | date
+----------+-----------
+ infinity | -infinity
+(1 row)
+
+select 'infinity'::date > 'today'::date as t;
+ t
+---
+ t
+(1 row)
+
+select '-infinity'::date < 'today'::date as t;
+ t
+---
+ t
+(1 row)
+
+select isfinite('infinity'::date), isfinite('-infinity'::date), isfinite('today'::date);
+ isfinite | isfinite | isfinite
+----------+----------+----------
+ f | f | t
+(1 row)
+
+--
+-- oscillating fields from non-finite date/timestamptz:
+--
+SELECT EXTRACT(HOUR FROM DATE 'infinity'); -- NULL
+ date_part
+-----------
+
+(1 row)
+
+SELECT EXTRACT(HOUR FROM DATE '-infinity'); -- NULL
+ date_part
+-----------
+
+(1 row)
+
+SELECT EXTRACT(HOUR FROM TIMESTAMP 'infinity'); -- NULL
+ date_part
+-----------
+
+(1 row)
+
+SELECT EXTRACT(HOUR FROM TIMESTAMP '-infinity'); -- NULL
+ date_part
+-----------
+
+(1 row)
+
+SELECT EXTRACT(HOUR FROM TIMESTAMPTZ 'infinity'); -- NULL
+ date_part
+-----------
+
+(1 row)
+
+SELECT EXTRACT(HOUR FROM TIMESTAMPTZ '-infinity'); -- NULL
+ date_part
+-----------
+
+(1 row)
+
+-- all possible fields
+SELECT EXTRACT(MICROSECONDS FROM DATE 'infinity'); -- NULL
+ date_part
+-----------
+
+(1 row)
+
+SELECT EXTRACT(MILLISECONDS FROM DATE 'infinity'); -- NULL
+ date_part
+-----------
+
+(1 row)
+
+SELECT EXTRACT(SECOND FROM DATE 'infinity'); -- NULL
+ date_part
+-----------
+
+(1 row)
+
+SELECT EXTRACT(MINUTE FROM DATE 'infinity'); -- NULL
+ date_part
+-----------
+
+(1 row)
+
+SELECT EXTRACT(HOUR FROM DATE 'infinity'); -- NULL
+ date_part
+-----------
+
+(1 row)
+
+SELECT EXTRACT(DAY FROM DATE 'infinity'); -- NULL
+ date_part
+-----------
+
+(1 row)
+
+SELECT EXTRACT(MONTH FROM DATE 'infinity'); -- NULL
+ date_part
+-----------
+
+(1 row)
+
+SELECT EXTRACT(QUARTER FROM DATE 'infinity'); -- NULL
+ date_part
+-----------
+
+(1 row)
+
+SELECT EXTRACT(WEEK FROM DATE 'infinity'); -- NULL
+ date_part
+-----------
+
+(1 row)
+
+SELECT EXTRACT(DOW FROM DATE 'infinity'); -- NULL
+ date_part
+-----------
+
+(1 row)
+
+SELECT EXTRACT(ISODOW FROM DATE 'infinity'); -- NULL
+ date_part
+-----------
+
+(1 row)
+
+SELECT EXTRACT(DOY FROM DATE 'infinity'); -- NULL
+ date_part
+-----------
+
+(1 row)
+
+SELECT EXTRACT(TIMEZONE FROM DATE 'infinity'); -- NULL
+ date_part
+-----------
+
+(1 row)
+
+SELECT EXTRACT(TIMEZONE_M FROM DATE 'infinity'); -- NULL
+ date_part
+-----------
+
+(1 row)
+
+SELECT EXTRACT(TIMEZONE_H FROM DATE 'infinity'); -- NULL
+ date_part
+-----------
+
+(1 row)
+
+--
+-- monotonic fields from non-finite date/timestamptz:
+--
+SELECT EXTRACT(EPOCH FROM DATE 'infinity'); -- Infinity
+ date_part
+-----------
+ Infinity
+(1 row)
+
+SELECT EXTRACT(EPOCH FROM DATE '-infinity'); -- -Infinity
+ date_part
+-----------
+ -Infinity
+(1 row)
+
+SELECT EXTRACT(EPOCH FROM TIMESTAMP 'infinity'); -- Infinity
+ date_part
+-----------
+ Infinity
+(1 row)
+
+SELECT EXTRACT(EPOCH FROM TIMESTAMP '-infinity'); -- -Infinity
+ date_part
+-----------
+ -Infinity
+(1 row)
+
+SELECT EXTRACT(EPOCH FROM TIMESTAMPTZ 'infinity'); -- Infinity
+ date_part
+-----------
+ Infinity
+(1 row)
+
+SELECT EXTRACT(EPOCH FROM TIMESTAMPTZ '-infinity'); -- -Infinity
+ date_part
+-----------
+ -Infinity
+(1 row)
+
+-- all possible fields
+SELECT EXTRACT(YEAR FROM DATE 'infinity'); -- Infinity
+ date_part
+-----------
+ Infinity
+(1 row)
+
+SELECT EXTRACT(DECADE FROM DATE 'infinity'); -- Infinity
+ date_part
+-----------
+ Infinity
+(1 row)
+
+SELECT EXTRACT(CENTURY FROM DATE 'infinity'); -- Infinity
+ date_part
+-----------
+ Infinity
+(1 row)
+
+SELECT EXTRACT(MILLENNIUM FROM DATE 'infinity'); -- Infinity
+ date_part
+-----------
+ Infinity
+(1 row)
+
+SELECT EXTRACT(JULIAN FROM DATE 'infinity'); -- Infinity
+ date_part
+-----------
+ Infinity
+(1 row)
+
+SELECT EXTRACT(ISOYEAR FROM DATE 'infinity'); -- Infinity
+ date_part
+-----------
+ Infinity
+(1 row)
+
+SELECT EXTRACT(EPOCH FROM DATE 'infinity'); -- Infinity
+ date_part
+-----------
+ Infinity
+(1 row)
+
+--
+-- wrong fields from non-finite date:
+--
+SELECT EXTRACT(MICROSEC FROM DATE 'infinity'); -- ERROR: timestamp units "microsec" not recognized
+ERROR: timestamp units "microsec" not recognized
+SELECT EXTRACT(UNDEFINED FROM DATE 'infinity'); -- ERROR: timestamp units "undefined" not supported
+ERROR: timestamp units "undefined" not supported
+-- test constructors
+select make_date(2013, 7, 15);
+ make_date
+------------
+ 07-15-2013
+(1 row)
+
+select make_date(-44, 3, 15);
+ make_date
+---------------
+ 03-15-0044 BC
+(1 row)
+
+select make_time(8, 20, 0.0);
+ make_time
+-----------
+ 08:20:00
+(1 row)
+
+-- should fail
+select make_date(2013, 2, 30);
+ERROR: date field value out of range: 2013-02-30
+select make_date(2013, 13, 1);
+ERROR: date field value out of range: 2013-13-01
+select make_date(2013, 11, -1);
+ERROR: date field value out of range: 2013-11--1
+select make_time(10, 55, 100.1);
+ERROR: time field value out of range: 10:55:100.1
+select make_time(24, 0, 2.1);
+ERROR: time field value out of range: 24:00:2.1