Hi everybody, thanks a lot for your work. This is just a stupid patch to fix some typos. Thanks a lot to Magnus for the review.
You can see it also on GitHub,¹ if you prefer, or apply it on top of today's latest Git.² It passed "make check" on Linux. Ciao, Gelma --- ¹ https://github.com/Gelma/postgres/commit/6c59961f91b89f55b103c57fffa94308dc29546a ² commit: d5ec46bf224d2ea1b010b2bc10a65e44d4456553
diff --git a/config/pkg.m4 b/config/pkg.m4 index 13a8890178..f9075e56c8 100644 --- a/config/pkg.m4 +++ b/config/pkg.m4 @@ -86,7 +86,7 @@ dnl Check to see whether a particular set of modules exists. Similar to dnl PKG_CHECK_MODULES(), but does not set variables or print errors. dnl dnl Please remember that m4 expands AC_REQUIRE([PKG_PROG_PKG_CONFIG]) -dnl only at the first occurence in configure.ac, so if the first place +dnl only at the first occurrence in configure.ac, so if the first place dnl it's called might be skipped (such as if it is within an "if", you dnl have to call PKG_CHECK_EXISTS manually AC_DEFUN([PKG_CHECK_EXISTS], diff --git a/contrib/amcheck/verify_nbtree.c b/contrib/amcheck/verify_nbtree.c index de0a98f6d9..ff13b0c9e7 100644 --- a/contrib/amcheck/verify_nbtree.c +++ b/contrib/amcheck/verify_nbtree.c @@ -1278,7 +1278,7 @@ bt_right_page_check_scankey(BtreeCheckState *state) * Routines like _bt_search() don't require *any* page split interlock * when descending the tree, including something very light like a buffer * pin. That's why it's okay that we don't either. This avoidance of any - * need to "couple" buffer locks is the raison d' etre of the Lehman & Yao + * need to "couple" buffer locks is the raison d'être of the Lehman & Yao * algorithm, in fact. * * That leaves deletion. A deleted page won't actually be recycled by diff --git a/contrib/citext/expected/citext.out b/contrib/citext/expected/citext.out index 94aba67cdb..96800be9c0 100644 --- a/contrib/citext/expected/citext.out +++ b/contrib/citext/expected/citext.out @@ -2609,7 +2609,7 @@ SELECT citext_pattern_ge('b'::citext, 'A'::citext) AS true; t (1 row) --- Multi-byte tests below are diabled like the sanity tests above. +-- Multi-byte tests below are disabled like the sanity tests above. -- Uncomment to run them. 
-- Test ~<~ and ~<=~ SELECT 'a'::citext ~<~ 'B'::citext AS t; diff --git a/contrib/citext/expected/citext_1.out b/contrib/citext/expected/citext_1.out index 187d3b5d2c..33e3676d3c 100644 --- a/contrib/citext/expected/citext_1.out +++ b/contrib/citext/expected/citext_1.out @@ -2609,7 +2609,7 @@ SELECT citext_pattern_ge('b'::citext, 'A'::citext) AS true; t (1 row) --- Multi-byte tests below are diabled like the sanity tests above. +-- Multi-byte tests below are disabled like the sanity tests above. -- Uncomment to run them. -- Test ~<~ and ~<=~ SELECT 'a'::citext ~<~ 'B'::citext AS t; diff --git a/contrib/citext/sql/citext.sql b/contrib/citext/sql/citext.sql index 0cc909eb52..261b73cfa6 100644 --- a/contrib/citext/sql/citext.sql +++ b/contrib/citext/sql/citext.sql @@ -810,7 +810,7 @@ SELECT citext_pattern_ge('b'::citext, 'a'::citext) AS true; SELECT citext_pattern_ge('B'::citext, 'a'::citext) AS true; SELECT citext_pattern_ge('b'::citext, 'A'::citext) AS true; --- Multi-byte tests below are diabled like the sanity tests above. +-- Multi-byte tests below are disabled like the sanity tests above. -- Uncomment to run them. 
-- Test ~<~ and ~<=~ diff --git a/contrib/isn/EAN13.h b/contrib/isn/EAN13.h index 7023ebdf63..c1bea862da 100644 --- a/contrib/isn/EAN13.h +++ b/contrib/isn/EAN13.h @@ -54,7 +54,7 @@ const char *EAN13_range[][2] = { {"484", "484"}, /* GS1 Moldova */ {"485", "485"}, /* GS1 Armenia */ {"486", "486"}, /* GS1 Georgia */ - {"487", "487"}, /* GS1 Kazakstan */ + {"487", "487"}, /* GS1 Kazakhstan */ {"489", "489"}, /* GS1 Hong Kong */ {"490", "499"}, /* GS1 Japan */ {"500", "509"}, /* GS1 UK */ diff --git a/contrib/pg_trgm/data/trgm2.data b/contrib/pg_trgm/data/trgm2.data index 664e079fec..d52bcf7127 100644 --- a/contrib/pg_trgm/data/trgm2.data +++ b/contrib/pg_trgm/data/trgm2.data @@ -18,7 +18,7 @@ Daikalay Bakall Stubaital Neustift im Stubaital -Anonyme Appartments Stubaital +Anonyme Apartments Stubaital Barkaladja Pool Awabakal Nature Reserve Awabakal Field Studies Centre diff --git a/contrib/pgcrypto/imath.c b/contrib/pgcrypto/imath.c index 6936d2cdca..9ad5b24644 100644 --- a/contrib/pgcrypto/imath.c +++ b/contrib/pgcrypto/imath.c @@ -3366,14 +3366,14 @@ s_udiv_knuth(mp_int u, mp_int v) * D4,D5,D6: Multiply qhat * v and test for a correct value of q * * We proceed a bit different than the way described by Knuth. This - * way is simpler but less efficent. Instead of doing the multiply and + * way is simpler but less efficient. Instead of doing the multiply and * subtract then checking for underflow, we first do the multiply of * qhat * v and see if it is larger than the current remainder r. If * it is larger, we decrease qhat by one and try again. We may need to * decrease qhat one more time before we get a value that is smaller * than r. * - * This way is less efficent than Knuth becuase we do more multiplies, + * This way is less efficient than Knuth because we do more multiplies, * but we do not need to worry about underflow this way. 
*/ /* t = qhat * v */ diff --git a/doc/src/sgml/datatype.sgml b/doc/src/sgml/datatype.sgml index fc300f605f..ddf2181dba 100644 --- a/doc/src/sgml/datatype.sgml +++ b/doc/src/sgml/datatype.sgml @@ -3382,7 +3382,7 @@ SELECT person.name, holidays.num_weeks FROM person, holidays </indexterm> <para> - Polygons are represented by lists of points (the vertexes of the + Polygons are represented by lists of points (the vertices of the polygon). Polygons are very similar to closed paths, but are stored differently and have their own set of support routines. </para> diff --git a/doc/src/sgml/ref/create_cast.sgml b/doc/src/sgml/ref/create_cast.sgml index 84317047c2..64e9f9d4ce 100644 --- a/doc/src/sgml/ref/create_cast.sgml +++ b/doc/src/sgml/ref/create_cast.sgml @@ -137,7 +137,7 @@ SELECT CAST ( 2 AS numeric ) + 4.0; <productname>PostgreSQL</productname> to choose surprising interpretations of commands, or to be unable to resolve commands at all because there are multiple possible interpretations. A good - rule of thumb is to make a cast implicitly invokable only for + rule of thumb is to make a cast implicitly invocable only for information-preserving transformations between types in the same general type category. For example, the cast from <type>int2</type> to <type>int4</type> can reasonably be implicit, but the cast from @@ -324,7 +324,7 @@ SELECT CAST ( 2 AS numeric ) + 4.0; from string types are explicit-only. You can override this behavior by declaring your own cast to replace an automatic cast, but usually the only reason to - do so is if you want the conversion to be more easily invokable than the + do so is if you want the conversion to be more easily invocable than the standard assignment-only or explicit-only setting. 
Another possible reason is that you want the conversion to behave differently from the type's I/O function; but that is sufficiently surprising that you diff --git a/doc/src/sgml/spi.sgml b/doc/src/sgml/spi.sgml index 66eced6c94..9439ea27eb 100644 --- a/doc/src/sgml/spi.sgml +++ b/doc/src/sgml/spi.sgml @@ -321,7 +321,7 @@ SPI_execute("INSERT INTO foo SELECT * FROM bar RETURNING *", false, 5); typedef struct { MemoryContext tuptabcxt; /* memory context of result table */ - uint64 alloced; /* number of alloced vals */ + uint64 alloced; /* number of allocated vals */ uint64 free; /* number of free vals */ TupleDesc tupdesc; /* row descriptor */ HeapTuple *vals; /* rows */ diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c index 45c00aaa87..470b121e7d 100644 --- a/src/backend/access/gist/gist.c +++ b/src/backend/access/gist/gist.c @@ -1533,7 +1533,7 @@ initGISTstate(Relation index) * The truncated tupdesc for non-leaf index tuples, which doesn't contain * the INCLUDE attributes. * - * It is used to form tuples during tuple adjustement and page split. + * It is used to form tuples during tuple adjustment and page split. * B-tree creates shortened tuple descriptor for every truncated tuple, * because it is doing this less often: it does not have to form truncated * tuples during page split. Also, B-tree is not adjusting tuples on diff --git a/src/backend/access/hash/README b/src/backend/access/hash/README index 2227ebfe9b..6c9415a72c 100644 --- a/src/backend/access/hash/README +++ b/src/backend/access/hash/README @@ -532,7 +532,7 @@ Freeing an overflow page requires the process to hold buffer content lock in exclusive mode on the containing bucket, so need not worry about other accessors of pages in the bucket. 
The algorithm is: - delink overflow page from bucket chain + unlink overflow page from bucket chain (this requires read/update/write/release of fore and aft siblings) pin meta page and take buffer content lock in shared mode determine which bitmap page contains the free space bit for page diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 419da8784a..7ee01d07b3 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -2032,7 +2032,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid, ReleaseBuffer(vmbuffer); /* - * If tuple is cachable, mark it for invalidation from the caches in case + * If tuple is cacheable, mark it for invalidation from the caches in case * we abort. Note it is OK to do this after releasing the buffer, because * the heaptup data structure is all in local memory, not in the shared * buffer. @@ -2367,7 +2367,7 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples, CheckForSerializableConflictIn(relation, NULL, InvalidBuffer); /* - * If tuples are cachable, mark them for invalidation from the caches in + * If tuples are cacheable, mark them for invalidation from the caches in * case we abort. Note it is OK to do this after releasing the buffer, * because the heaptuples data structure is all in local memory, not in * the shared buffer. 
diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c index a4a28e88ec..92ea1d163e 100644 --- a/src/backend/access/heap/heapam_handler.c +++ b/src/backend/access/heap/heapam_handler.c @@ -2362,7 +2362,7 @@ heapam_scan_sample_next_block(TableScanDesc scan, SampleScanState *scanstate) if (blockno >= hscan->rs_nblocks) { - /* wrap to begining of rel, might not have started at 0 */ + /* wrap to beginning of rel, might not have started at 0 */ blockno = 0; } diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index e34d4ccc14..2b5ef7cfcf 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -5335,7 +5335,7 @@ find_composite_type_dependencies(Oid typeOid, Relation origRelation, continue; } - /* Else, ignore dependees that aren't user columns of relations */ + /* Else, ignore dependencies that aren't user columns of relations */ /* (we assume system columns are never of interesting types) */ if (pg_depend->classid != RelationRelationId || pg_depend->objsubid <= 0) diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c index 316692b7c2..49d933b287 100644 --- a/src/backend/commands/trigger.c +++ b/src/backend/commands/trigger.c @@ -4384,7 +4384,7 @@ AfterTriggerExecute(EState *estate, * When immediate_only is true, do not invoke currently-deferred triggers. * (This will be false only at main transaction exit.) * - * Returns true if any invokable events were found. + * Returns true if any invocable events were found. 
*/ static bool afterTriggerMarkEvents(AfterTriggerEventList *events, diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c index e9c8873ade..7c78519c0e 100644 --- a/src/backend/commands/typecmds.c +++ b/src/backend/commands/typecmds.c @@ -2944,7 +2944,7 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode) continue; } - /* Else, ignore dependees that aren't user columns of relations */ + /* Else, ignore dependencies that aren't user columns of relations */ /* (we assume system columns are never of domain types) */ if (pg_depend->classid != RelationRelationId || pg_depend->objsubid <= 0) diff --git a/src/backend/jit/llvm/llvmjit_inline.cpp b/src/backend/jit/llvm/llvmjit_inline.cpp index 07b5fc7b38..8005d43a84 100644 --- a/src/backend/jit/llvm/llvmjit_inline.cpp +++ b/src/backend/jit/llvm/llvmjit_inline.cpp @@ -308,7 +308,7 @@ llvm_build_inline_plan(llvm::Module *mod) * Check whether function and all its dependencies are too * big. Dependencies already counted for other functions that * will get inlined are not counted again. While this make - * things somewhat order dependant, I can't quite see a point + * things somewhat order dependent, I can't quite see a point * in a different behaviour. */ if (running_instcount > inlineState.costLimit) diff --git a/src/backend/optimizer/prep/prepjointree.c b/src/backend/optimizer/prep/prepjointree.c index 67eeba938d..b137f8b49a 100644 --- a/src/backend/optimizer/prep/prepjointree.c +++ b/src/backend/optimizer/prep/prepjointree.c @@ -1046,7 +1046,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte, /* * Replace all of the top query's references to the subquery's outputs * with copies of the adjusted subtlist items, being careful not to - * replace any of the jointree structure. (This'd be a lot cleaner if we + * replace any of the jointree structure. (This would be a lot cleaner if we * could use query_tree_mutator.) 
We have to use PHVs in the targetList, * returningList, and havingQual, since those are certainly above any * outer join. replace_vars_in_jointree tracks its location in the @@ -1607,7 +1607,7 @@ pull_up_simple_values(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte) /* * Replace all of the top query's references to the RTE's outputs with * copies of the adjusted VALUES expressions, being careful not to replace - * any of the jointree structure. (This'd be a lot cleaner if we could use + * any of the jointree structure. (This would be a lot cleaner if we could use * query_tree_mutator.) Much of this should be no-ops in the dummy Query * that surrounds a VALUES RTE, but it's not enough code to be worth * removing. diff --git a/src/backend/parser/parse_coerce.c b/src/backend/parser/parse_coerce.c index 903478d8ca..24c999aea0 100644 --- a/src/backend/parser/parse_coerce.c +++ b/src/backend/parser/parse_coerce.c @@ -2121,7 +2121,7 @@ IsPreferredType(TYPCATEGORY category, Oid type) * * As of 7.3, binary coercibility isn't hardwired into the code anymore. * We consider two types binary-coercible if there is an implicitly - * invokable, no-function-needed pg_cast entry. Also, a domain is always + * invocable, no-function-needed pg_cast entry. Also, a domain is always * binary-coercible to its base type, though *not* vice versa (in the other * direction, one must apply domain constraint checks before accepting the * value as legitimate). 
We also need to special-case various polymorphic diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c index e7c32f2a13..20bb928016 100644 --- a/src/backend/replication/logical/reorderbuffer.c +++ b/src/backend/replication/logical/reorderbuffer.c @@ -2279,7 +2279,7 @@ ReorderBufferSerializeTXN(ReorderBuffer *rb, ReorderBufferTXN *txn) /* * store in segment in which it belongs by start lsn, don't split over - * multiple segments tho + * multiple segments though */ if (fd == -1 || !XLByteInSeg(change->lsn, curOpenSegNo, wal_segment_size)) diff --git a/src/backend/rewrite/rewriteManip.c b/src/backend/rewrite/rewriteManip.c index e20158b962..6189a06853 100644 --- a/src/backend/rewrite/rewriteManip.c +++ b/src/backend/rewrite/rewriteManip.c @@ -1015,7 +1015,7 @@ AddQual(Query *parsetree, Node *qual) errmsg("conditional UNION/INTERSECT/EXCEPT statements are not implemented"))); } - /* INTERSECT want's the original, but we need to copy - Jan */ + /* INTERSECT wants the original, but we need to copy - Jan */ copy = copyObject(qual); parsetree->jointree->quals = make_and_qual(parsetree->jointree->quals, diff --git a/src/backend/snowball/dict_snowball.c b/src/backend/snowball/dict_snowball.c index 5166738310..16dbba17b8 100644 --- a/src/backend/snowball/dict_snowball.c +++ b/src/backend/snowball/dict_snowball.c @@ -149,7 +149,7 @@ typedef struct DictSnowball int (*stem) (struct SN_env *z); /* - * snowball saves alloced memory between calls, so we should run it in our + * snowball saves allocated memory between calls, so we should run it in our * private memory context. 
Note, init function is executed in long lived * context, so we just remember CurrentMemoryContext */ diff --git a/src/backend/snowball/libstemmer/stem_ISO_8859_1_french.c b/src/backend/snowball/libstemmer/stem_ISO_8859_1_french.c index 1a1732b216..d343104ed7 100644 --- a/src/backend/snowball/libstemmer/stem_ISO_8859_1_french.c +++ b/src/backend/snowball/libstemmer/stem_ISO_8859_1_french.c @@ -1072,7 +1072,7 @@ static int r_un_double(struct SN_env * z) { /* backwardmode */ static int r_un_accent(struct SN_env * z) { /* backwardmode */ { int i = 1; - while(1) { /* atleast, line 216 */ + while(1) { /* at least, line 216 */ if (out_grouping_b(z, g_v, 97, 251, 0)) goto lab0; /* non v, line 216 */ i--; continue; diff --git a/src/backend/snowball/libstemmer/stem_UTF_8_arabic.c b/src/backend/snowball/libstemmer/stem_UTF_8_arabic.c index 30bf1d9964..247f386e07 100644 --- a/src/backend/snowball/libstemmer/stem_UTF_8_arabic.c +++ b/src/backend/snowball/libstemmer/stem_UTF_8_arabic.c @@ -1449,7 +1449,7 @@ lab1: if (!(z->B[1])) goto lab4; /* Boolean test is_verb, line 509 */ { int m4 = z->l - z->c; (void)m4; /* or, line 515 */ { int i = 1; - while(1) { /* atleast, line 512 */ + while(1) { /* at least, line 512 */ int m5 = z->l - z->c; (void)m5; { int ret = r_Suffix_Verb_Step1(z); /* call Suffix_Verb_Step1, line 512 */ if (ret == 0) goto lab7; diff --git a/src/backend/snowball/libstemmer/stem_UTF_8_french.c b/src/backend/snowball/libstemmer/stem_UTF_8_french.c index 80564a8e36..03d1b6a82e 100644 --- a/src/backend/snowball/libstemmer/stem_UTF_8_french.c +++ b/src/backend/snowball/libstemmer/stem_UTF_8_french.c @@ -1085,7 +1085,7 @@ static int r_un_double(struct SN_env * z) { /* backwardmode */ static int r_un_accent(struct SN_env * z) { /* backwardmode */ { int i = 1; - while(1) { /* atleast, line 216 */ + while(1) { /* at least, line 216 */ if (out_grouping_b_U(z, g_v, 97, 251, 0)) goto lab0; /* non v, line 216 */ i--; continue; diff --git 
a/src/backend/snowball/libstemmer/stem_UTF_8_turkish.c b/src/backend/snowball/libstemmer/stem_UTF_8_turkish.c index 85eee41ada..4d721d91bd 100644 --- a/src/backend/snowball/libstemmer/stem_UTF_8_turkish.c +++ b/src/backend/snowball/libstemmer/stem_UTF_8_turkish.c @@ -2017,7 +2017,7 @@ static int r_is_reserved_word(struct SN_env * z) { /* backwardmode */ static int r_more_than_one_syllable_word(struct SN_env * z) { /* forwardmode */ { int c_test1 = z->c; /* test, line 447 */ { int i = 2; - while(1) { /* atleast, line 447 */ + while(1) { /* at least, line 447 */ int c2 = z->c; { /* gopast */ /* grouping vowel, line 447 */ int ret = out_grouping_U(z, g_vowel, 97, 305, 1); diff --git a/src/backend/storage/buffer/buf_init.c b/src/backend/storage/buffer/buf_init.c index b0ee3a26d6..ccd2c31c0b 100644 --- a/src/backend/storage/buffer/buf_init.c +++ b/src/backend/storage/buffer/buf_init.c @@ -179,7 +179,7 @@ BufferShmemSize(void) * and benchmarking has shown that keeping every BufferDesc aligned on a * cache line boundary is important for performance. So, instead, the * array of I/O locks is allocated in a separate tranche. Because those - * locks are not highly contentended, we lay out the array with minimal + * locks are not highly contended, we lay out the array with minimal * padding. */ size = add_size(size, mul_size(NBuffers, sizeof(LWLockMinimallyPadded))); diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c index 842fcabd97..25b7e314af 100644 --- a/src/backend/storage/ipc/standby.c +++ b/src/backend/storage/ipc/standby.c @@ -202,7 +202,7 @@ WaitExceedsMaxStandbyDelay(void) /* * Progressively increase the sleep times, but not to more than 1s, since - * pg_usleep isn't interruptable on some platforms. + * pg_usleep isn't interruptible on some platforms. 
*/ standbyWait_us *= 2; if (standbyWait_us > 1000000) diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c index 0da5b19719..dc346be4db 100644 --- a/src/backend/storage/lmgr/proc.c +++ b/src/backend/storage/lmgr/proc.c @@ -16,7 +16,7 @@ * Interface (a): * ProcSleep(), ProcWakeup(), * ProcQueueAlloc() -- create a shm queue for sleeping processes - * ProcQueueInit() -- create a queue without allocing memory + * ProcQueueInit() -- create a queue without allocating memory * * Waiting for a lock causes the backend to be put to sleep. Whoever releases * the lock wakes the process up again (and gives it an error code so it knows diff --git a/src/backend/utils/adt/geo_ops.c b/src/backend/utils/adt/geo_ops.c index 2ecc73b772..373784fcc1 100644 --- a/src/backend/utils/adt/geo_ops.c +++ b/src/backend/utils/adt/geo_ops.c @@ -5315,7 +5315,7 @@ lseg_crossing(float8 x, float8 y, float8 prev_x, float8 prev_y) /* both non-positive so do not cross positive X-axis */ return 0; - /* x and y cross axises, see URL above point_inside() */ + /* x and y cross axes, see URL above point_inside() */ z = float8_mi(float8_mul(float8_mi(x, prev_x), y), float8_mul(float8_mi(y, prev_y), x)); if (FPzero(z)) diff --git a/src/backend/utils/adt/inet_net_pton.c b/src/backend/utils/adt/inet_net_pton.c index 6f3ece1209..c28809fad6 100644 --- a/src/backend/utils/adt/inet_net_pton.c +++ b/src/backend/utils/adt/inet_net_pton.c @@ -189,7 +189,7 @@ inet_cidr_pton_ipv4(const char *src, u_char *dst, size_t size) goto emsgsize; } - /* Firey death and destruction unless we prefetched EOS. */ + /* Fiery death and destruction unless we prefetched EOS. */ if (ch != '\0') goto enoent; @@ -309,7 +309,7 @@ inet_net_pton_ipv4(const char *src, u_char *dst) goto emsgsize; } - /* Firey death and destruction unless we prefetched EOS. */ + /* Fiery death and destruction unless we prefetched EOS. 
*/ if (ch != '\0') goto enoent; diff --git a/src/backend/utils/adt/jsonpath.c b/src/backend/utils/adt/jsonpath.c index 10cd38a531..d5da155867 100644 --- a/src/backend/utils/adt/jsonpath.c +++ b/src/backend/utils/adt/jsonpath.c @@ -229,7 +229,7 @@ static int flattenJsonPathParseItem(StringInfo buf, JsonPathParseItem *item, int nestingLevel, bool insideArraySubscript) { - /* position from begining of jsonpath data */ + /* position from beginning of jsonpath data */ int32 pos = buf->len - JSONPATH_HDRSZ; int32 chld; int32 next; diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c index 00def27881..be8b8962d7 100644 --- a/src/backend/utils/cache/catcache.c +++ b/src/backend/utils/cache/catcache.c @@ -471,7 +471,7 @@ CatCacheRemoveCTup(CatCache *cache, CatCTup *ct) return; /* nothing left to do */ } - /* delink from linked list */ + /* unlink from linked list */ dlist_delete(&ct->cache_elem); /* @@ -503,7 +503,7 @@ CatCacheRemoveCList(CatCache *cache, CatCList *cl) Assert(cl->refcount == 0); Assert(cl->my_cache == cache); - /* delink from member tuples */ + /* unlink from member tuples */ for (i = cl->n_members; --i >= 0;) { CatCTup *ct = cl->members[i]; @@ -519,7 +519,7 @@ CatCacheRemoveCList(CatCache *cache, CatCList *cl) CatCacheRemoveCTup(cache, ct); } - /* delink from linked list */ + /* unlink from linked list */ dlist_delete(&cl->cache_elem); /* free associated column data */ @@ -713,7 +713,7 @@ ResetCatalogCaches(void) * kinds of trouble if a cache flush occurs while loading cache entries. * We now avoid the need to do it by copying cc_tupdesc out of the relcache, * rather than relying on the relcache to keep a tupdesc for us. Of course - * this assumes the tupdesc of a cachable system table will not change...) + * this assumes the tupdesc of a cacheable system table will not change...) 
*/ void CatalogCacheFlushCatalog(Oid catId) diff --git a/src/backend/utils/cache/relmapper.c b/src/backend/utils/cache/relmapper.c index 3b4f21bc54..403435df52 100644 --- a/src/backend/utils/cache/relmapper.c +++ b/src/backend/utils/cache/relmapper.c @@ -146,7 +146,7 @@ static void perform_relmap_update(bool shared, const RelMapFile *updates); /* * RelationMapOidToFilenode * - * The raison d' etre ... given a relation OID, look up its filenode. + * The raison d'être ... given a relation OID, look up its filenode. * * Although shared and local relation OIDs should never overlap, the caller * always knows which we need --- so pass that information to avoid useless @@ -907,7 +907,7 @@ write_relmap_file(bool shared, RelMapFile *newmap, * Make sure that the files listed in the map are not deleted if the outer * transaction aborts. This had better be within the critical section * too: it's not likely to fail, but if it did, we'd arrive at transaction - * abort with the files still vulnerable. PANICing will leave things in a + * abort with the files still vulnerable. PANICing will leave things in a * good state on-disk. * * Note: we're cheating a little bit here by assuming that mapped files diff --git a/src/backend/utils/mmgr/freepage.c b/src/backend/utils/mmgr/freepage.c index 0c9e98973a..ef3d97292e 100644 --- a/src/backend/utils/mmgr/freepage.c +++ b/src/backend/utils/mmgr/freepage.c @@ -1652,7 +1652,7 @@ FreePageManagerPutInternal(FreePageManager *fpm, Size first_page, Size npages, if (result.split_pages > 0) { /* - * NB: We could consider various coping strategies here to avoid a + * NB: We could consider various coping strategies here to avoid a * split; most obviously, if np != result.page, we could target that * page instead. 
More complicated shuffling strategies could be * possible as well; basically, unless every single leaf page is 100% diff --git a/src/backend/utils/mmgr/mcxt.c b/src/backend/utils/mmgr/mcxt.c index b07be12236..101cda5d4a 100644 --- a/src/backend/utils/mmgr/mcxt.c +++ b/src/backend/utils/mmgr/mcxt.c @@ -205,7 +205,7 @@ MemoryContextResetChildren(MemoryContext context) * * The type-specific delete routine removes all storage for the context, * but we have to recurse to handle the children. - * We must also delink the context from its parent, if it has one. + * We must also unlink the context from its parent, if it has one. */ void MemoryContextDelete(MemoryContext context) @@ -229,7 +229,7 @@ MemoryContextDelete(MemoryContext context) MemoryContextCallResetCallbacks(context); /* - * We delink the context from its parent before deleting it, so that if + * We unlink the context from its parent before deleting it, so that if * there's an error we won't have deleted/busted contexts still attached * to the context tree. Better a leak than a crash. */ @@ -258,7 +258,7 @@ MemoryContextDeleteChildren(MemoryContext context) AssertArg(MemoryContextIsValid(context)); /* - * MemoryContextDelete will delink the child from me, so just iterate as + * MemoryContextDelete will unlink the child from me, so just iterate as * long as there is a child. 
*/ while (context->firstchild != NULL) @@ -360,7 +360,7 @@ MemoryContextSetParent(MemoryContext context, MemoryContext new_parent) if (new_parent == context->parent) return; - /* Delink from existing parent, if any */ + /* Unlink from existing parent, if any */ if (context->parent) { MemoryContext parent = context->parent; diff --git a/src/bin/pg_basebackup/t/010_pg_basebackup.pl b/src/bin/pg_basebackup/t/010_pg_basebackup.pl index 7d59d3dffa..b7d36b65dd 100644 --- a/src/bin/pg_basebackup/t/010_pg_basebackup.pl +++ b/src/bin/pg_basebackup/t/010_pg_basebackup.pl @@ -44,7 +44,7 @@ $node->command_fails( ok(!-d "$tempdir/backup", 'backup directory was cleaned up'); -# Create a backup directory that is not empty so the next commnd will fail +# Create a backup directory that is not empty so the next command will fail # but leave the data directory behind mkdir("$tempdir/backup") or BAIL_OUT("unable to create $tempdir/backup"); diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c index a3280eeb18..695d6ba9f1 100644 --- a/src/bin/psql/command.c +++ b/src/bin/psql/command.c @@ -2937,7 +2937,7 @@ do_connect(enum trivalue reuse_previous_specification, if (host && strcmp(host, PQhost(o_conn)) == 0) { /* - * if we are targetting the same host, reuse its hostaddr for + * if we are targeting the same host, reuse its hostaddr for * consistency */ hostaddr = PQhostaddr(o_conn); diff --git a/src/common/pg_lzcompress.c b/src/common/pg_lzcompress.c index 988b3987d0..4cdab67900 100644 --- a/src/common/pg_lzcompress.c +++ b/src/common/pg_lzcompress.c @@ -287,7 +287,7 @@ static PGLZ_HistEntry hist_entries[PGLZ_HISTORY_SIZE + 1]; * Adds a new entry to the history table. * * If _recycle is true, then we are recycling a previously used entry, - * and must first delink it from its old hashcode's linked list. + * and must first unlink it from its old hashcode's linked list. 
* * NOTE: beware of multiple evaluations of macro's arguments, and note that * _hn and _recycle are modified in the macro. diff --git a/src/common/unicode/generate-norm_test_table.pl b/src/common/unicode/generate-norm_test_table.pl index bb19786f0c..3ff5ee3c5e 100644 --- a/src/common/unicode/generate-norm_test_table.pl +++ b/src/common/unicode/generate-norm_test_table.pl @@ -56,7 +56,7 @@ HEADER print $OUTPUT "static const pg_unicode_test UnicodeNormalizationTests[] =\n{\n"; -# Helper routine to conver a space-separated list of Unicode characters to +# Helper routine to convert a space-separated list of Unicode characters to # hexadecimal list format, suitable for outputting in a C array. sub codepoint_string_to_hex { diff --git a/src/include/access/gistxlog.h b/src/include/access/gistxlog.h index 969a5376b5..dc2e3770cc 100644 --- a/src/include/access/gistxlog.h +++ b/src/include/access/gistxlog.h @@ -67,7 +67,7 @@ typedef struct gistxlogPageSplit { BlockNumber origrlink; /* rightlink of the page before split */ GistNSN orignsn; /* NSN of the page before split */ - bool origleaf; /* was splitted page a leaf page? */ + bool origleaf; /* was split page a leaf page? */ uint16 npage; /* # of pages in the split */ bool markfollowright; /* set F_FOLLOW_RIGHT flags */ diff --git a/src/include/executor/tuptable.h b/src/include/executor/tuptable.h index 6eaa678a1e..d912280386 100644 --- a/src/include/executor/tuptable.h +++ b/src/include/executor/tuptable.h @@ -200,7 +200,7 @@ struct TupleTableSlotOps * copy needs to be palloc'd in the current memory context. The slot * itself is expected to remain unaffected. It is *not* expected to have * meaningful "system columns" in the copy. The copy is not be "owned" by - * the slot i.e. the caller has to take responsibilty to free memory + * the slot i.e. the caller has to take responsibility to free memory * consumed by the slot. 
*/ HeapTuple (*copy_heap_tuple) (TupleTableSlot *slot); @@ -210,7 +210,7 @@ struct TupleTableSlotOps * The copy needs to be palloc'd in the current memory context. The slot * itself is expected to remain unaffected. It is *not* expected to have * meaningful "system columns" in the copy. The copy is not be "owned" by - * the slot i.e. the caller has to take responsibilty to free memory + * the slot i.e. the caller has to take responsibility to free memory * consumed by the slot. */ MinimalTuple (*copy_minimal_tuple) (TupleTableSlot *slot); diff --git a/src/include/fe_utils/conditional.h b/src/include/fe_utils/conditional.h index 539b929b6a..7faa74e7be 100644 --- a/src/include/fe_utils/conditional.h +++ b/src/include/fe_utils/conditional.h @@ -5,8 +5,8 @@ * allow a manage nested conditionals. * * It is used by: - * - "psql" interpretor for handling \if ... \endif - * - "pgbench" interpretor for handling \if ... \endif + * - "psql" interpreter for handling \if ... \endif + * - "pgbench" interpreter for handling \if ... \endif * - "pgbench" syntax checker to test for proper nesting * * The stack holds the state of enclosing conditionals (are we in diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h index 64122bc1e3..112a9a5368 100644 --- a/src/include/nodes/execnodes.h +++ b/src/include/nodes/execnodes.h @@ -134,7 +134,7 @@ typedef struct ExprState * ExclusionOps Per-column exclusion operators, or NULL if none * ExclusionProcs Underlying function OIDs for ExclusionOps * ExclusionStrats Opclass strategy numbers for ExclusionOps - * UniqueOps Theses are like Exclusion*, but for unique indexes + * UniqueOps These are like Exclusion*, but for unique indexes * UniqueProcs * UniqueStrats * Unique is it a unique index? 
diff --git a/src/include/nodes/memnodes.h b/src/include/nodes/memnodes.h index dbae98d3d9..f36d25df42 100644 --- a/src/include/nodes/memnodes.h +++ b/src/include/nodes/memnodes.h @@ -77,7 +77,7 @@ typedef struct MemoryContextData { NodeTag type; /* identifies exact kind of context */ /* these two fields are placed here to minimize alignment wastage: */ - bool isReset; /* T = no space alloced since last reset */ + bool isReset; /* T = no space allocated since last reset */ bool allowInCritSection; /* allow palloc in critical section */ const MemoryContextMethods *methods; /* virtual function table */ MemoryContext parent; /* NULL if no parent (toplevel context) */ diff --git a/src/interfaces/libpq/fe-protocol3.c b/src/interfaces/libpq/fe-protocol3.c index 467563d7a4..0f1cb5d068 100644 --- a/src/interfaces/libpq/fe-protocol3.c +++ b/src/interfaces/libpq/fe-protocol3.c @@ -1239,7 +1239,7 @@ reportErrorPosition(PQExpBuffer msg, const char *query, int loc, int encoding) /* * Replace tabs with spaces in the writable copy. (Later we might - * want to think about coping with their variable screen width, but + * want to think about coping with their variable screen width, but * not today.) */ if (ch == '\t') diff --git a/src/tools/pginclude/pgcheckdefines b/src/tools/pginclude/pgcheckdefines index 4edf7fc56e..4943edd5d1 100755 --- a/src/tools/pginclude/pgcheckdefines +++ b/src/tools/pginclude/pgcheckdefines @@ -37,7 +37,7 @@ my $MAKE = "make"; # We ignore .h files under src/include/port/, since only the one exposed as # src/include/port.h is interesting. (XXX Windows ports have additional # files there?) Ditto for .h files in src/backend/port/ subdirectories. -# Including these .h files would clutter the list of define'd symbols and +# Including these .h files would clutter the list of defined symbols and # cause a lot of false-positive results. # my (@cfiles, @hfiles);
signature.asc
Description: PGP signature