On Sat, 13 Apr 2024 at 09:17, Daniel Gustafsson <dan...@yesql.se> wrote:
>
> > On 12 Apr 2024, at 23:15, Heikki Linnakangas <hlinn...@iki.fi> wrote:
> > Here's a few more. I've accumulated these over the past couple of months, 
> > keeping them stashed in a branch, adding to it whenever I've spotted a 
> > minor typo while reading the code.
>
> Nice, let's slot all these together.

Here are a few additional ones to add to that.

Found with a manual trawl through the output of:

  git grep -E '\b([a-zA-Z]{2,}[^long|^that])\s+\1\b' -- ':!*.po' ':!*.dat'
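
As a rough sketch of what that pattern does and doesn't catch (\b and \s
here rely on GNU regex extensions, which git grep -E picks up on glibc
systems): the \1 backreference matches an immediately repeated word, and
the trailing character class is a hack that skips deliberate doublings
like "long long" and "that that" by rejecting any word ending in one of
those letters. For example:

  $ printf 'convert it for for fingerprinting\nsigned long long x;\n' | \
      grep -E '\b([a-zA-Z]{2,}[^long|^that])\s+\1\b'
  convert it for for fingerprinting

The same hack means two-letter doublings like "of of" (the group needs at
least three characters) and words ending in those letters, like "with
with", slip through, hence the manual trawl on top of the grep.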

David
diff --git a/contrib/amcheck/verify_nbtree.c b/contrib/amcheck/verify_nbtree.c
index f71f1854e0..20da4a46ba 100644
--- a/contrib/amcheck/verify_nbtree.c
+++ b/contrib/amcheck/verify_nbtree.c
@@ -3036,7 +3036,7 @@ bt_normalize_tuple(BtreeCheckState *state, IndexTuple itup)
         * In the heap, tuples may contain short varlena datums with both 1B
         * header and 4B headers.  But the corresponding index tuple should always
         * have such varlena's with 1B headers.  So, if there is a short varlena
-        * with 4B header, we need to convert it for for fingerprinting.
+        * with 4B header, we need to convert it for fingerprinting.
         *
         * Note that we rely on deterministic index_form_tuple() TOAST compression
         * of normalized input.
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index 2eff34c4aa..725811034f 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -1756,7 +1756,7 @@ _bt_start_prim_scan(IndexScanDesc scan, ScanDirection dir)
  *
  * (The rules are the same for backwards scans, except that the operators are
  * flipped: just replace the precondition's >= operator with a <=, and the
- * postcondition's <= operator with with a >=.  In other words, just swap the
+ * postcondition's <= operator with a >=.  In other words, just swap the
  * precondition with the postcondition.)
  *
  * We also deal with "advancing" non-required arrays here.  Callers whose
diff --git a/src/backend/partitioning/partbounds.c b/src/backend/partitioning/partbounds.c
index c0c49b0a0b..af3b1a90d2 100644
--- a/src/backend/partitioning/partbounds.c
+++ b/src/backend/partitioning/partbounds.c
@@ -5145,7 +5145,7 @@ get_partition_bound_spec(Oid partOid, RangeVar *name)
  * the first of new partitions) then lower bound of "spec" should be equal (or
  * greater than or equal in case defaultPart=true) to lower bound of split
  * partition. If last=true (this means that "spec" is the last of new
- * partitions) then upper bound of of "spec" should be equal (or less than or
+ * partitions) then upper bound of "spec" should be equal (or less than or
  * equal in case defaultPart=true) to upper bound of split partition.
  *
  * parent:                     partitioned table
@@ -5244,7 +5244,7 @@ check_partition_bounds_for_split_range(Relation parent,
                                                                                false, split_upper);
 
                        /*
-                        * Upper bound of of "spec" should be equal (or less than or equal
+                        * Upper bound of "spec" should be equal (or less than or equal
                         * in case defaultPart=true) to upper bound of split partition.
                         */
                        if ((!defaultPart && cmpval) || (defaultPart && cmpval > 0))
diff --git a/src/backend/storage/aio/read_stream.c b/src/backend/storage/aio/read_stream.c
index f54dacdd91..634cf4f0d1 100644
--- a/src/backend/storage/aio/read_stream.c
+++ b/src/backend/storage/aio/read_stream.c
@@ -541,9 +541,9 @@ read_stream_begin_relation(int flags,
                stream->distance = 1;
 
        /*
-        * Since we always always access the same relation, we can initialize
-        * parts of the ReadBuffersOperation objects and leave them that way, to
-        * avoid wasting CPU cycles writing to them for each read.
+        * Since we always access the same relation, we can initialize parts of
+        * the ReadBuffersOperation objects and leave them that way, to avoid
+        * wasting CPU cycles writing to them for each read.
         */
        for (int i = 0; i < max_ios; ++i)
        {
diff --git a/src/backend/utils/mmgr/bump.c b/src/backend/utils/mmgr/bump.c
index 449bd29344..26d2907fb7 100644
--- a/src/backend/utils/mmgr/bump.c
+++ b/src/backend/utils/mmgr/bump.c
@@ -501,8 +501,8 @@ BumpAlloc(MemoryContext context, Size size, int flags)
 #endif
 
        /*
-        * If requested size exceeds maximum for chunks we hand the the request
-        * off to BumpAllocLarge().
+        * If requested size exceeds maximum for chunks we hand the request off to
+        * BumpAllocLarge().
         */
        if (chunk_size > set->allocChunkLimit)
                return BumpAllocLarge(context, size, flags);
diff --git a/src/bin/pg_combinebackup/reconstruct.c b/src/bin/pg_combinebackup/reconstruct.c
index 15f62c18df..d481a5c565 100644
--- a/src/bin/pg_combinebackup/reconstruct.c
+++ b/src/bin/pg_combinebackup/reconstruct.c
@@ -756,7 +756,7 @@ write_block(int fd, char *output_filename,
 }
 
 /*
- * Read a block of data (BLCKSZ bytes) into the the buffer.
+ * Read a block of data (BLCKSZ bytes) into the buffer.
  */
 static void
 read_block(rfile *s, off_t off, uint8 *buffer)
diff --git a/src/bin/pg_upgrade/t/004_subscription.pl b/src/bin/pg_upgrade/t/004_subscription.pl
index 48918e8c29..a038928fe7 100644
--- a/src/bin/pg_upgrade/t/004_subscription.pl
+++ b/src/bin/pg_upgrade/t/004_subscription.pl
@@ -241,7 +241,7 @@ my $tab_upgraded2_oid = $old_sub->safe_psql('postgres',
 
 $old_sub->stop;
 
-# Change configuration so that initial table sync sync does not get started
+# Change configuration so that initial table sync does not get started
 # automatically
 $new_sub->append_conf('postgresql.conf',
        "max_logical_replication_workers = 0");
diff --git a/src/include/access/heapam_xlog.h b/src/include/access/heapam_xlog.h
index 551a0370aa..839f74d768 100644
--- a/src/include/access/heapam_xlog.h
+++ b/src/include/access/heapam_xlog.h
@@ -354,9 +354,9 @@ typedef struct xlhp_freeze_plan
  *
  * The backup block's data contains an array of xlhp_freeze_plan structs (with
  * nplans elements).  The individual item offsets are located in an array at
- * the end of the entire record with with nplans * (each plan's ntuples)
- * members.  Those offsets are in the same order as the plans.  The REDO
- * routine uses the offsets to freeze the corresponding heap tuples.
+ * the end of the entire record with nplans * (each plan's ntuples) members.
+ * Those offsets are in the same order as the plans.  The REDO routine uses
+ * the offsets to freeze the corresponding heap tuples.
  *
  * (As of PostgreSQL 17, XLOG_HEAP2_PRUNE_VACUUM_SCAN records replace the
  * separate XLOG_HEAP2_FREEZE_PAGE records.)
diff --git a/src/include/common/hashfn_unstable.h b/src/include/common/hashfn_unstable.h
index 7b647470ab..0adb0f82f9 100644
--- a/src/include/common/hashfn_unstable.h
+++ b/src/include/common/hashfn_unstable.h
@@ -73,7 +73,7 @@
  *
  * For longer or variable-length input, fasthash_accum() is a more
  * flexible, but more verbose method. The standalone functions use this
- * internally, so see fasthash64() for an an example of this.
+ * internally, so see fasthash64() for an example of this.
  *
  * After all inputs have been mixed in, finalize the hash:
  *
diff --git a/src/test/regress/expected/copy.out b/src/test/regress/expected/copy.out
index b48365ec98..44114089a6 100644
--- a/src/test/regress/expected/copy.out
+++ b/src/test/regress/expected/copy.out
@@ -276,8 +276,8 @@ CREATE TABLE parted_si_p_even PARTITION OF parted_si FOR VALUES IN (0);
 CREATE TABLE parted_si_p_odd PARTITION OF parted_si FOR VALUES IN (1);
 -- Test that bulk relation extension handles reusing a single BulkInsertState
 -- across partitions.  Without the fix applied, this reliably reproduces
--- #18130 unless shared_buffers is extremely small (preventing any use use of
--- bulk relation extension). See
+-- #18130 unless shared_buffers is extremely small (preventing any use of bulk
+-- relation extension). See
 -- https://postgr.es/m/18130-7a86a7356a75209d%40postgresql.org
 -- https://postgr.es/m/257696.1695670946%40sss.pgh.pa.us
 \set filename :abs_srcdir '/data/desc.data'
diff --git a/src/test/regress/sql/copy.sql b/src/test/regress/sql/copy.sql
index 43d2e906dd..e2dd24cb35 100644
--- a/src/test/regress/sql/copy.sql
+++ b/src/test/regress/sql/copy.sql
@@ -306,8 +306,8 @@ CREATE TABLE parted_si_p_odd PARTITION OF parted_si FOR VALUES IN (1);
 
 -- Test that bulk relation extension handles reusing a single BulkInsertState
 -- across partitions.  Without the fix applied, this reliably reproduces
--- #18130 unless shared_buffers is extremely small (preventing any use use of
--- bulk relation extension). See
+-- #18130 unless shared_buffers is extremely small (preventing any use of bulk
+-- relation extension). See
 -- https://postgr.es/m/18130-7a86a7356a75209d%40postgresql.org
 -- https://postgr.es/m/257696.1695670946%40sss.pgh.pa.us
 \set filename :abs_srcdir '/data/desc.data'
