Correct, and there is no need to create an index on a unique constraint or 
primary key, since they are already implemented via indexes.  I can’t count how 
many duplicate indexes I’ve dropped in the past.  I use the view below to help 
find duplicates in a given system.  Duplicate indexes just use up space and rob 
performance during updates and inserts.
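To illustrate the first point, here is a minimal sketch with a made-up table
name (t) and index name (t_id_idx): the primary key already creates a unique
index behind the scenes, so an explicit index on the same column is a pure
duplicate.

create table t (
    id   bigint primary key,      -- already backed by a unique index (t_pkey)
    name text not null
);

create index t_id_idx on t (id);  -- redundant: duplicates the primary key index
drop index t_id_idx;              -- frees space and speeds up inserts/updates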

/*======================================================================================================
   $Id: duplicate_index.sql 1 2015-12-19 15:40:29Z rui $
   Description: Find duplicate indexes
======================================================================================================*/
create or replace view duplicate_index
as
select base.indrelid::regclass as table_name
  , string_agg((dup.indexrelid::regclass)::text, E'\n') as indexes
  , pg_size_pretty(avg(pg_relation_size(dup.indexrelid))) as avg_size
from pg_index base
join pg_index dup on dup.indrelid = base.indrelid  -- same table
  and dup.indkey = base.indkey  -- same indexed columns
  and dup.indclass = base.indclass  -- same operator classes
  and (
    dup.indexprs = base.indexprs  -- same expressions for expression columns
    or (
      dup.indexprs is null
      and base.indexprs is null
    )
  )
  and (
    dup.indpred = base.indpred  -- same partial-index predicate (WHERE clause)
    or (
      dup.indpred is null
      and base.indpred is null
    )
  )
  and dup.indexrelid != base.indexrelid  -- but a different index
group by base.indrelid::regclass
  , concat(base.indkey::text, base.indclass::text, base.indexprs, base.indpred)
order by avg(pg_relation_size(dup.indexrelid)) desc
  , base.indrelid::regclass
;
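
Once the view is in place (assuming it was created in a schema on your
search_path), finding offenders is just a select; the largest duplicates
come out first:

select table_name, indexes, avg_size
from duplicate_index;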