We can spare the irq_desc lookup in the interrupt entry code if we
store the descriptor pointer in the vector array instead of the interrupt
number.

Signed-off-by: Thomas Gleixner <t...@linutronix.de>
---
 arch/x86/include/asm/hw_irq.h |    6 ++--
 arch/x86/include/asm/irq.h    |    4 ++-
 arch/x86/kernel/apic/vector.c |   51 +++++++++++++++++++-----------------------
 arch/x86/kernel/irq.c         |   36 +++++++++++++----------------
 arch/x86/kernel/irq_32.c      |    8 ++----
 arch/x86/kernel/irq_64.c      |    9 ++-----
 arch/x86/kernel/irqinit.c     |    4 +--
 arch/x86/lguest/boot.c        |    4 ++-
 8 files changed, 57 insertions(+), 65 deletions(-)

Index: tip/arch/x86/include/asm/hw_irq.h
===================================================================
--- tip.orig/arch/x86/include/asm/hw_irq.h
+++ tip/arch/x86/include/asm/hw_irq.h
@@ -182,10 +182,10 @@ extern char irq_entries_start[];
 #define trace_irq_entries_start irq_entries_start
 #endif
 
-#define VECTOR_UNUSED          (-1)
-#define VECTOR_RETRIGGERED     (-2)
+#define VECTOR_UNUSED          NULL
+#define VECTOR_RETRIGGERED     ((void *)~0UL)
 
-typedef int vector_irq_t[NR_VECTORS];
+typedef struct irq_desc* vector_irq_t[NR_VECTORS];
 DECLARE_PER_CPU(vector_irq_t, vector_irq);
 
 #endif /* !ASSEMBLY_ */
Index: tip/arch/x86/include/asm/irq.h
===================================================================
--- tip.orig/arch/x86/include/asm/irq.h
+++ tip/arch/x86/include/asm/irq.h
@@ -36,7 +36,9 @@ extern void kvm_set_posted_intr_wakeup_h
 
 extern void (*x86_platform_ipi_callback)(void);
 extern void native_init_IRQ(void);
-extern bool handle_irq(unsigned irq, struct pt_regs *regs);
+
+struct irq_desc;
+extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs);
 
 extern __visible unsigned int do_IRQ(struct pt_regs *regs);
 
Index: tip/arch/x86/kernel/apic/vector.c
===================================================================
--- tip.orig/arch/x86/kernel/apic/vector.c
+++ tip/arch/x86/kernel/apic/vector.c
@@ -169,7 +169,7 @@ next:
                        goto next;
 
                for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) {
-                       if (per_cpu(vector_irq, new_cpu)[vector] > VECTOR_UNUSED)
+                       if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
                                goto next;
                }
                /* Found one! */
@@ -181,7 +181,7 @@ next:
                           cpumask_intersects(d->old_domain, cpu_online_mask);
                }
                for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask)
-                       per_cpu(vector_irq, new_cpu)[vector] = irq;
+                       per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
                d->cfg.vector = vector;
                cpumask_copy(d->domain, vector_cpumask);
                err = 0;
@@ -223,8 +223,9 @@ static int assign_irq_vector_policy(int
 
 static void clear_irq_vector(int irq, struct apic_chip_data *data)
 {
-       int cpu, vector;
+       struct irq_desc *desc;
        unsigned long flags;
+       int cpu, vector;
 
        raw_spin_lock_irqsave(&vector_lock, flags);
        BUG_ON(!data->cfg.vector);
@@ -241,10 +242,11 @@ static void clear_irq_vector(int irq, st
                return;
        }
 
+       desc = irq_to_desc(irq);
        for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
                for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
                     vector++) {
-                       if (per_cpu(vector_irq, cpu)[vector] != irq)
+                       if (per_cpu(vector_irq, cpu)[vector] != desc)
                                continue;
                        per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
                        break;
@@ -402,30 +404,30 @@ int __init arch_early_irq_init(void)
        return arch_early_ioapic_init();
 }
 
+/* Initialize vector_irq on a new cpu */
 static void __setup_vector_irq(int cpu)
 {
-       /* Initialize vector_irq on a new cpu */
-       int irq, vector;
        struct apic_chip_data *data;
+       struct irq_desc *desc;
+       int irq, vector;
 
        /* Mark the inuse vectors */
-       for_each_active_irq(irq) {
-               data = apic_chip_data(irq_get_irq_data(irq));
-               if (!data)
-                       continue;
+       for_each_irq_desc(irq, desc) {
+               struct irq_data *idata = irq_desc_get_irq_data(desc);
 
-               if (!cpumask_test_cpu(cpu, data->domain))
+               data = apic_chip_data(idata);
+               if (!data || !cpumask_test_cpu(cpu, data->domain))
                        continue;
                vector = data->cfg.vector;
-               per_cpu(vector_irq, cpu)[vector] = irq;
+               per_cpu(vector_irq, cpu)[vector] = desc;
        }
        /* Mark the free vectors */
        for (vector = 0; vector < NR_VECTORS; ++vector) {
-               irq = per_cpu(vector_irq, cpu)[vector];
-               if (irq <= VECTOR_UNUSED)
+               desc = per_cpu(vector_irq, cpu)[vector];
+               if (IS_ERR_OR_NULL(desc))
                        continue;
 
-               data = apic_chip_data(irq_get_irq_data(irq));
+               data = apic_chip_data(irq_desc_get_irq_data(desc));
                if (!cpumask_test_cpu(cpu, data->domain))
                        per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
        }
@@ -447,7 +449,7 @@ void setup_vector_irq(int cpu)
         * legacy vector to irq mapping:
         */
        for (irq = 0; irq < nr_legacy_irqs(); irq++)
-               per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq;
+               per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq_to_desc(irq);
 
        __setup_vector_irq(cpu);
 }
@@ -543,19 +545,13 @@ asmlinkage __visible void smp_irq_move_c
 
        me = smp_processor_id();
        for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
-               int irq;
-               unsigned int irr;
-               struct irq_desc *desc;
                struct apic_chip_data *data;
+               struct irq_desc *desc;
+               unsigned int irr;
 
        retry:
-               irq = __this_cpu_read(vector_irq[vector]);
-
-               if (irq <= VECTOR_UNUSED)
-                       continue;
-
-               desc = irq_to_desc(irq);
-               if (!desc)
+               desc = __this_cpu_read(vector_irq[vector]);
+               if (IS_ERR_OR_NULL(desc))
                        continue;
 
                if (!raw_spin_trylock(&desc->lock)) {
@@ -565,9 +561,10 @@ asmlinkage __visible void smp_irq_move_c
                        goto retry;
                }
 
-               data = apic_chip_data(&desc->irq_data);
+               data = apic_chip_data(irq_desc_get_irq_data(desc));
                if (!data)
                        goto unlock;
+
                /*
                 * Check if the irq migration is in progress. If so, we
                 * haven't received the cleanup request yet for this irq.
Index: tip/arch/x86/kernel/irq.c
===================================================================
--- tip.orig/arch/x86/kernel/irq.c
+++ tip/arch/x86/kernel/irq.c
@@ -214,10 +214,9 @@ u64 arch_irq_stat(void)
 __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
 {
        struct pt_regs *old_regs = set_irq_regs(regs);
-
+       struct irq_desc *desc;
        /* high bit used in ret_from_ code  */
        unsigned vector = ~regs->orig_ax;
-       unsigned irq;
 
        /*
         * NB: Unlike exception entries, IRQ entries do not reliably
@@ -236,15 +235,16 @@ __visible unsigned int __irq_entry do_IR
        /* entering_irq() tells RCU that we're not quiescent.  Check it. */
        rcu_lockdep_assert(rcu_is_watching(), "IRQ failed to wake up RCU");
 
-       irq = __this_cpu_read(vector_irq[vector]);
+       desc = __this_cpu_read(vector_irq[vector]);
 
-       if (!handle_irq(irq, regs)) {
+       if (!handle_irq(desc, regs)) {
                ack_APIC_irq();
 
-               if (irq != VECTOR_RETRIGGERED) {
+               if (desc != VECTOR_RETRIGGERED) {
                        pr_emerg_ratelimited("%s: %d.%d No irq handler for vector (irq %d)\n",
                                             __func__, smp_processor_id(),
-                                            vector, irq);
+                                            vector,
+                                            desc ? irq_desc_get_irq(desc) : -1);
                } else {
                        __this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
                }
@@ -348,10 +348,10 @@ static struct cpumask affinity_new, onli
  */
 int check_irq_vectors_for_cpu_disable(void)
 {
-       int irq, cpu;
        unsigned int this_cpu, vector, this_count, count;
        struct irq_desc *desc;
        struct irq_data *data;
+       int cpu;
 
        this_cpu = smp_processor_id();
        cpumask_copy(&online_new, cpu_online_mask);
@@ -359,24 +359,21 @@ int check_irq_vectors_for_cpu_disable(vo
 
        this_count = 0;
        for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
-               irq = __this_cpu_read(vector_irq[vector]);
-               if (irq < 0)
+               desc = __this_cpu_read(vector_irq[vector]);
+               if (IS_ERR_OR_NULL(desc))
                        continue;
-               desc = irq_to_desc(irq);
-               if (!desc)
-                       continue;
-
                /*
                 * Protect against concurrent action removal, affinity
                 * changes etc.
                 */
                raw_spin_lock(&desc->lock);
                data = irq_desc_get_irq_data(desc);
-               cpumask_copy(&affinity_new, irq_data_get_affinity_mask(data));
+               cpumask_copy(&affinity_new,
+                            irq_data_get_affinity_mask(data));
                cpumask_clear_cpu(this_cpu, &affinity_new);
 
                /* Do not count inactive or per-cpu irqs. */
-               if (!irq_has_action(irq) || irqd_is_per_cpu(data)) {
+               if (!irq_desc_has_action(desc) || irqd_is_per_cpu(data)) {
                        raw_spin_unlock(&desc->lock);
                        continue;
                }
@@ -417,8 +414,8 @@ int check_irq_vectors_for_cpu_disable(vo
                for (vector = FIRST_EXTERNAL_VECTOR;
                     vector < first_system_vector; vector++) {
                        if (!test_bit(vector, used_vectors) &&
-                           per_cpu(vector_irq, cpu)[vector] <= VECTOR_UNUSED)
-                               count++;
+                           IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector]))
+                           count++;
                }
        }
 
@@ -522,14 +519,13 @@ void fixup_irqs(void)
        for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
                unsigned int irr;
 
-               if (__this_cpu_read(vector_irq[vector]) <= VECTOR_UNUSED)
+               if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector])))
                        continue;
 
                irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
                if (irr  & (1 << (vector % 32))) {
-                       irq = __this_cpu_read(vector_irq[vector]);
+                       desc = __this_cpu_read(vector_irq[vector]);
 
-                       desc = irq_to_desc(irq);
                        raw_spin_lock(&desc->lock);
                        data = irq_desc_get_irq_data(desc);
                        chip = irq_data_get_irq_chip(data);
Index: tip/arch/x86/kernel/irq_32.c
===================================================================
--- tip.orig/arch/x86/kernel/irq_32.c
+++ tip/arch/x86/kernel/irq_32.c
@@ -148,21 +148,19 @@ void do_softirq_own_stack(void)
        call_on_stack(__do_softirq, isp);
 }
 
-bool handle_irq(unsigned irq, struct pt_regs *regs)
+bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
 {
-       struct irq_desc *desc;
        int overflow;
 
        overflow = check_stack_overflow();
 
-       desc = irq_to_desc(irq);
-       if (unlikely(!desc))
+       if (IS_ERR_OR_NULL(desc))
                return false;
 
        if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
                if (unlikely(overflow))
                        print_stack_overflow();
-               desc->handle_irq(irq, desc);
+               generic_handle_irq_desc(irq_desc_get_irq(desc), desc);
        }
 
        return true;
Index: tip/arch/x86/kernel/irq_64.c
===================================================================
--- tip.orig/arch/x86/kernel/irq_64.c
+++ tip/arch/x86/kernel/irq_64.c
@@ -68,16 +68,13 @@ static inline void stack_overflow_check(
 #endif
 }
 
-bool handle_irq(unsigned irq, struct pt_regs *regs)
+bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
 {
-       struct irq_desc *desc;
-
        stack_overflow_check(regs);
 
-       desc = irq_to_desc(irq);
-       if (unlikely(!desc))
+       if (unlikely(IS_ERR_OR_NULL(desc)))
                return false;
 
-       generic_handle_irq_desc(irq, desc);
+       generic_handle_irq_desc(irq_desc_get_irq(desc), desc);
        return true;
 }
Index: tip/arch/x86/kernel/irqinit.c
===================================================================
--- tip.orig/arch/x86/kernel/irqinit.c
+++ tip/arch/x86/kernel/irqinit.c
@@ -60,7 +60,7 @@ int vector_used_by_percpu_irq(unsigned i
        int cpu;
 
        for_each_online_cpu(cpu) {
-               if (per_cpu(vector_irq, cpu)[vector] > VECTOR_UNUSED)
+               if (!IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector]))
                        return 1;
        }
 
@@ -94,7 +94,7 @@ void __init init_IRQ(void)
         * irq's migrate etc.
         */
        for (i = 0; i < nr_legacy_irqs(); i++)
-               per_cpu(vector_irq, 0)[ISA_IRQ_VECTOR(i)] = i;
+               per_cpu(vector_irq, 0)[ISA_IRQ_VECTOR(i)] = irq_to_desc(i);
 
        x86_init.irqs.intr_init();
 }
Index: tip/arch/x86/lguest/boot.c
===================================================================
--- tip.orig/arch/x86/lguest/boot.c
+++ tip/arch/x86/lguest/boot.c
@@ -882,6 +882,7 @@ static void __init lguest_init_IRQ(void)
  */
 int lguest_setup_irq(unsigned int irq)
 {
+       struct irq_desc *desc;
        int err;
 
        /* Returns -ve error or vector number. */
@@ -897,7 +898,8 @@ int lguest_setup_irq(unsigned int irq)
                                      handle_level_irq, "level");
 
        /* Some systems map "vectors" to interrupts weirdly.  Not us! */
-       __this_cpu_write(vector_irq[FIRST_EXTERNAL_VECTOR + irq], irq);
+       desc = irq_to_desc(irq);
+       __this_cpu_write(vector_irq[FIRST_EXTERNAL_VECTOR + irq], desc);
        return 0;
 }
 


--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to