The current amd64 code allows for a fairly limited number of device
interrupts on the primary CPU. The exact number is a bit fuzzy, but
is somewhere between 11 and 27. If you go beyond this limit, the
interrupts will be sent to a secondary CPU if you have one and are
running GENERIC.MP. Otherwise any additional devices won't get any
interrupts at all, and typically won't function. And having USB or
block device interrupts on a secondary CPU causes problems as well.
On some big servers and many modern laptops we're almost out. There
may even be machines that don't boot because they already need more
interrupts than we can handle on the primary CPU. And MSI is
increasing the pressure.
The diff below increases the number of interrupts that can be handled
on a CPU to 64. I've successfully made a build and built xenocara on
my amd64 box running GENERIC. Further testing, especially on
GENERIC.MP is needed.
Thanks,
Mark
Index: amd64/intr.c
===================================================================
RCS file: /cvs/src/sys/arch/amd64/amd64/intr.c,v
retrieving revision 1.28
diff -u -p -r1.28 intr.c
--- amd64/intr.c 28 May 2011 21:10:40 -0000 1.28
+++ amd64/intr.c 8 Jun 2011 22:05:42 -0000
@@ -119,11 +119,12 @@ x86_nmi(void)
void
intr_calculatemasks(struct cpu_info *ci)
{
- int irq, level, unusedirqs, intrlevel[MAX_INTR_SOURCES];
+ int irq, level;
+ u_int64_t unusedirqs, intrlevel[MAX_INTR_SOURCES];
struct intrhand *q;
/* First, figure out which levels each IRQ uses. */
- unusedirqs = 0xffffffff;
+ unusedirqs = 0xffffffffffffffffUL;
for (irq = 0; irq < MAX_INTR_SOURCES; irq++) {
int levels = 0;
@@ -132,23 +133,23 @@ intr_calculatemasks(struct cpu_info *ci)
continue;
}
for (q = ci->ci_isources[irq]->is_handlers; q; q = q->ih_next)
- levels |= 1 << q->ih_level;
+ levels |= (1 << q->ih_level);
intrlevel[irq] = levels;
if (levels)
- unusedirqs &= ~(1 << irq);
+ unusedirqs &= ~(1UL << irq);
}
/* Then figure out which IRQs use each level. */
for (level = 0; level < NIPL; level++) {
- int irqs = 0;
+ u_int64_t irqs = 0;
for (irq = 0; irq < MAX_INTR_SOURCES; irq++)
if (intrlevel[irq] & (1 << level))
- irqs |= 1 << irq;
+ irqs |= (1UL << irq);
ci->ci_imask[level] = irqs | unusedirqs;
}
- for (level = 0; level<(NIPL-1); level++)
- ci->ci_imask[level+1] |= ci->ci_imask[level];
+ for (level = 0; level< (NIPL - 1); level++)
+ ci->ci_imask[level + 1] |= ci->ci_imask[level];
for (irq = 0; irq < MAX_INTR_SOURCES; irq++) {
int maxlevel = IPL_NONE;
@@ -502,7 +503,7 @@ intr_disestablish(struct intrhand *ih)
simple_lock(&ci->ci_slock);
pic->pic_hwmask(pic, ih->ih_pin);
- x86_atomic_clearbits_u32(&ci->ci_ipending, (1 << ih->ih_slot));
+ x86_atomic_clearbits_u64(&ci->ci_ipending, (1UL << ih->ih_slot));
/*
* Remove the handler from the chain.
@@ -710,7 +711,7 @@ spllower(int nlevel)
{
int olevel;
struct cpu_info *ci = curcpu();
- u_int32_t imask;
+ u_int64_t imask;
u_long psl;
imask = IUNMASK(ci, nlevel);
@@ -740,6 +741,6 @@ softintr(int sir)
{
struct cpu_info *ci = curcpu();
- __asm __volatile("lock ; orl %1, %0" :
- "=m"(ci->ci_ipending) : "ir" (1 << sir));
+ __asm __volatile("lock; orq %1, %0" :
+ "=m"(ci->ci_ipending) : "ir" (1UL << sir));
}
Index: amd64/spl.S
===================================================================
RCS file: /cvs/src/sys/arch/amd64/amd64/spl.S,v
retrieving revision 1.7
diff -u -p -r1.7 spl.S
--- amd64/spl.S 27 Jun 2008 06:03:08 -0000 1.7
+++ amd64/spl.S 8 Jun 2011 22:05:42 -0000
@@ -122,12 +122,12 @@ IDTVEC(spllower)
movl %edi,%ebx
leaq 1f(%rip),%r13 # address to resume loop at
1: movl %ebx,%eax # get cpl
- movl CPUVAR(IUNMASK)(,%rax,4),%eax
+ movq CPUVAR(IUNMASK)(,%rax,8),%rax
cli
- andl CPUVAR(IPENDING),%eax # any non-masked bits left?
+ andq CPUVAR(IPENDING),%rax # any non-masked bits left?
jz 2f
- bsrl %eax,%eax
- btrl %eax,CPUVAR(IPENDING)
+ bsrq %rax,%rax
+ btrq %rax,CPUVAR(IPENDING)
movq CPUVAR(ISOURCES)(,%rax,8),%rax
jmp *IS_RECURSE(%rax)
2:
@@ -150,12 +150,12 @@ IDTVEC(doreti)
decl CPUVAR(IDEPTH)
leaq 1f(%rip),%r13
1: movl %ebx,%eax
- movl CPUVAR(IUNMASK)(,%rax,4),%eax
+ movq CPUVAR(IUNMASK)(,%rax,8),%rax
cli
- andl CPUVAR(IPENDING),%eax
+ andq CPUVAR(IPENDING),%rax
jz 2f
- bsrl %eax,%eax # slow, but not worth optimizing
- btrl %eax,CPUVAR(IPENDING)
+ bsrq %rax,%rax # slow, but not worth optimizing
+ btrq %rax,CPUVAR(IPENDING)
movq CPUVAR(ISOURCES)(,%rax, 8),%rax
jmp *IS_RESUME(%rax)
2: /* Check for ASTs on exit to user mode. */
Index: amd64/vector.S
===================================================================
RCS file: /cvs/src/sys/arch/amd64/amd64/vector.S,v
retrieving revision 1.30
diff -u -p -r1.30 vector.S
--- amd64/vector.S 19 Apr 2011 06:07:03 -0000 1.30
+++ amd64/vector.S 8 Jun 2011 22:05:43 -0000
@@ -337,7 +337,8 @@ IDTVEC(resume_lapic_ipi)
call _C_LABEL(x86_ipi_handler)
jmp _C_LABEL(Xdoreti)
2:
- orl $(1 << LIR_IPI),CPUVAR(IPENDING)
+ movq $(1 << LIR_IPI),%rax
+ orq %rax,CPUVAR(IPENDING)
sti
INTRFASTEXIT
@@ -418,7 +419,8 @@ IDTVEC(resume_lapic_ltimer)
call _C_LABEL(lapic_clockintr)
jmp _C_LABEL(Xdoreti)
2:
- orl $(1 << LIR_TIMER),CPUVAR(IPENDING)
+ movq $(1 << LIR_TIMER),%rax
+ orq %rax,CPUVAR(IPENDING)
sti
INTRFASTEXIT
#endif /* NLAPIC > 0 */
@@ -502,14 +504,16 @@ IDTVEC(intr_##name##num)
;\
7: \
UNLOCK_KERNEL ;\
cli ;\
- orl $(1 << num),CPUVAR(IPENDING) ;\
+ movq $(1 << num),%rax ;\
+ orq %rax,CPUVAR(IPENDING) ;\
level_mask(num) ;\
late_ack(num) ;\
sti ;\
jmp _C_LABEL(Xdoreti) /* lower spl and do ASTs */ ;\
10: \
cli ;\
- orl $(1 << num),CPUVAR(IPENDING) ;\
+ movq $(1 << num),%rax ;\
+ orq %rax,CPUVAR(IPENDING) ;\
level_mask(num) ;\
late_ack(num) ;\
sti ;\
@@ -592,6 +596,38 @@ INTRSTUB(ioapic_edge,28,voidop,ioapic_as
INTRSTUB(ioapic_edge,29,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_edge,30,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_edge,31,voidop,ioapic_asm_ack,voidop,voidop,voidop)
+INTRSTUB(ioapic_edge,32,voidop,ioapic_asm_ack,voidop,voidop,voidop)
+INTRSTUB(ioapic_edge,33,voidop,ioapic_asm_ack,voidop,voidop,voidop)
+INTRSTUB(ioapic_edge,34,voidop,ioapic_asm_ack,voidop,voidop,voidop)
+INTRSTUB(ioapic_edge,35,voidop,ioapic_asm_ack,voidop,voidop,voidop)
+INTRSTUB(ioapic_edge,36,voidop,ioapic_asm_ack,voidop,voidop,voidop)
+INTRSTUB(ioapic_edge,37,voidop,ioapic_asm_ack,voidop,voidop,voidop)
+INTRSTUB(ioapic_edge,38,voidop,ioapic_asm_ack,voidop,voidop,voidop)
+INTRSTUB(ioapic_edge,39,voidop,ioapic_asm_ack,voidop,voidop,voidop)
+INTRSTUB(ioapic_edge,40,voidop,ioapic_asm_ack,voidop,voidop,voidop)
+INTRSTUB(ioapic_edge,41,voidop,ioapic_asm_ack,voidop,voidop,voidop)
+INTRSTUB(ioapic_edge,42,voidop,ioapic_asm_ack,voidop,voidop,voidop)
+INTRSTUB(ioapic_edge,43,voidop,ioapic_asm_ack,voidop,voidop,voidop)
+INTRSTUB(ioapic_edge,44,voidop,ioapic_asm_ack,voidop,voidop,voidop)
+INTRSTUB(ioapic_edge,45,voidop,ioapic_asm_ack,voidop,voidop,voidop)
+INTRSTUB(ioapic_edge,46,voidop,ioapic_asm_ack,voidop,voidop,voidop)
+INTRSTUB(ioapic_edge,47,voidop,ioapic_asm_ack,voidop,voidop,voidop)
+INTRSTUB(ioapic_edge,48,voidop,ioapic_asm_ack,voidop,voidop,voidop)
+INTRSTUB(ioapic_edge,49,voidop,ioapic_asm_ack,voidop,voidop,voidop)
+INTRSTUB(ioapic_edge,50,voidop,ioapic_asm_ack,voidop,voidop,voidop)
+INTRSTUB(ioapic_edge,51,voidop,ioapic_asm_ack,voidop,voidop,voidop)
+INTRSTUB(ioapic_edge,52,voidop,ioapic_asm_ack,voidop,voidop,voidop)
+INTRSTUB(ioapic_edge,53,voidop,ioapic_asm_ack,voidop,voidop,voidop)
+INTRSTUB(ioapic_edge,54,voidop,ioapic_asm_ack,voidop,voidop,voidop)
+INTRSTUB(ioapic_edge,55,voidop,ioapic_asm_ack,voidop,voidop,voidop)
+INTRSTUB(ioapic_edge,56,voidop,ioapic_asm_ack,voidop,voidop,voidop)
+INTRSTUB(ioapic_edge,57,voidop,ioapic_asm_ack,voidop,voidop,voidop)
+INTRSTUB(ioapic_edge,58,voidop,ioapic_asm_ack,voidop,voidop,voidop)
+INTRSTUB(ioapic_edge,59,voidop,ioapic_asm_ack,voidop,voidop,voidop)
+INTRSTUB(ioapic_edge,60,voidop,ioapic_asm_ack,voidop,voidop,voidop)
+INTRSTUB(ioapic_edge,61,voidop,ioapic_asm_ack,voidop,voidop,voidop)
+INTRSTUB(ioapic_edge,62,voidop,ioapic_asm_ack,voidop,voidop,voidop)
+INTRSTUB(ioapic_edge,63,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_level,0,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,1,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
@@ -625,6 +661,38 @@ INTRSTUB(ioapic_level,28,voidop,ioapic_a
INTRSTUB(ioapic_level,29,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,30,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,31,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
+INTRSTUB(ioapic_level,32,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
+INTRSTUB(ioapic_level,33,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
+INTRSTUB(ioapic_level,34,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
+INTRSTUB(ioapic_level,35,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
+INTRSTUB(ioapic_level,36,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
+INTRSTUB(ioapic_level,37,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
+INTRSTUB(ioapic_level,38,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
+INTRSTUB(ioapic_level,39,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
+INTRSTUB(ioapic_level,40,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
+INTRSTUB(ioapic_level,41,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
+INTRSTUB(ioapic_level,42,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
+INTRSTUB(ioapic_level,43,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
+INTRSTUB(ioapic_level,44,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
+INTRSTUB(ioapic_level,45,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
+INTRSTUB(ioapic_level,46,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
+INTRSTUB(ioapic_level,47,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
+INTRSTUB(ioapic_level,48,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
+INTRSTUB(ioapic_level,49,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
+INTRSTUB(ioapic_level,50,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
+INTRSTUB(ioapic_level,51,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
+INTRSTUB(ioapic_level,52,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
+INTRSTUB(ioapic_level,53,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
+INTRSTUB(ioapic_level,54,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
+INTRSTUB(ioapic_level,55,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
+INTRSTUB(ioapic_level,56,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
+INTRSTUB(ioapic_level,57,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
+INTRSTUB(ioapic_level,58,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
+INTRSTUB(ioapic_level,59,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
+INTRSTUB(ioapic_level,60,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
+INTRSTUB(ioapic_level,61,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
+INTRSTUB(ioapic_level,62,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
+INTRSTUB(ioapic_level,63,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
#endif
@@ -730,6 +798,70 @@ _C_LABEL(ioapic_edge_stubs):
.quad _C_LABEL(Xresume_ioapic_edge30)
.quad _C_LABEL(Xintr_ioapic_edge31), _C_LABEL(Xrecurse_ioapic_edge31)
.quad _C_LABEL(Xresume_ioapic_edge31)
+ .quad _C_LABEL(Xintr_ioapic_edge32), _C_LABEL(Xrecurse_ioapic_edge32)
+ .quad _C_LABEL(Xresume_ioapic_edge32)
+ .quad _C_LABEL(Xintr_ioapic_edge33), _C_LABEL(Xrecurse_ioapic_edge33)
+ .quad _C_LABEL(Xresume_ioapic_edge33)
+ .quad _C_LABEL(Xintr_ioapic_edge34), _C_LABEL(Xrecurse_ioapic_edge34)
+ .quad _C_LABEL(Xresume_ioapic_edge34)
+ .quad _C_LABEL(Xintr_ioapic_edge35), _C_LABEL(Xrecurse_ioapic_edge35)
+ .quad _C_LABEL(Xresume_ioapic_edge35)
+ .quad _C_LABEL(Xintr_ioapic_edge36), _C_LABEL(Xrecurse_ioapic_edge36)
+ .quad _C_LABEL(Xresume_ioapic_edge36)
+ .quad _C_LABEL(Xintr_ioapic_edge37), _C_LABEL(Xrecurse_ioapic_edge37)
+ .quad _C_LABEL(Xresume_ioapic_edge37)
+ .quad _C_LABEL(Xintr_ioapic_edge38), _C_LABEL(Xrecurse_ioapic_edge38)
+ .quad _C_LABEL(Xresume_ioapic_edge38)
+ .quad _C_LABEL(Xintr_ioapic_edge39), _C_LABEL(Xrecurse_ioapic_edge39)
+ .quad _C_LABEL(Xresume_ioapic_edge39)
+ .quad _C_LABEL(Xintr_ioapic_edge40), _C_LABEL(Xrecurse_ioapic_edge40)
+ .quad _C_LABEL(Xresume_ioapic_edge40)
+ .quad _C_LABEL(Xintr_ioapic_edge41), _C_LABEL(Xrecurse_ioapic_edge41)
+ .quad _C_LABEL(Xresume_ioapic_edge41)
+ .quad _C_LABEL(Xintr_ioapic_edge42), _C_LABEL(Xrecurse_ioapic_edge42)
+ .quad _C_LABEL(Xresume_ioapic_edge42)
+ .quad _C_LABEL(Xintr_ioapic_edge43), _C_LABEL(Xrecurse_ioapic_edge43)
+ .quad _C_LABEL(Xresume_ioapic_edge43)
+ .quad _C_LABEL(Xintr_ioapic_edge44), _C_LABEL(Xrecurse_ioapic_edge44)
+ .quad _C_LABEL(Xresume_ioapic_edge44)
+ .quad _C_LABEL(Xintr_ioapic_edge45), _C_LABEL(Xrecurse_ioapic_edge45)
+ .quad _C_LABEL(Xresume_ioapic_edge45)
+ .quad _C_LABEL(Xintr_ioapic_edge46), _C_LABEL(Xrecurse_ioapic_edge46)
+ .quad _C_LABEL(Xresume_ioapic_edge46)
+ .quad _C_LABEL(Xintr_ioapic_edge47), _C_LABEL(Xrecurse_ioapic_edge47)
+ .quad _C_LABEL(Xresume_ioapic_edge47)
+ .quad _C_LABEL(Xintr_ioapic_edge48), _C_LABEL(Xrecurse_ioapic_edge48)
+ .quad _C_LABEL(Xresume_ioapic_edge48)
+ .quad _C_LABEL(Xintr_ioapic_edge49), _C_LABEL(Xrecurse_ioapic_edge49)
+ .quad _C_LABEL(Xresume_ioapic_edge49)
+ .quad _C_LABEL(Xintr_ioapic_edge50), _C_LABEL(Xrecurse_ioapic_edge50)
+ .quad _C_LABEL(Xresume_ioapic_edge50)
+ .quad _C_LABEL(Xintr_ioapic_edge51), _C_LABEL(Xrecurse_ioapic_edge51)
+ .quad _C_LABEL(Xresume_ioapic_edge51)
+ .quad _C_LABEL(Xintr_ioapic_edge52), _C_LABEL(Xrecurse_ioapic_edge52)
+ .quad _C_LABEL(Xresume_ioapic_edge52)
+ .quad _C_LABEL(Xintr_ioapic_edge53), _C_LABEL(Xrecurse_ioapic_edge53)
+ .quad _C_LABEL(Xresume_ioapic_edge53)
+ .quad _C_LABEL(Xintr_ioapic_edge54), _C_LABEL(Xrecurse_ioapic_edge54)
+ .quad _C_LABEL(Xresume_ioapic_edge54)
+ .quad _C_LABEL(Xintr_ioapic_edge55), _C_LABEL(Xrecurse_ioapic_edge55)
+ .quad _C_LABEL(Xresume_ioapic_edge55)
+ .quad _C_LABEL(Xintr_ioapic_edge56), _C_LABEL(Xrecurse_ioapic_edge56)
+ .quad _C_LABEL(Xresume_ioapic_edge56)
+ .quad _C_LABEL(Xintr_ioapic_edge57), _C_LABEL(Xrecurse_ioapic_edge57)
+ .quad _C_LABEL(Xresume_ioapic_edge57)
+ .quad _C_LABEL(Xintr_ioapic_edge58), _C_LABEL(Xrecurse_ioapic_edge58)
+ .quad _C_LABEL(Xresume_ioapic_edge58)
+ .quad _C_LABEL(Xintr_ioapic_edge59), _C_LABEL(Xrecurse_ioapic_edge59)
+ .quad _C_LABEL(Xresume_ioapic_edge59)
+ .quad _C_LABEL(Xintr_ioapic_edge60), _C_LABEL(Xrecurse_ioapic_edge60)
+ .quad _C_LABEL(Xresume_ioapic_edge60)
+ .quad _C_LABEL(Xintr_ioapic_edge61), _C_LABEL(Xrecurse_ioapic_edge61)
+ .quad _C_LABEL(Xresume_ioapic_edge61)
+ .quad _C_LABEL(Xintr_ioapic_edge62), _C_LABEL(Xrecurse_ioapic_edge62)
+ .quad _C_LABEL(Xresume_ioapic_edge62)
+ .quad _C_LABEL(Xintr_ioapic_edge63), _C_LABEL(Xrecurse_ioapic_edge63)
+ .quad _C_LABEL(Xresume_ioapic_edge63)
.globl _C_LABEL(ioapic_level_stubs)
_C_LABEL(ioapic_level_stubs):
@@ -797,6 +929,70 @@ _C_LABEL(ioapic_level_stubs):
.quad _C_LABEL(Xresume_ioapic_level30)
.quad _C_LABEL(Xintr_ioapic_level31), _C_LABEL(Xrecurse_ioapic_level31)
.quad _C_LABEL(Xresume_ioapic_level31)
+ .quad _C_LABEL(Xintr_ioapic_level32), _C_LABEL(Xrecurse_ioapic_level32)
+ .quad _C_LABEL(Xresume_ioapic_level32)
+ .quad _C_LABEL(Xintr_ioapic_level33), _C_LABEL(Xrecurse_ioapic_level33)
+ .quad _C_LABEL(Xresume_ioapic_level33)
+ .quad _C_LABEL(Xintr_ioapic_level34), _C_LABEL(Xrecurse_ioapic_level34)
+ .quad _C_LABEL(Xresume_ioapic_level34)
+ .quad _C_LABEL(Xintr_ioapic_level35), _C_LABEL(Xrecurse_ioapic_level35)
+ .quad _C_LABEL(Xresume_ioapic_level35)
+ .quad _C_LABEL(Xintr_ioapic_level36), _C_LABEL(Xrecurse_ioapic_level36)
+ .quad _C_LABEL(Xresume_ioapic_level36)
+ .quad _C_LABEL(Xintr_ioapic_level37), _C_LABEL(Xrecurse_ioapic_level37)
+ .quad _C_LABEL(Xresume_ioapic_level37)
+ .quad _C_LABEL(Xintr_ioapic_level38), _C_LABEL(Xrecurse_ioapic_level38)
+ .quad _C_LABEL(Xresume_ioapic_level38)
+ .quad _C_LABEL(Xintr_ioapic_level39), _C_LABEL(Xrecurse_ioapic_level39)
+ .quad _C_LABEL(Xresume_ioapic_level39)
+ .quad _C_LABEL(Xintr_ioapic_level40), _C_LABEL(Xrecurse_ioapic_level40)
+ .quad _C_LABEL(Xresume_ioapic_level40)
+ .quad _C_LABEL(Xintr_ioapic_level41), _C_LABEL(Xrecurse_ioapic_level41)
+ .quad _C_LABEL(Xresume_ioapic_level41)
+ .quad _C_LABEL(Xintr_ioapic_level42), _C_LABEL(Xrecurse_ioapic_level42)
+ .quad _C_LABEL(Xresume_ioapic_level42)
+ .quad _C_LABEL(Xintr_ioapic_level43), _C_LABEL(Xrecurse_ioapic_level43)
+ .quad _C_LABEL(Xresume_ioapic_level43)
+ .quad _C_LABEL(Xintr_ioapic_level44), _C_LABEL(Xrecurse_ioapic_level44)
+ .quad _C_LABEL(Xresume_ioapic_level44)
+ .quad _C_LABEL(Xintr_ioapic_level45), _C_LABEL(Xrecurse_ioapic_level45)
+ .quad _C_LABEL(Xresume_ioapic_level45)
+ .quad _C_LABEL(Xintr_ioapic_level46), _C_LABEL(Xrecurse_ioapic_level46)
+ .quad _C_LABEL(Xresume_ioapic_level46)
+ .quad _C_LABEL(Xintr_ioapic_level47), _C_LABEL(Xrecurse_ioapic_level47)
+ .quad _C_LABEL(Xresume_ioapic_level47)
+ .quad _C_LABEL(Xintr_ioapic_level48), _C_LABEL(Xrecurse_ioapic_level48)
+ .quad _C_LABEL(Xresume_ioapic_level48)
+ .quad _C_LABEL(Xintr_ioapic_level49), _C_LABEL(Xrecurse_ioapic_level49)
+ .quad _C_LABEL(Xresume_ioapic_level49)
+ .quad _C_LABEL(Xintr_ioapic_level50), _C_LABEL(Xrecurse_ioapic_level50)
+ .quad _C_LABEL(Xresume_ioapic_level50)
+ .quad _C_LABEL(Xintr_ioapic_level51), _C_LABEL(Xrecurse_ioapic_level51)
+ .quad _C_LABEL(Xresume_ioapic_level51)
+ .quad _C_LABEL(Xintr_ioapic_level52), _C_LABEL(Xrecurse_ioapic_level52)
+ .quad _C_LABEL(Xresume_ioapic_level52)
+ .quad _C_LABEL(Xintr_ioapic_level53), _C_LABEL(Xrecurse_ioapic_level53)
+ .quad _C_LABEL(Xresume_ioapic_level53)
+ .quad _C_LABEL(Xintr_ioapic_level54), _C_LABEL(Xrecurse_ioapic_level54)
+ .quad _C_LABEL(Xresume_ioapic_level54)
+ .quad _C_LABEL(Xintr_ioapic_level55), _C_LABEL(Xrecurse_ioapic_level55)
+ .quad _C_LABEL(Xresume_ioapic_level55)
+ .quad _C_LABEL(Xintr_ioapic_level56), _C_LABEL(Xrecurse_ioapic_level56)
+ .quad _C_LABEL(Xresume_ioapic_level56)
+ .quad _C_LABEL(Xintr_ioapic_level57), _C_LABEL(Xrecurse_ioapic_level57)
+ .quad _C_LABEL(Xresume_ioapic_level57)
+ .quad _C_LABEL(Xintr_ioapic_level58), _C_LABEL(Xrecurse_ioapic_level58)
+ .quad _C_LABEL(Xresume_ioapic_level58)
+ .quad _C_LABEL(Xintr_ioapic_level59), _C_LABEL(Xrecurse_ioapic_level59)
+ .quad _C_LABEL(Xresume_ioapic_level59)
+ .quad _C_LABEL(Xintr_ioapic_level60), _C_LABEL(Xrecurse_ioapic_level60)
+ .quad _C_LABEL(Xresume_ioapic_level60)
+ .quad _C_LABEL(Xintr_ioapic_level61), _C_LABEL(Xrecurse_ioapic_level61)
+ .quad _C_LABEL(Xresume_ioapic_level61)
+ .quad _C_LABEL(Xintr_ioapic_level62), _C_LABEL(Xrecurse_ioapic_level62)
+ .quad _C_LABEL(Xresume_ioapic_level62)
+ .quad _C_LABEL(Xintr_ioapic_level63), _C_LABEL(Xrecurse_ioapic_level63)
+ .quad _C_LABEL(Xresume_ioapic_level63)
#endif
.data
Index: include/cpu.h
===================================================================
RCS file: /cvs/src/sys/arch/amd64/include/cpu.h,v
retrieving revision 1.67
diff -u -p -r1.67 cpu.h
--- include/cpu.h 23 May 2011 09:52:24 -0000 1.67
+++ include/cpu.h 8 Jun 2011 22:05:43 -0000
@@ -82,11 +82,11 @@ struct cpu_info {
struct pcb *ci_idle_pcb;
struct intrsource *ci_isources[MAX_INTR_SOURCES];
- u_int32_t ci_ipending;
+ u_int64_t ci_ipending;
int ci_ilevel;
int ci_idepth;
- u_int32_t ci_imask[NIPL];
- u_int32_t ci_iunmask[NIPL];
+ u_int64_t ci_imask[NIPL];
+ u_int64_t ci_iunmask[NIPL];
#ifdef DIAGNOSTIC
int ci_mutex_level;
#endif
Index: include/intrdefs.h
===================================================================
RCS file: /cvs/src/sys/arch/amd64/include/intrdefs.h,v
retrieving revision 1.8
diff -u -p -r1.8 intrdefs.h
--- include/intrdefs.h 13 Nov 2010 04:16:42 -0000 1.8
+++ include/intrdefs.h 8 Jun 2011 22:05:43 -0000
@@ -44,22 +44,22 @@
* Local APIC masks. Must not conflict with SIR_* above, and must
* be >= NUM_LEGACY_IRQs. Note that LIR_IPI must be first.
*/
-#define LIR_IPI 31
-#define LIR_TIMER 30
+#define LIR_IPI 63
+#define LIR_TIMER 62
/* Soft interrupt masks. */
-#define SIR_CLOCK 29
-#define SIR_NET 28
-#define SIR_TTY 27
+#define SIR_CLOCK 61
+#define SIR_NET 60
+#define SIR_TTY 59
/*
- * Maximum # of interrupt sources per CPU. 32 to fit in one word.
+ * Maximum # of interrupt sources per CPU. 64 to fit in one word.
* ioapics can theoretically produce more, but it's not likely to
* happen. For multiple ioapics, things can be routed to different
* CPUs.
*/
-#define MAX_INTR_SOURCES 32
+#define MAX_INTR_SOURCES 64
#define NUM_LEGACY_IRQS 16
/*