X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-x86_64%2Fhardirq.h;h=27c381fa1c9d0e15eb0ff5e60c901a24336bd714;hb=6a77f38946aaee1cd85eeec6cf4229b204c15071;hp=9ce1a78084e866cec5700f23ede694bfaae5fb99;hpb=c7b5ebbddf7bcd3651947760f423e3783bbe6573;p=linux-2.6.git

diff --git a/include/asm-x86_64/hardirq.h b/include/asm-x86_64/hardirq.h
index 9ce1a7808..27c381fa1 100644
--- a/include/asm-x86_64/hardirq.h
+++ b/include/asm-x86_64/hardirq.h
@@ -5,6 +5,7 @@
 #include
 #include
 #include
+#include <asm/apic.h>

 #define __ARCH_IRQ_STAT 1

@@ -15,47 +16,22 @@
 #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */

 /*
- * We put the hardirq and softirq counter into the preemption
- * counter. The bitmask has the following meaning:
- *
- * - bits 0-7 are the preemption count (max preemption depth: 256)
- * - bits 8-15 are the softirq count (max # of softirqs: 256)
- * - bits 16-23 are the hardirq count (max # of hardirqs: 256)
- *
- * - ( bit 26 is the PREEMPT_ACTIVE flag. )
- *
- * PREEMPT_MASK: 0x000000ff
- * HARDIRQ_MASK: 0x0000ff00
- * SOFTIRQ_MASK: 0x00ff0000
+ * 'what should we do if we get a hw irq event on an illegal vector'.
+ * each architecture has to answer this themselves.
  */
-
-#define PREEMPT_BITS	8
-#define SOFTIRQ_BITS	8
-#define HARDIRQ_BITS	8
-
-#define PREEMPT_SHIFT	0
-#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
-#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
-
-/*
- * The hardirq mask has to be large enough to have
- * space for potentially all IRQ sources in the system
- * nesting on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
+static inline void ack_bad_irq(unsigned int irq)
+{
+	printk("unexpected IRQ trap at vector %02x\n", irq);
+#ifdef CONFIG_X86_LOCAL_APIC
+	/*
+	 * Currently unexpected vectors happen only on SMP and APIC.
+	 * We _must_ ack these because every local APIC has only N
+	 * irq slots per priority level, and a 'hanging, unacked' IRQ
+	 * holds up an irq slot - in excessive cases (when multiple
+	 * unexpected vectors occur) that might lock up the APIC
+	 * completely.
+	 */
+	ack_APIC_irq();
 #endif
-
-#define nmi_enter()		(irq_enter())
-#define nmi_exit()		(preempt_count() -= HARDIRQ_OFFSET)
-
-#define irq_enter()		(preempt_count() += HARDIRQ_OFFSET)
-#define irq_exit()						\
-do {								\
-	preempt_count() -= IRQ_EXIT_OFFSET;			\
-	if (!in_interrupt() && softirq_pending(smp_processor_id())) \
-		do_softirq();					\
-	preempt_enable_no_resched();				\
-} while (0)
-
+}
 #endif /* __ASM_HARDIRQ_H */
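
For orientation: the second hunk removes the x86-64 copy of the preempt_count() bit layout (8 bits each for the preemption, softirq and hardirq counts, plus the PREEMPT_ACTIVE flag) together with the irq_enter()/irq_exit()/nmi_enter() helpers, and adds ack_bad_irq(), which logs a stray vector and, when the local APIC is configured in, acks it so an unexpected IRQ cannot tie up an APIC slot. The short user-space sketch below only restates the bit layout being deleted; the __IRQ_MASK() helper and the main() driver are illustrative assumptions added for the demonstration, not part of the patch.

/*
 * Illustrative sketch only -- not part of the patch above. It re-derives
 * the field masks implied by the PREEMPT/SOFTIRQ/HARDIRQ bit layout that
 * this diff drops from the x86-64 header. __IRQ_MASK() and main() are
 * assumptions added for the demonstration.
 */
#include <stdio.h>

#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	8

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)

/* Hypothetical helper: a run of 'bits' ones, to be shifted into place. */
#define __IRQ_MASK(bits)	((1UL << (bits)) - 1)

#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)

int main(void)
{
	/* Prints 0x000000ff, 0x0000ff00 and 0x00ff0000 respectively. */
	printf("PREEMPT_MASK: 0x%08lx\n", (unsigned long)PREEMPT_MASK);
	printf("SOFTIRQ_MASK: 0x%08lx\n", (unsigned long)SOFTIRQ_MASK);
	printf("HARDIRQ_MASK: 0x%08lx\n", (unsigned long)HARDIRQ_MASK);
	return 0;
}

With SOFTIRQ_SHIFT at 8 and HARDIRQ_SHIFT at 16, the softirq count occupies the 0x0000ff00 byte and the hardirq count the 0x00ff0000 byte, matching the "bits 8-15 / bits 16-23" lines of the removed comment; the two mask lines in that comment list the values the other way around, a stale comment rather than a description of the code. In mainline these definitions were not lost: they moved into the generic hardirq support shared by all architectures, which is why this arch header can shrink to little more than ack_bad_irq().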