X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-x86_64%2Fhardirq.h;h=27c381fa1c9d0e15eb0ff5e60c901a24336bd714;hb=6a77f38946aaee1cd85eeec6cf4229b204c15071;hp=d3c08b2a07695a134856c3e659738764a76b9009;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git

diff --git a/include/asm-x86_64/hardirq.h b/include/asm-x86_64/hardirq.h
index d3c08b2a0..27c381fa1 100644
--- a/include/asm-x86_64/hardirq.h
+++ b/include/asm-x86_64/hardirq.h
@@ -5,6 +5,7 @@
 #include <linux/threads.h>
 #include <linux/irq.h>
 #include <asm/pda.h>
+#include <asm/apic.h>
 
 #define __ARCH_IRQ_STAT 1
 
@@ -15,88 +16,22 @@
 #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
 
 /*
- * We put the hardirq and softirq counter into the preemption
- * counter. The bitmask has the following meaning:
- *
- * - bits 0-7 are the preemption count (max preemption depth: 256)
- * - bits 8-15 are the softirq count (max # of softirqs: 256)
- * - bits 16-23 are the hardirq count (max # of hardirqs: 256)
- *
- * - ( bit 26 is the PREEMPT_ACTIVE flag. )
- *
- * PREEMPT_MASK: 0x000000ff
- * HARDIRQ_MASK: 0x0000ff00
- * SOFTIRQ_MASK: 0x00ff0000
+ * 'what should we do if we get a hw irq event on an illegal vector'.
+ * each architecture has to answer this themselves.
  */
-
-#define PREEMPT_BITS	8
-#define SOFTIRQ_BITS	8
-#define HARDIRQ_BITS	8
-
-#define PREEMPT_SHIFT	0
-#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
-#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
-
-#define __MASK(x)	((1UL << (x))-1)
-
-#define PREEMPT_MASK	(__MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
-#define HARDIRQ_MASK	(__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
-#define SOFTIRQ_MASK	(__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
-
-#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
-#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
-#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
-
-#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
-#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
-#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
-
-/*
- * The hardirq mask has to be large enough to have
- * space for potentially all IRQ sources in the system
- * nesting on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
-/*
- * Are we doing bottom half or hardware interrupt processing?
- * Are we in a softirq context? Interrupt context?
- */
-#define in_irq()		(hardirq_count())
-#define in_softirq()		(softirq_count())
-#define in_interrupt()		(irq_count())
-
-
-#define hardirq_trylock()	(!in_interrupt())
-#define hardirq_endlock()	do { } while (0)
-
-#define irq_enter()		(preempt_count() += HARDIRQ_OFFSET)
-#define nmi_enter()		(irq_enter())
-#define nmi_exit()		(preempt_count() -= HARDIRQ_OFFSET)
-
-
-#ifdef CONFIG_PREEMPT
-# include <linux/smp_lock.h>
-# define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
-# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
-#else
-# define in_atomic()	(preempt_count() != 0)
-# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
+static inline void ack_bad_irq(unsigned int irq)
+{
+	printk("unexpected IRQ trap at vector %02x\n", irq);
+#ifdef CONFIG_X86_LOCAL_APIC
+	/*
+	 * Currently unexpected vectors happen only on SMP and APIC.
+	 * We _must_ ack these because every local APIC has only N
+	 * irq slots per priority level, and a 'hanging, unacked' IRQ
+	 * holds up an irq slot - in excessive cases (when multiple
+	 * unexpected vectors occur) that might lock up the APIC
+	 * completely.
+	 */
+	ack_APIC_irq();
 #endif
-
-#define irq_exit() \
-do { \
-	preempt_count() -= IRQ_EXIT_OFFSET; \
-	if (!in_interrupt() && softirq_pending(smp_processor_id())) \
-		do_softirq(); \
-	preempt_enable_no_resched(); \
-} while (0)
-
-#ifndef CONFIG_SMP
-# define synchronize_irq(irq)	barrier()
-#else
-  extern void synchronize_irq(unsigned int irq);
-#endif /* CONFIG_SMP */
-
+}
 #endif /* __ASM_HARDIRQ_H */
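For context, the block removed above packed three counters into the single per-task preempt_count() word, and in_irq()/in_softirq()/in_interrupt() were plain bit tests against that word (the patch drops the per-arch copy in favour of the generic version). Below is a minimal standalone C sketch of that arithmetic, an illustration only and not kernel code: preempt_count here is an ordinary variable standing in for the kernel's per-task field, and irq_enter()/irq_exit() are reduced to the bare add and subtract of HARDIRQ_OFFSET.

/*
 * Illustration only -- mimics the layout the removed comment
 * documents: bits 0-7 preemption depth, bits 8-15 softirq count,
 * bits 16-23 hardirq count, all in one word.
 */
#include <stdio.h>

static unsigned long preempt_count;

#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)

#define __MASK(x)	((1UL << (x)) - 1)
#define SOFTIRQ_MASK	(__MASK(8) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__MASK(8) << HARDIRQ_SHIFT)

#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)

#define in_irq()	(preempt_count & HARDIRQ_MASK)
#define in_softirq()	(preempt_count & SOFTIRQ_MASK)
#define in_interrupt()	(preempt_count & (HARDIRQ_MASK | SOFTIRQ_MASK))

int main(void)
{
	printf("task context: in_interrupt()=%lx\n", in_interrupt());

	preempt_count += HARDIRQ_OFFSET;	/* what irq_enter() did */
	printf("hard irq:     in_irq()=%lx in_interrupt()=%lx\n",
	       in_irq(), in_interrupt());
	preempt_count -= HARDIRQ_OFFSET;	/* matching exit */

	preempt_count += SOFTIRQ_OFFSET;	/* softirq processing */
	printf("softirq:      in_softirq()=%lx in_interrupt()=%lx\n",
	       in_softirq(), in_interrupt());
	preempt_count -= SOFTIRQ_OFFSET;

	return 0;
}

Because each class gets its own byte, up to 256 hard interrupts can nest before the field overflows, which is what the removed '#if (1 << HARDIRQ_BITS) < NR_IRQS' check guarded against.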