2 * Code to handle x86 style IRQs plus some generic interrupt stuff.
4 * Copyright (C) 1992 Linus Torvalds
5 * Copyright (C) 1994, 1995, 1996, 1997, 1998 Ralf Baechle
6 * Copyright (C) 1999 SuSE GmbH (Philipp Rumpf, prumpf@tux.org)
7 * Copyright (C) 1999-2000 Grant Grundler
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2, or (at your option)
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 #include <linux/bitops.h>
24 #include <linux/config.h>
25 #include <linux/eisa.h>
26 #include <linux/errno.h>
27 #include <linux/init.h>
28 #include <linux/module.h>
29 #include <linux/signal.h>
30 #include <linux/types.h>
31 #include <linux/ioport.h>
32 #include <linux/timex.h>
33 #include <linux/slab.h>
34 #include <linux/random.h>
35 #include <linux/sched.h>
36 #include <linux/interrupt.h>
37 #include <linux/kernel_stat.h>
38 #include <linux/irq.h>
39 #include <linux/seq_file.h>
40 #include <linux/spinlock.h>
42 #include <asm/cache.h>
/* Define to collect per-IRQ CR16 (cycle counter) timing histograms. */
#undef PARISC_IRQ_CR16_COUNTS

/* Handlers defined elsewhere but wired into cpu_irq_actions[] below. */
extern irqreturn_t timer_interrupt(int, void *, struct pt_regs *);
extern irqreturn_t ipi_interrupt(int, void *, struct pt_regs *);

/* Debug trace of IRQ dispatch; the (noisy) timer IRQ is filtered out.
** NOTE(review): the "#ifdef DEBUG_IRQ" / "#else" lines that pair with the
** "#endif" below appear to be missing from this copy of the file -- as
** written the second define silently redefines the first. Confirm against
** the original source.
*/
#define DBG_IRQ(irq, x) if ((irq) != TIMER_IRQ) printk x
#define DBG_IRQ(irq, x) do { } while (0)
#endif /* DEBUG_IRQ */

/* EIEM/EIRR bits are numbered *big endian*: bit 0 is the MSB. */
#define EIEM_MASK(irq) (1UL<<(MAX_CPU_IRQ-IRQ_OFFSET(irq)))
/* Bits in EIEM correlate with cpu_irq_action[].
** Numbered *Big Endian*! (ie bit 0 is MSB)
*/

/* Software copy of this CPU's external-interrupt enable mask (EIEM).
** Updated under irq_lock, then pushed to every CPU via on_each_cpu().
*/
static volatile unsigned long cpu_eiem = 0;

static spinlock_t irq_lock = SPIN_LOCK_UNLOCKED;  /* protect IRQ regions */
/* Cross-CPU callback for on_each_cpu(): install the EIEM value passed in
** 'eiem_val' (an unsigned long carried through the void pointer) on the
** executing CPU.
*/
static void cpu_set_eiem(void *eiem_val)
{
	set_eiem((unsigned long) eiem_val);
}
71 static inline void disable_cpu_irq(void *unused, int irq)
73 unsigned long eirr_bit = EIEM_MASK(irq);
75 cpu_eiem &= ~eirr_bit;
76 on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
79 static void enable_cpu_irq(void *unused, int irq)
81 unsigned long eirr_bit = EIEM_MASK(irq);
83 mtctl(eirr_bit, 23); /* clear EIRR bit before unmasking */
85 on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
/* mask and disable are the same at the CPU level.
** Difference is enable clears pending interrupts.
*/
#define mask_cpu_irq disable_cpu_irq
93 static inline void unmask_cpu_irq(void *unused, int irq)
95 unsigned long eirr_bit = EIEM_MASK(irq);
97 /* NOTE: sending an IPI will cause do_cpu_irq_mask() to
98 ** handle *any* unmasked pending interrupts.
99 ** ie We don't need to check for pending interrupts here.
101 on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
105 * XXX cpu_irq_actions[] will become 2 dimensional for per CPU EIR support.
106 * correspond changes needed in:
107 * processor_probe() initialize additional action arrays
108 * request_irq() handle CPU IRQ region specially
109 * do_cpu_irq_mask() index into the matching irq_action array.
111 struct irqaction cpu_irq_actions[IRQ_PER_REGION] = {
112 [IRQ_OFFSET(TIMER_IRQ)] = {
113 .handler = timer_interrupt,
117 [IRQ_OFFSET(IPI_IRQ)] = {
118 .handler = ipi_interrupt,
125 struct irq_region cpu0_irq_region = {
127 .disable_irq = disable_cpu_irq,
128 .enable_irq = enable_cpu_irq,
129 .mask_irq = unmask_cpu_irq,
130 .unmask_irq = unmask_cpu_irq
134 .name = "PARISC-CPU",
135 .irqbase = IRQ_FROM_REGION(CPU_IRQ_REGION),
137 .action = cpu_irq_actions,
140 struct irq_region *irq_region[NR_IRQ_REGS] = {
141 [ 0 ] = NULL, /* reserved for EISA, else causes data page fault (aka code 15) */
142 [ CPU_IRQ_REGION ] = &cpu0_irq_region,
147 ** Generic interfaces that device drivers can use:
148 ** mask_irq() block IRQ
149 ** unmask_irq() re-enable IRQ and trigger if IRQ is pending
150 ** disable_irq() block IRQ
151 ** enable_irq() clear pending and re-enable IRQ
154 void mask_irq(int irq)
156 struct irq_region *region;
158 DBG_IRQ(irq, ("mask_irq(%d) %d+%d eiem 0x%lx\n", irq,
159 IRQ_REGION(irq), IRQ_OFFSET(irq), cpu_eiem));
160 irq = irq_canonicalize(irq);
161 region = irq_region[IRQ_REGION(irq)];
162 if (region->ops.mask_irq)
163 region->ops.mask_irq(region->data.dev, IRQ_OFFSET(irq));
166 void unmask_irq(int irq)
168 struct irq_region *region;
170 DBG_IRQ(irq, ("unmask_irq(%d) %d+%d eiem 0x%lx\n", irq,
171 IRQ_REGION(irq), IRQ_OFFSET(irq), cpu_eiem));
172 irq = irq_canonicalize(irq);
173 region = irq_region[IRQ_REGION(irq)];
174 if (region->ops.unmask_irq)
175 region->ops.unmask_irq(region->data.dev, IRQ_OFFSET(irq));
178 void disable_irq(int irq)
180 struct irq_region *region;
182 DBG_IRQ(irq, ("disable_irq(%d) %d+%d eiem 0x%lx\n", irq,
183 IRQ_REGION(irq), IRQ_OFFSET(irq), cpu_eiem));
184 irq = irq_canonicalize(irq);
185 region = irq_region[IRQ_REGION(irq)];
186 if (region->ops.disable_irq)
187 region->ops.disable_irq(region->data.dev, IRQ_OFFSET(irq));
191 EXPORT_SYMBOL(disable_irq);
193 void enable_irq(int irq)
195 struct irq_region *region;
197 DBG_IRQ(irq, ("enable_irq(%d) %d+%d EIRR 0x%lx EIEM 0x%lx\n", irq,
198 IRQ_REGION(irq), IRQ_OFFSET(irq), mfctl(23), mfctl(15)));
199 irq = irq_canonicalize(irq);
200 region = irq_region[IRQ_REGION(irq)];
202 if (region->ops.enable_irq)
203 region->ops.enable_irq(region->data.dev, IRQ_OFFSET(irq));
207 EXPORT_SYMBOL(enable_irq);
209 int show_interrupts(struct seq_file *p, void *v)
211 #ifdef CONFIG_PROC_FS
212 unsigned int regnr = *(loff_t *) v, i = 0;
217 for (i = 0; i < NR_CPUS; i++)
220 seq_printf(p, " CPU%02d ", i);
222 #ifdef PARISC_IRQ_CR16_COUNTS
223 seq_printf(p, "[min/avg/max] (CPU cycle counts)");
228 /* We don't need *irqsave lock variants since this is
229 ** only allowed to change while in the base context.
231 spin_lock(&irq_lock);
232 if (regnr < NR_IRQ_REGS) {
233 struct irq_region *region = irq_region[regnr];
235 if (!region || !region->action)
238 for (i = 0; i <= MAX_CPU_IRQ; i++) {
239 struct irqaction *action = ®ion->action[i];
240 unsigned int irq_no = IRQ_FROM_REGION(regnr) + i;
242 if (!action->handler)
245 seq_printf(p, "%3d: ", irq_no);
247 for (; j < NR_CPUS; j++)
250 seq_printf(p, "%10u ", kstat_cpu(j).irqs[irq_no]);
252 seq_printf(p, " %14s",
253 region->data.name ? region->data.name : "N/A");
254 #ifndef PARISC_IRQ_CR16_COUNTS
255 seq_printf(p, " %s", action->name);
257 while ((action = action->next))
258 seq_printf(p, ", %s", action->name);
260 for ( ;action; action = action->next) {
261 unsigned int k, avg, min, max;
263 min = max = action->cr16_hist[0];
265 for (avg = k = 0; k < PARISC_CR16_HIST_SIZE; k++) {
266 int hist = action->cr16_hist[k];
273 if (hist > max) max = hist;
274 if (hist < min) min = hist;
278 seq_printf(p, " %s[%d/%d/%d]", action->name,
287 spin_unlock(&irq_lock);
289 #endif /* CONFIG_PROC_FS */
/*
** The following form a "set": Virtual IRQ, Transaction Address, Trans Data.
** Respectively, these map to IRQ region+EIRR, Processor HPA, EIRR bit.
**
** To use txn_XXX() interfaces, get a Virtual IRQ first.
** Then use that to get the Transaction address and data.
*/

/* txn_alloc_irq(): allocate the first free CPU-region IRQ slot.
** NOTE(review): the function's signature line (presumably
** "int txn_alloc_irq(void)"), the 'irq' declaration, and the closing
** braces/return are missing from this copy -- confirm against the
** original source.
*/
	/* never return irq 0 cause that's the interval timer */
	for (irq = 1; irq <= MAX_CPU_IRQ; irq++) {
		if (cpu_irq_actions[irq].handler == NULL) {
			return (IRQ_FROM_REGION(CPU_IRQ_REGION) + irq);

	/* unlikely, but be prepared */
320 txn_claim_irq(int irq)
322 if (irq_region[IRQ_REGION(irq)]->action[IRQ_OFFSET(irq)].handler ==NULL)
325 /* unlikely, but be prepared */
330 txn_alloc_addr(int virt_irq)
332 static int next_cpu = -1;
334 next_cpu++; /* assign to "next" CPU we want this bugger on */
337 while ((next_cpu < NR_CPUS) && (!cpu_data[next_cpu].txn_addr ||
338 !cpu_online(next_cpu)))
341 if (next_cpu >= NR_CPUS)
342 next_cpu = 0; /* nothing else, assign monarch */
344 return cpu_data[next_cpu].txn_addr;
349 ** The alloc process needs to accept a parameter to accommodate limitations
350 ** of the HW/SW which use these bits:
351 ** Legacy PA I/O (GSC/NIO): 5 bits (architected EIM register)
352 ** V-class (EPIC): 6 bits
353 ** N/L-class/A500: 8 bits (iosapic)
354 ** PCI 2.2 MSI: 16 bits (I think)
355 ** Existing PCI devices: 32-bits (all Symbios SCSI/ATM/HyperFabric)
357 ** On the service provider side:
358 ** o PA 1.1 (and PA2.0 narrow mode) 5-bits (width of EIR register)
359 ** o PA 2.0 wide mode 6-bits (per processor)
360 ** o IA64 8-bits (0-256 total)
362 ** So a Legacy PA I/O device on a PA 2.0 box can't use all
363 ** the bits supported by the processor...and the N/L-class
364 ** I/O subsystem supports more bits than PA2.0 has. The first
365 ** case is the problem.
/* Return the EIR "transaction data" (the EIRR bit number) for virt_irq.
** Panics if the IRQ offset cannot be expressed in a bits_wide-wide
** transaction (see the width table in the comment block above).
** NOTE(review): the return type line and the braces are missing from
** this copy -- confirm against the original source.
*/
txn_alloc_data(int virt_irq, unsigned int bits_wide)
	/* XXX FIXME : bits_wide indicates how wide the transaction
	** data is allowed to be...we may need a different virt_irq
	** if this one won't work. Another reason to index virtual
	** irq's into a table which can manage CPU/IRQ bit separately.
	*/
	if (IRQ_OFFSET(virt_irq) > (1 << (bits_wide -1)))
		panic("Sorry -- didn't allocate valid IRQ for this device\n");

	return (IRQ_OFFSET(virt_irq));
383 void do_irq(struct irqaction *action, int irq, struct pt_regs * regs)
385 int cpu = smp_processor_id();
388 ++kstat_cpu(cpu).irqs[irq];
390 DBG_IRQ(irq, ("do_irq(%d) %d+%d eiem 0x%lx\n", irq, IRQ_REGION(irq), IRQ_OFFSET(irq), cpu_eiem));
392 for (; action; action = action->next) {
393 #ifdef PARISC_IRQ_CR16_COUNTS
394 unsigned long cr_start = mfctl(16);
397 if (action->handler == NULL) {
398 if (IRQ_REGION(irq) == EISA_IRQ_REGION && irq_region[EISA_IRQ_REGION]) {
399 /* were we called due to autodetecting (E)ISA irqs ? */
400 unsigned int *status;
401 status = &irq_region[EISA_IRQ_REGION]->data.status[IRQ_OFFSET(irq)];
402 if (*status & IRQ_AUTODETECT) {
403 *status &= ~IRQ_WAITING;
407 printk(KERN_ERR "IRQ: CPU:%d No handler for IRQ %d !\n", cpu, irq);
411 action->handler(irq, action->dev_id, regs);
413 #ifdef PARISC_IRQ_CR16_COUNTS
415 unsigned long cr_end = mfctl(16);
416 unsigned long tmp = cr_end - cr_start;
417 /* check for roll over */
418 cr_start = (cr_end < cr_start) ? -(tmp) : (tmp);
420 action->cr16_hist[action->cr16_idx++] = (int) cr_start;
421 action->cr16_idx &= PARISC_CR16_HIST_SIZE - 1;
429 /* ONLY called from entry.S:intr_extint() */
430 void do_cpu_irq_mask(struct pt_regs *regs)
432 unsigned long eirr_val;
433 unsigned int i=3; /* limit time in interrupt context */
436 * PSW_I or EIEM bits cannot be enabled until after the
437 * interrupts are processed.
438 * timer_interrupt() assumes it won't get interrupted when it
439 * holds the xtime_lock...an unmasked interrupt source could
440 * interrupt and deadlock by trying to grab xtime_lock too.
441 * Keeping PSW_I and EIEM disabled avoids this.
443 set_eiem(0UL); /* disable all extr interrupt for now */
445 /* 1) only process IRQs that are enabled/unmasked (cpu_eiem)
446 * 2) We loop here on EIRR contents in order to avoid
447 * nested interrupts or having to take another interrupt
448 * when we could have just handled it right away.
449 * 3) Limit the number of times we loop to make sure other
450 * processing can occur.
452 while ((eirr_val = (mfctl(23) & cpu_eiem)) && --i) {
453 unsigned long bit = (1UL<<MAX_CPU_IRQ);
456 mtctl(eirr_val, 23); /* reset bits we are going to process */
459 if (eirr_val != (1UL << MAX_CPU_IRQ))
460 printk(KERN_DEBUG "do_cpu_irq_mask 0x%x & 0x%x\n", eirr_val, cpu_eiem);
463 /* Work our way from MSb to LSb...same order we alloc EIRs */
464 for (irq = 0; eirr_val && bit; bit>>=1, irq++)
466 if (!(bit & eirr_val & cpu_eiem))
469 /* clear bit in mask - can exit loop sooner */
472 do_irq(&cpu_irq_actions[irq], TIMER_IRQ+irq, regs);
479 /* Called from second level IRQ regions: eg dino or iosapic. */
480 void do_irq_mask(unsigned long mask, struct irq_region *region, struct pt_regs *regs)
486 if (mask != (1L<<MAX_CPU_IRQ))
487 printk(KERN_DEBUG "do_irq_mask %08lx %p %p\n", mask, region, regs);
490 for (bit = (1L<<MAX_CPU_IRQ), irq = 0; mask && bit; bit>>=1, irq++) {
491 unsigned int irq_num;
495 mask &= ~bit; /* clear bit in mask - can exit loop sooner */
496 irq_num = region->data.irqbase + irq;
499 do_irq(®ion->action[irq], irq_num, regs);
505 static inline int find_free_region(void)
509 for (irqreg=1; irqreg <= (NR_IRQ_REGS); irqreg++) {
510 if (irq_region[irqreg] == NULL)
/**
 * alloc_irq_region - allocate/init a new IRQ region
 * @count: number of IRQs in this region.
 * @ops: function table with request/release/mask/unmask/etc.. entries.
 * @name: name of region owner for /proc/interrupts output.
 * @dev: private data to associate with the new IRQ region.
 *
 * Every IRQ must become a MMIO write to the CPU's EIRR in
 * order to get CPU service. The IRQ region represents the
 * number of unique events the region handler can (or must)
 * identify. For PARISC CPU, that's the width of the EIR Register.
 * IRQ regions virtualize IRQs (eg EISA or PCI host bus controllers)
 * for line based devices.
 *
 * NOTE(review): this copy is missing lines (the 'index' declaration,
 * "return NULL" error paths, ops/data assignments, closing braces).
 * Also note two latent issues to confirm against the original: the
 * region kmalloc result is memset without a NULL check, and the
 * !region->action path does not kfree(region).
 */
struct irq_region *alloc_irq_region( int count, struct irq_region_ops *ops,
		const char *name, void *dev)
{
	struct irq_region *region;

	index = find_free_region();
		printk(KERN_ERR "Maximum number of irq regions exceeded. Increase NR_IRQ_REGS!\n");

	if ((IRQ_REGION(count-1)))

	if (count < IRQ_PER_REGION) {
		DBG_IRQ(0, ("alloc_irq_region() using minimum of %d irq lines for %s (%d)\n",
			IRQ_PER_REGION, name, count));
		count = IRQ_PER_REGION;

	/* if either mask *or* unmask is set, both have to be set. */
	if((ops->mask_irq || ops->unmask_irq) &&
		!(ops->mask_irq && ops->unmask_irq))

	/* ditto for enable/disable */
	if( (ops->disable_irq || ops->enable_irq) &&
		!(ops->disable_irq && ops->enable_irq) )

	region = kmalloc(sizeof(*region), GFP_ATOMIC);
	memset(region, 0, sizeof(*region));

	region->action = kmalloc(count * sizeof(*region->action), GFP_ATOMIC);
	if (!region->action) {
	memset(region->action, 0, count * sizeof(*region->action));

	region->data.irqbase = IRQ_FROM_REGION(index);
	region->data.name = name;
	region->data.dev = dev;

	irq_region[index] = region;

	return irq_region[index];
/* FIXME: SMP, flags, bottom halves, rest */

/* Register 'handler' for the region-encoded virtual IRQ 'irq'. The
** first irqaction per slot is preallocated in the region's action array;
** sharing appends further kmalloc'd irqactions onto the ->next chain.
** NOTE(review): this copy is missing lines (the void *dev_id parameter
** line, opening brace, in_interrupt() check, the "while (action->next)"
** loop head, the "if (!action)" guard, returns, closing braces).
** Also note a latent bug to confirm against the original: the kmalloc'd
** action->next is memset before any NULL check -- an atomic allocation
** failure would oops here instead of reaching the error path below.
*/
int request_irq(unsigned int irq,
	irqreturn_t (*handler)(int, void *, struct pt_regs *),
	unsigned long irqflags,
	const char * devname,

	struct irqaction * action;

	printk(KERN_INFO "request_irq(%d, %p, 0x%lx, %s, %p)\n",irq, handler, irqflags, devname, dev_id);

	irq = irq_canonicalize(irq);
	/* request_irq()/free_irq() may not be called from interrupt context. */

		printk(KERN_ERR "request_irq(%d,...): Augh! No handler for irq!\n",

	if (irq_region[IRQ_REGION(irq)] == NULL) {
		/*
		** Bug catcher for drivers which use "char" or u8 for
		** the IRQ number. They lose the region number which
		** is in pcidev->irq (an int).
		*/
		printk(KERN_ERR "%p (%s?) called request_irq with an invalid irq %d\n",
			__builtin_return_address(0), devname, irq);

	spin_lock(&irq_lock);
	action = &(irq_region[IRQ_REGION(irq)]->action[IRQ_OFFSET(irq)]);

	/* First one is preallocated. */
	if (action->handler) {
		/* But it's in use...find the tail and allocate a new one */
		action = action->next;

		action->next = kmalloc(sizeof(*action), GFP_ATOMIC);
		memset(action->next, 0, sizeof(*action));

		action = action->next;

		spin_unlock(&irq_lock);
		printk(KERN_ERR "request_irq(): Augh! No action!\n") ;

	action->handler = handler;
	action->flags = irqflags;
	cpus_clear(action->mask);
	action->name = devname;

	action->dev_id = dev_id;
	spin_unlock(&irq_lock);

EXPORT_SYMBOL(request_irq);
/* Remove the irqaction whose dev_id matches from 'irq's chain. The
** preallocated head entry is cleared or overwritten by its successor;
** chained entries are unlinked (and, presumably, kfree'd -- the kfree
** lines are missing from this copy).
** NOTE(review): also missing: opening brace, the "p = &action->next"
** loop setup, 'continue', return statements and closing braces --
** reconcile against the original source.
*/
void free_irq(unsigned int irq, void *dev_id)
	struct irqaction *action, **p;

	/* See comments in request_irq() about interrupt context */
	irq = irq_canonicalize(irq);

	if (in_interrupt()) BUG();

	spin_lock(&irq_lock);
	action = &irq_region[IRQ_REGION(irq)]->action[IRQ_OFFSET(irq)];

	if (action->dev_id == dev_id) {
		if (action->next == NULL) {
			action->handler = NULL;

			memcpy(action, action->next, sizeof(*action));

		spin_unlock(&irq_lock);

	action = action->next;
	for (; (action = *p) != NULL; p = &action->next) {
		if (action->dev_id != dev_id)

		/* Found it - now free it */

		spin_unlock(&irq_lock);

	spin_unlock(&irq_lock);
	printk(KERN_ERR "Trying to free free IRQ%d\n",irq);

EXPORT_SYMBOL(free_irq);
/* Wait for any in-flight handlers of 'irqnum' to complete.
** NOTE(review): the function body is missing from this copy -- confirm
** the original implementation before relying on its semantics.
*/
void synchronize_irq(unsigned int irqnum)

EXPORT_SYMBOL(synchronize_irq);
/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that
 * comes in on to an unassigned handler will get stuck
 * with "IRQ_WAITING" cleared (and the interrupt disabled --
 * the rest of this sentence is missing from this copy).
 */

/* Serializes autoprobe sessions (probe_irq_on .. probe_irq_off). */
static DECLARE_MUTEX(probe_sem);

/**
 * probe_irq_on - begin an interrupt autodetect
 *
 * Commence probing for an interrupt. The interrupts are scanned
 * and a mask of potential interrupt lines is returned.
 *
 * NOTE(review): this copy is missing lines (the 'i'/'delay'/'val'
 * declarations, "return 0", "val |= 1 << i" style accumulation, the
 * down(&probe_sem), returns and closing braces) -- reconcile against
 * the original source.
 */
/* TODO: spin_lock_irq(desc->lock -> irq_lock) */
unsigned long probe_irq_on(void)
	struct irq_region *region;

	/* support for irq autoprobing is limited to EISA (irq region 0) */
	region = irq_region[EISA_IRQ_REGION];

	if (!EISA_bus || !region)

	/*
	 * enable any unassigned irqs
	 * (we must startup again here because if a longstanding irq
	 * happened in the previous stage, it may have masked itself)
	 */
	for (i = EISA_MAX_IRQS-1; i > 0; i--) {
		struct irqaction *action;

		spin_lock_irq(&irq_lock);
		action = region->action + i;
		if (!action->handler) {
			region->data.status[i] |= IRQ_AUTODETECT | IRQ_WAITING;
			region->ops.enable_irq(region->data.dev,i);

		spin_unlock_irq(&irq_lock);

	/*
	 * Wait for spurious interrupts to trigger
	 */
	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
		/* about 100ms delay */ barrier();

	/*
	 * Now filter out any obviously spurious interrupts
	 */
	for (i = 0; i < EISA_MAX_IRQS; i++) {

		spin_lock_irq(&irq_lock);
		status = region->data.status[i];

		if (status & IRQ_AUTODETECT) {
			/* It triggered already - consider it spurious. */
			if (!(status & IRQ_WAITING)) {
				region->data.status[i] = status & ~IRQ_AUTODETECT;
				region->ops.disable_irq(region->data.dev,i);

			if (i < BITS_PER_LONG)

		spin_unlock_irq(&irq_lock);

EXPORT_SYMBOL(probe_irq_on);
/*
 * Return the one interrupt that triggered (this can
 * handle any interrupt source).
 */

/**
 * probe_irq_off - end an interrupt autodetect
 * @val: mask of potential interrupts (unused)
 *
 * Scans the unused interrupt lines and returns the line which
 * appears to have triggered the interrupt. If no interrupt was
 * found then zero is returned. If more than one interrupt is
 * found then minus the first candidate is returned to indicate
 * their is doubt.
 *
 * The interrupt probe logic state is returned to its previous
 * value.
 *
 * BUGS: When used in a module (which arguably shouldnt happen)
 * nothing prevents two IRQ probe callers from overlapping. The
 * results of this are non-optimal.
 *
 * NOTE(review): this copy is missing lines (irq_found/nr_irqs
 * initialisation, the "if (!irq_found) irq_found = i; nr_irqs++" style
 * accounting, up(&probe_sem), returns and closing braces) -- reconcile
 * against the original source.
 */
int probe_irq_off(unsigned long val)
	struct irq_region *region;
	int i, irq_found, nr_irqs;

	/* support for irq autoprobing is limited to EISA (irq region 0) */
	region = irq_region[EISA_IRQ_REGION];

	if (!EISA_bus || !region)

	for (i = 0; i < EISA_MAX_IRQS; i++) {

		spin_lock_irq(&irq_lock);
		status = region->data.status[i];

		if (status & IRQ_AUTODETECT) {
			if (!(status & IRQ_WAITING)) {

			region->ops.disable_irq(region->data.dev,i);
			region->data.status[i] = status & ~IRQ_AUTODETECT;

		spin_unlock_irq(&irq_lock);

	/* more than one candidate: return the negated first one. */
	irq_found = -irq_found;

EXPORT_SYMBOL(probe_irq_off);
/* Autoprobe helper stub.
** NOTE(review): the function body is missing from this copy -- confirm
** the original implementation (likely a trivial return) before use.
*/
unsigned int probe_irq_mask(unsigned long irqs)

EXPORT_SYMBOL(probe_irq_mask);
862 void __init init_IRQ(void)
864 local_irq_disable(); /* PARANOID - should already be disabled */
865 mtctl(~0UL, 23); /* EIRR : clear all pending external intr */
868 cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
870 cpu_eiem = EIEM_MASK(TIMER_IRQ);
872 set_eiem(cpu_eiem); /* EIEM : enable all external intr */
876 #ifdef CONFIG_PROC_FS
877 /* called from kernel/sysctl.c:sysctl_init() */
878 void __init init_irq_proc(void)