2 * Code to handle x86 style IRQs plus some generic interrupt stuff.
4 * Copyright (C) 1992 Linus Torvalds
5 * Copyright (C) 1994, 1995, 1996, 1997, 1998 Ralf Baechle
6 * Copyright (C) 1999 SuSE GmbH (Philipp Rumpf, prumpf@tux.org)
7 * Copyright (C) 1999-2000 Grant Grundler
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2, or (at your option)
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 #include <linux/bitops.h>
24 #include <linux/config.h>
25 #include <linux/eisa.h>
26 #include <linux/errno.h>
27 #include <linux/init.h>
28 #include <linux/module.h>
29 #include <linux/signal.h>
30 #include <linux/types.h>
31 #include <linux/ioport.h>
32 #include <linux/timex.h>
33 #include <linux/slab.h>
34 #include <linux/random.h>
35 #include <linux/sched.h>
36 #include <linux/interrupt.h>
37 #include <linux/kernel_stat.h>
38 #include <linux/irq.h>
39 #include <linux/seq_file.h>
40 #include <linux/spinlock.h>
42 #include <asm/cache.h>
#undef PARISC_IRQ_CR16_COUNTS	/* per-IRQ CR16 cycle-count stats in /proc/interrupts; off by default */

/* First-level handlers defined elsewhere (timer tick, SMP IPI). */
extern irqreturn_t timer_interrupt(int, void *, struct pt_regs *);
extern irqreturn_t ipi_interrupt(int, void *, struct pt_regs *);

/* NOTE(review): two DBG_IRQ definitions appear back to back -- the
** "#ifdef DEBUG_IRQ" / "#else" lines that originally selected between
** them seem to have been lost; restore the conditional before building.
*/
#define DBG_IRQ(irq, x) if ((irq) != TIMER_IRQ) printk x
#define DBG_IRQ(irq, x) do { } while (0)
#endif /* DEBUG_IRQ */

/* Map a virtual IRQ to its EIEM/EIRR bit (note: bit 0 is the MSB). */
#define EIEM_MASK(irq) (1UL<<(MAX_CPU_IRQ-IRQ_OFFSET(irq)))

/* Bits in EIEM correlate with cpu_irq_action[].
** Numbered *Big Endian*! (ie bit 0 is MSB)
*/
static volatile unsigned long cpu_eiem = 0;	/* SW copy of the enabled external-interrupt mask */

static spinlock_t irq_lock = SPIN_LOCK_UNLOCKED;  /* protect IRQ regions */
/* on_each_cpu() callback: load the EIEM value (passed cast through the
** void* argument) into this CPU's External Interrupt Enable Mask.
*/
static void cpu_set_eiem(void *info)
	set_eiem((unsigned long) info);
/* Disable one CPU IRQ: clear its bit in the software EIEM copy, then
** broadcast the new mask to every CPU.  Uses on_each_cpu(), so it must
** not be called from a context where IPIs cannot be serviced.
*/
static inline void disable_cpu_irq(void *unused, int irq)
	unsigned long eirr_bit = EIEM_MASK(irq);

	cpu_eiem &= ~eirr_bit;
	on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
/* Enable one CPU IRQ: first clear any pending EIRR bit so a stale event
** is not delivered, then push cpu_eiem out to all CPUs.
** NOTE(review): unlike disable_cpu_irq(), no "cpu_eiem |= eirr_bit" is
** visible here -- that line may have been lost; confirm against the
** original source before relying on this fragment.
*/
static void enable_cpu_irq(void *unused, int irq)
	unsigned long eirr_bit = EIEM_MASK(irq);

	mtctl(eirr_bit, 23); /* clear EIRR bit before unmasking */
	on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
/* mask and disable are the same at the CPU level
** Difference is enable clears pending interrupts
*/
#define mask_cpu_irq disable_cpu_irq

/* Re-enable delivery of an IRQ without clearing pending state.
** NOTE(review): eirr_bit is computed but no "cpu_eiem |= eirr_bit" is
** visible before the broadcast -- a line appears to be missing here.
*/
static inline void unmask_cpu_irq(void *unused, int irq)
	unsigned long eirr_bit = EIEM_MASK(irq);

	/* NOTE: sending an IPI will cause do_cpu_irq_mask() to
	** handle *any* unmasked pending interrupts.
	** ie We don't need to check for pending interrupts here.
	*/
	on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
/*
 * XXX cpu_irq_actions[] will become 2 dimensional for per CPU EIR support.
 * correspond changes needed in:
 *   processor_probe()  initialize additional action arrays
 *   request_irq()      handle CPU IRQ region specially
 *   do_cpu_irq_mask()  index into the matching irq_action array.
 */
/* Statically-allocated action table for the CPU region; only the timer
** slot (and, on SMP, the IPI slot) are pre-wired with handlers.
*/
struct irqaction cpu_irq_actions[IRQ_PER_REGION] = {
	[IRQ_OFFSET(TIMER_IRQ)] = {
		.handler = timer_interrupt,
	[IRQ_OFFSET(IPI_IRQ)] = {
		.handler = ipi_interrupt,

/* NOTE(review): .mask_irq is wired to unmask_cpu_irq instead of the
** mask_cpu_irq #define above (same in cpu0_irq_region below).  Looks
** suspicious given mask_cpu_irq exists and is otherwise unused --
** confirm intent against the original source.
*/
struct irq_region_ops cpu_irq_ops = {
	.disable_irq = disable_cpu_irq,
	.enable_irq = enable_cpu_irq,
	.mask_irq = unmask_cpu_irq,
	.unmask_irq = unmask_cpu_irq

/* Region descriptor for the boot CPU's EIR.  irqbase maps region-local
** offsets to global virtual IRQ numbers.
*/
struct irq_region cpu0_irq_region = {
	.disable_irq = disable_cpu_irq,
	.enable_irq = enable_cpu_irq,
	.mask_irq = unmask_cpu_irq,
	.unmask_irq = unmask_cpu_irq

	.name = "PARISC-CPU",
	.irqbase = IRQ_FROM_REGION(CPU_IRQ_REGION),

	.action = cpu_irq_actions,

/* Global region table, indexed by IRQ_REGION(irq). */
struct irq_region *irq_region[NR_IRQ_REGS] = {
	[ 0 ]              = NULL, /* reserved for EISA, else causes data page fault (aka code 15) */
	[ CPU_IRQ_REGION ] = &cpu0_irq_region,
/*
** Generic interfaces that device drivers can use:
** mask_irq()	block IRQ
** unmask_irq()	re-enable IRQ and trigger if IRQ is pending
** disable_irq()	block IRQ
** enable_irq()	clear pending and re-enable IRQ
*/

/* Block delivery of an IRQ by delegating to its region's mask op, if the
** region provides one.
*/
void mask_irq(int irq)
	struct irq_region *region;

	DBG_IRQ(irq, ("mask_irq(%d) %d+%d eiem 0x%lx\n", irq,
				IRQ_REGION(irq), IRQ_OFFSET(irq), cpu_eiem));
	irq = irq_canonicalize(irq);	/* fold legacy aliases to the canonical number */
	region = irq_region[IRQ_REGION(irq)];
	if (region->ops.mask_irq)
		region->ops.mask_irq(region->data.dev, IRQ_OFFSET(irq));
/* Re-enable delivery of an IRQ (and trigger it if pending) via the
** region's unmask op, if the region provides one.
*/
void unmask_irq(int irq)
	struct irq_region *region;

	DBG_IRQ(irq, ("unmask_irq(%d) %d+%d eiem 0x%lx\n", irq,
				IRQ_REGION(irq), IRQ_OFFSET(irq), cpu_eiem));
	irq = irq_canonicalize(irq);
	region = irq_region[IRQ_REGION(irq)];
	if (region->ops.unmask_irq)
		region->ops.unmask_irq(region->data.dev, IRQ_OFFSET(irq));
/* Driver-visible disable: block the IRQ through its region's disable op.
** Exported for modules.
*/
void disable_irq(int irq)
	struct irq_region *region;

	DBG_IRQ(irq, ("disable_irq(%d) %d+%d eiem 0x%lx\n", irq,
				IRQ_REGION(irq), IRQ_OFFSET(irq), cpu_eiem));
	irq = irq_canonicalize(irq);
	region = irq_region[IRQ_REGION(irq)];
	if (region->ops.disable_irq)
		region->ops.disable_irq(region->data.dev, IRQ_OFFSET(irq));

EXPORT_SYMBOL(disable_irq);
/* Driver-visible enable: clear pending state and re-enable the IRQ through
** its region's enable op.  Exported for modules.
*/
void enable_irq(int irq)
	struct irq_region *region;

	DBG_IRQ(irq, ("enable_irq(%d) %d+%d eiem 0x%lx\n", irq,
				IRQ_REGION(irq), IRQ_OFFSET(irq), cpu_eiem));
	irq = irq_canonicalize(irq);
	region = irq_region[IRQ_REGION(irq)];

	if (region->ops.enable_irq)
		region->ops.enable_irq(region->data.dev, IRQ_OFFSET(irq));

EXPORT_SYMBOL(enable_irq);
/* /proc/interrupts seq_file show routine: one region per *v, one output
** line per wired IRQ, with per-CPU dispatch counts and (when
** PARISC_IRQ_CR16_COUNTS is on) CR16 cycle statistics.
** NOTE(review): several lines (loop braces, the initialisation of 'j',
** early-continue paths) are not visible in this fragment; the comments
** below describe only what is shown.
*/
int show_interrupts(struct seq_file *p, void *v)
#ifdef CONFIG_PROC_FS
	unsigned int regnr = *(loff_t *) v, i = 0;

	/* Header row: one column per possible CPU. */
	for (i = 0; i < NR_CPUS; i++)
		seq_printf(p, " CPU%02d ", i);

#ifdef PARISC_IRQ_CR16_COUNTS
	seq_printf(p, "[min/avg/max] (CPU cycle counts)");

	/* We don't need *irqsave lock variants since this is
	** only allowed to change while in the base context.
	*/
	spin_lock(&irq_lock);
	if (regnr < NR_IRQ_REGS) {
	    struct irq_region *region = irq_region[regnr];

	    if (!region || !region->action)
	    for (i = 0; i <= MAX_CPU_IRQ; i++) {
		/* NOTE(review): '®ion' is mis-encoded '&region' -- repair encoding. */
		struct irqaction *action = ®ion->action[i];
		unsigned int irq_no = IRQ_FROM_REGION(regnr) + i;

		if (!action->handler)
		seq_printf(p, "%3d: ", irq_no);
		/* Per-CPU dispatch counts for this IRQ. */
		for (; j < NR_CPUS; j++)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[irq_no]);

		seq_printf(p, " %14s",
			    region->data.name ? region->data.name : "N/A");
#ifndef PARISC_IRQ_CR16_COUNTS
		seq_printf(p, " %s", action->name);
		while ((action = action->next))
			seq_printf(p, ", %s", action->name);
		/* CR16 stats: min/avg/max cycle counts over the history buffer. */
		for ( ;action; action = action->next) {
			unsigned int k, avg, min, max;

			min = max = action->cr16_hist[0];
			for (avg = k = 0; k < PARISC_CR16_HIST_SIZE; k++) {
				int hist = action->cr16_hist[k];
				if (hist > max) max = hist;
				if (hist < min) min = hist;
			seq_printf(p, " %s[%d/%d/%d]", action->name,
	spin_unlock(&irq_lock);
#endif /* CONFIG_PROC_FS */
/*
** The following form a "set": Virtual IRQ, Transaction Address, Trans Data.
** Respectively, these map to IRQ region+EIRR, Processor HPA, EIRR bit.
**
** To use txn_XXX() interfaces, get a Virtual IRQ first.
** Then use that to get the Transaction address and data.
*/

/* NOTE(review): the txn_alloc_irq() signature/opening lines are not
** visible in this fragment; the body below scans the CPU region for the
** first free slot and returns its global virtual IRQ number.
*/
	/* never return irq 0 cause that's the interval timer */
	for (irq = 1; irq <= MAX_CPU_IRQ; irq++) {
		if (cpu_irq_actions[irq].handler == NULL) {
			return (IRQ_FROM_REGION(CPU_IRQ_REGION) + irq);

	/* unlikely, but be prepared */
/* Claim a specific virtual IRQ if its action slot is still unused.
** NOTE(review): the return-type line and success/failure return paths are
** not visible in this fragment.
*/
txn_claim_irq(int irq)
	if (irq_region[IRQ_REGION(irq)]->action[IRQ_OFFSET(irq)].handler ==NULL)

	/* unlikely, but be prepared */
/* Pick a transaction address (processor HPA) for a virtual IRQ, rotating
** across CPUs that have a valid txn_addr and falling back to CPU 0 (the
** monarch) when the scan runs off the end.
** NOTE(review): the return-type line is not visible; the static next_cpu
** keeps state across calls and is not protected against concurrent use.
*/
txn_alloc_addr(int virt_irq)
	static int next_cpu = -1;

	next_cpu++; /* assign to "next" CPU we want this bugger on */

	/* skip CPUs without a valid transaction address */
	while ((next_cpu < NR_CPUS) && !cpu_data[next_cpu].txn_addr)

	if (next_cpu >= NR_CPUS)
		next_cpu = 0;	/* nothing else, assign monarch */

	return cpu_data[next_cpu].txn_addr;
/*
** The alloc process needs to accept a parameter to accommodate limitations
** of the HW/SW which use these bits:
** Legacy PA I/O (GSC/NIO):  5 bits (architected EIM register)
** V-class (EPIC):           6 bits
** N/L-class/A500:           8 bits (iosapic)
** PCI 2.2 MSI:             16 bits (I think)
** Existing PCI devices:    32-bits (all Symbios SCSI/ATM/HyperFabric)
**
** On the service provider side:
** o PA 1.1 (and PA2.0 narrow mode)  5-bits (width of EIR register)
** o PA 2.0 wide mode                6-bits (per processor)
** o IA64                            8-bits (0-256 total)
**
** So a Legacy PA I/O device on a PA 2.0 box can't use all
** the bits supported by the processor...and the N/L-class
** I/O subsystem supports more bits than PA2.0 has. The first
** case is the problem.
*/
/* Return the transaction data (EIRR bit number) for virt_irq, panicking
** if it cannot be expressed in the requestor's bits_wide data field.
** NOTE(review): the return-type line is not visible in this fragment.
*/
txn_alloc_data(int virt_irq, unsigned int bits_wide)
	/* XXX FIXME : bits_wide indicates how wide the transaction
	** data is allowed to be...we may need a different virt_irq
	** if this one won't work. Another reason to index virtual
	** irq's into a table which can manage CPU/IRQ bit separately.
	*/
	if (IRQ_OFFSET(virt_irq) > (1 << (bits_wide -1)))
		panic("Sorry -- didn't allocate valid IRQ for this device\n");

	return (IRQ_OFFSET(virt_irq));
/* Dispatch one IRQ: bump the per-CPU statistic, then walk the irqaction
** chain invoking every registered handler.  A handler-less slot either
** services the EISA autodetect protocol (clears IRQ_WAITING) or logs an
** error.  With PARISC_IRQ_CR16_COUNTS, each handler's CR16 interval-timer
** delta is recorded in its history ring.
** NOTE(review): branch-closing braces and some statements are missing
** from this fragment.
*/
void do_irq(struct irqaction *action, int irq, struct pt_regs * regs)
	int cpu = smp_processor_id();

	++kstat_cpu(cpu).irqs[irq];

	DBG_IRQ(irq, ("do_irq(%d) %d+%d\n", irq, IRQ_REGION(irq), IRQ_OFFSET(irq)));

	for (; action; action = action->next) {
#ifdef PARISC_IRQ_CR16_COUNTS
		unsigned long cr_start = mfctl(16);	/* sample interval timer before the handler */

		if (action->handler == NULL) {
			if (IRQ_REGION(irq) == EISA_IRQ_REGION && irq_region[EISA_IRQ_REGION]) {
				/* were we called due to autodetecting (E)ISA irqs ? */
				unsigned int *status;
				status = &irq_region[EISA_IRQ_REGION]->data.status[IRQ_OFFSET(irq)];
				if (*status & IRQ_AUTODETECT) {
					*status &= ~IRQ_WAITING;	/* mark this line as having triggered */
			printk(KERN_ERR "IRQ: CPU:%d No handler for IRQ %d !\n", cpu, irq);

		action->handler(irq, action->dev_id, regs);
#ifdef PARISC_IRQ_CR16_COUNTS
			unsigned long cr_end = mfctl(16);
			unsigned long tmp = cr_end - cr_start;
			/* check for roll over */
			cr_start = (cr_end < cr_start) ? -(tmp) : (tmp);
		action->cr16_hist[action->cr16_idx++] = (int) cr_start;
		/* history ring size is a power of two; wrap the index cheaply */
		action->cr16_idx &= PARISC_CR16_HIST_SIZE - 1;
432 /* ONLY called from entry.S:intr_extint() */
433 void do_cpu_irq_mask(struct pt_regs *regs)
435 unsigned long eirr_val;
436 unsigned int i=3; /* limit time in interrupt context */
439 * PSW_I or EIEM bits cannot be enabled until after the
440 * interrupts are processed.
441 * timer_interrupt() assumes it won't get interrupted when it
442 * holds the xtime_lock...an unmasked interrupt source could
443 * interrupt and deadlock by trying to grab xtime_lock too.
444 * Keeping PSW_I and EIEM disabled avoids this.
446 set_eiem(0UL); /* disable all extr interrupt for now */
448 /* 1) only process IRQs that are enabled/unmasked (cpu_eiem)
449 * 2) We loop here on EIRR contents in order to avoid
450 * nested interrupts or having to take another interrupt
451 * when we could have just handled it right away.
452 * 3) Limit the number of times we loop to make sure other
453 * processing can occur.
455 while ((eirr_val = (mfctl(23) & cpu_eiem)) && --i) {
456 unsigned long bit = (1UL<<MAX_CPU_IRQ);
459 mtctl(eirr_val, 23); /* reset bits we are going to process */
462 if (eirr_val != (1UL << MAX_CPU_IRQ))
463 printk(KERN_DEBUG "do_cpu_irq_mask %x\n", eirr_val);
466 /* Work our way from MSb to LSb...same order we alloc EIRs */
467 for (irq = 0; eirr_val && bit; bit>>=1, irq++)
469 if (!(bit & eirr_val & cpu_eiem))
472 /* clear bit in mask - can exit loop sooner */
475 do_irq(&cpu_irq_actions[irq], TIMER_IRQ+irq, regs);
482 /* Called from second level IRQ regions: eg dino or iosapic. */
483 void do_irq_mask(unsigned long mask, struct irq_region *region, struct pt_regs *regs)
489 if (mask != (1L<<MAX_CPU_IRQ))
490 printk(KERN_DEBUG "do_irq_mask %08lx %p %p\n", mask, region, regs);
493 for (bit = (1L<<MAX_CPU_IRQ), irq = 0; mask && bit; bit>>=1, irq++) {
494 unsigned int irq_num;
498 mask &= ~bit; /* clear bit in mask - can exit loop sooner */
499 irq_num = region->data.irqbase + irq;
502 do_irq(®ion->action[irq], irq_num, regs);
508 static inline int find_free_region(void)
512 for (irqreg=1; irqreg <= (NR_IRQ_REGS); irqreg++) {
513 if (irq_region[irqreg] == NULL)
/**
 * alloc_irq_region - allocate/init a new IRQ region
 * @count: number of IRQs in this region.
 * @ops: function table with request/release/mask/unmask/etc.. entries.
 * @name: name of region owner for /proc/interrupts output.
 * @dev: private data to associate with the new IRQ region.
 *
 * Every IRQ must become a MMIO write to the CPU's EIRR in
 * order to get CPU service. The IRQ region represents the
 * number of unique events the region handler can (or must)
 * identify. For PARISC CPU, that's the width of the EIR Register.
 * IRQ regions virtualize IRQs (eg EISA or PCI host bus controllers)
 * for line based devices.
 *
 * NOTE(review): validation-failure and allocation-failure return paths
 * are not visible in this fragment.
 */
struct irq_region *alloc_irq_region( int count, struct irq_region_ops *ops,
				    const char *name, void *dev)
	struct irq_region *region;

	index = find_free_region();
	/* index == 0 means the table is full (slot 0 is reserved for EISA). */
		printk(KERN_ERR "Maximum number of irq regions exceeded. Increase NR_IRQ_REGS!\n");

	/* Reject a count that spills into the next region's number space. */
	if ((IRQ_REGION(count-1)))

	if (count < IRQ_PER_REGION) {
	    DBG_IRQ(0, ("alloc_irq_region() using minimum of %d irq lines for %s (%d)\n",
			IRQ_PER_REGION, name, count));
	    count = IRQ_PER_REGION;

	/* if either mask *or* unmask is set, both have to be set. */
	if((ops->mask_irq || ops->unmask_irq) &&
		!(ops->mask_irq && ops->unmask_irq))

	/* ditto for enable/disable */
	if( (ops->disable_irq || ops->enable_irq) &&
		!(ops->disable_irq && ops->enable_irq) )

	/* GFP_ATOMIC: callers may be in non-sleeping context. */
	region = kmalloc(sizeof(*region), GFP_ATOMIC);
	memset(region, 0, sizeof(*region));

	region->action = kmalloc(count * sizeof(*region->action), GFP_ATOMIC);
	if (!region->action) {
	memset(region->action, 0, count * sizeof(*region->action));

	region->data.irqbase = IRQ_FROM_REGION(index);
	region->data.name = name;
	region->data.dev = dev;

	irq_region[index] = region;

	return irq_region[index];
/* FIXME: SMP, flags, bottom halves, rest */

/* Register @handler on a (virtual) IRQ line.  The first irqaction of a
** line is preallocated inside the region; additional sharers get
** kmalloc'd entries chained off ->next.
** NOTE(review): action->next = kmalloc(...) is memset (dereferenced)
** before any visible NULL check -- the allocation-failure path needs
** verifying; the "No action!" branch below appears to run too late.
** NOTE(review): the dev_id parameter line and several braces are not
** visible in this fragment.
*/
int request_irq(unsigned int irq,
		irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags,
		const char * devname,
	struct irqaction * action;

	printk(KERN_INFO "request_irq(%d, %p, 0x%lx, %s, %p)\n",irq, handler, irqflags, devname, dev_id);

	irq = irq_canonicalize(irq);
	/* request_irq()/free_irq() may not be called from interrupt context. */

		printk(KERN_ERR "request_irq(%d,...): Augh! No handler for irq!\n",

	if (irq_region[IRQ_REGION(irq)] == NULL) {
		/*
		** Bug catcher for drivers which use "char" or u8 for
		** the IRQ number. They lose the region number which
		** is in pcidev->irq (an int).
		*/
		printk(KERN_ERR "%p (%s?) called request_irq with an invalid irq %d\n",
			__builtin_return_address(0), devname, irq);

	spin_lock(&irq_lock);
	action = &(irq_region[IRQ_REGION(irq)]->action[IRQ_OFFSET(irq)]);

	/* First one is preallocated. */
	if (action->handler) {
		/* But it's in use...find the tail and allocate a new one */
			action = action->next;

		action->next = kmalloc(sizeof(*action), GFP_ATOMIC);
		memset(action->next, 0, sizeof(*action));

		action = action->next;

		spin_unlock(&irq_lock);
		printk(KERN_ERR "request_irq(): Augh! No action!\n") ;

	/* Fill in the (new or preallocated) action under the lock. */
	action->handler = handler;
	action->flags = irqflags;
	cpus_clear(action->mask);
	action->name = devname;
	action->dev_id = dev_id;
	spin_unlock(&irq_lock);

EXPORT_SYMBOL(request_irq);
/* Unregister the irqaction matching @dev_id from an IRQ's handler chain.
** The head action is preallocated region storage, so it is cleared (or
** overwritten by its successor) rather than kfree'd; chained entries are
** freed.  Logs an error if no action matches.
** NOTE(review): the initialisation of 'p' and the kfree/brace lines are
** not visible in this fragment.
*/
void free_irq(unsigned int irq, void *dev_id)
	struct irqaction *action, **p;

	/* See comments in request_irq() about interrupt context */
	irq = irq_canonicalize(irq);

	if (in_interrupt()) BUG();

	spin_lock(&irq_lock);
	action = &irq_region[IRQ_REGION(irq)]->action[IRQ_OFFSET(irq)];

	if (action->dev_id == dev_id) {
		if (action->next == NULL) {
			action->handler = NULL;	/* sole handler: just deactivate the slot */
			/* head matches but is shared: promote the next entry into the head */
			memcpy(action, action->next, sizeof(*action));

		spin_unlock(&irq_lock);

	/* Head didn't match: walk the chain looking for dev_id. */
	action = action->next;
	for (; (action = *p) != NULL; p = &action->next) {
		if (action->dev_id != dev_id)

		/* Found it - now free it */

		spin_unlock(&irq_lock);

	spin_unlock(&irq_lock);
	printk(KERN_ERR "Trying to free free IRQ%d\n",irq);

EXPORT_SYMBOL(free_irq);
/* Wait until in-flight handlers for @irqnum have completed.
** NOTE(review): the body is not visible in this fragment -- confirm
** whether it is a no-op/barrier on this architecture.
*/
void synchronize_irq(unsigned int irqnum)

EXPORT_SYMBOL(synchronize_irq);
/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that
 * comes in on to an unassigned handler will get stuck
 * with "IRQ_WAITING" cleared and the interrupt
 * disabled.
 */

/* Serializes probe_irq_on()/probe_irq_off() pairs.
** NOTE(review): no visible code acquires this semaphore -- see the BUGS
** paragraph in the probe_irq_off() kernel-doc below.
*/
static DECLARE_MUTEX(probe_sem);

/**
 *	probe_irq_on	- begin an interrupt autodetect
 *
 *	Commence probing for an interrupt. The interrupts are scanned
 *	and a mask of potential interrupt lines is returned.
 */

/* TODO: spin_lock_irq(desc->lock -> irq_lock) */

unsigned long probe_irq_on(void)
	struct irq_region *region;

	/* support for irq autoprobing is limited to EISA (irq region 0) */
	region = irq_region[EISA_IRQ_REGION];
	if (!EISA_bus || !region)

	/*
	 * enable any unassigned irqs
	 * (we must startup again here because if a longstanding irq
	 * happened in the previous stage, it may have masked itself)
	 */
	for (i = EISA_MAX_IRQS-1; i > 0; i--) {
		struct irqaction *action;

		spin_lock_irq(&irq_lock);
		action = region->action + i;
		if (!action->handler) {
			/* tag for autodetect; IRQ_WAITING is cleared when the line fires */
			region->data.status[i] |= IRQ_AUTODETECT | IRQ_WAITING;
			region->ops.enable_irq(region->data.dev,i);
		spin_unlock_irq(&irq_lock);

	/*
	 * Wait for spurious interrupts to trigger
	 */
	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
		/* about 100ms delay */ barrier();

	/*
	 * Now filter out any obviously spurious interrupts
	 */
	for (i = 0; i < EISA_MAX_IRQS; i++) {

		spin_lock_irq(&irq_lock);
		status = region->data.status[i];

		if (status & IRQ_AUTODETECT) {
			/* It triggered already - consider it spurious. */
			if (!(status & IRQ_WAITING)) {
				region->data.status[i] = status & ~IRQ_AUTODETECT;
				region->ops.disable_irq(region->data.dev,i);
			/* still waiting: record the line as a candidate */
			if (i < BITS_PER_LONG)
		spin_unlock_irq(&irq_lock);

EXPORT_SYMBOL(probe_irq_on);
/*
 * Return the one interrupt that triggered (this can
 * handle any interrupt source).
 */

/**
 *	probe_irq_off	- end an interrupt autodetect
 *	@val: mask of potential interrupts (unused)
 *
 *	Scans the unused interrupt lines and returns the line which
 *	appears to have triggered the interrupt. If no interrupt was
 *	found then zero is returned. If more than one interrupt is
 *	found then minus the first candidate is returned to indicate
 *	doubt.
 *
 *	The interrupt probe logic state is returned to its previous
 *	value.
 *
 *	BUGS: When used in a module (which arguably shouldnt happen)
 *	nothing prevents two IRQ probe callers from overlapping. The
 *	results of this are non-optimal.
 */
int probe_irq_off(unsigned long val)
	struct irq_region *region;
	int i, irq_found, nr_irqs;

	/* support for irq autoprobing is limited to EISA (irq region 0) */
	region = irq_region[EISA_IRQ_REGION];
	if (!EISA_bus || !region)

	for (i = 0; i < EISA_MAX_IRQS; i++) {

		spin_lock_irq(&irq_lock);
		status = region->data.status[i];

		if (status & IRQ_AUTODETECT) {
			if (!(status & IRQ_WAITING)) {
				/* line fired during the probe window: count it as a candidate */
			/* restore pre-probe state for this line */
			region->ops.disable_irq(region->data.dev,i);
			region->data.status[i] = status & ~IRQ_AUTODETECT;
		spin_unlock_irq(&irq_lock);

	/* more than one candidate fired: signal doubt with a negative value */
	irq_found = -irq_found;

EXPORT_SYMBOL(probe_irq_off);
/* Mask-based autoprobe query.
** NOTE(review): the body is not visible in this fragment -- confirm
** whether it is simply unimplemented (returns 0) on this architecture.
*/
unsigned int probe_irq_mask(unsigned long irqs)

EXPORT_SYMBOL(probe_irq_mask);
/* Boot-time IRQ setup: clear every pending external interrupt in EIRR,
** then enable the timer (and, on SMP, the IPI) bits in EIEM.
** NOTE(review): the two cpu_eiem assignments below were originally the
** SMP and UP arms of an "#ifdef CONFIG_SMP / #else" that is not visible
** in this fragment; restore the conditional before building.
*/
void __init init_IRQ(void)
	local_irq_disable();	/* PARANOID - should already be disabled */
	mtctl(-1L, 23);		/* EIRR : clear all pending external intr */

	cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
	cpu_eiem = EIEM_MASK(TIMER_IRQ);

	set_eiem(cpu_eiem);	/* EIEM : enable all external intr */
879 #ifdef CONFIG_PROC_FS
880 /* called from kernel/sysctl.c:sysctl_init() */
881 void __init init_irq_proc(void)