X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Farm%2Fkernel%2Fentry-armv.S;h=e14278d59882367dc579f1dabeb5f2d191772ee2;hb=f7f1b0f1e2fbadeab12d24236000e778aa9b1ead;hp=5639f1b618f9a80d7a8a9dccaff6c8ea2e1068fb;hpb=c7b5ebbddf7bcd3651947760f423e3783bbe6573;p=linux-2.6.git diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 5639f1b61..e14278d59 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -14,1282 +14,383 @@ * it to save wrong values... Be aware! */ #include -#include -#include #include -#include #include +#include /* should be moved into entry-macro.S */ +#include /* should be moved into entry-macro.S */ +#include #include "entry-header.S" -#ifdef IOC_BASE -/* IOC / IOMD based hardware */ -#include - - .equ ioc_base_high, IOC_BASE & 0xff000000 - .equ ioc_base_low, IOC_BASE & 0x00ff0000 - .macro disable_fiq - mov r12, #ioc_base_high - .if ioc_base_low - orr r12, r12, #ioc_base_low - .endif - strb r12, [r12, #0x38] @ Disable FIQ register - .endm - - .macro get_irqnr_and_base, irqnr, irqstat, base, tmp - mov r4, #ioc_base_high @ point at IOC - .if ioc_base_low - orr r4, r4, #ioc_base_low - .endif - ldrb \irqstat, [r4, #IOMD_IRQREQB] @ get high priority first - ldr \base, =irq_prio_h - teq \irqstat, #0 -#ifdef IOMD_BASE - ldreqb \irqstat, [r4, #IOMD_DMAREQ] @ get dma - addeq \base, \base, #256 @ irq_prio_h table size - teqeq \irqstat, #0 - bne 2406f -#endif - ldreqb \irqstat, [r4, #IOMD_IRQREQA] @ get low priority - addeq \base, \base, #256 @ irq_prio_d table size - teqeq \irqstat, #0 -#ifdef IOMD_IRQREQC - ldreqb \irqstat, [r4, #IOMD_IRQREQC] - addeq \base, \base, #256 @ irq_prio_l table size - teqeq \irqstat, #0 -#endif -#ifdef IOMD_IRQREQD - ldreqb \irqstat, [r4, #IOMD_IRQREQD] - addeq \base, \base, #256 @ irq_prio_lc table size - teqeq \irqstat, #0 -#endif -2406: ldrneb \irqnr, [\base, \irqstat] @ get IRQ number - .endm - -/* - * Interrupt table (incorporates priority). Please note that we - * rely on the order of these tables (see above code). 
- */ - .macro irq_prio_table -irq_prio_h: .byte 0, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10 - .byte 12, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10 - .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10 - .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10 - .byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10 - .byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10 - .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10 - .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10 - .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10 - .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10 - .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10 - .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10 - .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10 - .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10 - .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10 - .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10 -#ifdef IOMD_BASE -irq_prio_d: .byte 0,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16 - .byte 20,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16 - .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16 - .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16 - .byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16 - .byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16 - .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16 - .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16 - .byte 23,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16 - .byte 23,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16 - .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16 - .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16 - .byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16 - .byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16 - .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16 - .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16 -#endif -irq_prio_l: .byte 0, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3 - .byte 4, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3 - .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 - .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 - .byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3 - .byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3 - .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 - .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 - .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 - .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 - .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 - .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 - .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 - .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 - .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 - .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 -#ifdef IOMD_IRQREQC -irq_prio_lc: .byte 24,24,25,24,26,26,26,26,27,27,27,27,27,27,27,27 - .byte 28,24,25,24,26,26,26,26,27,27,27,27,27,27,27,27 - .byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29 - .byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29 - .byte 30,30,30,30,30,30,30,30,27,27,27,27,27,27,27,27 - .byte 30,30,30,30,30,30,30,30,27,27,27,27,27,27,27,27 - .byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29 - .byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29 - .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31 - .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31 - .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31 - .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31 - .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31 - .byte 
31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31 - .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31 - .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31 -#endif -#ifdef IOMD_IRQREQD -irq_prio_ld: .byte 40,40,41,40,42,42,42,42,43,43,43,43,43,43,43,43 - .byte 44,40,41,40,42,42,42,42,43,43,43,43,43,43,43,43 - .byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45 - .byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45 - .byte 46,46,46,46,46,46,46,46,43,43,43,43,43,43,43,43 - .byte 46,46,46,46,46,46,46,46,43,43,43,43,43,43,43,43 - .byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45 - .byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45 - .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47 - .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47 - .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47 - .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47 - .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47 - .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47 - .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47 - .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47 -#endif - .endm - -#elif defined(CONFIG_ARCH_EBSA110) - -#define IRQ_STAT 0xff000000 /* read */ - - .macro disable_fiq - .endm - - .macro get_irqnr_and_base, irqnr, stat, base, tmp - mov \base, #IRQ_STAT - ldrb \stat, [\base] @ get interrupts - mov \irqnr, #0 - tst \stat, #15 - addeq \irqnr, \irqnr, #4 - moveq \stat, \stat, lsr #4 - tst \stat, #3 - addeq \irqnr, \irqnr, #2 - moveq \stat, \stat, lsr #2 - tst \stat, #1 - addeq \irqnr, \irqnr, #1 - moveq \stat, \stat, lsr #1 - tst \stat, #1 @ bit 0 should be set - .endm - - .macro irq_prio_table - .endm - -#elif defined(CONFIG_ARCH_SHARK) - - .macro disable_fiq - .endm - - .macro get_irqnr_and_base, irqnr, irqstat, base, tmp - mov r4, #0xe0000000 - - mov \irqstat, #0x0C - strb \irqstat, [r4, #0x20] @outb(0x0C, 0x20) /* Poll command */ - ldrb \irqnr, [r4, #0x20] @irq = inb(0x20) & 7 - and \irqstat, \irqnr, #0x80 - teq \irqstat, #0 - beq 43f - and \irqnr, \irqnr, #7 - teq \irqnr, #2 - bne 44f -43: mov \irqstat, #0x0C - strb \irqstat, [r4, #0xa0] @outb(0x0C, 0xA0) /* Poll command */ - ldrb \irqnr, [r4, #0xa0] @irq = (inb(0xA0) & 7) + 8 - and \irqstat, \irqnr, #0x80 - teq \irqstat, #0 - beq 44f - and \irqnr, \irqnr, #7 - add \irqnr, \irqnr, #8 -44: teq \irqstat, #0 - .endm - - .macro irq_prio_table - .endm - -#elif defined(CONFIG_FOOTBRIDGE) -#include - - .macro disable_fiq - .endm - - .equ dc21285_high, ARMCSR_BASE & 0xff000000 - .equ dc21285_low, ARMCSR_BASE & 0x00ffffff - - .macro get_irqnr_and_base, irqnr, irqstat, base, tmp - mov r4, #dc21285_high - .if dc21285_low - orr r4, r4, #dc21285_low - .endif - ldr \irqstat, [r4, #0x180] @ get interrupts - - mov \irqnr, #IRQ_SDRAMPARITY - tst \irqstat, #IRQ_MASK_SDRAMPARITY - bne 1001f - - tst \irqstat, #IRQ_MASK_UART_RX - movne \irqnr, #IRQ_CONRX - bne 1001f - - tst \irqstat, #IRQ_MASK_DMA1 - movne \irqnr, #IRQ_DMA1 - bne 1001f - - tst \irqstat, #IRQ_MASK_DMA2 - movne \irqnr, #IRQ_DMA2 - bne 1001f - - tst \irqstat, #IRQ_MASK_IN0 - movne \irqnr, #IRQ_IN0 - bne 1001f - - tst \irqstat, #IRQ_MASK_IN1 - movne \irqnr, #IRQ_IN1 - bne 1001f - - tst \irqstat, #IRQ_MASK_IN2 - movne \irqnr, #IRQ_IN2 - bne 1001f - - tst \irqstat, #IRQ_MASK_IN3 - movne \irqnr, #IRQ_IN3 - bne 1001f - - tst \irqstat, #IRQ_MASK_PCI - movne \irqnr, #IRQ_PCI - bne 1001f - - tst \irqstat, #IRQ_MASK_DOORBELLHOST - movne \irqnr, #IRQ_DOORBELLHOST - bne 1001f - - tst \irqstat, #IRQ_MASK_I2OINPOST - movne \irqnr, #IRQ_I2OINPOST - bne 1001f - - tst \irqstat, 
#IRQ_MASK_TIMER1 - movne \irqnr, #IRQ_TIMER1 - bne 1001f - - tst \irqstat, #IRQ_MASK_TIMER2 - movne \irqnr, #IRQ_TIMER2 - bne 1001f - - tst \irqstat, #IRQ_MASK_TIMER3 - movne \irqnr, #IRQ_TIMER3 - bne 1001f - - tst \irqstat, #IRQ_MASK_UART_TX - movne \irqnr, #IRQ_CONTX - bne 1001f - - tst \irqstat, #IRQ_MASK_PCI_ABORT - movne \irqnr, #IRQ_PCI_ABORT - bne 1001f - - tst \irqstat, #IRQ_MASK_PCI_SERR - movne \irqnr, #IRQ_PCI_SERR - bne 1001f - - tst \irqstat, #IRQ_MASK_DISCARD_TIMER - movne \irqnr, #IRQ_DISCARD_TIMER - bne 1001f - - tst \irqstat, #IRQ_MASK_PCI_DPERR - movne \irqnr, #IRQ_PCI_DPERR - bne 1001f - - tst \irqstat, #IRQ_MASK_PCI_PERR - movne \irqnr, #IRQ_PCI_PERR -1001: - .endm - - .macro irq_prio_table - .endm - -#elif defined(CONFIG_ARCH_NEXUSPCI) - - .macro disable_fiq - .endm - - .macro get_irqnr_and_base, irqnr, irqstat, base, tmp - ldr \irqstat, =INTCONT_BASE - ldr \base, =soft_irq_mask - ldr \irqstat, [\irqstat] @ get interrupts - ldr \base, [\base] - mov \irqnr, #0 - and \irqstat, \irqstat, \base @ mask out disabled ones -1001: tst \irqstat, #1 - addeq \irqnr, \irqnr, #1 - moveq \irqstat, \irqstat, lsr #1 - tsteq \irqnr, #32 - beq 1001b - teq \irqnr, #32 - .endm - - .macro irq_prio_table - .ltorg - .bss -ENTRY(soft_irq_mask) - .word 0 - .text - .endm - -#elif defined(CONFIG_ARCH_TBOX) - - .macro disable_fiq - .endm - - .macro get_irqnr_and_base, irqnr, irqstat, base, tmp - ldr \irqstat, =0xffff7000 - ldr \irqstat, [\irqstat] @ get interrupts - ldr \base, =soft_irq_mask - ldr \base, [\base] - mov \irqnr, #0 - and \irqstat, \irqstat, \base @ mask out disabled ones -1001: tst \irqstat, #1 - addeq \irqnr, \irqnr, #1 - moveq \irqstat, \irqstat, lsr #1 - tsteq \irqnr, #32 - beq 1001b - teq \irqnr, #32 - .endm - - .macro irq_prio_table - .ltorg - .bss -ENTRY(soft_irq_mask) - .word 0 - .text - .endm - -#elif defined(CONFIG_ARCH_SA1100) - - .macro disable_fiq - .endm - - .macro get_irqnr_and_base, irqnr, irqstat, base, tmp - mov r4, #0xfa000000 @ ICIP = 0xfa050000 - add r4, r4, #0x00050000 - ldr \irqstat, [r4] @ get irqs - ldr \irqnr, [r4, #4] @ ICMR = 0xfa050004 - ands \irqstat, \irqstat, \irqnr - mov \irqnr, #0 - beq 1001f - tst \irqstat, #0xff - moveq \irqstat, \irqstat, lsr #8 - addeq \irqnr, \irqnr, #8 - tsteq \irqstat, #0xff - moveq \irqstat, \irqstat, lsr #8 - addeq \irqnr, \irqnr, #8 - tsteq \irqstat, #0xff - moveq \irqstat, \irqstat, lsr #8 - addeq \irqnr, \irqnr, #8 - tst \irqstat, #0x0f - moveq \irqstat, \irqstat, lsr #4 - addeq \irqnr, \irqnr, #4 - tst \irqstat, #0x03 - moveq \irqstat, \irqstat, lsr #2 - addeq \irqnr, \irqnr, #2 - tst \irqstat, #0x01 - addeqs \irqnr, \irqnr, #1 -1001: - .endm - - .macro irq_prio_table - .endm - -#elif defined(CONFIG_ARCH_L7200) -#include - - .equ irq_base_addr, IO_BASE_2 - - .macro disable_fiq - .endm - - .macro get_irqnr_and_base, irqnr, irqstat, base, tmp - mov \irqstat, #irq_base_addr @ Virt addr IRQ regs - add \irqstat, \irqstat, #0x00001000 @ Status reg - ldr \irqstat, [\irqstat, #0] @ get interrupts - mov \irqnr, #0 -1001: tst \irqstat, #1 - addeq \irqnr, \irqnr, #1 - moveq \irqstat, \irqstat, lsr #1 - tsteq \irqnr, #32 - beq 1001b - teq \irqnr, #32 - .endm - - .macro irq_prio_table - .endm - -#elif defined(CONFIG_ARCH_INTEGRATOR) - - .macro disable_fiq - .endm - - .macro get_irqnr_and_base, irqnr, irqstat, base, tmp -/* FIXME: should not be using soo many LDRs here */ - ldr \base, =IO_ADDRESS(INTEGRATOR_IC_BASE) - mov \irqnr, #IRQ_PIC_START - ldr \irqstat, [\base, #IRQ_STATUS] @ get masked status - ldr \base, 
=IO_ADDRESS(INTEGRATOR_HDR_BASE) - teq \irqstat, #0 - ldreq \irqstat, [\base, #(INTEGRATOR_HDR_IC_OFFSET+IRQ_STATUS)] - moveq \irqnr, #IRQ_CIC_START - -1001: tst \irqstat, #15 - bne 1002f - add \irqnr, \irqnr, #4 - movs \irqstat, \irqstat, lsr #4 - bne 1001b -1002: tst \irqstat, #1 - bne 1003f - add \irqnr, \irqnr, #1 - movs \irqstat, \irqstat, lsr #1 - bne 1002b -1003: /* EQ will be set if no irqs pending */ - .endm - - .macro irq_prio_table - .endm - -#elif defined(CONFIG_ARCH_VERSATILE_PB) - - .macro disable_fiq - .endm - - .macro get_irqnr_and_base, irqnr, irqstat, base, tmp - ldr \base, =IO_ADDRESS(VERSATILE_VIC_BASE) - ldr \irqstat, [\base, #VIC_IRQ_STATUS] @ get masked status - mov \irqnr, #0 - teq \irqstat, #0 - beq 1003f - -1001: tst \irqstat, #15 - bne 1002f - add \irqnr, \irqnr, #4 - movs \irqstat, \irqstat, lsr #4 - bne 1001b -1002: tst \irqstat, #1 - bne 1003f - add \irqnr, \irqnr, #1 - movs \irqstat, \irqstat, lsr #1 - bne 1002b -1003: /* EQ will be set if no irqs pending */ - -@ clz \irqnr, \irqstat -@1003: /* EQ will be set if we reach MAXIRQNUM */ - .endm - - .macro irq_prio_table - .endm - -#elif defined(CONFIG_ARCH_CLPS711X) - -#include - - .macro disable_fiq - .endm - -#if (INTSR2 - INTSR1) != (INTMR2 - INTMR1) -#error INTSR stride != INTMR stride -#endif - - .macro get_irqnr_and_base, irqnr, stat, base, mask - mov \base, #CLPS7111_BASE - ldr \stat, [\base, #INTSR1] - ldr \mask, [\base, #INTMR1] - mov \irqnr, #4 - mov \mask, \mask, lsl #16 - and \stat, \stat, \mask, lsr #16 - movs \stat, \stat, lsr #4 - bne 1001f - - add \base, \base, #INTSR2 - INTSR1 - ldr \stat, [\base, #INTSR1] - ldr \mask, [\base, #INTMR1] - mov \irqnr, #16 - mov \mask, \mask, lsl #16 - and \stat, \stat, \mask, lsr #16 - -1001: tst \stat, #255 - addeq \irqnr, \irqnr, #8 - moveq \stat, \stat, lsr #8 - tst \stat, #15 - addeq \irqnr, \irqnr, #4 - moveq \stat, \stat, lsr #4 - tst \stat, #3 - addeq \irqnr, \irqnr, #2 - moveq \stat, \stat, lsr #2 - tst \stat, #1 - addeq \irqnr, \irqnr, #1 - moveq \stat, \stat, lsr #1 - tst \stat, #1 @ bit 0 should be set - .endm - - .macro irq_prio_table - .endm - -#elif defined (CONFIG_ARCH_CAMELOT) -#include -#undef IRQ_MODE /* same name defined in asm/proc/ptrace.h */ -#include - - .macro disable_fiq - .endm - - .macro get_irqnr_and_base, irqnr, irqstat, base, tmp - - ldr \irqstat, =INT_ID(IO_ADDRESS(EXC_INT_CTRL00_BASE)) - ldr \irqnr,[\irqstat] - cmp \irqnr,#0 - subne \irqnr,\irqnr,#1 - - - .endm - - .macro irq_prio_table - .endm - -#elif defined(CONFIG_ARCH_IOP321) - .macro disable_fiq - .endm - - /* - * Note: only deal with normal interrupts, not FIQ - */ - .macro get_irqnr_and_base, irqnr, irqstat, base, tmp - mov \irqnr, #0 - mrc p6, 0, \irqstat, c8, c0, 0 @ Read IINTSRC - cmp \irqstat, #0 - beq 1001f - clz \irqnr, \irqstat - mov \base, #31 - subs \irqnr,\base,\irqnr - add \irqnr,\irqnr,#IRQ_IOP321_DMA0_EOT -1001: - .endm - - .macro irq_prio_table - .endm - -#elif defined(CONFIG_ARCH_IOP331) - .macro disable_fiq - .endm - - /* - * Note: only deal with normal interrupts, not FIQ - */ - .macro get_irqnr_and_base, irqnr, irqstat, base, tmp - mov \irqnr, #0 - mrc p6, 0, \irqstat, c4, c0, 0 @ Read IINTSRC0 - cmp \irqstat, #0 - bne 1002f - mrc p6, 0, \irqstat, c5, c0, 0 @ Read IINTSRC1 - cmp \irqstat, #0 - beq 1001f - clz \irqnr, \irqstat -/* - * mov \base, #31 - * subs \irqnr,\base,\irqnr - */ - rsbs \irqnr,\irqnr,#31 @ recommend by RMK - add \irqnr,\irqnr,#IRQ_IOP331_XINT8 - b 1001f -1002: clz \irqnr, \irqstat - mov \base, #31 - subs \irqnr,\base,\irqnr - add 
\irqnr,\irqnr,#IRQ_IOP331_DMA0_EOT -1001: - .endm - - .macro irq_prio_table - .endm - -#elif defined(CONFIG_ARCH_PXA) - - .macro disable_fiq - .endm - - .macro get_irqnr_and_base, irqnr, irqstat, base, tmp -#ifdef CONFIG_PXA27x - mrc p6, 0, \irqstat, c0, c0, 0 @ ICIP - mrc p6, 0, \irqnr, c1, c0, 0 @ ICMR -#else - mov \base, #io_p2v(0x40000000) @ IIR Ctl = 0x40d00000 - add \base, \base, #0x00d00000 - ldr \irqstat, [\base, #0] @ ICIP - ldr \irqnr, [\base, #4] @ ICMR -#endif - ands \irqnr, \irqstat, \irqnr - beq 1001f - rsb \irqstat, \irqnr, #0 - and \irqstat, \irqstat, \irqnr - clz \irqnr, \irqstat - rsb \irqnr, \irqnr, #(31 - PXA_IRQ_SKIP) -1001: - .endm - - .macro irq_prio_table - .endm - -#elif defined(CONFIG_ARCH_IXP2000) - - .macro disable_fiq - .endm - - .macro get_irqnr_and_base, irqnr, irqstat, base, tmp - - mov \irqnr, #0x0 @clear out irqnr as default - mov \base, #0xfe000000 - orr \base, \base, #0x00ff0000 - orr \base, \base, #0x0000a000 - orr \base, \base, #0x08 - ldr \irqstat, [\base] @ get interrupts - mov \tmp, #IXP2000_VALID_IRQ_MASK & 0xff000000 - orr \tmp, \tmp, #IXP2000_VALID_IRQ_MASK & 0x00ff0000 - orr \tmp, \tmp, #IXP2000_VALID_IRQ_MASK & 0x0000ff00 - orr \tmp, \tmp, #IXP2000_VALID_IRQ_MASK & 0x000000ff - and \irqstat, \irqstat, \tmp - - cmp \irqstat, #0 - beq 1001f - - clz \irqnr, \irqstat - mov \base, #31 - subs \irqnr, \base, \irqnr - - /* - * We handle PCIA and PCIB here so we don't have an - * extra layer of code just to check these two bits. - */ - cmp \irqnr, #IRQ_IXP2000_PCI - bne 1001f - - mov \base, #0xfe000000 - orr \base, \base, #0x00fd0000 - orr \base, \base, #0x0000e100 - orr \base, \base, #0x00000058 - ldr \irqstat, [\base] - - mov \tmp, #(1<<26) - tst \irqstat, \tmp - movne \irqnr, #IRQ_IXP2000_PCIA - bne 1001f - - mov \tmp, #(1<<27) - tst \irqstat, \tmp - movne \irqnr, #IRQ_IXP2000_PCIB - -1001: - .endm - - .macro irq_prio_table - .endm - -#elif defined (CONFIG_ARCH_IXP4XX) - - .macro disable_fiq - .endm - - .macro get_irqnr_and_base, irqnr, irqstat, base, tmp - ldr \irqstat, =(IXP4XX_INTC_BASE_VIRT+IXP4XX_ICIP_OFFSET) - ldr \irqstat, [\irqstat] @ get interrupts - cmp \irqstat, #0 - beq 1002f - clz \irqnr, \irqstat - mov \base, #31 - subs \irqnr, \base, \irqnr - -/* -1001: tst \irqstat, #1 - addeq \irqnr, \irqnr, #1 - moveq \irqstat, \irqstat, lsr #1 - tsteq \irqnr, #32 - beq 1001b - teq \irqnr, #32 -*/ -1002: - .endm - - .macro irq_prio_table - .endm - -#elif defined(CONFIG_ARCH_OMAP) - - .macro disable_fiq - .endm - - .macro get_irqnr_and_base, irqnr, irqstat, base, tmp - ldr \base, =IO_ADDRESS(OMAP_IH1_BASE) - ldr \irqnr, [\base, #IRQ_ITR_REG_OFFSET] - ldr \tmp, [\base, #IRQ_MIR_REG_OFFSET] - mov \irqstat, #0xffffffff - bic \tmp, \irqstat, \tmp - tst \irqnr, \tmp - beq 1510f - - ldr \irqnr, [\base, #IRQ_SIR_FIQ_REG_OFFSET] - cmp \irqnr, #0 - ldreq \irqnr, [\base, #IRQ_SIR_IRQ_REG_OFFSET] - cmpeq \irqnr, #INT_IH2_IRQ - ldreq \base, =IO_ADDRESS(OMAP_IH2_BASE) - ldreq \irqnr, [\base, #IRQ_SIR_IRQ_REG_OFFSET] - addeqs \irqnr, \irqnr, #32 -1510: - .endm - - .macro irq_prio_table - .endm - -#elif defined(CONFIG_ARCH_S3C2410) - /* S3C2410X IRQ Handler, */ - - .macro get_irqnr_and_base, irqnr, irqstat, base, tmp - -30000: - mov \tmp, #S3C2410_VA_IRQ - ldr \irqnr, [ \tmp, #0x14 ] @ get irq no - teq \irqnr, #4 - teqne \irqnr, #5 - beq 1002f @ external irq reg - teq \irqnr, #16 - beq 1003f @ lcd controller - - @ debug check to see if interrupt reported is the same - @ as the offset.... 
- - teq \irqnr, #0 - beq 20002f - ldr \irqstat, [ \tmp, #0x10 ] @ INTPND - mov \irqstat, \irqstat, lsr \irqnr - tst \irqstat, #1 - bne 20002f - -#if 1 - stmfd r13!, { r0 - r4 , r14 } - ldr r1, [ \tmp, #0x14 ] @ intoffset - ldr r2, [ \tmp, #0x10 ] @ INTPND - ldr r3, [ \tmp, #0x00 ] @ SRCPND - adr r0, 20003f - bl printk - b 20004f -#endif -20003: - .ascii "<7>irq: err - bad offset %d, intpnd=%08x, srcpnd=%08x\n" - .byte 0 - .align 4 -20004: - mov r1, #1 - mov \tmp, #S3C2410_VA_IRQ - ldmfd r13!, { r0 - r4 , r14 } - - @ try working out interript number for ourselves - mov \irqnr, #0 - ldr \irqstat, [ \tmp, #0x10 ] @ INTPND -10021: - movs \irqstat, \irqstat, lsr#1 - bcs 30000b @ try and re-start the proccess - add \irqnr, \irqnr, #1 - cmp \irqnr, #32 - ble 10021b - - @ found no interrupt, set Z flag and leave - movs \irqnr, #0 - b 1001f - -20005: -20002: @ exit - @ we base the s3c2410x interrupts at 16 and above to allow - @ isa peripherals to have their standard interrupts, also - @ ensure that Z flag is un-set on exit - - @ note, we cannot be sure if we get IRQ_EINT0 (0) that - @ there is simply no interrupt pending, so in all other - @ cases we jump to say we have found something, otherwise - @ we check to see if the interrupt really is assrted - adds \irqnr, \irqnr, #IRQ_EINT0 - teq \irqnr, #IRQ_EINT0 - bne 1001f @ exit - ldr \irqstat, [ \tmp, #0x10 ] @ INTPND - teq \irqstat, #0 - moveq \irqnr, #0 - b 1001f - - @ we get here from no main or external interrupts pending -1002: - add \tmp, \tmp, #S3C2410_VA_GPIO - S3C2410_VA_IRQ - ldr \irqstat, [ \tmp, # 0xa8 ] @ EXTINTPEND - ldr \irqnr, [ \tmp, # 0xa4 ] @ EXTINTMASK - - bic \irqstat, \irqstat, \irqnr @ clear masked irqs - - mov \irqnr, #IRQ_EINT4 @ start extint nos - mov \irqstat, \irqstat, lsr#4 @ ignore bottom 4 bits -10021: - movs \irqstat, \irqstat, lsr#1 - bcs 1004f - add \irqnr, \irqnr, #1 - cmp \irqnr, #IRQ_EINT23 - ble 10021b - - @ found no interrupt, set Z flag and leave - movs \irqnr, #0 - b 1001f - -1003: - @ lcd interrupt has been asserted... - add \tmp, \tmp, #S3C2410_VA_LCD - S3C2410_VA_IRQ - ldr \irqstat, [ \tmp, # 0x54 ] @ lcd int pending - - tst \irqstat, #2 - movne \irqnr, #IRQ_LCD_FRAME - tst \irqstat, #1 - movne \irqnr, #IRQ_LCD_FIFO - - @ fall through to exit with flags updated - -1004: @ ensure Z flag clear in case our MOVS shifted out the last bit - teq \irqnr, #0 -1001: - @ exit irq routine - .endm - - - /* currently don't need an disable_fiq macro */ - - .macro disable_fiq - .endm - - /* we don't have an irq priority table */ - .macro irq_prio_table - .endm - -#elif defined(CONFIG_ARCH_LH7A400) - -# if defined (CONFIG_ARCH_LH7A404) -# error "LH7A400 and LH7A404 are mutually exclusive" -# endif - .macro disable_fiq - .endm - - .macro get_irqnr_and_base, irqnr, irqstat, base, tmp - mov \irqnr, #0 - mov \base, #io_p2v(0x80000000) @ APB registers - ldr \irqstat, [\base, #0x500] @ PIC INTSR - -1001: movs \irqstat, \irqstat, lsr #1 @ Shift into carry - bcs 1008f @ Bit set; irq found - add \irqnr, \irqnr, #1 - bne 1001b @ Until no bits - b 1009f @ Nothing? Hmm. 
-1008: movs \irqstat, #1 @ Force !Z -1009: - .endm - - .macro irq_prio_table - .endm - -#elif defined(CONFIG_ARCH_LH7A404) - - .macro disable_fiq - .endm - - .macro get_irqnr_and_base, irqnr, irqstat, base, tmp - mov \irqnr, #0 @ VIC1 irq base - mov \base, #io_p2v(0x80000000) @ APB registers - add \base, \base, #0x8000 - ldr \tmp, [\base, #0x0030] @ VIC1_VECTADDR - tst \tmp, #VA_VECTORED @ Direct vectored - bne 1002f - tst \tmp, #VA_VIC1DEFAULT @ Default vectored VIC1 - ldrne \irqstat, [\base, #0] @ VIC1_IRQSTATUS - bne 1001f - add \base, \base, #(0xa000 - 0x8000) - ldr \tmp, [\base, #0x0030] @ VIC2_VECTADDR - tst \tmp, #VA_VECTORED @ Direct vectored - bne 1002f - ldr \irqstat, [\base, #0] @ VIC2_IRQSTATUS - mov \irqnr, #32 @ VIC2 irq base - -1001: movs \irqstat, \irqstat, lsr #1 @ Shift into carry - bcs 1008f @ Bit set; irq found - add \irqnr, \irqnr, #1 - bne 1001b @ Until no bits - b 1009f @ Nothing? Hmm. -1002: and \irqnr, \tmp, #0x3f @ Mask for valid bits -1008: movs \irqstat, #1 @ Force !Z - str \tmp, [\base, #0x0030] @ Clear vector -1009: - .endm - - .macro irq_prio_table - .endm - -#elif defined(CONFIG_ARCH_IMX) - - .macro disable_fiq - .endm -#define AITC_NIVECSR 0x40 - .macro get_irqnr_and_base, irqnr, irqstat, base, tmp - ldr \irqstat, =IO_ADDRESS(IMX_AITC_BASE) - @ Load offset & priority of the highest priority - @ interrupt pending. - ldr \irqnr, [\irqstat, #AITC_NIVECSR] - @ Shift off the priority leaving the offset or - @ "interrupt number" - mov \irqnr, \irqnr, lsr #16 - ldr \irqstat, =1 @ dummy compare - ldr \base, =0xFFFF // invalid interrupt - cmp \irqnr, \base - bne 1001f - ldr \irqstat, =0 -1001: - tst \irqstat, #1 @ to make the condition code = TRUE - .endm - - .macro irq_prio_table - .endm - -#elif defined(CONFIG_ARCH_H720X) - - .macro disable_fiq - .endm - - .macro get_irqnr_and_base, irqnr, irqstat, base, tmp -#if defined (CONFIG_CPU_H7201) || defined (CONFIG_CPU_H7202) - @ we could use the id register on H7202, but this is not - @ properly updated when we come back from asm_do_irq - @ without a previous return from interrupt - @ (see loops below in irq_svc, irq_usr) - @ We see unmasked pending ints only, as the masked pending ints - @ are not visible here - - mov \base, #0xf0000000 @ base register - orr \base, \base, #0x24000 @ irqbase - ldr \irqstat, [\base, #0x04] @ get interrupt status -#if defined (CONFIG_CPU_H7201) - ldr \tmp, =0x001fffff -#else - mvn \tmp, #0xc0000000 -#endif - and \irqstat, \irqstat, \tmp @ mask out unused ints - mov \irqnr, #0 - - mov \tmp, #0xff00 - orr \tmp, \tmp, #0xff - tst \irqstat, \tmp - addeq \irqnr, \irqnr, #16 - moveq \irqstat, \irqstat, lsr #16 - tst \irqstat, #255 - addeq \irqnr, \irqnr, #8 - moveq \irqstat, \irqstat, lsr #8 - tst \irqstat, #15 - addeq \irqnr, \irqnr, #4 - moveq \irqstat, \irqstat, lsr #4 - tst \irqstat, #3 - addeq \irqnr, \irqnr, #2 - moveq \irqstat, \irqstat, lsr #2 - tst \irqstat, #1 - addeq \irqnr, \irqnr, #1 - moveq \irqstat, \irqstat, lsr #1 - tst \irqstat, #1 @ bit 0 should be set - .endm - - .macro irq_prio_table - .endm - -#else -#error hynix processor selection missmatch -#endif -#else -#error Unknown architecture -#endif - /* * Invalid mode handlers */ -__pabt_invalid: sub sp, sp, #S_FRAME_SIZE @ Allocate frame size in one go - stmia sp, {r0 - lr} @ Save XXX r0 - lr - ldr r4, .LCabt - mov r1, #BAD_PREFETCH - b 1f - -__dabt_invalid: sub sp, sp, #S_FRAME_SIZE - stmia sp, {r0 - lr} @ Save SVC r0 - lr [lr *should* be intact] - ldr r4, .LCabt - mov r1, #BAD_DATA - b 1f - -__irq_invalid: sub sp, sp, 
#S_FRAME_SIZE @ Allocate space on stack for frame - stmfd sp, {r0 - lr} @ Save r0 - lr - ldr r4, .LCirq - mov r1, #BAD_IRQ - b 1f - -__und_invalid: sub sp, sp, #S_FRAME_SIZE - stmia sp, {r0 - lr} - ldr r4, .LCund - mov r1, #BAD_UNDEFINSTR @ int reason - -1: zero_fp - ldmia r4, {r5 - r7} @ Get XXX pc, cpsr, old_r0 - add r4, sp, #S_PC - stmia r4, {r5 - r7} @ Save XXX pc, cpsr, old_r0 - mov r0, sp - and r2, r6, #31 @ int mode - b bad_mode + .macro inv_entry, sym, reason + sub sp, sp, #S_FRAME_SIZE @ Allocate frame size in one go + stmia sp, {r0 - lr} @ Save XXX r0 - lr + ldr r4, .LC\sym + mov r1, #\reason + .endm + +__pabt_invalid: + inv_entry abt, BAD_PREFETCH + b 1f + +__dabt_invalid: + inv_entry abt, BAD_DATA + b 1f + +__irq_invalid: + inv_entry irq, BAD_IRQ + b 1f + +__und_invalid: + inv_entry und, BAD_UNDEFINSTR + +1: zero_fp + ldmia r4, {r5 - r7} @ Get XXX pc, cpsr, old_r0 + add r4, sp, #S_PC + stmia r4, {r5 - r7} @ Save XXX pc, cpsr, old_r0 + mov r0, sp + and r2, r6, #31 @ int mode + b bad_mode /* * SVC mode handlers */ - .align 5 -__dabt_svc: sub sp, sp, #S_FRAME_SIZE - stmia sp, {r0 - r12} @ save r0 - r12 - ldr r2, .LCabt - add r0, sp, #S_FRAME_SIZE - ldmia r2, {r2 - r4} @ get pc, cpsr - add r5, sp, #S_SP - mov r1, lr - stmia r5, {r0 - r4} @ save sp_SVC, lr_SVC, pc, cpsr, old_ro - mrs r9, cpsr @ Enable interrupts if they were - tst r3, #PSR_I_BIT - biceq r9, r9, #PSR_I_BIT @ previously -/* - * This routine must not corrupt r9 - */ + .macro svc_entry, sym + sub sp, sp, #S_FRAME_SIZE + stmia sp, {r0 - r12} @ save r0 - r12 + ldr r2, .LC\sym + add r0, sp, #S_FRAME_SIZE + ldmia r2, {r2 - r4} @ get pc, cpsr + add r5, sp, #S_SP + mov r1, lr + + @ + @ We are now ready to fill in the remaining blanks on the stack: + @ + @ r0 - sp_svc + @ r1 - lr_svc + @ r2 - lr_, already fixed up for correct return/restart + @ r3 - spsr_ + @ r4 - orig_r0 (see pt_regs definition in ptrace.h) + @ + stmia r5, {r0 - r4} + .endm + + .align 5 +__dabt_svc: + svc_entry abt + + @ + @ get ready to re-enable interrupts if appropriate + @ + mrs r9, cpsr + tst r3, #PSR_I_BIT + biceq r9, r9, #PSR_I_BIT + + @ + @ Call the processor-specific abort handler: + @ + @ r2 - aborted context pc + @ r3 - aborted context cpsr + @ + @ The abort handler must return the aborted address in r0, and + @ the fault status register in r1. r9 must be preserved. 
+ @ #ifdef MULTI_ABORT - ldr r4, .LCprocfns @ pass r2, r3 to - mov lr, pc @ processor code - ldr pc, [r4] @ call processor specific code + ldr r4, .LCprocfns + mov lr, pc + ldr pc, [r4] #else - bl CPU_ABORT_HANDLER + bl CPU_ABORT_HANDLER #endif - msr cpsr_c, r9 - mov r2, sp - bl do_DataAbort - disable_irq r0 - ldr r0, [sp, #S_PSR] - msr spsr_cxsf, r0 - ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr - - .align 5 -__irq_svc: sub sp, sp, #S_FRAME_SIZE - stmia sp, {r0 - r12} @ save r0 - r12 - ldr r7, .LCirq - add r5, sp, #S_FRAME_SIZE - ldmia r7, {r7 - r9} - add r4, sp, #S_SP - mov r6, lr - stmia r4, {r5, r6, r7, r8, r9} @ save sp_SVC, lr_SVC, pc, cpsr, old_ro + + @ + @ set desired IRQ state, then call main handler + @ + msr cpsr_c, r9 + mov r2, sp + bl do_DataAbort + + @ + @ IRQs off again before pulling preserved data off the stack + @ + disable_irq + + @ + @ restore SPSR and restart the instruction + @ + ldr r0, [sp, #S_PSR] + msr spsr_cxsf, r0 + ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr + + .align 5 +__irq_svc: + svc_entry irq #ifdef CONFIG_PREEMPT - get_thread_info r8 - ldr r9, [r8, #TI_PREEMPT] @ get preempt count - add r7, r9, #1 @ increment it - str r7, [r8, #TI_PREEMPT] + get_thread_info r8 + ldr r9, [r8, #TI_PREEMPT] @ get preempt count + add r7, r9, #1 @ increment it + str r7, [r8, #TI_PREEMPT] #endif -1: get_irqnr_and_base r0, r6, r5, lr - movne r1, sp - @ - @ routine called with r0 = irq number, r1 = struct pt_regs * - @ - adrsvc ne, lr, 1b - bne asm_do_IRQ +1: get_irqnr_and_base r0, r6, r5, lr + movne r1, sp + @ + @ routine called with r0 = irq number, r1 = struct pt_regs * + @ + adrne lr, 1b + bne asm_do_IRQ #ifdef CONFIG_PREEMPT - ldr r0, [r8, #TI_FLAGS] @ get flags - tst r0, #_TIF_NEED_RESCHED - blne svc_preempt + ldr r0, [r8, #TI_FLAGS] @ get flags + tst r0, #_TIF_NEED_RESCHED + blne svc_preempt preempt_return: - ldr r0, [r8, #TI_PREEMPT] @ read preempt value - teq r0, r7 - str r9, [r8, #TI_PREEMPT] @ restore preempt count - strne r0, [r0, -r0] @ bug() + ldr r0, [r8, #TI_PREEMPT] @ read preempt value + teq r0, r7 + str r9, [r8, #TI_PREEMPT] @ restore preempt count + strne r0, [r0, -r0] @ bug() #endif - ldr r0, [sp, #S_PSR] @ irqs are already disabled - msr spsr_cxsf, r0 - ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr + ldr r0, [sp, #S_PSR] @ irqs are already disabled + msr spsr_cxsf, r0 + ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr - .ltorg + .ltorg #ifdef CONFIG_PREEMPT -svc_preempt: teq r9, #0 @ was preempt count = 0 - ldreq r6, .LCirq_stat - movne pc, lr @ no - ldr r0, [r6, #4] @ local_irq_count - ldr r1, [r6, #8] @ local_bh_count - adds r0, r0, r1 - movne pc, lr - mov r7, #PREEMPT_ACTIVE - str r7, [r8, #TI_PREEMPT] @ set PREEMPT_ACTIVE -1: enable_irq r2 @ enable IRQs - bl schedule - disable_irq r0 @ disable IRQs - ldr r0, [r8, #TI_FLAGS] @ get new tasks TI_FLAGS - tst r0, #_TIF_NEED_RESCHED - beq preempt_return @ go again - b 1b +svc_preempt: + teq r9, #0 @ was preempt count = 0 + ldreq r6, .LCirq_stat + movne pc, lr @ no + ldr r0, [r6, #4] @ local_irq_count + ldr r1, [r6, #8] @ local_bh_count + adds r0, r0, r1 + movne pc, lr + mov r7, #0 @ preempt_schedule_irq + str r7, [r8, #TI_PREEMPT] @ expects preempt_count == 0 +1: bl preempt_schedule_irq @ irq en/disable is done inside + ldr r0, [r8, #TI_FLAGS] @ get new tasks TI_FLAGS + tst r0, #_TIF_NEED_RESCHED + beq preempt_return @ go again + b 1b #endif - .align 5 -__und_svc: sub sp, sp, #S_FRAME_SIZE - stmia sp, {r0 - r12} @ save r0 - r12 - ldr r3, .LCund - mov r4, lr - ldmia r3, {r5 - r7} - add r3, sp, #S_FRAME_SIZE - add r2, sp, 
#S_SP - stmia r2, {r3 - r7} @ save sp_SVC, lr_SVC, pc, cpsr, old_ro - - ldr r0, [r5, #-4] @ r0 = instruction - adrsvc al, r9, 1f @ r9 = normal FP return - bl call_fpe @ lr = undefined instr return - - mov r0, sp @ struct pt_regs *regs - bl do_undefinstr - -1: disable_irq r0 - ldr lr, [sp, #S_PSR] @ Get SVC cpsr - msr spsr_cxsf, lr - ldmia sp, {r0 - pc}^ @ Restore SVC registers - - .align 5 -__pabt_svc: sub sp, sp, #S_FRAME_SIZE - stmia sp, {r0 - r12} @ save r0 - r12 - ldr r2, .LCabt - add r0, sp, #S_FRAME_SIZE - ldmia r2, {r2 - r4} @ get pc, cpsr - add r5, sp, #S_SP - mov r1, lr - stmia r5, {r0 - r4} @ save sp_SVC, lr_SVC, pc, cpsr, old_ro - mrs r9, cpsr @ Enable interrupts if they were - tst r3, #PSR_I_BIT - biceq r9, r9, #PSR_I_BIT @ previously - msr cpsr_c, r9 - mov r0, r2 @ address (pc) - mov r1, sp @ regs - bl do_PrefetchAbort @ call abort handler - disable_irq r0 - ldr r0, [sp, #S_PSR] - msr spsr_cxsf, r0 - ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr - - .align 5 -.LCirq: .word __temp_irq -.LCund: .word __temp_und -.LCabt: .word __temp_abt + .align 5 +__und_svc: + svc_entry und + + @ + @ call emulation code, which returns using r9 if it has emulated + @ the instruction, or the more conventional lr if we are to treat + @ this as a real undefined instruction + @ + @ r0 - instruction + @ + ldr r0, [r2, #-4] + adr r9, 1f + bl call_fpe + + mov r0, sp @ struct pt_regs *regs + bl do_undefinstr + + @ + @ IRQs off again before pulling preserved data off the stack + @ +1: disable_irq + + @ + @ restore SPSR and restart the instruction + @ + ldr lr, [sp, #S_PSR] @ Get SVC cpsr + msr spsr_cxsf, lr + ldmia sp, {r0 - pc}^ @ Restore SVC registers + + .align 5 +__pabt_svc: + svc_entry abt + + @ + @ re-enable interrupts if appropriate + @ + mrs r9, cpsr + tst r3, #PSR_I_BIT + biceq r9, r9, #PSR_I_BIT + msr cpsr_c, r9 + + @ + @ set args, then call main handler + @ + @ r0 - address of faulting instruction + @ r1 - pointer to registers on stack + @ + mov r0, r2 @ address (pc) + mov r1, sp @ regs + bl do_PrefetchAbort @ call abort handler + + @ + @ IRQs off again before pulling preserved data off the stack + @ + disable_irq + + @ + @ restore SPSR and restart the instruction + @ + ldr r0, [sp, #S_PSR] + msr spsr_cxsf, r0 + ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr + + .align 5 +.LCirq: + .word __temp_irq +.LCund: + .word __temp_und +.LCabt: + .word __temp_abt #ifdef MULTI_ABORT -.LCprocfns: .word processor +.LCprocfns: + .word processor #endif -.LCfp: .word fp_enter +.LCfp: + .word fp_enter #ifdef CONFIG_PREEMPT -.LCirq_stat: .word irq_stat +.LCirq_stat: + .word irq_stat #endif - irq_prio_table - /* * User mode handlers */ - .align 5 -__dabt_usr: sub sp, sp, #S_FRAME_SIZE @ Allocate frame size in one go - stmia sp, {r0 - r12} @ save r0 - r12 - ldr r7, .LCabt - add r5, sp, #S_PC - ldmia r7, {r2 - r4} @ Get USR pc, cpsr - stmia r5, {r2 - r4} @ Save USR pc, cpsr, old_r0 - stmdb r5, {sp, lr}^ - alignment_trap r7, r7, __temp_abt - zero_fp + .macro usr_entry, sym + sub sp, sp, #S_FRAME_SIZE @ Allocate frame size in one go + stmia sp, {r0 - r12} @ save r0 - r12 + ldr r7, .LC\sym + add r5, sp, #S_PC + ldmia r7, {r2 - r4} @ Get USR pc, cpsr + +#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) + @ make sure our user space atomic helper is aborted + cmp r2, #VIRT_OFFSET + bichs r3, r3, #PSR_Z_BIT +#endif + + @ + @ We are now ready to fill in the remaining blanks on the stack: + @ + @ r2 - lr_, already fixed up for correct return/restart + @ r3 - spsr_ + @ r4 - orig_r0 (see pt_regs definition in 
ptrace.h) + @ + @ Also, separately save sp_usr and lr_usr + @ + stmia r5, {r2 - r4} + stmdb r5, {sp, lr}^ + + @ + @ Enable the alignment trap while in kernel mode + @ + alignment_trap r7, r0, __temp_\sym + + @ + @ Clear FP to mark the first stack frame + @ + zero_fp + .endm + + .align 5 +__dabt_usr: + usr_entry abt + + @ + @ Call the processor-specific abort handler: + @ + @ r2 - aborted context pc + @ r3 - aborted context cpsr + @ + @ The abort handler must return the aborted address in r0, and + @ the fault status register in r1. + @ #ifdef MULTI_ABORT - ldr r4, .LCprocfns @ pass r2, r3 to - mov lr, pc @ processor code - ldr pc, [r4] @ call processor specific code + ldr r4, .LCprocfns + mov lr, pc + ldr pc, [r4] #else - bl CPU_ABORT_HANDLER + bl CPU_ABORT_HANDLER #endif - enable_irq r2 @ Enable interrupts - mov r2, sp - adrsvc al, lr, ret_from_exception - b do_DataAbort - - .align 5 -__irq_usr: sub sp, sp, #S_FRAME_SIZE - stmia sp, {r0 - r12} @ save r0 - r12 - ldr r4, .LCirq - add r8, sp, #S_PC - ldmia r4, {r5 - r7} @ get saved PC, SPSR - stmia r8, {r5 - r7} @ save pc, psr, old_r0 - stmdb r8, {sp, lr}^ - alignment_trap r4, r7, __temp_irq - zero_fp + + @ + @ IRQs on, then call the main handler + @ + enable_irq + mov r2, sp + adr lr, ret_from_exception + b do_DataAbort + + .align 5 +__irq_usr: + usr_entry irq + #ifdef CONFIG_PREEMPT - get_thread_info r8 - ldr r9, [r8, #TI_PREEMPT] @ get preempt count - add r7, r9, #1 @ increment it - str r7, [r8, #TI_PREEMPT] + get_thread_info r8 + ldr r9, [r8, #TI_PREEMPT] @ get preempt count + add r7, r9, #1 @ increment it + str r7, [r8, #TI_PREEMPT] #endif -1: get_irqnr_and_base r0, r6, r5, lr - movne r1, sp - adrsvc ne, lr, 1b - @ - @ routine called with r0 = irq number, r1 = struct pt_regs * - @ - bne asm_do_IRQ +1: get_irqnr_and_base r0, r6, r5, lr + movne r1, sp + adrne lr, 1b + @ + @ routine called with r0 = irq number, r1 = struct pt_regs * + @ + bne asm_do_IRQ #ifdef CONFIG_PREEMPT - ldr r0, [r8, #TI_PREEMPT] - teq r0, r7 - str r9, [r8, #TI_PREEMPT] - strne r0, [r0, -r0] - mov tsk, r8 + ldr r0, [r8, #TI_PREEMPT] + teq r0, r7 + str r9, [r8, #TI_PREEMPT] + strne r0, [r0, -r0] + mov tsk, r8 #else - get_thread_info tsk + get_thread_info tsk #endif - mov why, #0 - b ret_to_user - - .ltorg - - .align 5 -__und_usr: sub sp, sp, #S_FRAME_SIZE @ Allocate frame size in one go - stmia sp, {r0 - r12} @ Save r0 - r12 - ldr r4, .LCund - add r8, sp, #S_PC - ldmia r4, {r5 - r7} - stmia r8, {r5 - r7} @ Save USR pc, cpsr, old_r0 - stmdb r8, {sp, lr}^ @ Save user sp, lr - alignment_trap r4, r7, __temp_und - zero_fp - tst r6, #PSR_T_BIT @ Thumb mode? - bne fpundefinstr @ ignore FP - sub r4, r5, #4 -1: ldrt r0, [r4] @ r0 = instruction - adrsvc al, r9, ret_from_exception @ r9 = normal FP return - adrsvc al, lr, fpundefinstr @ lr = undefined instr return + mov why, #0 + b ret_to_user + + .ltorg + + .align 5 +__und_usr: + usr_entry und + + tst r3, #PSR_T_BIT @ Thumb mode? + bne fpundefinstr @ ignore FP + sub r4, r2, #4 + + @ + @ fall through to the emulation code, which returns using r9 if + @ it has emulated the instruction, or the more conventional lr + @ if we are to treat this as a real undefined instruction + @ + @ r0 - instruction + @ +1: ldrt r0, [r4] + adr r9, ret_from_exception + adr lr, fpundefinstr + @ + @ fallthrough to call_fpe + @ /* * The out of line fixup for the ldrt above. 
*/ - .section .fixup, "ax" -2: mov pc, r9 - .previous - .section __ex_table,"a" - .long 1b, 2b - .previous + .section .fixup, "ax" +2: mov pc, r9 + .previous + .section __ex_table,"a" + .long 1b, 2b + .previous /* - * r0 = instruction. - * * Check whether the instruction is a co-processor instruction. * If yes, we need to call the relevant co-processor handler. * @@ -1300,98 +401,95 @@ __und_usr: sub sp, sp, #S_FRAME_SIZE @ Allocate frame size in one go * for the ARM6/ARM7 SWI bug. * * Emulators may wish to make use of the following registers: - * r0 - instruction opcode. - * r10 - this threads thread_info structure. + * r0 = instruction opcode. + * r2 = PC+4 + * r10 = this threads thread_info structure. */ call_fpe: - tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27 + tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27 #if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710) - and r8, r0, #0x0f000000 @ mask out op-code bits - teqne r8, #0x0f000000 @ SWI (ARM6/7 bug)? + and r8, r0, #0x0f000000 @ mask out op-code bits + teqne r8, #0x0f000000 @ SWI (ARM6/7 bug)? #endif - moveq pc, lr - get_thread_info r10 @ get current thread - and r8, r0, #0x00000f00 @ mask out CP number - mov r7, #1 - add r6, r10, #TI_USED_CP - strb r7, [r6, r8, lsr #8] @ set appropriate used_cp[] + moveq pc, lr + get_thread_info r10 @ get current thread + and r8, r0, #0x00000f00 @ mask out CP number + mov r7, #1 + add r6, r10, #TI_USED_CP + strb r7, [r6, r8, lsr #8] @ set appropriate used_cp[] #ifdef CONFIG_IWMMXT - @ Test if we need to give access to iWMMXt coprocessors - ldr r5, [r10, #TI_FLAGS] - rsbs r7, r8, #(1 << 8) @ CP 0 or 1 only - movcss r7, r5, lsr #(TIF_USING_IWMMXT + 1) - bcs iwmmxt_task_enable + @ Test if we need to give access to iWMMXt coprocessors + ldr r5, [r10, #TI_FLAGS] + rsbs r7, r8, #(1 << 8) @ CP 0 or 1 only + movcss r7, r5, lsr #(TIF_USING_IWMMXT + 1) + bcs iwmmxt_task_enable #endif - enable_irq r7 - add pc, pc, r8, lsr #6 - mov r0, r0 - - mov pc, lr @ CP#0 - b do_fpe @ CP#1 (FPE) - b do_fpe @ CP#2 (FPE) - mov pc, lr @ CP#3 - mov pc, lr @ CP#4 - mov pc, lr @ CP#5 - mov pc, lr @ CP#6 - mov pc, lr @ CP#7 - mov pc, lr @ CP#8 - mov pc, lr @ CP#9 + enable_irq + add pc, pc, r8, lsr #6 + mov r0, r0 + + mov pc, lr @ CP#0 + b do_fpe @ CP#1 (FPE) + b do_fpe @ CP#2 (FPE) + mov pc, lr @ CP#3 + mov pc, lr @ CP#4 + mov pc, lr @ CP#5 + mov pc, lr @ CP#6 + mov pc, lr @ CP#7 + mov pc, lr @ CP#8 + mov pc, lr @ CP#9 #ifdef CONFIG_VFP - b do_vfp @ CP#10 (VFP) - b do_vfp @ CP#11 (VFP) + b do_vfp @ CP#10 (VFP) + b do_vfp @ CP#11 (VFP) #else - mov pc, lr @ CP#10 (VFP) - mov pc, lr @ CP#11 (VFP) + mov pc, lr @ CP#10 (VFP) + mov pc, lr @ CP#11 (VFP) #endif - mov pc, lr @ CP#12 - mov pc, lr @ CP#13 - mov pc, lr @ CP#14 (Debug) - mov pc, lr @ CP#15 (Control) + mov pc, lr @ CP#12 + mov pc, lr @ CP#13 + mov pc, lr @ CP#14 (Debug) + mov pc, lr @ CP#15 (Control) -do_fpe: ldr r4, .LCfp - add r10, r10, #TI_FPSTATE @ r10 = workspace - ldr pc, [r4] @ Call FP module USR entry point +do_fpe: + ldr r4, .LCfp + add r10, r10, #TI_FPSTATE @ r10 = workspace + ldr pc, [r4] @ Call FP module USR entry point /* * The FP module is called with these registers set: * r0 = instruction - * r5 = PC + * r2 = PC+4 * r9 = normal "successful" return address * r10 = FP workspace * lr = unrecognised FP instruction return address */ - .data + .data ENTRY(fp_enter) - .word fpundefinstr - .text - -fpundefinstr: mov r0, sp - adrsvc al, lr, ret_from_exception - b do_undefinstr - - .align 5 -__pabt_usr: sub sp, sp, #S_FRAME_SIZE @ Allocate frame 
size in one go - stmia sp, {r0 - r12} @ Save r0 - r12 - ldr r4, .LCabt - add r8, sp, #S_PC - ldmia r4, {r5 - r7} @ Get USR pc, cpsr - stmia r8, {r5 - r7} @ Save USR pc, cpsr, old_r0 - stmdb r8, {sp, lr}^ @ Save sp_usr lr_usr - alignment_trap r4, r7, __temp_abt - zero_fp - enable_irq r0 @ Enable interrupts - mov r0, r5 @ address (pc) - mov r1, sp @ regs - bl do_PrefetchAbort @ call abort handler - /* fall through */ + .word fpundefinstr + .text + +fpundefinstr: + mov r0, sp + adr lr, ret_from_exception + b do_undefinstr + + .align 5 +__pabt_usr: + usr_entry abt + + enable_irq @ Enable interrupts + mov r0, r2 @ address (pc) + mov r1, sp @ regs + bl do_PrefetchAbort @ call abort handler + /* fall through */ /* * This is the return code to user mode for abort handlers */ ENTRY(ret_from_exception) - get_thread_info tsk - mov why, #0 - b ret_to_user + get_thread_info tsk + mov why, #0 + b ret_to_user /* * Register switch for ARMv3 and ARMv4 processors @@ -1399,212 +497,369 @@ ENTRY(ret_from_exception) * previous and next are guaranteed not to be the same. */ ENTRY(__switch_to) - add ip, r1, #TI_CPU_SAVE - ldr r3, [r2, #TI_CPU_DOMAIN]! - stmia ip!, {r4 - sl, fp, sp, lr} @ Store most regs on stack + add ip, r1, #TI_CPU_SAVE + ldr r3, [r2, #TI_TP_VALUE] + stmia ip!, {r4 - sl, fp, sp, lr} @ Store most regs on stack + ldr r6, [r2, #TI_CPU_DOMAIN]! #if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT) - mra r4, r5, acc0 - stmia ip, {r4, r5} + mra r4, r5, acc0 + stmia ip, {r4, r5} +#endif +#if defined(CONFIG_HAS_TLS_REG) + mcr p15, 0, r3, c13, c0, 3 @ set TLS register +#elif !defined(CONFIG_TLS_REG_EMUL) + mov r4, #0xffff0fff + str r3, [r4, #-15] @ TLS val at 0xffff0ff0 #endif - mcr p15, 0, r3, c3, c0, 0 @ Set domain register + mcr p15, 0, r6, c3, c0, 0 @ Set domain register #ifdef CONFIG_VFP - @ Always disable VFP so we can lazily save/restore the old - @ state. This occurs in the context of the previous thread. - VFPFMRX r4, FPEXC - bic r4, r4, #FPEXC_ENABLE - VFPFMXR FPEXC, r4 + @ Always disable VFP so we can lazily save/restore the old + @ state. This occurs in the context of the previous thread. + VFPFMRX r4, FPEXC + bic r4, r4, #FPEXC_ENABLE + VFPFMXR FPEXC, r4 #endif #if defined(CONFIG_IWMMXT) - bl iwmmxt_task_switch + bl iwmmxt_task_switch #elif defined(CONFIG_CPU_XSCALE) - add r4, r2, #40 @ cpu_context_save->extra - ldmib r4, {r4, r5} - mar acc0, r4, r5 + add r4, r2, #40 @ cpu_context_save->extra + ldmib r4, {r4, r5} + mar acc0, r4, r5 +#endif + ldmib r2, {r4 - sl, fp, sp, pc} @ Load all regs saved previously + + __INIT + +/* + * User helpers. + * + * These are segment of kernel provided user code reachable from user space + * at a fixed address in kernel memory. This is used to provide user space + * with some operations which require kernel help because of unimplemented + * native feature and/or instructions in many ARM CPUs. The idea is for + * this code to be executed directly in user mode for best efficiency but + * which is too intimate with the kernel counter part to be left to user + * libraries. In fact this code might even differ from one CPU to another + * depending on the available instruction set and restrictions like on + * SMP systems. In other words, the kernel reserves the right to change + * this code as needed without warning. Only the entry points and their + * results are guaranteed to be stable. + * + * Each segment is 32-byte aligned and will be moved to the top of the high + * vector page. New segments (if ever needed) must be added in front of + * existing ones. 
This mechanism should be used only for things that are
+ * really small and justified, and not be abused freely.
+ *
+ * User space is expected to implement those things inline when optimizing
+ * for a processor that has the necessary native support, but only if such
+ * resulting binaries are already going to be incompatible with earlier ARM
+ * processors due to the use of unsupported instructions other than what
+ * is provided here.  In other words, don't make binaries unable to run on
+ * earlier processors just for the sake of not using these kernel helpers
+ * if your compiled code is not going to use the new instructions for other
+ * purposes.
+ */
+
+	.align	5
+	.globl	__kuser_helper_start
+__kuser_helper_start:
+
+/*
+ * Reference prototype:
+ *
+ *	int __kernel_cmpxchg(int oldval, int newval, int *ptr)
+ *
+ * Input:
+ *
+ *	r0 = oldval
+ *	r1 = newval
+ *	r2 = ptr
+ *	lr = return address
+ *
+ * Output:
+ *
+ *	r0 = returned value (zero or non-zero)
+ *	C flag = set if r0 == 0, clear if r0 != 0
+ *
+ * Clobbered:
+ *
+ *	r3, ip, flags
+ *
+ * Definition and user space usage example:
+ *
+ *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
+ *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
+ *
+ * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
+ * Return zero if *ptr was changed or non-zero if no exchange happened.
+ * The C flag is also set if *ptr was changed to allow for assembly
+ * optimization in the calling code.
+ *
+ * For example, a user space atomic_add implementation could look like this:
+ *
+ * #define atomic_add(ptr, val) \
+ *	({ register unsigned int *__ptr asm("r2") = (ptr); \
+ *	   register unsigned int __result asm("r1"); \
+ *	   asm volatile ( \
+ *	       "1: @ atomic_add\n\t" \
+ *	       "ldr	r0, [r2]\n\t" \
+ *	       "mov	r3, #0xffff0fff\n\t" \
+ *	       "add	lr, pc, #4\n\t" \
+ *	       "add	r1, r0, %2\n\t" \
+ *	       "add	pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
+ *	       "bcc	1b" \
+ *	       : "=&r" (__result) \
+ *	       : "r" (__ptr), "rIL" (val) \
+ *	       : "r0","r3","ip","lr","cc","memory" ); \
+ *	   __result; })
+ */
+
+__kuser_cmpxchg:				@ 0xffff0fc0
+
+#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+
+	/*
+	 * Poor you.  No fast solution possible...
+	 * The kernel itself must perform the operation.
+	 * A special ghost syscall is used for that (see traps.c).
+	 */
+	swi	#0x9ffff0
+	mov	pc, lr
+
+#elif __LINUX_ARM_ARCH__ < 6
+
+	/*
+	 * Theory of operation:
+	 *
+	 * We set the Z flag before loading oldval.  If ever an exception
+	 * occurs we cannot be sure the loaded value will still be the same
+	 * when the exception returns, therefore the user exception handler
+	 * will clear the Z flag whenever the interrupted user code was
+	 * actually from the kernel address space (see the usr_entry macro).
+	 *
+	 * The post-increment on the str is used to prevent a race with an
+	 * exception happening just after the str instruction which would
+	 * clear the Z flag although the exchange was done.
+	 */
+	teq	ip, ip			@ set Z flag
+	ldr	ip, [r2]		@ load current val
+	add	r3, r2, #1		@ prepare store ptr
+	teqeq	ip, r0			@ compare with oldval if still allowed
+	streq	r1, [r3, #-1]!		@ store newval if still allowed
+	subs	r0, r2, r3		@ if r2 == r3 the str occurred
+	mov	pc, lr
+
+#else
+
+	ldrex	r3, [r2]
+	subs	r3, r3, r0
+	strexeq	r3, r1, [r2]
+	rsbs	r0, r3, #0
+	mov	pc, lr
+
+#endif
+
+	.align	5
+
+/*
+ * Reference prototype:
+ *
+ *	int __kernel_get_tls(void)
+ *
+ * Input:
+ *
+ *	lr = return address
+ *
+ * Output:
+ *
+ *	r0 = TLS value
+ *
+ * Clobbered:
+ *
+ *	the Z flag might be lost
+ *
+ * Definition and user space usage example:
+ *
+ *	typedef int (__kernel_get_tls_t)(void);
+ *	#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
+ *
+ * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
+ *
+ * This could be used as follows:
+ *
+ * #define __kernel_get_tls() \
+ *	({ register unsigned int __val asm("r0"); \
+ *	   asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
+ *	        : "=r" (__val) : : "lr","cc" ); \
+ *	   __val; })
+ */
+
+__kuser_get_tls:				@ 0xffff0fe0
+
+#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL)
+
+	ldr	r0, [pc, #(16 - 8)]	@ TLS stored at 0xffff0ff0
+	mov	pc, lr
+
+#else
+
+	mrc	p15, 0, r0, c13, c0, 3	@ read TLS register
+	mov	pc, lr
+
 #endif
-	ldmib	r2, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously
-	__INIT
+	.rep	5
+	.word	0				@ pad up to __kuser_helper_version
+	.endr
+
+/*
+ * Reference declaration:
+ *
+ *	extern unsigned int __kernel_helper_version;
+ *
+ * Definition and user space usage example:
+ *
+ *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
+ *
+ * User space may read this to determine the current number of helpers
+ * available.
+ */
+
+__kuser_helper_version:				@ 0xffff0ffc
+	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)
+
+	.globl	__kuser_helper_end
+__kuser_helper_end:
+
+
 /*
- * Vector stubs.  NOTE that we only align 'vector_IRQ' to a cache line boundary,
- * and we rely on each stub being exactly 48 (1.5 cache lines) in size.  This
- * means that we only ever load two cache lines for this code, or one if we're
- * lucky.  We also copy this code to 0x200 so that we can use branches in the
- * vectors, rather than ldr's.
+ * Vector stubs.
+ *
+ * This code is copied to 0xffff0200 so we can use branches in the
+ * vectors, rather than ldr's.  Note that this code must not
+ * exceed 0x300 bytes.
+ *
+ * Common stub entry macro:
+ *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
-	.align	5
+	.macro	vector_stub, name, sym, correction=0
+	.align	5
+
+vector_\name:
+	ldr	r13, .LCs\sym
+	.if \correction
+	sub	lr, lr, #\correction
+	.endif
+	str	lr, [r13]			@ save lr_IRQ
+	mrs	lr, spsr
+	str	lr, [r13, #4]			@ save spsr_IRQ
+	@
+	@ now branch to the relevant MODE handling routine
+	@
+	mrs	r13, cpsr
+	bic	r13, r13, #MODE_MASK
+	orr	r13, r13, #SVC_MODE
+	msr	spsr_cxsf, r13			@ switch to SVC_32 mode
+
+	and	lr, lr, #15
+	ldr	lr, [pc, lr, lsl #2]
+	movs	pc, lr				@ Changes mode and branches
+	.endm
+
+	.globl	__stubs_start
 __stubs_start:
 /*
  * Interrupt dispatcher
- *  Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
  */
-vector_IRQ:	@
-		@ save mode specific registers
-		@
-		ldr	r13, .LCsirq
-		sub	lr, lr, #4
-		str	lr, [r13]		@ save lr_IRQ
-		mrs	lr, spsr
-		str	lr, [r13, #4]		@ save spsr_IRQ
-		@
-		@ now branch to the relevant MODE handling routine
-		@
-		mrs	r13, cpsr
-		bic	r13, r13, #MODE_MASK
-		orr	r13, r13, #MODE_SVC
-		msr	spsr_cxsf, r13		@ switch to SVC_32 mode
-
-		and	lr, lr, #15
-		ldr	lr, [pc, lr, lsl #2]
-		movs	pc, lr			@ Changes mode and branches
-
-.LCtab_irq:	.word	__irq_usr		@  0  (USR_26 / USR_32)
-		.word	__irq_invalid		@  1  (FIQ_26 / FIQ_32)
-		.word	__irq_invalid		@  2  (IRQ_26 / IRQ_32)
-		.word	__irq_svc		@  3  (SVC_26 / SVC_32)
-		.word	__irq_invalid		@  4
-		.word	__irq_invalid		@  5
-		.word	__irq_invalid		@  6
-		.word	__irq_invalid		@  7
-		.word	__irq_invalid		@  8
-		.word	__irq_invalid		@  9
-		.word	__irq_invalid		@  a
-		.word	__irq_invalid		@  b
-		.word	__irq_invalid		@  c
-		.word	__irq_invalid		@  d
-		.word	__irq_invalid		@  e
-		.word	__irq_invalid		@  f
-
-	.align	5
+	vector_stub	irq, irq, 4
+
+	.long	__irq_usr			@  0  (USR_26 / USR_32)
+	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
+	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
+	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
+	.long	__irq_invalid			@  4
+	.long	__irq_invalid			@  5
+	.long	__irq_invalid			@  6
+	.long	__irq_invalid			@  7
+	.long	__irq_invalid			@  8
+	.long	__irq_invalid			@  9
+	.long	__irq_invalid			@  a
+	.long	__irq_invalid			@  b
+	.long	__irq_invalid			@  c
+	.long	__irq_invalid			@  d
+	.long	__irq_invalid			@  e
+	.long	__irq_invalid			@  f

 /*
- * Data abort dispatcher - dispatches it to the correct handler for the processor mode
+ * Data abort dispatcher
  *  Enter in ABT mode, spsr = USR CPSR, lr = USR PC
  */
-vector_data:	@
-		@ save mode specific registers
-		@
-		ldr	r13, .LCsabt
-		sub	lr, lr, #8
-		str	lr, [r13]
-		mrs	lr, spsr
-		str	lr, [r13, #4]
-		@
-		@ now branch to the relevant MODE handling routine
-		@
-		mrs	r13, cpsr
-		bic	r13, r13, #MODE_MASK
-		orr	r13, r13, #MODE_SVC
-		msr	spsr_cxsf, r13		@ switch to SVC_32 mode
-
-		and	lr, lr, #15
-		ldr	lr, [pc, lr, lsl #2]
-		movs	pc, lr			@ Changes mode and branches
-
-.LCtab_dabt:	.word	__dabt_usr		@  0  (USR_26 / USR_32)
-		.word	__dabt_invalid		@  1  (FIQ_26 / FIQ_32)
-		.word	__dabt_invalid		@  2  (IRQ_26 / IRQ_32)
-		.word	__dabt_svc		@  3  (SVC_26 / SVC_32)
-		.word	__dabt_invalid		@  4
-		.word	__dabt_invalid		@  5
-		.word	__dabt_invalid		@  6
-		.word	__dabt_invalid		@  7
-		.word	__dabt_invalid		@  8
-		.word	__dabt_invalid		@  9
-		.word	__dabt_invalid		@  a
-		.word	__dabt_invalid		@  b
-		.word	__dabt_invalid		@  c
-		.word	__dabt_invalid		@  d
-		.word	__dabt_invalid		@  e
-		.word	__dabt_invalid		@  f
-
-	.align	5
+	vector_stub	dabt, abt, 8
+
+	.long	__dabt_usr			@  0  (USR_26 / USR_32)
+	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
+	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
+	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
+	.long	__dabt_invalid			@  4
+	.long	__dabt_invalid			@  5
+	.long	__dabt_invalid			@  6
+	.long	__dabt_invalid			@  7
+	.long	__dabt_invalid			@  8
+	.long	__dabt_invalid			@  9
+	.long	__dabt_invalid			@  a
+	.long	__dabt_invalid			@  b
+	.long	__dabt_invalid			@  c
+	.long	__dabt_invalid			@  d
+	.long	__dabt_invalid			@  e
+	.long	__dabt_invalid			@  f

 /*
- * Prefetch abort dispatcher - dispatches it to the correct handler for the processor mode
+ * Prefetch abort dispatcher
  *  Enter in ABT mode, spsr = USR CPSR, lr = USR PC
  */
-vector_prefetch:
-		@
-		@ save mode specific registers
-		@
-		ldr	r13, .LCsabt
-		sub	lr, lr, #4
-		str	lr, [r13]		@ save lr_ABT
-		mrs	lr, spsr
-		str	lr, [r13, #4]		@ save spsr_ABT
-		@
-		@ now branch to the relevant MODE handling routine
-		@
-		mrs	r13, cpsr
-		bic	r13, r13, #MODE_MASK
-		orr	r13, r13, #MODE_SVC
-		msr	spsr_cxsf, r13		@ switch to SVC_32 mode
-
-		ands	lr, lr, #15
-		ldr	lr, [pc, lr, lsl #2]
-		movs	pc, lr
-
-.LCtab_pabt:	.word	__pabt_usr		@  0  (USR_26 / USR_32)
-		.word	__pabt_invalid		@  1  (FIQ_26 / FIQ_32)
-		.word	__pabt_invalid		@  2  (IRQ_26 / IRQ_32)
-		.word	__pabt_svc		@  3  (SVC_26 / SVC_32)
-		.word	__pabt_invalid		@  4
-		.word	__pabt_invalid		@  5
-		.word	__pabt_invalid		@  6
-		.word	__pabt_invalid		@  7
-		.word	__pabt_invalid		@  8
-		.word	__pabt_invalid		@  9
-		.word	__pabt_invalid		@  a
-		.word	__pabt_invalid		@  b
-		.word	__pabt_invalid		@  c
-		.word	__pabt_invalid		@  d
-		.word	__pabt_invalid		@  e
-		.word	__pabt_invalid		@  f
-
-	.align	5
+	vector_stub	pabt, abt, 4
+
+	.long	__pabt_usr			@  0  (USR_26 / USR_32)
+	.long	__pabt_invalid			@  1  (FIQ_26 / FIQ_32)
+	.long	__pabt_invalid			@  2  (IRQ_26 / IRQ_32)
+	.long	__pabt_svc			@  3  (SVC_26 / SVC_32)
+	.long	__pabt_invalid			@  4
+	.long	__pabt_invalid			@  5
+	.long	__pabt_invalid			@  6
+	.long	__pabt_invalid			@  7
+	.long	__pabt_invalid			@  8
+	.long	__pabt_invalid			@  9
+	.long	__pabt_invalid			@  a
+	.long	__pabt_invalid			@  b
+	.long	__pabt_invalid			@  c
+	.long	__pabt_invalid			@  d
+	.long	__pabt_invalid			@  e
+	.long	__pabt_invalid			@  f

 /*
- * Undef instr entry dispatcher - dispatches it to the correct handler for the processor mode
+ * Undef instr entry dispatcher
  *  Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
  */
-vector_undefinstr:
-		@
-		@ save mode specific registers
-		@
-		ldr	r13, .LCsund
-		str	lr, [r13]		@ save lr_UND
-		mrs	lr, spsr
-		str	lr, [r13, #4]		@ save spsr_UND
-		@
-		@ now branch to the relevant MODE handling routine
-		@
-		mrs	r13, cpsr
-		bic	r13, r13, #MODE_MASK
-		orr	r13, r13, #MODE_SVC
-		msr	spsr_cxsf, r13		@ switch to SVC_32 mode
-
-		and	lr, lr, #15
-		ldr	lr, [pc, lr, lsl #2]
-		movs	pc, lr			@ Changes mode and branches
-
-.LCtab_und:	.word	__und_usr		@  0  (USR_26 / USR_32)
-		.word	__und_invalid		@  1  (FIQ_26 / FIQ_32)
-		.word	__und_invalid		@  2  (IRQ_26 / IRQ_32)
-		.word	__und_svc		@  3  (SVC_26 / SVC_32)
-		.word	__und_invalid		@  4
-		.word	__und_invalid		@  5
-		.word	__und_invalid		@  6
-		.word	__und_invalid		@  7
-		.word	__und_invalid		@  8
-		.word	__und_invalid		@  9
-		.word	__und_invalid		@  a
-		.word	__und_invalid		@  b
-		.word	__und_invalid		@  c
-		.word	__und_invalid		@  d
-		.word	__und_invalid		@  e
-		.word	__und_invalid		@  f
-
-	.align	5
+	vector_stub	und, und
+
+	.long	__und_usr			@  0  (USR_26 / USR_32)
+	.long	__und_invalid			@  1  (FIQ_26 / FIQ_32)
+	.long	__und_invalid			@  2  (IRQ_26 / IRQ_32)
+	.long	__und_svc			@  3  (SVC_26 / SVC_32)
+	.long	__und_invalid			@  4
+	.long	__und_invalid			@  5
+	.long	__und_invalid			@  6
+	.long	__und_invalid			@  7
+	.long	__und_invalid			@  8
+	.long	__und_invalid			@  9
+	.long	__und_invalid			@  a
+	.long	__und_invalid			@  b
+	.long	__und_invalid			@  c
+	.long	__und_invalid			@  d
+	.long	__und_invalid			@  e
+	.long	__und_invalid			@  f
+
+	.align	5
 /*=============================================================================
 * Undefined FIQs
@@ -1616,8 +871,9 @@ vector_undefinstr:
 * other mode than FIQ... Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
-vector_FIQ:	disable_fiq
-		subs	pc, lr, #4
+vector_fiq:
+	disable_fiq
+	subs	pc, lr, #4

 /*=============================================================================
 * Address exception handler
@@ -1627,68 +883,65 @@
 */

 vector_addrexcptn:
-	b	vector_addrexcptn
+	b	vector_addrexcptn

 /*
  * We group all the following data together to optimise
  * for CPUs with separate I & D caches.
  */
-	.align	5
+	.align	5

-.LCvswi:	.word	vector_swi
+.LCvswi:
+	.word	vector_swi

-.LCsirq:	.word	__temp_irq
-.LCsund:	.word	__temp_und
-.LCsabt:	.word	__temp_abt
+.LCsirq:
+	.word	__temp_irq
+.LCsund:
+	.word	__temp_und
+.LCsabt:
+	.word	__temp_abt

+	.globl	__stubs_end
 __stubs_end:

-	.equ	__real_stubs_start, .LCvectors + 0x200
-
-.LCvectors:	swi	SYS_ERROR0
-	b	__real_stubs_start + (vector_undefinstr - __stubs_start)
-	ldr	pc, __real_stubs_start + (.LCvswi - __stubs_start)
-	b	__real_stubs_start + (vector_prefetch - __stubs_start)
-	b	__real_stubs_start + (vector_data - __stubs_start)
-	b	__real_stubs_start + (vector_addrexcptn - __stubs_start)
-	b	__real_stubs_start + (vector_IRQ - __stubs_start)
-	b	__real_stubs_start + (vector_FIQ - __stubs_start)
-
-ENTRY(__trap_init)
-	stmfd	sp!, {r4 - r6, lr}
+	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start

-	adr	r1, .LCvectors			@ set up the vectors
-	ldmia	r1, {r1, r2, r3, r4, r5, r6, ip, lr}
-	stmia	r0, {r1, r2, r3, r4, r5, r6, ip, lr}
+	.globl	__vectors_start
+__vectors_start:
+	swi	SYS_ERROR0
+	b	vector_und + stubs_offset
+	ldr	pc, .LCvswi + stubs_offset
+	b	vector_pabt + stubs_offset
+	b	vector_dabt + stubs_offset
+	b	vector_addrexcptn + stubs_offset
+	b	vector_irq + stubs_offset
+	b	vector_fiq + stubs_offset

-	add	r2, r0, #0x200
-	adr	r0, __stubs_start		@ copy stubs to 0x200
-	adr	r1, __stubs_end
-1:	ldr	r3, [r0], #4
-	str	r3, [r2], #4
-	cmp	r0, r1
-	blt	1b
-	LOADREGS(fd, sp!, {r4 - r6, pc})
+	.globl	__vectors_end
+__vectors_end:

-	.data
+	.data

 /*
  * Do not reorder these, and do not insert extra data between...
  */
-__temp_irq:	.word	0			@ saved lr_irq
-	.word	0				@ saved spsr_irq
-	.word	-1				@ old_r0
-__temp_und:	.word	0			@ Saved lr_und
-	.word	0				@ Saved spsr_und
-	.word	-1				@ old_r0
-__temp_abt:	.word	0			@ Saved lr_abt
-	.word	0				@ Saved spsr_abt
-	.word	-1				@ old_r0
+__temp_irq:
+	.word	0				@ saved lr_irq
+	.word	0				@ saved spsr_irq
+	.word	-1				@ old_r0
+__temp_und:
+	.word	0				@ Saved lr_und
+	.word	0				@ Saved spsr_und
+	.word	-1				@ old_r0
+__temp_abt:
+	.word	0				@ Saved lr_abt
+	.word	0				@ Saved spsr_abt
+	.word	-1				@ old_r0

-	.globl	cr_alignment
-	.globl	cr_no_alignment
+	.globl	cr_alignment
+	.globl	cr_no_alignment
 cr_alignment:
-	.space	4
+	.space	4
 cr_no_alignment:
-	.space	4
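
A note for readers of the machine-specific get_irqnr_and_base macros deleted at the top of this patch: each variant turns a masked interrupt-status word into an IRQ number, either with a shift-and-test loop on older cores (EBSA110, L7200, Integrator) or with the ARMv5 clz instruction (IOP3xx, IXP4xx, PXA). A rough C equivalent of the two idioms, assuming a 32-bit status word already masked against the enabled sources (the function names here are illustrative, not kernel symbols):

	#include <stdio.h>

	/* Shift-and-test demux: returns the lowest pending IRQ number, or
	 * 32 when nothing is pending, mirroring the
	 * "tst/addeq/moveq ... teq \irqnr, #32" loops above. */
	static int irqnr_lowest(unsigned int stat)
	{
		int irq = 0;

		while (irq < 32 && !(stat & 1)) {
			stat >>= 1;
			irq++;
		}
		return irq;		/* 32 means "no interrupt pending" */
	}

	/* clz demux: "clz \irqnr, \irqstat; rsbs \irqnr, \irqnr, #31"
	 * computes 31 - clz(stat), i.e. the highest pending IRQ. */
	static int irqnr_highest(unsigned int stat)
	{
		if (!stat)
			return -1;	/* nothing pending */
		return 31 - __builtin_clz(stat);
	}

	int main(void)
	{
		unsigned int stat = 0x00000412;	/* bits 1, 4 and 10 pending */

		printf("lowest %d, highest %d\n",
		       irqnr_lowest(stat), irqnr_highest(stat));
		return 0;
	}

Removing that per-machine duplication is what shrinks this file from 1282 to 383 lines: the demux macros move out to each machine's entry-macro.S, pulled in by the new includes at the top.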
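The svc_entry and usr_entry macros introduced above both build the frame that the C handlers (do_DataAbort, asm_do_IRQ, do_undefinstr, do_PrefetchAbort) receive as a struct pt_regs pointer. A minimal sketch of that layout, assuming the conventional ARM definition of 18 words (r0-r15, cpsr, orig_r0) from include/asm-arm/ptrace.h and the S_* offsets generated from it:

	/* Sketch of the frame the entry macros fill in; the authoritative
	 * definition is struct pt_regs in include/asm-arm/ptrace.h. */
	struct pt_regs {
		long uregs[18];
	};

	#define ARM_r0		uregs[0]	/* byte offset  0 (S_R0)  */
	#define ARM_sp		uregs[13]	/* byte offset 52 (S_SP)  */
	#define ARM_lr		uregs[14]	/* byte offset 56 (S_LR)  */
	#define ARM_pc		uregs[15]	/* byte offset 60 (S_PC)  */
	#define ARM_cpsr	uregs[16]	/* byte offset 64 (S_PSR) */
	#define ARM_ORIG_r0	uregs[17]	/* byte offset 68         */

	/* sizeof(struct pt_regs) == 72 == S_FRAME_SIZE, which is what
	 * "sub sp, sp, #S_FRAME_SIZE" reserves in one go. */

This is why the macros store r0-r12 in one burst (stmia sp, {r0 - r12}) and then fill the remaining sp/lr/pc/cpsr/orig_r0 "blanks" separately through the pointers at S_SP and S_PC.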
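The CONFIG_PREEMPT path of __irq_svc also changed contract: svc_preempt no longer open-codes an enable_irq/schedule/disable_irq loop, but zeroes the preempt count and calls the generic preempt_schedule_irq(), whose comment in the diff says "irq en/disable is done inside". Roughly what that generic helper does, sketched from the 2.6 scheduler rather than from this file:

	/* Sketch: entered with IRQs disabled and preempt_count() == 0,
	 * exactly the state svc_preempt sets up before the call. */
	asmlinkage void preempt_schedule_irq(void)
	{
	need_resched:
		add_preempt_count(PREEMPT_ACTIVE);	/* block recursion */
		local_irq_enable();
		schedule();
		local_irq_disable();
		sub_preempt_count(PREEMPT_ACTIVE);

		/* another wakeup may have raced with us */
		if (test_thread_flag(TIF_NEED_RESCHED))
			goto need_resched;
	}

The assembly still re-reads TI_FLAGS afterwards and loops back to 1b, so a rescheduling request that sneaks in between the call returning and preempt_return is not lost.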
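The __kuser_cmpxchg comment block above already gives an inline-assembly atomic_add; the same helper can also be reached from plain C through the documented function-pointer cast. A user-space sketch built only on that documented interface (atomic_add_return is our name for the wrapper, not a kernel export; ARM Linux only, of course):

	#include <stdio.h>

	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)

	/* Atomically add val to *ctr; returns the value we installed.
	 * A non-zero result from the helper means another thread changed
	 * *ctr between our load and the store, so we retry. */
	static int atomic_add_return(int val, int *ctr)
	{
		int old, new;

		do {
			old = *ctr;
			new = old + val;
		} while (__kernel_cmpxchg(old, new, ctr) != 0);

		return new;
	}

	int main(void)
	{
		int counter = 0;

		atomic_add_return(1, &counter);
		printf("counter = %d\n", counter);
		return 0;
	}

This works regardless of which of the three implementations above (ghost syscall, Z-flag dance, or ldrex/strex) the kernel selected, since only the entry point and result convention are part of the stable ABI.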
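__kernel_get_tls and __kuser_helper_version combine the same way. Each helper occupies one 32-byte slot and new ones are added in front (at lower addresses), so the version word tells user space which entry points exist: by the ">> 5" arithmetic above, get_tls corresponds to version 1 and cmpxchg to version 2. A sketch of the defensive usage:

	#include <stdio.h>

	typedef int (__kernel_get_tls_t)(void);
	#define __kernel_get_tls	(*(__kernel_get_tls_t *)0xffff0fe0)
	#define __kernel_helper_version	(*(unsigned int *)0xffff0ffc)

	int main(void)
	{
		/* Check the advertised helper count before jumping to a
		 * fixed address an older kernel may not provide. */
		if (__kernel_helper_version < 1) {
			fprintf(stderr, "no kuser helpers on this kernel\n");
			return 1;
		}
		printf("TLS value: %#x\n", (unsigned)__kernel_get_tls());
		return 0;
	}

Whether the value comes from the ARMv6 TLS register (mrc p15, 0, r0, c13, c0, 3) or from the word that __switch_to parks at 0xffff0ff0 is invisible to the caller; hiding that distinction is the point of routing the read through the helper.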
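Finally, the stubs_offset trick in the new __vectors_start block deserves a worked example. The vector page and the stubs are assembled at their link addresses but copied to 0xffff0000 and 0xffff0200 respectively, so each vector is assembled as a branch to "stub + stubs_offset"; because ARM branches are PC-relative, the displacement computed at link time is exactly right once both copies are in place. A small check of that identity, with made-up link addresses for illustration:

	#include <assert.h>

	int main(void)
	{
		/* hypothetical link-time addresses, illustration only */
		unsigned long vectors_start = 0xc0028000; /* __vectors_start */
		unsigned long stubs_start   = 0xc0028020; /* __stubs_start   */
		unsigned long vector_und    = stubs_start + 0x80; /* a stub  */

		unsigned long stubs_offset =
			vectors_start + 0x200 - stubs_start;

		/* displacement of "b vector_und + stubs_offset" assembled
		 * at the second vector slot (vectors_start + 4): */
		long linked = (long)(vector_und + stubs_offset)
			    - (long)(vectors_start + 4);

		/* displacement needed at run time: the branch executes at
		 * 0xffff0004 and the stub is copied to
		 * 0xffff0200 + (vector_und - stubs_start): */
		long runtime = (long)(0xffff0200UL + (vector_und - stubs_start))
			     - (long)0xffff0004UL;

		assert(linked == runtime);	/* branch lands on the copy */
		return 0;
	}

(The constant PC+8 pipeline bias in the real branch encoding appears on both sides and cancels, so it is omitted here.) This is what lets the patch delete __trap_init's run-time __real_stubs_start arithmetic from this file: the copying of the two regions now happens in the generic trap-initialization code instead.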