vserver 1.9.5.x5
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 828faad..81f4a8a 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -3,6 +3,9 @@
  *
  *  Copyright (C) 1995  Linus Torvalds
  *  Modifications for ARM processor (c) 1995-2001 Russell King
+ *  Thumb alignment fault fixups (c) 2004 MontaVista Software, Inc.
+ *  - Adapted from gdb/sim/arm/thumbemu.c -- Thumb instruction emulation.
+ *    Copyright (C) 1996, Cygnus Software Technologies Ltd.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -130,6 +133,18 @@ union offset_union {
 #define TYPE_LDST      2
 #define TYPE_DONE      3
 
+#ifdef __ARMEB__
+#define BE             1
+#define FIRST_BYTE_16  "mov    %1, %1, ror #8\n"
+#define FIRST_BYTE_32  "mov    %1, %1, ror #24\n"
+#define NEXT_BYTE      "ror #24"
+#else
+#define BE             0
+#define FIRST_BYTE_16
+#define FIRST_BYTE_32
+#define NEXT_BYTE      "lsr #8"
+#endif
+
 #define __get8_unaligned_check(ins,val,addr,err)       \
        __asm__(                                        \
        "1:     "ins"   %1, [%2], #1\n"                 \
@@ -149,9 +164,10 @@ union offset_union {
 #define __get16_unaligned_check(ins,val,addr)                  \
        do {                                                    \
                unsigned int err = 0, v, a = addr;              \
-               __get8_unaligned_check(ins,val,a,err);          \
                __get8_unaligned_check(ins,v,a,err);            \
-               val |= v << 8;                                  \
+               val =  v << ((BE) ? 8 : 0);                     \
+               __get8_unaligned_check(ins,v,a,err);            \
+               val |= v << ((BE) ? 0 : 8);                     \
                if (err)                                        \
                        goto fault;                             \
        } while (0)
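
Since BE is a compile-time constant, the `(BE) ? ...` selectors above (and in the 32-bit variant that follows) collapse to fixed shift amounts: the byte at the lowest address ends up in the most significant position on big-endian and in the least significant position on little-endian. A minimal user-space sketch of that assembly order (illustrative only; the real macro does the byte loads via its `ins` parameter with exception-table fixups):

/* Model of the byte order assembled by the patched 16-bit get macro.
 * be=1 mirrors the __ARMEB__ path, be=0 the little-endian default. */
#include <stdint.h>
#include <stdio.h>

static unsigned int get16_model(const uint8_t *p, int be)
{
	unsigned int val;

	val  = (unsigned int)p[0] << (be ? 8 : 0);	/* first (lowest-addressed) byte */
	val |= (unsigned int)p[1] << (be ? 0 : 8);	/* second byte */
	return val;
}

int main(void)
{
	const uint8_t bytes[2] = { 0x12, 0x34 };

	printf("LE: 0x%04x\n", get16_model(bytes, 0));	/* 0x3412 */
	printf("BE: 0x%04x\n", get16_model(bytes, 1));	/* 0x1234 */
	return 0;
}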
@@ -165,13 +181,14 @@ union offset_union {
 #define __get32_unaligned_check(ins,val,addr)                  \
        do {                                                    \
                unsigned int err = 0, v, a = addr;              \
-               __get8_unaligned_check(ins,val,a,err);          \
                __get8_unaligned_check(ins,v,a,err);            \
-               val |= v << 8;                                  \
+               val =  v << ((BE) ? 24 :  0);                   \
                __get8_unaligned_check(ins,v,a,err);            \
-               val |= v << 16;                                 \
+               val |= v << ((BE) ? 16 :  8);                   \
                __get8_unaligned_check(ins,v,a,err);            \
-               val |= v << 24;                                 \
+               val |= v << ((BE) ?  8 : 16);                   \
+               __get8_unaligned_check(ins,v,a,err);            \
+               val |= v << ((BE) ?  0 : 24);                   \
                if (err)                                        \
                        goto fault;                             \
        } while (0)
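
The put macros updated in the next two hunks mirror this on the store side: on big-endian, FIRST_BYTE_32 rotates the most significant byte into the low byte before the first store, and each NEXT_BYTE `ror #24` brings down the next one, so bytes are written most-significant first; the little-endian path keeps the original `lsr #8` and writes least-significant first. A rough C model of that store order (illustrative only, not the kernel asm):

/* Model of the byte order produced by the patched 32-bit put macro:
 * be=1 mirrors the __ARMEB__ path (ror #24 before every byte store),
 * be=0 the little-endian default (store, then lsr #8). */
#include <stdint.h>
#include <stdio.h>

static void put32_model(uint32_t val, uint8_t out[4], int be)
{
	for (int i = 0; i < 4; i++) {
		if (be) {
			val = (val >> 24) | (val << 8);	/* ror #24 == rotate left by 8 */
			out[i] = (uint8_t)val;		/* strb of the rotated low byte */
		} else {
			out[i] = (uint8_t)val;		/* strb of the low byte */
			val >>= 8;			/* lsr #8 */
		}
	}
}

int main(void)
{
	uint8_t le[4], be[4];

	put32_model(0x11223344, le, 0);
	put32_model(0x11223344, be, 1);
	printf("LE: %02x %02x %02x %02x\n", le[0], le[1], le[2], le[3]);	/* 44 33 22 11 */
	printf("BE: %02x %02x %02x %02x\n", be[0], be[1], be[2], be[3]);	/* 11 22 33 44 */
	return 0;
}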
@@ -185,9 +202,9 @@ union offset_union {
 #define __put16_unaligned_check(ins,val,addr)                  \
        do {                                                    \
                unsigned int err = 0, v = val, a = addr;        \
-               __asm__(                                        \
+               __asm__( FIRST_BYTE_16                          \
                "1:     "ins"   %1, [%2], #1\n"                 \
-               "       mov     %1, %1, lsr #8\n"               \
+               "       mov     %1, %1, "NEXT_BYTE"\n"          \
                "2:     "ins"   %1, [%2]\n"                     \
                "3:\n"                                          \
                "       .section .fixup,\"ax\"\n"               \
@@ -215,13 +232,13 @@ union offset_union {
 #define __put32_unaligned_check(ins,val,addr)                  \
        do {                                                    \
                unsigned int err = 0, v = val, a = addr;        \
-               __asm__(                                        \
+               __asm__( FIRST_BYTE_32                          \
                "1:     "ins"   %1, [%2], #1\n"                 \
-               "       mov     %1, %1, lsr #8\n"               \
+               "       mov     %1, %1, "NEXT_BYTE"\n"          \
                "2:     "ins"   %1, [%2], #1\n"                 \
-               "       mov     %1, %1, lsr #8\n"               \
+               "       mov     %1, %1, "NEXT_BYTE"\n"          \
                "3:     "ins"   %1, [%2], #1\n"                 \
-               "       mov     %1, %1, lsr #8\n"               \
+               "       mov     %1, %1, "NEXT_BYTE"\n"          \
                "4:     "ins"   %1, [%2]\n"                     \
                "5:\n"                                          \
                "       .section .fixup,\"ax\"\n"               \
@@ -449,16 +466,146 @@ bad:
        return TYPE_ERROR;
 }
 
+/*
+ * Convert Thumb ld/st instruction forms to equivalent ARM instructions so
+ * we can reuse ARM userland alignment fault fixups for Thumb.
+ *
+ * This implementation was initially based on the algorithm found in
+ * gdb/sim/arm/thumbemu.c. It is basically just a code reduction of same
+ * to convert only Thumb ld/st instruction forms to equivalent ARM forms.
+ *
+ * NOTES:
+ * 1. Comments below refer to ARM ARM DDI0100E Thumb Instruction sections.
+ * 2. If for some reason we're passed a non-ld/st Thumb instruction to
+ *    decode, we return 0xdeadc0de. This should never happen under normal
+ *    circumstances but if it does, we've got other problems to deal with
+ *    elsewhere and we obviously can't fix those problems here.
+ */
+
+static unsigned long
+thumb2arm(u16 tinstr)
+{
+       u32 L = (tinstr & (1<<11)) >> 11;
+
+       switch ((tinstr & 0xf800) >> 11) {
+       /* 6.5.1 Format 1: */
+       case 0x6000 >> 11:                              /* 7.1.52 STR(1) */
+       case 0x6800 >> 11:                              /* 7.1.26 LDR(1) */
+       case 0x7000 >> 11:                              /* 7.1.55 STRB(1) */
+       case 0x7800 >> 11:                              /* 7.1.30 LDRB(1) */
+               return 0xe5800000 |
+                       ((tinstr & (1<<12)) << (22-12)) |       /* fixup */
+                       (L<<20) |                               /* L==1? */
+                       ((tinstr & (7<<0)) << (12-0)) |         /* Rd */
+                       ((tinstr & (7<<3)) << (16-3)) |         /* Rn */
+                       ((tinstr & (31<<6)) >>                  /* immed_5 */
+                               (6 - ((tinstr & (1<<12)) ? 0 : 2)));
+       case 0x8000 >> 11:                              /* 7.1.57 STRH(1) */
+       case 0x8800 >> 11:                              /* 7.1.32 LDRH(1) */
+               return 0xe1c000b0 |
+                       (L<<20) |                               /* L==1? */
+                       ((tinstr & (7<<0)) << (12-0)) |         /* Rd */
+                       ((tinstr & (7<<3)) << (16-3)) |         /* Rn */
+                       ((tinstr & (7<<6)) >> (6-1)) |   /* immed_5[2:0] */
+                       ((tinstr & (3<<9)) >> (9-8));    /* immed_5[4:3] */
+
+       /* 6.5.1 Format 2: */
+       case 0x5000 >> 11:
+       case 0x5800 >> 11:
+               {
+                       static const u32 subset[8] = {
+                               0xe7800000,             /* 7.1.53 STR(2) */
+                               0xe18000b0,             /* 7.1.58 STRH(2) */
+                               0xe7c00000,             /* 7.1.56 STRB(2) */
+                               0xe19000d0,             /* 7.1.34 LDRSB */
+                               0xe7900000,             /* 7.1.27 LDR(2) */
+                               0xe19000b0,             /* 7.1.33 LDRH(2) */
+                               0xe7d00000,             /* 7.1.31 LDRB(2) */
+                               0xe19000f0              /* 7.1.35 LDRSH */
+                       };
+                       return subset[(tinstr & (7<<9)) >> 9] |
+                           ((tinstr & (7<<0)) << (12-0)) |     /* Rd */
+                           ((tinstr & (7<<3)) << (16-3)) |     /* Rn */
+                           ((tinstr & (7<<6)) >> (6-0));       /* Rm */
+               }
+
+       /* 6.5.1 Format 3: */
+       case 0x4800 >> 11:                              /* 7.1.28 LDR(3) */
+               /* NOTE: This case is not technically possible. We're
+                *       loading 32-bit memory data via PC relative
+                *       addressing mode. So we can and should eliminate
+                *       this case. But I'll leave it here for now.
+                */
+               return 0xe59f0000 |
+                   ((tinstr & (7<<8)) << (12-8)) |             /* Rd */
+                   ((tinstr & 255) << (2-0));                  /* immed_8 */
+
+       /* 6.5.1 Format 4: */
+       case 0x9000 >> 11:                              /* 7.1.54 STR(3) */
+       case 0x9800 >> 11:                              /* 7.1.29 LDR(4) */
+               return 0xe58d0000 |
+                       (L<<20) |                               /* L==1? */
+                       ((tinstr & (7<<8)) << (12-8)) |         /* Rd */
+                       ((tinstr & 255) << 2);                  /* immed_8 */
+
+       /* 6.6.1 Format 1: */
+       case 0xc000 >> 11:                              /* 7.1.51 STMIA */
+       case 0xc800 >> 11:                              /* 7.1.25 LDMIA */
+               {
+                       u32 Rn = (tinstr & (7<<8)) >> 8;
+                       u32 W = ((L<<Rn) & (tinstr&255)) ? 0 : 1<<21;
+
+                       return 0xe8800000 | W | (L<<20) | (Rn<<16) |
+                               (tinstr&255);
+               }
+
+       /* 6.6.1 Format 2: */
+       case 0xb000 >> 11:                              /* 7.1.48 PUSH */
+       case 0xb800 >> 11:                              /* 7.1.47 POP */
+               if ((tinstr & (3 << 9)) == 0x0400) {
+                       static const u32 subset[4] = {
+                               0xe92d0000,     /* STMDB sp!,{registers} */
+                               0xe92d4000,     /* STMDB sp!,{registers,lr} */
+                               0xe8bd0000,     /* LDMIA sp!,{registers} */
+                               0xe8bd8000      /* LDMIA sp!,{registers,pc} */
+                       };
+                       return subset[(L<<1) | ((tinstr & (1<<8)) >> 8)] |
+                           (tinstr & 255);             /* register_list */
+               }
+               /* Else fall through for illegal instruction case */
+
+       default:
+               return 0xdeadc0de;
+       }
+}
+
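
To make the Format 1 field shuffling concrete: Thumb `ldr r3, [r2, #4]` is LDR(1) with imm5=1, Rn=r2, Rd=r3, i.e. 0x6853, and the case above maps it to 0xe5923004, the ARM `ldr r3, [r2, #4]`. A hypothetical standalone check (plain user-space C, not part of the patch) applying the same shifts:

/* Worked example of the LDR(1)/STR(1) mapping, using the same field
 * moves as the Format 1 case in thumb2arm() above. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t tinstr = 0x6853;			/* Thumb ldr r3, [r2, #4] */
	uint32_t L = (tinstr >> 11) & 1;		/* load/store bit */
	uint32_t arm = 0xe5800000
		| ((tinstr & (1 << 12)) << (22 - 12))	/* B bit for the byte forms */
		| (L << 20)				/* L bit */
		| ((tinstr & (7 << 0)) << (12 - 0))	/* Rd -> bits 15:12 */
		| ((tinstr & (7 << 3)) << (16 - 3))	/* Rn -> bits 19:16 */
		| ((tinstr & (31 << 6)) >>		/* immed_5, scaled by 4 for word forms */
			(6 - ((tinstr & (1 << 12)) ? 0 : 2)));

	assert(arm == 0xe5923004);			/* ARM ldr r3, [r2, #4] */
	printf("thumb 0x%04x -> arm 0x%08x\n", tinstr, arm);
	return 0;
}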
 static int
 do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
        union offset_union offset;
-       unsigned long instr, instrptr;
+       unsigned long instr = 0, instrptr;
        int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs);
        unsigned int type;
+       mm_segment_t fs;
+       unsigned int fault;
+       u16 tinstr = 0;
 
        instrptr = instruction_pointer(regs);
-       instr = *(unsigned long *)instrptr;
+
+       fs = get_fs();
+       set_fs(KERNEL_DS);
+       if (thumb_mode(regs)) {
+               fault = __get_user(tinstr, (u16 *)(instrptr & ~1));
+               if (!(fault))
+                       instr = thumb2arm(tinstr);
+       } else
+               fault = __get_user(instr, (u32 *)instrptr);
+       set_fs(fs);
+
+       if (fault) {
+               type = TYPE_FAULT;
+               goto bad_or_fault;
+       }
 
        if (user_mode(regs))
                goto user;
@@ -467,7 +614,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 
  fixup:
 
-       regs->ARM_pc += 4;
+       regs->ARM_pc += thumb_mode(regs) ? 2 : 4;
 
        switch (CODING_BITS(instr)) {
        case 0x00000000:        /* ldrh or strh */
@@ -537,7 +684,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
  bad_or_fault:
        if (type == TYPE_ERROR)
                goto bad;
-       regs->ARM_pc -= 4;
+       regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
        /*
         * We got a fault - fix it up, or die.
         */
@@ -549,7 +696,9 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
         * Oops, we didn't handle the instruction.
         */
        printk(KERN_ERR "Alignment trap: not handling instruction "
-               "%08lx at [<%08lx>]\n", instr, instrptr);
+               "%0*lx at [<%08lx>]\n",
+               thumb_mode(regs) ? 4 : 8,
+               thumb_mode(regs) ? tinstr : instr, instrptr);
        ai_skipped += 1;
        return 1;
 
@@ -557,9 +706,12 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
        ai_user += 1;
 
        if (ai_usermode & 1)
-               printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%08lx "
+               printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*lx "
                       "Address=0x%08lx FSR 0x%03x\n", current->comm,
-                       current->pid, instrptr, instr, addr, fsr);
+                       current->pid, instrptr,
+                       thumb_mode(regs) ? 4 : 8,
+                       thumb_mode(regs) ? tinstr : instr,
+                       addr, fsr);
 
        if (ai_usermode & 2)
                goto fixup;
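
One way to exercise the new Thumb path end to end might be a small Thumb user program doing a deliberately misaligned load while user fixups are enabled (the `ai_usermode & 2` branch above, via the usual `/proc/cpu/alignment` control). A hypothetical sketch, assuming an ARM cross compiler invoked as something like `arm-linux-gcc -mthumb`:

/* Hypothetical test, not part of the patch: the misaligned load below
 * should raise an alignment fault on a core without unaligned-access
 * support and be fixed up transparently by do_alignment(). */
#include <stdio.h>

int main(void)
{
	static char buf[8] __attribute__((aligned(4))) =
		{ 0x44, 0x33, 0x22, 0x11, 0x00 };
	volatile unsigned int *p = (volatile unsigned int *)(buf + 1);	/* odd address */
	unsigned int v = *p;			/* unaligned ldr */

	printf("unaligned read: 0x%08x\n", v);	/* expect 0x00112233 (little-endian) */
	return 0;
}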