X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-parisc%2Fsystem.h;h=a5a973c0c07f55f272d4fccd7c6587f516afff84;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=318a3deaabc21f1729fb4bb112387b6d2418c028;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git

diff --git a/include/asm-parisc/system.h b/include/asm-parisc/system.h
index 318a3deaa..a5a973c0c 100644
--- a/include/asm-parisc/system.h
+++ b/include/asm-parisc/system.h
@@ -49,6 +49,15 @@ extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *
 	(last) = _switch_to(prev, next);			\
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
 
 
 /* interrupt control */
@@ -80,7 +89,7 @@ extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *
 #define mtctl(gr, cr) \
 	__asm__ __volatile__("mtctl %0,%1" \
 		: /* no outputs */ \
-		: "r" (gr), "i" (cr))
+		: "r" (gr), "i" (cr) : "memory")
 
 /* these are here to de-mystefy the calling code, and to provide hooks */
 /* which I needed for debugging EIEM problems -PB */
@@ -102,7 +111,7 @@ static inline void set_eiem(unsigned long val)
 #define mtsp(gr, cr) \
 	__asm__ __volatile__("mtsp %0,%1" \
 	: /* no outputs */ \
-	: "r" (gr), "i" (cr))
+	: "r" (gr), "i" (cr) : "memory")
 
 
 /*
@@ -125,7 +134,7 @@ static inline void set_eiem(unsigned long val)
 ** The __asm__ op below simple prevents gcc/ld from reordering
 ** instructions across the mb() "call".
 */
-#define mb()		__asm__ __volatile__("":::"memory");	/* barrier() */
+#define mb()		__asm__ __volatile__("":::"memory")	/* barrier() */
 #define rmb()	mb()
 #define wmb()	mb()
 #define smp_mb()	mb()
@@ -138,22 +147,49 @@ static inline void set_eiem(unsigned long val)
 
 #define set_wmb(var, value)		do { var = value; wmb(); } while (0)
 
+#ifndef CONFIG_PA20
+/* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
+   and GCC only guarantees 8-byte alignment for stack locals, we can't
+   be assured of 16-byte alignment for atomic lock data even if we
+   specify "__attribute ((aligned(16)))" in the type declaration.  So,
+   we use a struct containing an array of four ints for the atomic lock
+   type and dynamically select the 16-byte aligned int from the array
+   for the semaphore.  */
+
+#define __PA_LDCW_ALIGNMENT 16
+#define __ldcw_align(a) ({ \
+  unsigned long __ret = (unsigned long) &(a)->lock[0];			\
+  __ret = (__ret + __PA_LDCW_ALIGNMENT - 1) & ~(__PA_LDCW_ALIGNMENT - 1); \
+  (volatile unsigned int *) __ret;					\
+})
+#define LDCW	"ldcw"
+
+#else /*CONFIG_PA20*/
+/* From: "Jim Hull"
+   I've attached a summary of the change, but basically, for PA 2.0, as
+   long as the ",CO" (coherent operation) completer is specified, then the
+   16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
+   they only require "natural" alignment (4-byte for ldcw, 8-byte for
+   ldcd). */
+
+#define __PA_LDCW_ALIGNMENT 4
+#define __ldcw_align(a) ((volatile unsigned int *)a)
+#define LDCW	"ldcw,co"
+
+#endif /*!CONFIG_PA20*/
+
 /* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.  */
 #define __ldcw(a) ({ \
 	unsigned __ret; \
-	__asm__ __volatile__("ldcw 0(%1),%0" : "=r" (__ret) : "r" (a)); \
+	__asm__ __volatile__(LDCW " 0(%1),%0" : "=r" (__ret) : "r" (a)); \
 	__ret; \
 })
 
-
 #ifdef CONFIG_SMP
-/*
- * Your basic SMP spinlocks, allowing only a single CPU anywhere
- */
-
-typedef struct {
-	volatile unsigned int __attribute__((aligned(16))) lock;
-} spinlock_t;
+# define __lock_aligned __attribute__((__section__(".data.lock_aligned")))
 #endif
 
+#define KERNEL_START (0x10100000 - 0x1000)
+#define arch_align_stack(x) (x)
+
 #endif
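
Note on usage (not part of the patch above): ldcw atomically loads a word and writes zero back, so a lock word is "free" while it is non-zero and "taken" once some CPU's ldcw has observed the non-zero value and cleared it. The sketch below assumes a hypothetical example_lock_t type carrying a lock[4] array, so the !CONFIG_PA20 path can pick the 16-byte-aligned element via __ldcw_align(); the names example_lock_t, EXAMPLE_LOCK_UNLOCKED, example_lock and example_unlock are illustrative only and are not the kernel's spinlock API.

/* Hypothetical illustration only -- shows how __ldcw_align()/__ldcw()
 * from the patch are meant to be combined into a busy-wait lock. */
typedef struct {
	volatile unsigned int lock[4];	/* one element is 16-byte aligned */
} example_lock_t;

#define EXAMPLE_LOCK_UNLOCKED	{ { 1, 1, 1, 1 } }	/* non-zero == free */

static inline void example_lock(example_lock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);

	/* __ldcw() returning 0 means the lock was already held: spin on
	 * plain reads until the word goes non-zero again, then retry the
	 * atomic load-and-clear. */
	while (__ldcw(a) == 0)
		while (*a == 0)
			;
}

static inline void example_unlock(example_lock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);

	mb();		/* order critical-section accesses before the release */
	*a = 1;		/* any non-zero store frees the lock */
}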