X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-parisc%2Fsystem.h;h=a5a973c0c07f55f272d4fccd7c6587f516afff84;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=e2b84285da9386054b87075c52835af934d3cd69;hpb=9bf4aaab3e101692164d49b7ca357651eb691cb6;p=linux-2.6.git

diff --git a/include/asm-parisc/system.h b/include/asm-parisc/system.h
index e2b84285d..a5a973c0c 100644
--- a/include/asm-parisc/system.h
+++ b/include/asm-parisc/system.h
@@ -49,6 +49,15 @@ extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *
 	(last) = _switch_to(prev, next);	\
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
 
 /* interrupt control */
 
@@ -80,7 +89,7 @@ extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *
 #define mtctl(gr, cr) \
 	__asm__ __volatile__("mtctl %0,%1" \
 		: /* no outputs */ \
-		: "r" (gr), "i" (cr))
+		: "r" (gr), "i" (cr) : "memory")
 
 /* these are here to de-mystefy the calling code, and to provide hooks */
 /* which I needed for debugging EIEM problems -PB */
@@ -102,7 +111,7 @@ static inline void set_eiem(unsigned long val)
 #define mtsp(gr, cr) \
 	__asm__ __volatile__("mtsp %0,%1" \
 		: /* no outputs */ \
-		: "r" (gr), "i" (cr))
+		: "r" (gr), "i" (cr) : "memory")
 
 
 /*
@@ -125,7 +134,7 @@ static inline void set_eiem(unsigned long val)
 ** The __asm__ op below simple prevents gcc/ld from reordering
 ** instructions across the mb() "call".
 */
-#define mb()	__asm__ __volatile__("":::"memory");	/* barrier() */
+#define mb()	__asm__ __volatile__("":::"memory")	/* barrier() */
 #define rmb()	mb()
 #define wmb()	mb()
 #define smp_mb()	mb()
@@ -138,13 +147,7 @@ static inline void set_eiem(unsigned long val)
 
 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
 
-/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
-#define __ldcw(a) ({ \
-	unsigned __ret; \
-	__asm__ __volatile__("ldcw 0(%1),%0" : "=r" (__ret) : "r" (a)); \
-	__ret; \
-})
-
+#ifndef CONFIG_PA20
 /* Because kmalloc only guarantees 8-byte alignment for kmalloc'd
    data, and GCC only guarantees 8-byte alignment for stack locals, we can't
    be assured of 16-byte alignment for atomic lock data even if we
@@ -152,36 +155,41 @@ static inline void set_eiem(unsigned long val)
    we use a struct containing an array of four ints for the atomic lock
    type and dynamically select the 16-byte aligned int from the array
    for the semaphore. */
+
 #define __PA_LDCW_ALIGNMENT 16
 #define __ldcw_align(a) ({ \
-	unsigned long __ret = (unsigned long) a; \
+	unsigned long __ret = (unsigned long) &(a)->lock[0]; \
 	__ret = (__ret + __PA_LDCW_ALIGNMENT - 1) & ~(__PA_LDCW_ALIGNMENT - 1); \
 	(volatile unsigned int *) __ret; \
 })
+#define LDCW	"ldcw"
 
-#ifdef CONFIG_SMP
-/*
- * Your basic SMP spinlocks, allowing only a single CPU anywhere
- */
+#else /*CONFIG_PA20*/
+/* From: "Jim Hull"
+   I've attached a summary of the change, but basically, for PA 2.0, as
+   long as the ",CO" (coherent operation) completer is specified, then the
+   16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
+   they only require "natural" alignment (4-byte for ldcw, 8-byte for
+   ldcd). */
 
-typedef struct {
-	volatile unsigned int lock[4];
-#ifdef CONFIG_DEBUG_SPINLOCK
-	unsigned long magic;
-	volatile unsigned int babble;
-	const char *module;
-	char *bfile;
-	int bline;
-	int oncpu;
-	void *previous;
-	struct task_struct * task;
-#endif
-} spinlock_t;
+#define __PA_LDCW_ALIGNMENT 4
+#define __ldcw_align(a) ((volatile unsigned int *)a)
+#define LDCW	"ldcw,co"
 
-#define __lock_aligned __attribute__((__section__(".data.lock_aligned")))
+#endif /*!CONFIG_PA20*/
 
+/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
+#define __ldcw(a) ({ \
+	unsigned __ret; \
+	__asm__ __volatile__(LDCW " 0(%1),%0" : "=r" (__ret) : "r" (a)); \
+	__ret; \
+})
+
+#ifdef CONFIG_SMP
+# define __lock_aligned __attribute__((__section__(".data.lock_aligned")))
 #endif
 
 #define KERNEL_START (0x10100000 - 0x1000)
+#define arch_align_stack(x) (x)
 
 #endif
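
A note on the "memory" clobbers this patch adds to mtctl and mtsp: in GCC inline assembly, a "memory" clobber tells the compiler that the statement may read or write memory, so register-cached values are written back before the asm and no load or store is moved across it; the asm then also acts as a compiler barrier, in the same way the mb() define in this header does. The fragment below is only an illustrative sketch of that difference; the function names, the variable, and the control-register number are invented for the example and are not part of the patch.

/* Illustrative sketch only -- hypothetical names, not code from the patch. */
static int shared_flag;

/* Without a "memory" clobber GCC may keep shared_flag in a register and
 * may reorder the surrounding memory accesses around the asm. */
static inline void cr_write_unordered(unsigned long val)
{
	__asm__ __volatile__("mtctl %0,%1"
			     : /* no outputs */
			     : "r" (val), "i" (30));
}

/* With the clobber the asm is also a compiler barrier, which is
 * presumably why the patch adds it to mtctl and mtsp. */
static inline void cr_write_ordered(unsigned long val)
{
	__asm__ __volatile__("mtctl %0,%1"
			     : /* no outputs */
			     : "r" (val), "i" (30)
			     : "memory");
}

void clobber_example(void)
{
	shared_flag = 1;	/* the compiler must emit this store ... */
	cr_write_ordered(0);	/* ... before this control-register write */
}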
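
For readers new to the __ldcw_align()/__ldcw() pair this patch reworks: LDCW loads a word and atomically stores zero into it, so by convention a lock word is non-zero while free and zero while held, and __ldcw_align() exists only to pick a word with the alignment LDCW needs (16-byte on older parts, natural alignment with the ",co" completer on PA 2.0). The sketch below shows how an acquire/release pair could be built on top of the two macros. It is a hand-written illustration under those assumptions: the example_lock_t type and example_* functions are invented names, and the kernel's real spinlock implementation lives in its spinlock headers, not in this file.

/* Illustrative sketch -- hypothetical names, assuming the __ldcw_align()
 * and __ldcw() definitions from the patch above. */
typedef struct {
	/* Four words so that at least one of them falls on a 16-byte
	 * boundary, as pre-PA2.0 ldcw requires. */
	volatile unsigned int lock[4];
} example_lock_t;

static inline void example_lock_init(example_lock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);

	*a = 1;				/* non-zero means "free" */
}

static inline void example_lock(example_lock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);

	/* __ldcw() returns the old value and leaves zero behind, so a
	 * zero return means another CPU already holds the lock. */
	while (__ldcw(a) == 0)
		while (*a == 0)		/* spin on plain loads until it looks free */
			;
}

static inline void example_unlock(example_lock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);

	mb();				/* order the critical section's stores first */
	*a = 1;				/* mark the lock free again */
}

Note how the same caller works in both configurations: on PA 1.x, __ldcw_align() rounds &x->lock[0] up to the next 16-byte boundary, while on PA 2.0 it is just a cast and the ",co" completer in LDCW relaxes the alignment requirement.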