X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-parisc%2Fsystem.h;h=74f037a39e6f032ce6a76ae974d521f6357d1f6e;hb=353442f4fa3f17a2e386191ca2af8705408fcc60;hp=d73c20d42af96aee9c87434a40595b88c8f97ede;hpb=bc77d24c47b89f1e0efed0b8e4be5f8aad102883;p=linux-2.6.git

diff --git a/include/asm-parisc/system.h b/include/asm-parisc/system.h
index d73c20d42..74f037a39 100644
--- a/include/asm-parisc/system.h
+++ b/include/asm-parisc/system.h
@@ -1,7 +1,6 @@
 #ifndef __PARISC_SYSTEM_H
 #define __PARISC_SYSTEM_H
 
-#include <linux/config.h>
 #include <asm/psw.h>
 
 /* The program status word as bitfields.  */
@@ -49,6 +48,15 @@ extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *
 	(last) = _switch_to(prev, next);			\
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
 
 /* interrupt control */
 
@@ -80,7 +88,7 @@ extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *
 #define mtctl(gr, cr) \
 	__asm__ __volatile__("mtctl %0,%1" \
 		: /* no outputs */ \
-		: "r" (gr), "i" (cr))
+		: "r" (gr), "i" (cr) : "memory")
 
 /* these are here to de-mystefy the calling code, and to provide hooks */
 /* which I needed for debugging EIEM problems -PB */
@@ -102,7 +110,7 @@ static inline void set_eiem(unsigned long val)
 #define mtsp(gr, cr) \
 	__asm__ __volatile__("mtsp %0,%1" \
 		: /* no outputs */ \
-		: "r" (gr), "i" (cr))
+		: "r" (gr), "i" (cr) : "memory")
 
 
 /*
@@ -125,7 +133,7 @@ static inline void set_eiem(unsigned long val)
 ** The __asm__ op below simple prevents gcc/ld from reordering
 ** instructions across the mb() "call".
 */
-#define mb()		__asm__ __volatile__("":::"memory");	/* barrier() */
+#define mb()		__asm__ __volatile__("":::"memory")	/* barrier() */
 #define rmb()	mb()
 #define wmb()	mb()
 #define smp_mb()	mb()
@@ -135,16 +143,8 @@ static inline void set_eiem(unsigned long val)
 #define read_barrier_depends()		do { } while(0)
 
 #define set_mb(var, value)		do { var = value; mb(); } while (0)
-#define set_wmb(var, value)		do { var = value; wmb(); } while (0)
-
-
-/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
-#define __ldcw(a) ({ \
-	unsigned __ret; \
-	__asm__ __volatile__("ldcw 0(%1),%0" : "=r" (__ret) : "r" (a)); \
-	__ret; \
-})
 
+#ifndef CONFIG_PA20
 /* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
    and GCC only guarantees 8-byte alignment for stack locals, we can't
    be assured of 16-byte alignment for atomic lock data even if we
@@ -152,26 +152,43 @@ static inline void set_eiem(unsigned long val)
    we use a struct containing an array of four ints for the atomic lock
    type and dynamically select the 16-byte aligned int from the array
    for the semaphore.  */
-#define __PA_LDCW_ALIGNMENT 16
-#define __ldcw_align(a) ({ \
-	unsigned long __ret = (unsigned long) a; \
-	__ret = (__ret + __PA_LDCW_ALIGNMENT - 1) & ~(__PA_LDCW_ALIGNMENT - 1); \
-	(volatile unsigned int *) __ret; \
+
+#define __PA_LDCW_ALIGNMENT	16
+#define __ldcw_align(a) ({					\
+	unsigned long __ret = (unsigned long) &(a)->lock[0];	\
+	__ret = (__ret + __PA_LDCW_ALIGNMENT - 1)		\
+		& ~(__PA_LDCW_ALIGNMENT - 1);			\
+	(volatile unsigned int *) __ret;			\
 })
+#define __LDCW	"ldcw"
 
-#ifdef CONFIG_SMP
-/*
- * Your basic SMP spinlocks, allowing only a single CPU anywhere
- */
+#else /*CONFIG_PA20*/
+/* From: "Jim Hull"
+   I've attached a summary of the change, but basically, for PA 2.0, as
+   long as the ",CO" (coherent operation) completer is specified, then the
+   16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
+   they only require "natural" alignment (4-byte for ldcw, 8-byte for
+   ldcd). */
 
-typedef struct {
-	volatile unsigned int lock[4];
-} spinlock_t;
+#define __PA_LDCW_ALIGNMENT	4
+#define __ldcw_align(a) ((volatile unsigned int *)a)
+#define __LDCW	"ldcw,co"
 
-#define __lock_aligned __attribute__((__section__(".data.lock_aligned")))
+#endif /*!CONFIG_PA20*/
 
+/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
+#define __ldcw(a) ({ \
+	unsigned __ret; \
+	__asm__ __volatile__(__LDCW " 0(%1),%0" \
+		: "=r" (__ret) : "r" (a)); \
+	__ret; \
+})
+
+#ifdef CONFIG_SMP
+# define __lock_aligned __attribute__((__section__(".data.lock_aligned")))
#endif
 
 #define KERNEL_START (0x10100000 - 0x1000)
 
+#define arch_align_stack(x) (x)
+
 #endif
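For illustration, a minimal sketch of how the __ldcw_align()/__ldcw() primitives reworked above are typically combined into a busy-wait lock. It is not part of the patch: the demo_lock_t type and the demo_lock()/demo_unlock() helpers are hypothetical names invented for the example, and the kernel's real spinlock implementation for this architecture lives in include/asm-parisc/spinlock.h.

#include <asm/system.h>

/*
 * Four words so that at least one of them falls on a 16-byte boundary,
 * which is what ldcw needs on pre-PA2.0 parts; __ldcw_align() picks that
 * word.  On CONFIG_PA20 builds the "ldcw,co" form only needs natural
 * alignment, so __ldcw_align() degenerates to a simple cast.
 */
typedef struct {
	volatile unsigned int lock[4];
} demo_lock_t;

/* ldcw reads a word and atomically zeroes it, so non-zero means "free". */
#define DEMO_LOCK_UNLOCKED	{ { 1, 1, 1, 1 } }

static inline void demo_lock(demo_lock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);

	while (__ldcw(a) == 0)		/* read 0: somebody else holds it   */
		while (*a == 0)		/* spin read-only until it looks free */
			;
	mb();				/* keep the critical section after the acquire */
}

static inline void demo_unlock(demo_lock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);

	mb();				/* drain critical-section accesses first */
	*a = 1;				/* store non-zero: lock is free again */
}

The locking protocol itself is the same on PA 1.x and PA 2.0; the four-word layout and the ",CO" completer selected through __LDCW exist only to satisfy ldcw's alignment rules on the two generations.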