X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-s390%2Fsystem.h;h=6a89dbb03c1e363e86870718e2633fdbc112a6d3;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=81514d76edcf3d1d498cfc5e53dfa14803c24fd3;hpb=cee37fe97739d85991964371c1f3a745c00dd236;p=linux-2.6.git

diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index 81514d76e..6a89dbb03 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -16,6 +16,7 @@
 #include
 #include
 #include
+#include
 
 #ifdef __KERNEL__
 
@@ -103,29 +104,29 @@ static inline void restore_access_regs(unsigned int *acrs)
 	prev = __switch_to(prev,next); \
 } while (0)
 
-#define prepare_arch_switch(rq, next)	do { } while(0)
-#define task_running(rq, p)		((rq)->curr == (p))
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
-extern void account_user_vtime(struct task_struct *);
+extern void account_vtime(struct task_struct *);
+extern void account_tick_vtime(struct task_struct *);
 extern void account_system_vtime(struct task_struct *);
-
-#define finish_arch_switch(rq, prev) do { \
-	set_fs(current->thread.mm_segment); \
-	spin_unlock(&(rq)->lock); \
-	account_system_vtime(prev); \
-	local_irq_enable(); \
-} while (0)
-
 #else
+#define account_vtime(x) do { /* empty */ } while (0)
+#endif
 
-#define finish_arch_switch(rq, prev) do { \
+#define finish_arch_switch(prev) do { \
 	set_fs(current->thread.mm_segment); \
-	spin_unlock_irq(&(rq)->lock); \
+	account_vtime(prev); \
 } while (0)
 
-#endif
-
 #define nop() __asm__ __volatile__ ("nop")
 
 #define xchg(ptr,x) \
@@ -331,9 +332,6 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
 
 #ifdef __s390x__
 
-#define __load_psw(psw) \
-	__asm__ __volatile__("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc" );
-
 #define __ctl_load(array, low, high) ({ \
 	typedef struct { char _[sizeof(array)]; } addrtype; \
 	__asm__ __volatile__ ( \
@@ -390,9 +388,6 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
 
 #else /* __s390x__ */
 
-#define __load_psw(psw) \
-	__asm__ __volatile__("lpsw 0(%0)" : : "a" (&psw) : "cc" );
-
 #define __ctl_load(array, low, high) ({ \
 	typedef struct { char _[sizeof(array)]; } addrtype; \
 	__asm__ __volatile__ ( \
@@ -451,6 +446,20 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
 
 /* For spinlocks etc */
 #define local_irq_save(x)	((x) = local_irq_disable())
 
+/*
+ * Use to set psw mask except for the first byte which
+ * won't be changed by this function.
+ */
+static inline void
+__set_psw_mask(unsigned long mask)
+{
+	local_save_flags(mask);
+	__load_psw_mask(mask);
+}
+
+#define local_mcck_enable()  __set_psw_mask(PSW_KERNEL_BITS)
+#define local_mcck_disable() __set_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK)
+
 #ifdef CONFIG_SMP
 
 extern void smp_ctl_set_bit(int cr, int bit);
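A note on the `__set_psw_mask()` helper added by the last hunk: it looks circular at first glance, but it relies on a detail of the s390 headers of this era, where `local_save_flags()` is a single `stosm` instruction that stores only the current system-mask byte into the most significant byte of the big-endian `unsigned long`, leaving the caller's remaining mask bits untouched. That is what the "except for the first byte" comment means. Below is a minimal userspace C sketch of that behaviour, not kernel code: `current_system_mask()` and `install_psw_mask()` are hypothetical stand-ins for the privileged `stosm`/`lpswe` operations, `set_psw_mask()` models `__set_psw_mask()`, and the mask constant is an arbitrary example rather than a real `PSW_KERNEL_BITS` value.

```c
#include <stdio.h>

/* Stand-in for "stosm 0(%1),0": yields the live system-mask byte. */
static unsigned char current_system_mask(void)
{
	return 0x04;		/* e.g. one bit of the running PSW's first byte */
}

/* Stand-in for __load_psw_mask(): would install the new PSW mask. */
static void install_psw_mask(unsigned long mask)
{
	printf("new psw mask: %016lx\n", mask);
}

/* Userspace model of __set_psw_mask() from the hunk above (64-bit case). */
static void set_psw_mask(unsigned long mask)
{
	/*
	 * local_save_flags(mask) stores exactly one byte at &mask; on
	 * big-endian s390x that is bits 56-63, so the first byte of the
	 * requested mask is replaced by the CPU's current system mask
	 * while the low bytes supplied by the caller survive.
	 */
	mask &= ~(0xffUL << 56);
	mask |= (unsigned long)current_system_mask() << 56;
	install_psw_mask(mask);
}

int main(void)
{
	/* Example request with the machine-check bit (in the second
	 * byte, outside stosm's reach) cleared. */
	set_psw_mask(0x0000000180000000UL);
	return 0;
}
```

This is why `local_mcck_disable()` can pass `PSW_KERNEL_BITS & ~PSW_MASK_MCHECK` without disturbing the interrupt-mask bits that live in the first byte: whatever the caller supplies there is overwritten with the CPU's current value before the mask is loaded, while the machine-check bit sits outside that byte and so does take effect.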