diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index 81514d7..6a89dbb 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -16,6 +16,7 @@
 #include <asm/types.h>
 #include <asm/ptrace.h>
 #include <asm/setup.h>
+#include <asm/processor.h>
 
 #ifdef __KERNEL__
 
@@ -103,29 +104,29 @@ static inline void restore_access_regs(unsigned int *acrs)
        prev = __switch_to(prev,next);                                       \
 } while (0)
 
-#define prepare_arch_switch(rq, next)  do { } while(0)
-#define task_running(rq, p)            ((rq)->curr == (p))
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
-extern void account_user_vtime(struct task_struct *);
+extern void account_vtime(struct task_struct *);
+extern void account_tick_vtime(struct task_struct *);
 extern void account_system_vtime(struct task_struct *);
-
-#define finish_arch_switch(rq, prev) do {                                   \
-       set_fs(current->thread.mm_segment);                                  \
-       spin_unlock(&(rq)->lock);                                            \
-       account_system_vtime(prev);                                          \
-       local_irq_enable();                                                  \
-} while (0)
-
 #else
+#define account_vtime(x) do { /* empty */ } while (0)
+#endif
 
-#define finish_arch_switch(rq, prev) do {                                   \
+#define finish_arch_switch(prev) do {                                       \
        set_fs(current->thread.mm_segment);                                  \
-       spin_unlock_irq(&(rq)->lock);                                        \
+       account_vtime(prev);                                                 \
 } while (0)
 
-#endif
-
 #define nop() __asm__ __volatile__ ("nop")
 
 #define xchg(ptr,x) \
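
For context, the sched_cacheflush() stub added above is driven by the scheduler's migration-cost autodetection. A hedged sketch of the caller side, loosely based on the measure_one() loop in kernel/sched.c of this kernel (the helper names come from that file; the details here are illustrative, not verbatim):

    /*
     * Hedged sketch of the caller: the migration-cost probe dirties a
     * buffer, migrates between two CPUs and re-touches it, timing each
     * pass. sched_cacheflush() makes every pass start from cold caches.
     */
    static unsigned long long
    measure_one(void *cache, unsigned long size, int source, int target)
    {
            unsigned long long t0, t1;

            sched_cacheflush();             /* flush as much cache as possible */

            /* ... migrate to `source`, dirty the buffer, migrate to `target` ... */

            t0 = sched_clock();
            touch_cache(cache, size);       /* walk the buffer, pulling it in */
            t1 = sched_clock();

            return t1 - t0;
    }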
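The finish_arch_switch() rework in the same hunk follows the scheduler interface change that dropped the runqueue argument: unlocking the runqueue and re-enabling interrupts moved into generic code, so the s390 macro now only restores the address-space limit and, via account_vtime() (a no-op unless CONFIG_VIRT_CPU_ACCOUNTING is set), charges the outgoing task's system time. A hedged, simplified sketch of the generic call site in kernel/sched.c of this era:

    /*
     * Hedged sketch, heavily simplified: the generic scheduler calls the
     * per-arch hook right after a context switch; runqueue unlocking and
     * irq enabling happen in finish_lock_switch(), not in the arch macro.
     */
    static inline void finish_task_switch(struct runqueue *rq, struct task_struct *prev)
    {
            /* ... sample prev->state, mm bookkeeping ... */
            finish_arch_switch(prev);       /* s390: set_fs() + account_vtime() */
            finish_lock_switch(rq, prev);   /* unlock rq, re-enable irqs */
            /* ... mmdrop()/put_task_struct() if prev is dead ... */
    }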
@@ -331,9 +332,6 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
 
 #ifdef __s390x__
 
-#define __load_psw(psw) \
-        __asm__ __volatile__("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc" );
-
 #define __ctl_load(array, low, high) ({ \
        typedef struct { char _[sizeof(array)]; } addrtype; \
        __asm__ __volatile__ ( \
@@ -390,9 +388,6 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
 
 #else /* __s390x__ */
 
-#define __load_psw(psw) \
-       __asm__ __volatile__("lpsw 0(%0)" : : "a" (&psw) : "cc" );
-
 #define __ctl_load(array, low, high) ({ \
        typedef struct { char _[sizeof(array)]; } addrtype; \
        __asm__ __volatile__ ( \
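Both the 64-bit and the 31-bit __load_psw macros drop out of system.h in these two hunks; in this kernel the PSW-loading helpers (including the __load_psw_mask() used below) live in <asm/processor.h>, which is why that include was added at the top of the file. A sketch of the relocated 64-bit helper, assuming it keeps the same lpswe sequence as the macro removed above:

    /*
     * Hedged sketch: __load_psw as a static inline in <asm/processor.h>,
     * mirroring the 64-bit macro removed from this file. lpswe loads a
     * full 16-byte PSW (mask plus address) from memory.
     */
    static inline void __load_psw(psw_t psw)
    {
            __asm__ __volatile__("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc");
    }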
@@ -451,6 +446,20 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
 /* For spinlocks etc */
 #define local_irq_save(x)      ((x) = local_irq_disable())
 
+/*
+ * Used to set the psw mask, except for the first byte,
+ * which won't be changed by this function.
+ */
+static inline void
+__set_psw_mask(unsigned long mask)
+{
+       local_save_flags(mask);
+       __load_psw_mask(mask);
+}
+
+#define local_mcck_enable()  __set_psw_mask(PSW_KERNEL_BITS)
+#define local_mcck_disable() __set_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK)
+
 #ifdef CONFIG_SMP
 
 extern void smp_ctl_set_bit(int cr, int bit);
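
The new macro pair toggles only the machine-check mask bit: __set_psw_mask() first refreshes the first PSW byte (the current I/O and external interruption masks, stored there by local_save_flags()) and then loads the rest of the mask from its argument. A hedged usage sketch; the function name and the critical section are illustrative only:

    /*
     * Hedged usage sketch: machine checks are masked off around code
     * that must not be interrupted by one, then enabled again.
     */
    static void fixup_under_mcck_protection(void)
    {
            local_mcck_disable();   /* PSW_KERNEL_BITS & ~PSW_MASK_MCHECK */
            /* ... touch state that a machine check handler could clobber ... */
            local_mcck_enable();    /* back to PSW_KERNEL_BITS */
    }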