Fedora kernel-2.6.17-1.2142_FC4 patched with stable patch-2.6.17.4-vs2.0.2-rc26.diff
[linux-2.6.git] / include/asm-alpha/system.h
index c08ce97..f3b7b1a 100644
@@ -4,6 +4,7 @@
 #include <linux/config.h>
 #include <asm/pal.h>
 #include <asm/page.h>
+#include <asm/barrier.h>
 
 /*
  * System defines.. Note that this is included both from .c and .S
@@ -130,44 +131,24 @@ struct el_common_EV6_mcheck {
 extern void halt(void) __attribute__((noreturn));
 #define __halt() __asm__ __volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt))
 
-#define switch_to(P,N,L)                                               \
-  do {                                                                 \
-    (L) = alpha_switch_to(virt_to_phys(&(N)->thread_info->pcb), (P));  \
-    check_mmu_context();                                               \
+#define switch_to(P,N,L)                                                \
+  do {                                                                  \
+    (L) = alpha_switch_to(virt_to_phys(&task_thread_info(N)->pcb), (P)); \
+    check_mmu_context();                                                \
   } while (0)
 
 struct task_struct;
 extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*);
 
-#define mb() \
-__asm__ __volatile__("mb": : :"memory")
-
-#define rmb() \
-__asm__ __volatile__("mb": : :"memory")
-
-#define wmb() \
-__asm__ __volatile__("wmb": : :"memory")
-
-#define read_barrier_depends() \
-__asm__ __volatile__("mb": : :"memory")
-
-#ifdef CONFIG_SMP
-#define smp_mb()       mb()
-#define smp_rmb()      rmb()
-#define smp_wmb()      wmb()
-#define smp_read_barrier_depends()     read_barrier_depends()
-#else
-#define smp_mb()       barrier()
-#define smp_rmb()      barrier()
-#define smp_wmb()      barrier()
-#define smp_read_barrier_depends()     barrier()
-#endif
-
-#define set_mb(var, value) \
-do { var = value; mb(); } while (0)
-
-#define set_wmb(var, value) \
-do { var = value; wmb(); } while (0)
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
 
 #define imb() \
 __asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")
@@ -443,22 +424,19 @@ __xchg_u64(volatile long *m, unsigned long val)
    if something tries to do an invalid xchg().  */
 extern void __xchg_called_with_bad_pointer(void);
 
-static inline unsigned long
-__xchg(volatile void *ptr, unsigned long x, int size)
-{
-       switch (size) {
-               case 1:
-                       return __xchg_u8(ptr, x);
-               case 2:
-                       return __xchg_u16(ptr, x);
-               case 4:
-                       return __xchg_u32(ptr, x);
-               case 8:
-                       return __xchg_u64(ptr, x);
-       }
-       __xchg_called_with_bad_pointer();
-       return x;
-}
+#define __xchg(ptr, x, size) \
+({ \
+       unsigned long __xchg__res; \
+       volatile void *__xchg__ptr = (ptr); \
+       switch (size) { \
+               case 1: __xchg__res = __xchg_u8(__xchg__ptr, x); break; \
+               case 2: __xchg__res = __xchg_u16(__xchg__ptr, x); break; \
+               case 4: __xchg__res = __xchg_u32(__xchg__ptr, x); break; \
+               case 8: __xchg__res = __xchg_u64(__xchg__ptr, x); break; \
+               default: __xchg_called_with_bad_pointer(); __xchg__res = x; \
+       } \
+       __xchg__res; \
+})
 
 #define xchg(ptr,x)                                                         \
   ({                                                                        \
@@ -594,7 +572,7 @@ __cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
    if something tries to do an invalid cmpxchg().  */
 extern void __cmpxchg_called_with_bad_pointer(void);
 
-static inline unsigned long
+static __always_inline unsigned long
 __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
 {
        switch (size) {
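
A minimal usage sketch (not part of the patch; the function name is illustrative): both xchg() and cmpxchg() in this header dispatch on sizeof(*ptr) through the macros shown above, so a caller on Alpha can build an ordinary compare-and-swap retry loop directly on a long:

static inline long illustrative_atomic_add_return(volatile long *p, long inc)
{
	long old, new;

	/* Re-read the current value and retry until the cmpxchg succeeds. */
	do {
		old = *p;
		new = old + inc;
	} while (cmpxchg(p, old, new) != old);

	return new;
}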