fedora core 6 1.2949 + vserver 2.2.0 (linux-2.6.git)
diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h
index d5a05cf..af20c74 100644
@@ -1,11 +1,13 @@
 #ifndef __ARCH_S390_ATOMIC__
 #define __ARCH_S390_ATOMIC__
 
+#include <linux/compiler.h>
+
 /*
  *  include/asm-s390/atomic.h
  *
  *  S390 version
- *    Copyright (C) 1999-2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Copyright (C) 1999-2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
  *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
  *               Denis Joseph Barrow,
  *              Arnd Bergmann (arndb@de.ibm.com)
@@ -28,76 +30,113 @@ typedef struct {
 
 #ifdef __KERNEL__
 
+#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
+
+#define __CS_LOOP(ptr, op_val, op_string) ({                           \
+       typeof(ptr->counter) old_val, new_val;                          \
+       asm volatile(                                                   \
+               "       l       %0,%2\n"                                \
+               "0:     lr      %1,%0\n"                                \
+               op_string "     %1,%3\n"                                \
+               "       cs      %0,%1,%2\n"                             \
+               "       jl      0b"                                     \
+               : "=&d" (old_val), "=&d" (new_val),                     \
+                 "=Q" (((atomic_t *)(ptr))->counter)                   \
+               : "d" (op_val),  "Q" (((atomic_t *)(ptr))->counter)     \
+               : "cc", "memory");                                      \
+       new_val;                                                        \
+})
+
+#else /* __GNUC__ */
+
 #define __CS_LOOP(ptr, op_val, op_string) ({                           \
        typeof(ptr->counter) old_val, new_val;                          \
-        __asm__ __volatile__("   l     %0,0(%3)\n"                     \
-                             "0: lr    %1,%0\n"                                \
-                             op_string "  %1,%4\n"                     \
-                             "   cs    %0,%1,0(%3)\n"                  \
-                             "   jl    0b"                             \
-                             : "=&d" (old_val), "=&d" (new_val),       \
-                              "=m" (((atomic_t *)(ptr))->counter)      \
-                            : "a" (ptr), "d" (op_val),                 \
-                              "m" (((atomic_t *)(ptr))->counter)       \
-                            : "cc", "memory" );                        \
+       asm volatile(                                                   \
+               "       l       %0,0(%3)\n"                             \
+               "0:     lr      %1,%0\n"                                \
+               op_string "     %1,%4\n"                                \
+               "       cs      %0,%1,0(%3)\n"                          \
+               "       jl      0b"                                     \
+               : "=&d" (old_val), "=&d" (new_val),                     \
+                 "=m" (((atomic_t *)(ptr))->counter)                   \
+               : "a" (ptr), "d" (op_val),                              \
+                 "m" (((atomic_t *)(ptr))->counter)                    \
+               : "cc", "memory");                                      \
        new_val;                                                        \
 })
+
+#endif /* __GNUC__ */
+
 #define atomic_read(v)          ((v)->counter)
 #define atomic_set(v,i)         (((v)->counter) = (i))
 
-static __inline__ void atomic_add(int i, atomic_t * v)
-{
-              __CS_LOOP(v, i, "ar");
-}
 static __inline__ int atomic_add_return(int i, atomic_t * v)
 {
        return __CS_LOOP(v, i, "ar");
 }
-static __inline__ int atomic_add_negative(int i, atomic_t * v)
-{
-       return __CS_LOOP(v, i, "ar") < 0;
-}
-static __inline__ void atomic_sub(int i, atomic_t * v)
-{
-              __CS_LOOP(v, i, "sr");
-}
+#define atomic_add(_i, _v)             atomic_add_return(_i, _v)
+#define atomic_add_negative(_i, _v)    (atomic_add_return(_i, _v) < 0)
+#define atomic_inc(_v)                 atomic_add_return(1, _v)
+#define atomic_inc_return(_v)          atomic_add_return(1, _v)
+#define atomic_inc_and_test(_v)        (atomic_add_return(1, _v) == 0)
+
 static __inline__ int atomic_sub_return(int i, atomic_t * v)
 {
        return __CS_LOOP(v, i, "sr");
 }
-static __inline__ void atomic_inc(volatile atomic_t * v)
-{
-              __CS_LOOP(v, 1, "ar");
-}
-static __inline__ int atomic_inc_return(volatile atomic_t * v)
-{
-       return __CS_LOOP(v, 1, "ar");
-}
+#define atomic_sub(_i, _v)             atomic_sub_return(_i, _v)
+#define atomic_sub_and_test(_i, _v)    (atomic_sub_return(_i, _v) == 0)
+#define atomic_dec(_v)                 atomic_sub_return(1, _v)
+#define atomic_dec_return(_v)          atomic_sub_return(1, _v)
+#define atomic_dec_and_test(_v)        (atomic_sub_return(1, _v) == 0)
 
-static __inline__ int atomic_inc_and_test(volatile atomic_t * v)
-{
-       return __CS_LOOP(v, 1, "ar") == 0;
-}
-static __inline__ void atomic_dec(volatile atomic_t * v)
-{
-              __CS_LOOP(v, 1, "sr");
-}
-static __inline__ int atomic_dec_return(volatile atomic_t * v)
-{
-       return __CS_LOOP(v, 1, "sr");
-}
-static __inline__ int atomic_dec_and_test(volatile atomic_t * v)
-{
-       return __CS_LOOP(v, 1, "sr") == 0;
-}
 static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v)
 {
               __CS_LOOP(v, ~mask, "nr");
 }
+
 static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
 {
               __CS_LOOP(v, mask, "or");
 }
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
+       asm volatile(
+               "       cs      %0,%2,%1"
+               : "+d" (old), "=Q" (v->counter)
+               : "d" (new), "Q" (v->counter)
+               : "cc", "memory");
+#else /* __GNUC__ */
+       asm volatile(
+               "       cs      %0,%3,0(%2)"
+               : "+d" (old), "=m" (v->counter)
+               : "a" (v), "d" (new), "m" (v->counter)
+               : "cc", "memory");
+#endif /* __GNUC__ */
+       return old;
+}
+
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+       int c, old;
+       c = atomic_read(v);
+       for (;;) {
+               if (unlikely(c == u))
+                       break;
+               old = atomic_cmpxchg(v, c, c + a);
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != u;
+}
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 #undef __CS_LOOP
 
 #ifdef __s390x__
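
Note on the __CS_LOOP macro added above: it is the classic compare-and-swap
retry loop. The counter is loaded (l), copied and combined with the operand
(lr plus ar/sr/nr/or), and the result is stored with cs, which succeeds only
if the counter still holds the value read earlier; on failure cs reloads the
current value and jl branches back to retry. A minimal C sketch of the same
pattern, using GCC's __sync_val_compare_and_swap builtin purely for
illustration; the header itself uses the inline assembly shown above:

/* Illustrative sketch of the __CS_LOOP retry pattern; not part of the
 * patch.  The real macro emits the l/lr/<op>/cs/jl sequence as inline asm. */
static inline int cs_loop_add(int *counter, int val)
{
	int old_val, new_val, prev;

	old_val = *counter;			/* "l   %0,%2" */
	for (;;) {
		new_val = old_val + val;	/* "lr  %1,%0" + "ar  %1,%3" */
		prev = __sync_val_compare_and_swap(counter, old_val, new_val);
		if (prev == old_val)		/* "cs  %0,%1,%2" stored new_val */
			return new_val;
		old_val = prev;			/* lost the race; "jl  0b" retries */
	}
}
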
@@ -106,102 +145,121 @@ typedef struct {
 } __attribute__ ((aligned (8))) atomic64_t;
 #define ATOMIC64_INIT(i)  { (i) }
 
+#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
+
+#define __CSG_LOOP(ptr, op_val, op_string) ({                          \
+       typeof(ptr->counter) old_val, new_val;                          \
+       asm volatile(                                                   \
+               "       lg      %0,%2\n"                                \
+               "0:     lgr     %1,%0\n"                                \
+               op_string "     %1,%3\n"                                \
+               "       csg     %0,%1,%2\n"                             \
+               "       jl      0b"                                     \
+               : "=&d" (old_val), "=&d" (new_val),                     \
+                 "=Q" (((atomic_t *)(ptr))->counter)                   \
+               : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter)      \
+               : "cc", "memory" );                                     \
+       new_val;                                                        \
+})
+
+#else /* __GNUC__ */
+
 #define __CSG_LOOP(ptr, op_val, op_string) ({                          \
        typeof(ptr->counter) old_val, new_val;                          \
-        __asm__ __volatile__("   lg    %0,0(%3)\n"                     \
-                             "0: lgr   %1,%0\n"                                \
-                             op_string "  %1,%4\n"                     \
-                             "   csg   %0,%1,0(%3)\n"                  \
-                             "   jl    0b"                             \
-                             : "=&d" (old_val), "=&d" (new_val),       \
-                              "=m" (((atomic_t *)(ptr))->counter)      \
-                            : "a" (ptr), "d" (op_val),                 \
-                              "m" (((atomic_t *)(ptr))->counter)       \
-                            : "cc", "memory" );                        \
+       asm volatile(                                                   \
+               "       lg      %0,0(%3)\n"                             \
+               "0:     lgr     %1,%0\n"                                \
+               op_string "     %1,%4\n"                                \
+               "       csg     %0,%1,0(%3)\n"                          \
+               "       jl      0b"                                     \
+               : "=&d" (old_val), "=&d" (new_val),                     \
+                 "=m" (((atomic_t *)(ptr))->counter)                   \
+               : "a" (ptr), "d" (op_val),                              \
+                 "m" (((atomic_t *)(ptr))->counter)                    \
+               : "cc", "memory" );                                     \
        new_val;                                                        \
 })
+
+#endif /* __GNUC__ */
+
 #define atomic64_read(v)          ((v)->counter)
 #define atomic64_set(v,i)         (((v)->counter) = (i))
 
-static __inline__ void atomic64_add(int i, atomic64_t * v)
-{
-              __CSG_LOOP(v, i, "agr");
-}
-static __inline__ long long atomic64_add_return(int i, atomic64_t * v)
+static __inline__ long long atomic64_add_return(long long i, atomic64_t * v)
 {
        return __CSG_LOOP(v, i, "agr");
 }
-static __inline__ long long atomic64_add_negative(int i, atomic64_t * v)
-{
-       return __CSG_LOOP(v, i, "agr") < 0;
-}
-static __inline__ void atomic64_sub(int i, atomic64_t * v)
-{
-              __CSG_LOOP(v, i, "sgr");
-}
-static __inline__ void atomic64_inc(volatile atomic64_t * v)
-{
-              __CSG_LOOP(v, 1, "agr");
-}
-static __inline__ long long atomic64_inc_return(volatile atomic64_t * v)
-{
-       return __CSG_LOOP(v, 1, "agr");
-}
-static __inline__ long long atomic64_inc_and_test(volatile atomic64_t * v)
-{
-       return __CSG_LOOP(v, 1, "agr") == 0;
-}
-static __inline__ void atomic64_dec(volatile atomic64_t * v)
-{
-              __CSG_LOOP(v, 1, "sgr");
-}
-static __inline__ long long atomic64_dec_return(volatile atomic64_t * v)
-{
-       return __CSG_LOOP(v, 1, "sgr");
-}
-static __inline__ long long atomic64_dec_and_test(volatile atomic64_t * v)
+#define atomic64_add(_i, _v)           atomic64_add_return(_i, _v)
+#define atomic64_add_negative(_i, _v)  (atomic64_add_return(_i, _v) < 0)
+#define atomic64_inc(_v)               atomic64_add_return(1, _v)
+#define atomic64_inc_return(_v)        atomic64_add_return(1, _v)
+#define atomic64_inc_and_test(_v)      (atomic64_add_return(1, _v) == 0)
+
+static __inline__ long long atomic64_sub_return(long long i, atomic64_t * v)
 {
-       return __CSG_LOOP(v, 1, "sgr") == 0;
+       return __CSG_LOOP(v, i, "sgr");
 }
+#define atomic64_sub(_i, _v)           atomic64_sub_return(_i, _v)
+#define atomic64_sub_and_test(_i, _v)  (atomic64_sub_return(_i, _v) == 0)
+#define atomic64_dec(_v)               atomic64_sub_return(1, _v)
+#define atomic64_dec_return(_v)        atomic64_sub_return(1, _v)
+#define atomic64_dec_and_test(_v)      (atomic64_sub_return(1, _v) == 0)
+
 static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v)
 {
               __CSG_LOOP(v, ~mask, "ngr");
 }
+
 static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v)
 {
               __CSG_LOOP(v, mask, "ogr");
 }
 
-#undef __CSG_LOOP
-#endif
-
-/*
-  returns 0  if expected_oldval==value in *v ( swap was successful )
-  returns 1  if unsuccessful.
+static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
+                                            long long old, long long new)
+{
+#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
+       asm volatile(
+               "       csg     %0,%2,%1"
+               : "+d" (old), "=Q" (v->counter)
+               : "d" (new), "Q" (v->counter)
+               : "cc", "memory");
+#else /* __GNUC__ */
+       asm volatile(
+               "       csg     %0,%3,0(%2)"
+               : "+d" (old), "=m" (v->counter)
+               : "a" (v), "d" (new), "m" (v->counter)
+               : "cc", "memory");
+#endif /* __GNUC__ */
+       return old;
+}
 
-  This is non-portable, use bitops or spinlocks instead!
-*/
-static __inline__ int
-atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
+static __inline__ int atomic64_add_unless(atomic64_t *v,
+                                         long long a, long long u)
 {
-        int retval;
-
-        __asm__ __volatile__(
-                "  lr   %0,%3\n"
-                "  cs   %0,%4,0(%2)\n"
-                "  ipm  %0\n"
-                "  srl  %0,28\n"
-                "0:"
-                : "=&d" (retval), "=m" (v->counter)
-                : "a" (v), "d" (expected_oldval) , "d" (new_val),
-                 "m" (v->counter) : "cc", "memory" );
-        return retval;
+       long long c, old;
+       c = atomic64_read(v);
+       for (;;) {
+               if (unlikely(c == u))
+                       break;
+               old = atomic64_cmpxchg(v, c, c + a);
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != u;
 }
 
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
+#undef __CSG_LOOP
+#endif
+
 #define smp_mb__before_atomic_dec()    smp_mb()
 #define smp_mb__after_atomic_dec()     smp_mb()
 #define smp_mb__before_atomic_inc()    smp_mb()
 #define smp_mb__after_atomic_inc()     smp_mb()
 
+#include <asm-generic/atomic.h>
 #endif /* __KERNEL__ */
 #endif /* __ARCH_S390_ATOMIC__  */
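
The patch also retires the s390-private atomic_compare_and_swap(), which
returned the condition code (0 on success, 1 on failure), in favour of
atomic_cmpxchg(), which follows the convention of the other architectures
and returns the value the counter actually held. Callers check for success
by comparing that return value against the expected old value. A before and
after sketch; take_slot() is invented here purely for illustration:

/* Hypothetical caller, invented for illustration only. */

/* Before this patch: a return of 0 signalled a successful swap. */
static int take_slot_old(atomic_t *slot)
{
	return atomic_compare_and_swap(0, 1, slot) == 0;
}

/* After this patch: success means the returned old value matches
 * the expected one. */
static int take_slot_new(atomic_t *slot)
{
	return atomic_cmpxchg(slot, 0, 1) == 0;
}

The new atomic_add_unless() builds its retry loop on top of atomic_cmpxchg(),
and atomic_inc_not_zero() is the common refcounting special case: take a
reference only if the count has not already dropped to zero.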