#ifndef __PARISC_SYSTEM_H
#define __PARISC_SYSTEM_H
-#include <linux/config.h>
#include <asm/psw.h>
/* The program status word as bitfields. */
#define switch_to(prev, next, last) do {			\
	(last) = _switch_to(prev, next);			\
} while(0)
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
/* interrupt control */
#define mtctl(gr, cr) \
__asm__ __volatile__("mtctl %0,%1" \
: /* no outputs */ \
- : "r" (gr), "i" (cr))
+ : "r" (gr), "i" (cr) : "memory")
/* these are here to de-mystify the calling code, and to provide hooks */
/* which I needed for debugging EIEM problems -PB */
#define mtsp(gr, cr) \
__asm__ __volatile__("mtsp %0,%1" \
: /* no outputs */ \
- : "r" (gr), "i" (cr))
+ : "r" (gr), "i" (cr) : "memory")
/*
** The __asm__ op below simply prevents gcc/ld from reordering
** instructions across the mb() "call".
*/
-#define mb() __asm__ __volatile__("":::"memory"); /* barrier() */
+#define mb() __asm__ __volatile__("":::"memory") /* barrier() */
#define rmb() mb()
#define wmb() mb()
#define smp_mb() mb()
#define read_barrier_depends() do { } while(0)
#define set_mb(var, value) do { var = value; mb(); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
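As a usage sketch (illustrative names, not from this patch), set_mb() covers the store-then-barrier pattern, e.g. publishing a flag that another CPU polls:

/* Illustrative only: the writer publishes a flag with set_mb(),
 * so the store is ordered before anything the caller does next. */
static int example_ready;

static void example_publish(void)
{
	/* ... set up the data the flag guards ... */
	set_mb(example_ready, 1);	/* store flag, then full barrier */
}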
+#ifndef CONFIG_PA20
+/* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
+ and GCC only guarantees 8-byte alignment for stack locals, we can't
+ be assured of 16-byte alignment for atomic lock data even if we
+ specify "__attribute ((aligned(16)))" in the type declaration. So,
+ we use a struct containing an array of four ints for the atomic lock
+ type and dynamically select the 16-byte aligned int from the array
+ for the semaphore. */
+
+#define __PA_LDCW_ALIGNMENT 16
+#define __ldcw_align(a) ({ \
+ unsigned long __ret = (unsigned long) &(a)->lock[0]; \
+ __ret = (__ret + __PA_LDCW_ALIGNMENT - 1) \
+ & ~(__PA_LDCW_ALIGNMENT - 1); \
+ (volatile unsigned int *) __ret; \
+})
+#define __LDCW "ldcw"
+
+#else /*CONFIG_PA20*/
+/* From: "Jim Hull" <jim.hull of hp.com>
+ I've attached a summary of the change, but basically, for PA 2.0, as
+ long as the ",CO" (coherent operation) completer is specified, then the
+ 16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
+ they only require "natural" alignment (4-byte for ldcw, 8-byte for
+ ldcd). */
+
+#define __PA_LDCW_ALIGNMENT 4
+#define __ldcw_align(a) ((volatile unsigned int *)(a))
+#define __LDCW "ldcw,co"
+
+#endif /*!CONFIG_PA20*/
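To make the alignment dance above concrete, here is a sketch of a lock type shaped the way the pre-PA2.0 comment describes (the type name is made up for illustration):

/* Sketch only: four ints span 16 bytes, so whatever the struct's
 * actual alignment (at least 4), rounding &lock[0] up to a 16-byte
 * boundary with __ldcw_align() still lands inside the array. */
typedef struct {
	volatile unsigned int lock[4];
} example_spinlock_t;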
/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
-#define __ldcw(a) ({ \
- unsigned __ret; \
- __asm__ __volatile__("ldcw 0(%1),%0" : "=r" (__ret) : "r" (a)); \
- __ret; \
+#define __ldcw(a) ({ \
+ unsigned __ret; \
+ __asm__ __volatile__(__LDCW " 0(%1),%0" \
+ : "=r" (__ret) : "r" (a)); \
+ __ret; \
})
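Putting __ldcw_align() and __ldcw() together, a lock acquire/release pair would look roughly like this (a sketch built on the example_spinlock_t above, not code from this patch; ldcw atomically loads the word and zeroes it, so a result of 0 means the lock was already held):

/* Sketch only: spin with plain reads while the lock looks held,
 * and only retry the atomic ldcw once it looks free again. */
static inline void example_spin_lock(example_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);

	while (__ldcw(a) == 0)
		while (*a == 0)
			/* spin */;
}

static inline void example_spin_unlock(example_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);

	mb();			/* order critical section before release */
	*a = 1;			/* 1 == free */
}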
-
#ifdef CONFIG_SMP
-/*
- * Your basic SMP spinlocks, allowing only a single CPU anywhere
- */
-
-typedef struct {
- volatile unsigned int __attribute__((aligned(16))) lock;
-} spinlock_t;
+# define __lock_aligned __attribute__((__section__(".data.lock_aligned")))
#endif
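__lock_aligned drops lock data into its own .data.lock_aligned section so the linker script can give the whole group the alignment it needs; a hedged sketch of a declaration using it (array name and size invented for illustration):

/* Sketch only: a small table of locks grouped in the dedicated
 * lock section on SMP builds. */
#ifdef CONFIG_SMP
static example_spinlock_t example_hash[4] __lock_aligned;
#endif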
+#define KERNEL_START (0x10100000 - 0x1000)
+#define arch_align_stack(x) (x)
+
#endif