#ifndef __ALPHA_SYSTEM_H
#define __ALPHA_SYSTEM_H
-#include <linux/config.h>
#include <asm/pal.h>
#include <asm/page.h>
+#include <asm/barrier.h>
/*
* System defines.. Note that this is included both from .c and .S
* files, so it does only defines, not any C code.
*/
struct el_common {
unsigned int size; /* size in bytes of logout area */
- int sbz1 : 30; /* should be zero */
- int err2 : 1; /* second error */
- int retry : 1; /* retry flag */
+ unsigned int sbz1 : 30; /* should be zero */
+ unsigned int err2 : 1; /* second error */
+ unsigned int retry : 1; /* retry flag */
unsigned int proc_offset; /* processor-specific offset */
unsigned int sys_offset; /* system-specific offset */
unsigned int code; /* machine check code */
extern void halt(void) __attribute__((noreturn));
#define __halt() __asm__ __volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt))
-#define switch_to(P,N,L) \
- do { \
- (L) = alpha_switch_to(virt_to_phys(&(N)->thread_info->pcb), (P)); \
- check_mmu_context(); \
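+/* Switch to task N: PALcode wants the physical address of the new
+   thread's PCB, and alpha_switch_to() hands back the task we switched
+   away from in L. */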
+#define switch_to(P,N,L) \
+ do { \
+ (L) = alpha_switch_to(virt_to_phys(&task_thread_info(N)->pcb), (P)); \
+ check_mmu_context(); \
} while (0)
struct task_struct;
extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*);
-#define mb() \
-__asm__ __volatile__("mb": : :"memory")
-
-#define rmb() \
-__asm__ __volatile__("mb": : :"memory")
-
-#define wmb() \
-__asm__ __volatile__("wmb": : :"memory")
-
-#define read_barrier_depends() \
-__asm__ __volatile__("mb": : :"memory")
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define smp_read_barrier_depends() read_barrier_depends()
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() barrier()
-#endif
-
-#define set_mb(var, value) \
-do { var = value; mb(); } while (0)
-
-#define set_wmb(var, value) \
-do { var = value; wmb(); } while (0)
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
#define imb() \
__asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")
/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg(). */
extern void __xchg_called_with_bad_pointer(void);
-static inline unsigned long
-__xchg(volatile void *ptr, unsigned long x, int size)
-{
- switch (size) {
- case 1:
- return __xchg_u8(ptr, x);
- case 2:
- return __xchg_u16(ptr, x);
- case 4:
- return __xchg_u32(ptr, x);
- case 8:
- return __xchg_u64(ptr, x);
- }
- __xchg_called_with_bad_pointer();
- return x;
-}
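+/* The macro form keeps the size switch on a compile-time constant, so
+   only the matching __xchg_uNN() call survives and an unsupported size
+   still trips the link error above. */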
+#define __xchg(ptr, x, size) \
+({ \
+ unsigned long __xchg__res; \
+ volatile void *__xchg__ptr = (ptr); \
+ switch (size) { \
+ case 1: __xchg__res = __xchg_u8(__xchg__ptr, x); break; \
+ case 2: __xchg__res = __xchg_u16(__xchg__ptr, x); break; \
+ case 4: __xchg__res = __xchg_u32(__xchg__ptr, x); break; \
+ case 8: __xchg__res = __xchg_u64(__xchg__ptr, x); break; \
+ default: __xchg_called_with_bad_pointer(); __xchg__res = x; \
+ } \
+ __xchg__res; \
+})
#define xchg(ptr,x) \
({ \
/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);
-static inline unsigned long
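+/* Force inlining so the constant-size switch always folds away and the
+   bad-pointer call is dropped for valid sizes. */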
+static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
switch (size) {
#endif /* __ASSEMBLY__ */
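+/* No architecture-specific stack adjustment here: the address is used
+   unchanged. */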
+#define arch_align_stack(x) (x)
+
#endif