*
* We have to use the sync instructions for mb(), since lwsync doesn't
* order loads with respect to previous stores. Lwsync is fine for
- * rmb(), though. Note that lwsync is interpreted as sync by
- * 32-bit and older 64-bit CPUs.
+ * rmb(), though. Note that rmb() actually uses a sync on 32-bit
+ * architectures.
*
* For wmb(), we use sync since wmb is used in drivers to order
* stores to system memory with respect to writes to the device.
* However, smp_wmb() can be a lighter-weight eieio barrier on
* SMP since it is only used to order updates to system memory.
*/
#define mb() __asm__ __volatile__ ("sync" : : : "memory")
-#define rmb() __asm__ __volatile__ ("lwsync" : : : "memory")
+#define rmb() __asm__ __volatile__ (__stringify(LWSYNC) : : : "memory")
#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
#define read_barrier_depends() do { } while(0)
#define set_mb(var, value) do { var = value; mb(); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
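/*
 * A minimal sketch (not from this header) of how wmb() and rmb() pair
 * up: a producer publishes data behind a flag, and a consumer checks
 * the flag before reading the data.  The example_* names are
 * hypothetical.
 */
static int example_buf;
static int example_ready;

static inline void example_publish(int v)
{
	example_buf = v;	/* store the data first */
	wmb();			/* data store must be visible before the flag */
	example_ready = 1;
}

static inline int example_consume(void)
{
	if (!example_ready)
		return -1;
	rmb();			/* flag load must complete before the data load */
	return example_buf;
}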
#ifdef __KERNEL__
#ifdef CONFIG_SMP
#define smp_read_barrier_depends() do { } while(0)
#endif /* CONFIG_SMP */
+/*
+ * This is a barrier which prevents following instructions from being
+ * started until the value of the argument x is known. For example, if
+ * x is a variable loaded from memory, this prevents following
+ * instructions from being executed until the load has been performed.
+ */
+#define data_barrier(x) \
+ asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
+
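/*
 * A minimal sketch (hypothetical, not part of this file) of the pattern
 * data_barrier() is meant for: force a load to complete before anything
 * that follows it begins executing.
 */
static inline unsigned int example_read_then_sync(volatile unsigned int *reg)
{
	unsigned int status = *reg;	/* load, e.g. from device memory */

	data_barrier(status);		/* nothing below starts until the load is done */
	return status;
}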
struct task_struct;
struct pt_regs;
DEBUGGER_BOILERPLATE(debugger_dabr_match)
DEBUGGER_BOILERPLATE(debugger_fault_handler)
-#ifdef CONFIG_XMON
-extern void xmon_init(int enable);
-#endif
-
#else
static inline int debugger(struct pt_regs *regs) { return 0; }
static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
extern u32 booke_wdt_period;
#endif /* CONFIG_BOOKE_WDT */
-/* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */
-extern unsigned char e2a(unsigned char);
-
struct device_node;
extern void note_scsi_host(struct device_node *, void *);
" stwcx. %3,0,%2 \n\
bne- 1b"
ISYNC_ON_SMP
- : "=&r" (prev), "=m" (*(volatile unsigned int *)p)
- : "r" (p), "r" (val), "m" (*(volatile unsigned int *)p)
+ : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
+ : "r" (p), "r" (val)
: "cc", "memory");
return prev;
" stdcx. %3,0,%2 \n\
bne- 1b"
ISYNC_ON_SMP
- : "=&r" (prev), "=m" (*(volatile unsigned long *)p)
- : "r" (p), "r" (val), "m" (*(volatile unsigned long *)p)
+ : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
+ : "r" (p), "r" (val)
: "cc", "memory");
return prev;
ISYNC_ON_SMP
"\n\
2:"
- : "=&r" (prev), "=m" (*p)
- : "r" (p), "r" (old), "r" (new), "m" (*p)
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (old), "r" (new)
: "cc", "memory");
return prev;
ISYNC_ON_SMP
"\n\
2:"
- : "=&r" (prev), "=m" (*p)
- : "r" (p), "r" (old), "r" (new), "m" (*p)
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (old), "r" (new)
: "cc", "memory");
return prev;
* powers of 2 writes until it reaches sufficient alignment).
*
* Based on this we disable the IP header alignment in network drivers.
+ * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
+ * cacheline alignment of buffers.
*/
-#define NET_IP_ALIGN 0
+#define NET_IP_ALIGN 0
+#define NET_SKB_PAD L1_CACHE_BYTES
#endif
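/*
 * A minimal sketch (hypothetical driver code, assumes <linux/skbuff.h>)
 * of where these constants matter.  With NET_IP_ALIGN == 0 the
 * skb_reserve() below is a no-op, so receive buffers keep the cacheline
 * alignment that the larger NET_SKB_PAD provides.
 */
static inline struct sk_buff *example_rx_alloc(unsigned int len)
{
	struct sk_buff *skb = dev_alloc_skb(len + NET_IP_ALIGN);

	if (skb)
		skb_reserve(skb, NET_IP_ALIGN);	/* offset the IP header; 0 on powerpc */
	return skb;
}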
#define arch_align_stack(x) (x)
create_branch(addr, func_addr, BRANCH_SET_LINK);
}
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void account_system_vtime(struct task_struct *);
+#endif
+
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SYSTEM_H */