2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
10 #ifndef _ASM_IA64_SN_BTE_H
11 #define _ASM_IA64_SN_BTE_H
13 #include <linux/timer.h>
14 #include <linux/spinlock.h>
15 #include <linux/cache.h>
16 #include <asm/sn/types.h>
19 /* #define BTE_DEBUG */
20 /* #define BTE_DEBUG_VERBOSE */
23 # define BTE_PRINTK(x) printk x /* Terse */
24 # ifdef BTE_DEBUG_VERBOSE
25 # define BTE_PRINTKV(x) printk x /* Verbose */
27 # define BTE_PRINTKV(x)
28 # endif /* BTE_DEBUG_VERBOSE */
30 # define BTE_PRINTK(x)
31 # define BTE_PRINTKV(x)
32 #endif /* BTE_DEBUG */
35 /* BTE status register only supports 16 bits for length field */
36 #define BTE_LEN_BITS (16)
37 #define BTE_LEN_MASK ((1 << BTE_LEN_BITS) - 1)
38 #define BTE_MAX_XFER ((1 << BTE_LEN_BITS) * L1_CACHE_BYTES)
42 #define BTES_PER_NODE 2
45 /* Define hardware modes */
46 #define BTE_NOTIFY (IBCT_NOTIFY)
47 #define BTE_NORMAL BTE_NOTIFY
48 #define BTE_ZERO_FILL (BTE_NOTIFY | IBCT_ZFIL_MODE)
49 /* Use a reserved bit to let the caller specify a wait for any BTE */
50 #define BTE_WACQUIRE (0x4000)
51 /* macro to force the IBCT0 value valid */
52 #define BTE_VALID_MODE(x) ((x) & (IBCT_NOTIFY | IBCT_ZFIL_MODE))
56 * Handle locking of the bte interfaces.
58 * All transfers spinlock the interface before setting up the SHUB
59 * registers. Sync transfers hold the lock until all processing is
60 * complete. Async transfers release the lock as soon as the transfer
63 * To determine if an interface is available, we must check both the
64 * busy bit and the spinlock for that interface.
66 #define BTE_LOCK_IF_AVAIL(_x) (\
67 (*pda->cpu_bte_if[_x]->most_rcnt_na & (IBLS_BUSY | IBLS_ERROR)) && \
68 (!(spin_trylock(&(pda->cpu_bte_if[_x]->spinlock)))) \
72 * Some macros to simplify reading.
73 * Start with macros to locate the BTE control registers.
75 #define BTEREG_LNSTAT_ADDR ((u64 *)(bte->bte_base_addr))
76 #define BTEREG_SRC_ADDR ((u64 *)(bte->bte_base_addr + BTEOFF_SRC))
77 #define BTEREG_DEST_ADDR ((u64 *)(bte->bte_base_addr + BTEOFF_DEST))
78 #define BTEREG_CTRL_ADDR ((u64 *)(bte->bte_base_addr + BTEOFF_CTRL))
79 #define BTEREG_NOTIF_ADDR ((u64 *)(bte->bte_base_addr + BTEOFF_NOTIFY))
82 /* Possible results from bte_copy and bte_unaligned_copy */
83 /* The following error codes map into the BTE hardware codes
84 * IIO_ICRB_ECODE_* (in shubio.h). The hardware uses
85 * an error code of 0 (IIO_ICRB_ECODE_DERR), but we want zero
86 * to mean BTE_SUCCESS, so add one (BTEFAIL_OFFSET) to the error
87 * codes to give the following error codes.
89 #define BTEFAIL_OFFSET 1
92 BTE_SUCCESS, /* 0 is success */
93 BTEFAIL_DIR, /* Directory error due to IIO access*/
94 BTEFAIL_POISON, /* poison error on IO access (write to poison page) */
95 BTEFAIL_WERR, /* Write error (ie WINV to a Read only line) */
96 BTEFAIL_ACCESS, /* access error (protection violation) */
97 BTEFAIL_PWERR, /* Partial Write Error */
98 BTEFAIL_PRERR, /* Partial Read Error */
99 BTEFAIL_TOUT, /* CRB Time out */
100 BTEFAIL_XTERR, /* Incoming xtalk pkt had error bit */
101 BTEFAIL_NOTAVAIL, /* BTE not available */
106 * Structure defining a bte. An instance of this
107 * structure is created in the nodepda for each
108 * bte on that node (as defined by BTES_PER_NODE)
109 * This structure contains everything necessary
110 * to work with a BTE.
113 u64 volatile notify ____cacheline_aligned;
114 char *bte_base_addr ____cacheline_aligned;
116 cnodeid_t bte_cnode; /* cnode */
117 int bte_error_count; /* Number of errors encountered */
118 int bte_num; /* 0 --> BTE0, 1 --> BTE1 */
119 int cleanup_active; /* Interface is locked for cleanup */
120 volatile bte_result_t bh_error; /* error while processing */
121 u64 volatile *most_rcnt_na;
122 void *scratch_buf; /* Node local scratch buffer */
127 * Function prototypes (functions defined in bte.c, used elsewhere)
129 extern bte_result_t bte_copy(u64, u64, u64, u64, void *);
130 extern bte_result_t bte_unaligned_copy(u64, u64, u64, u64);
131 extern void bte_error_handler(unsigned long);
 * The following is the preferred way of calling bte_unaligned_copy.
136 * If the copy is fully cache line aligned, then bte_copy is
137 * used instead. Since bte_copy is inlined, this saves a call
138 * stack. NOTE: bte_copy is called synchronously and does block
139 * until the transfer is complete. In order to get the asynch
140 * version of bte_copy, you must perform this check yourself.
/*
 * Dispatch on alignment: if src, dest, or len is not L1-cache-line
 * aligned, fall back to bte_unaligned_copy(); otherwise call bte_copy()
 * directly (synchronous — NULL notification pointer).
 *
 * Every macro argument is parenthesized at each use so that expression
 * arguments (e.g. "base + off") parse correctly: without the parens,
 * "base + off & L1_CACHE_MASK" would bind as "base + (off & L1_CACHE_MASK)"
 * because & has lower precedence than +. Note arguments may be evaluated
 * twice (once in the test, once in the call), so avoid side effects.
 */
#define BTE_UNALIGNED_COPY(src, dest, len, mode) \
	((((len) & L1_CACHE_MASK) || ((src) & L1_CACHE_MASK) || \
	  ((dest) & L1_CACHE_MASK)) ? \
	 bte_unaligned_copy((src), (dest), (len), (mode)) : \
	 bte_copy((src), (dest), (len), (mode), NULL))
149 #endif /* _ASM_IA64_SN_BTE_H */