/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <asm/sn/sgi.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/pda.h>
#include <asm/sn/sn2/shubio.h>
#include <asm/nodedata.h>

#include <linux/bootmem.h>
#include <linux/string.h>
#include <linux/sched.h>

#include <asm/sn/bte.h>
#define L1_CACHE_MASK (L1_CACHE_BYTES - 1)
/* two interfaces on two btes */
#define MAX_INTERFACES_TO_TRY 4
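
/*
 * Up to four candidates are tried in bte_copy(): the two BTE interfaces
 * on the preferred node and, when BTE_USE_ANY is set, the two interfaces
 * on the alternate node (see the btes_to_try[] setup below).
 */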
static struct bteinfo_s *
bte_if_on_node(nasid_t nasid, int interface)
{
	nodepda_t *tmp_nodepda;

	tmp_nodepda = NODEPDA(nasid_to_cnodeid(nasid));
	return &tmp_nodepda->bte_if[interface];
}
/************************************************************************
 * Block Transfer Engine copy related functions.
 *
 ***********************************************************************/
/*
 * bte_copy(src, dest, len, mode, notification)
 *
 * Use the block transfer engine to move kernel memory from src to dest
 * using the assigned mode.
 *
 * Parameters:
 *   src - physical address of the transfer source.
 *   dest - physical address of the transfer destination.
 *   len - number of bytes to transfer from source to dest.
 *   mode - hardware defined.  See reference information
 *          for IBCT0/1 in the SHUB Programmers Reference.
 *   notification - kernel virtual address of the notification cache
 *                  line.  If NULL, the default is used and
 *                  the bte_copy is synchronous.
 *
 * NOTE: This function requires src, dest, and len to
 * be cache line aligned.
 *
 * (An illustrative caller sketch appears after EXPORT_SYMBOL(bte_copy)
 * below.)
 */
bte_result_t
bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
{
	u64 transfer_size;
	u64 transfer_stat;
	struct bteinfo_s *bte;
	bte_result_t bte_status;
	unsigned long irq_flags;
	struct bteinfo_s *btes_to_try[MAX_INTERFACES_TO_TRY];
	int bte_if_index;

	BTE_PRINTK(("bte_copy(0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%p)\n",
		    src, dest, len, mode, notification));
	ASSERT(!((len & L1_CACHE_MASK) ||
		 (src & L1_CACHE_MASK) || (dest & L1_CACHE_MASK)));
	ASSERT(len < ((BTE_LEN_MASK + 1) << L1_CACHE_SHIFT));
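
	/*
	 * The IBLS length field counts cache lines, so a single request
	 * is limited to less than (BTE_LEN_MASK + 1) << L1_CACHE_SHIFT
	 * bytes; larger copies must be split up by the caller.
	 */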
	if (mode & BTE_USE_DEST) {
		/* try remote then local */
		btes_to_try[0] = bte_if_on_node(NASID_GET(dest), 0);
		btes_to_try[1] = bte_if_on_node(NASID_GET(dest), 1);
		if (mode & BTE_USE_ANY) {
			btes_to_try[2] = bte_if_on_node(get_nasid(), 0);
			btes_to_try[3] = bte_if_on_node(get_nasid(), 1);
		} else {
			btes_to_try[2] = NULL;
			btes_to_try[3] = NULL;
		}
	} else {
		/* try local then remote */
		btes_to_try[0] = bte_if_on_node(get_nasid(), 0);
		btes_to_try[1] = bte_if_on_node(get_nasid(), 1);
		if (mode & BTE_USE_ANY) {
			btes_to_try[2] = bte_if_on_node(NASID_GET(dest), 0);
			btes_to_try[3] = bte_if_on_node(NASID_GET(dest), 1);
		} else {
			btes_to_try[2] = NULL;
			btes_to_try[3] = NULL;
		}
	}
	do {
		local_irq_save(irq_flags);
		bte_if_index = 0;

		/* Attempt to lock one of the BTE interfaces. */
		while (bte_if_index < MAX_INTERFACES_TO_TRY) {
			bte = btes_to_try[bte_if_index++];
			if (bte == NULL)
				continue;

			if (spin_trylock(&bte->spinlock)) {
				if ((*bte->most_rcnt_na & BTE_ACTIVE) ||
				    (BTE_LNSTAT_LOAD(bte) & BTE_ACTIVE)) {
					/* Got the lock but BTE still busy */
					spin_unlock(&bte->spinlock);
				} else {
					/* we got the lock and it's not busy */
					break;
				}
			}
			bte = NULL;
		}

		if (bte != NULL)
			break;

		local_irq_restore(irq_flags);

		if (!(mode & BTE_WACQUIRE)) {
			return BTEFAIL_NOTAVAIL;
		}
	} while (1);
	if (notification == NULL) {
		/* User does not want to be notified. */
		bte->most_rcnt_na = &bte->notify;
	} else {
		bte->most_rcnt_na = notification;
	}
	/* Calculate the number of cache lines to transfer. */
	transfer_size = ((len >> L1_CACHE_SHIFT) & BTE_LEN_MASK);

	/* Initialize the notification to a known value. */
	*bte->most_rcnt_na = -1L;

	/* Set the status reg busy bit and transfer length */
	BTE_PRINTKV(("IBLS = 0x%lx\n", IBLS_BUSY | transfer_size));
	BTE_LNSTAT_STORE(bte, IBLS_BUSY | transfer_size);

	/* Set the source and destination registers */
	BTE_PRINTKV(("IBSA = 0x%lx)\n", (TO_PHYS(src))));
	BTE_SRC_STORE(bte, TO_PHYS(src));
	BTE_PRINTKV(("IBDA = 0x%lx)\n", (TO_PHYS(dest))));
	BTE_DEST_STORE(bte, TO_PHYS(dest));

	/* Set the notification register */
	BTE_PRINTKV(("IBNA = 0x%lx)\n",
		     TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na))));
	BTE_NOTIF_STORE(bte, TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na)));

	/* Initiate the transfer */
	BTE_PRINTK(("IBCT = 0x%lx)\n", BTE_VALID_MODE(mode)));
	BTE_CTRL_STORE(bte, BTE_VALID_MODE(mode));

	spin_unlock_irqrestore(&bte->spinlock, irq_flags);
	if (notification != NULL) {
		/* Asynchronous: the caller polls the notification line. */
		return BTE_SUCCESS;
	}

	while ((transfer_stat = *bte->most_rcnt_na) == -1UL) {
	}

	BTE_PRINTKV((" Delay Done.  IBLS = 0x%lx, most_rcnt_na = 0x%lx\n",
		     BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na));
	if (transfer_stat & IBLS_ERROR) {
		bte_status = transfer_stat & ~IBLS_ERROR;
		*bte->most_rcnt_na = 0L;
	} else {
		bte_status = BTE_SUCCESS;
	}
	BTE_PRINTK(("Returning status is 0x%lx and most_rcnt_na is 0x%lx\n",
		    BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na));

	return bte_status;
}

EXPORT_SYMBOL(bte_copy);
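
/*
 * Illustrative caller sketch (not part of the original driver): a
 * synchronous, cache line aligned copy that spins until one of the BTE
 * interfaces can be acquired.  BTE_WACQUIRE and the bte_result_t codes
 * come from the code above; the helper name and the choice of extra
 * mode bits are assumptions, so the IBCT mode is left for the caller to
 * build from the IBCT0/1 definitions in <asm/sn/bte.h>.
 */
#if 0
static bte_result_t example_sync_copy(u64 src_phys, u64 dest_phys,
				      u64 nbytes, u64 ibct_mode)
{
	bte_result_t rv;

	/* src_phys, dest_phys and nbytes must all be cache line aligned. */
	rv = bte_copy(src_phys, dest_phys, nbytes,
		      ibct_mode | BTE_WACQUIRE, NULL);
	if (rv != BTE_SUCCESS)
		printk(KERN_ERR "example_sync_copy: bte_copy failed (%d)\n",
		       (int) rv);
	return rv;
}
#endif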
/*
 * bte_unaligned_copy(src, dest, len, mode)
 *
 * Use the block transfer engine to move kernel
 * memory from src to dest using the assigned mode.
 *
 * Parameters:
 *   src - physical address of the transfer source.
 *   dest - physical address of the transfer destination.
 *   len - number of bytes to transfer from source to dest.
 *   mode - hardware defined.  See reference information
 *          for IBCT0/1 in the SGI documentation.
 *
 * NOTE: If the source, dest, and len are all cache line aligned,
 * then it would be _FAR_ preferable to use bte_copy instead.
 *
 * (An illustrative caller sketch appears after
 * EXPORT_SYMBOL(bte_unaligned_copy) below.)
 */
bte_result_t
bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
{
	int destFirstCacheOffset;
	u64 headBteSource;
	u64 headBteLen;
	u64 headBcopySrcOffset;
	u64 headBcopyDest;
	u64 headBcopyLen;
	u64 footBteSource;
	u64 footBteLen;
	u64 footBcopyDest;
	u64 footBcopyLen;
	bte_result_t rv;
	char *bteBlock, *bteBlock_unaligned;

	/* temporary buffer used during unaligned transfers */
	bteBlock_unaligned = kmalloc(len + 3 * L1_CACHE_BYTES,
				     GFP_KERNEL | GFP_DMA);
	if (bteBlock_unaligned == NULL) {
		return BTEFAIL_NOTAVAIL;
	}
	bteBlock = (char *) L1_CACHE_ALIGN((u64) bteBlock_unaligned);

	headBcopySrcOffset = src & L1_CACHE_MASK;
	destFirstCacheOffset = dest & L1_CACHE_MASK;
	/*
	 * At this point, the transfer is broken into
	 * (up to) three sections.  The first section is
	 * from the start address to the first physical
	 * cache line, the second is from the first physical
	 * cache line to the last complete cache line,
	 * and the third is from the last cache line to the
	 * end of the buffer.  The first and third sections
	 * are handled by bte copying into a temporary buffer
	 * and then bcopy'ing the necessary section into the
	 * final location.  The middle section is handled with
	 * a standard bte copy.
	 *
	 * One nasty exception to the above rule is when the
	 * source and destination are not symmetrically
	 * mis-aligned.  If the source offset from the first
	 * cache line is different from the destination offset,
	 * we make the first section be the entire transfer
	 * and then bcopy the entire block into place.
	 */
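
	/*
	 * Illustrative example (not in the original source), assuming
	 * 128-byte cache lines: if src and dest both start 0x20 bytes
	 * past a line boundary and len is 0x300, the head section is
	 * 0x60 bytes (up to the first line boundary), the foot section
	 * is the trailing 0x20 bytes, and the middle 0x280 bytes are
	 * whole cache lines moved directly by the BTE.
	 */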
	if (headBcopySrcOffset == destFirstCacheOffset) {

		/*
		 * Both the source and destination are the same
		 * distance from a cache line boundary so we can
		 * use the bte to transfer the bulk of the
		 * data.
		 */
		headBteSource = src & ~L1_CACHE_MASK;
		headBcopyDest = dest;
		if (headBcopySrcOffset) {
			headBcopyLen =
			    (len > (L1_CACHE_BYTES -
				    headBcopySrcOffset) ? L1_CACHE_BYTES
			     - headBcopySrcOffset : len);
			headBteLen = L1_CACHE_BYTES;
		} else {
			headBcopyLen = 0;
			headBteLen = 0;
		}
		if (len > headBcopyLen) {
			footBcopyLen =
			    (len - headBcopyLen) & L1_CACHE_MASK;
			footBteLen = L1_CACHE_BYTES;

			footBteSource = src + len - footBcopyLen;
			footBcopyDest = dest + len - footBcopyLen;

			if (footBcopyDest ==
			    (headBcopyDest + headBcopyLen)) {
				/*
				 * We have two contiguous bcopy
				 * blocks.  Merge them.
				 */
				headBcopyLen += footBcopyLen;
				headBteLen += footBteLen;
			} else if (footBcopyLen > 0) {
				rv = bte_copy(footBteSource,
					      ia64_tpa((unsigned long)bteBlock),
					      footBteLen, mode, NULL);
				if (rv != BTE_SUCCESS) {
					kfree(bteBlock_unaligned);
					return rv;
				}

				memcpy(__va(footBcopyDest),
				       (char *) bteBlock, footBcopyLen);
			}
		} else {
			footBcopyLen = 0;
			footBteLen = 0;
		}
		if (len > (headBcopyLen + footBcopyLen)) {
			/* now transfer the middle. */
			rv = bte_copy((src + headBcopyLen),
				      (dest + headBcopyLen),
				      (len - headBcopyLen -
				       footBcopyLen), mode, NULL);
			if (rv != BTE_SUCCESS) {
				kfree(bteBlock_unaligned);
				return rv;
			}
		}
	} else {

		/*
		 * The transfer is not symmetric, we will
		 * allocate a buffer large enough for all the
		 * data, bte_copy into that buffer and then
		 * bcopy to the destination.
		 */

		/* Add the leader from source */
		headBteLen = len + (src & L1_CACHE_MASK);
		/* Add the trailing bytes from footer. */
		headBteLen +=
		    L1_CACHE_BYTES - (headBteLen & L1_CACHE_MASK);
		headBteSource = src & ~L1_CACHE_MASK;
		headBcopySrcOffset = src & L1_CACHE_MASK;
		headBcopyDest = dest;
		headBcopyLen = len;
	}
	if (headBcopyLen > 0) {
		rv = bte_copy(headBteSource,
			      ia64_tpa((unsigned long)bteBlock), headBteLen, mode, NULL);
		if (rv != BTE_SUCCESS) {
			kfree(bteBlock_unaligned);
			return rv;
		}

		memcpy(__va(headBcopyDest), ((char *) bteBlock +
					     headBcopySrcOffset), headBcopyLen);
	}
	kfree(bteBlock_unaligned);
	return BTE_SUCCESS;
}

EXPORT_SYMBOL(bte_unaligned_copy);
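
/*
 * Illustrative caller sketch (not part of the original driver): copy a
 * byte range with no particular alignment.  Only the
 * bte_unaligned_copy() signature and result codes come from the code
 * above; the helper name and the error mapping are assumptions.  Note
 * that the bounce buffer is allocated with GFP_KERNEL, so this path may
 * sleep and must not be used from atomic context.
 */
#if 0
static int example_unaligned_copy(u64 src_phys, u64 dest_phys,
				  u64 nbytes, u64 ibct_mode)
{
	if (bte_unaligned_copy(src_phys, dest_phys, nbytes,
			       ibct_mode) != BTE_SUCCESS)
		return -EIO;
	return 0;
}
#endif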
/************************************************************************
 * Block Transfer Engine initialization functions.
 *
 ***********************************************************************/
/*
 * bte_init_node(nodepda, cnode)
 *
 * Initialize the nodepda structure with BTE base addresses and
 * spinlocks.
 */
void
bte_init_node(nodepda_t * mynodepda, cnodeid_t cnode)
{
	int i;
	/*
	 * Indicate that all the block transfer engines on this node
	 * are available.
	 */

	/*
	 * Allocate one bte_recover_t structure per node.  It holds
	 * the recovery lock for the node.  All the bte interface structures
	 * will point at this one bte_recover structure to get the lock.
	 */
	spin_lock_init(&mynodepda->bte_recovery_lock);
	init_timer(&mynodepda->bte_recovery_timer);
	mynodepda->bte_recovery_timer.function = bte_error_handler;
	mynodepda->bte_recovery_timer.data = (unsigned long) mynodepda;
	for (i = 0; i < BTES_PER_NODE; i++) {
		(u64) mynodepda->bte_if[i].bte_base_addr =
		    REMOTE_HUB_ADDR(cnodeid_to_nasid(cnode),
				    (i == 0 ? IIO_IBLS0 : IIO_IBLS1));

		/*
		 * Initialize the notification and spinlock
		 * so the first transfer can occur.
		 */
		mynodepda->bte_if[i].most_rcnt_na =
		    &(mynodepda->bte_if[i].notify);
		mynodepda->bte_if[i].notify = 0L;
		spin_lock_init(&mynodepda->bte_if[i].spinlock);

		mynodepda->bte_if[i].bte_cnode = cnode;
		mynodepda->bte_if[i].bte_error_count = 0;
		mynodepda->bte_if[i].bte_num = i;
		mynodepda->bte_if[i].cleanup_active = 0;
		mynodepda->bte_if[i].bh_error = 0;
	}
}