*/
#include <linux/config.h>
+#include <linux/module.h>
#include <asm/sn/sgi.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/addrs.h>
#include <asm/sn/pda.h>
#include <asm/sn/sn2/shubio.h>
#include <asm/nodedata.h>
-#include <asm/delay.h>
#include <linux/bootmem.h>
#include <linux/string.h>
#define L1_CACHE_MASK (L1_CACHE_BYTES - 1)
#endif
-/*
- * The base address of for each set of bte registers.
- */
-static int bte_offsets[] = { IIO_IBLS0, IIO_IBLS1 };
+/* 2 BTE interfaces on each of up to 2 candidate nodes (destination + local) */
+#define MAX_INTERFACES_TO_TRY 4
+
+/*
+ * bte_if_on_node - look up one BTE interface belonging to a given node.
+ * @nasid: hardware NASID of the node whose BTE block to use.
+ * @interface: index into that node's bte_if[] array.
+ *
+ * Returns a pointer into the target node's nodepda.  No locking is done
+ * here; callers acquire bte->spinlock (via spin_trylock) before using
+ * the returned interface -- see the selection loop in bte_copy().
+ */
+static struct bteinfo_s *
+bte_if_on_node(nasid_t nasid, int interface)
+{
+ nodepda_t *tmp_nodepda;
+
+ /* Translate the hardware NASID to a compact node id, then index
+  * that node's per-node data for the requested BTE interface. */
+ tmp_nodepda = NODEPDA(nasid_to_cnodeid(nasid));
+ return &tmp_nodepda->bte_if[interface];
+
+}
/************************************************************************
bte_result_t
bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
{
- int bte_to_use;
u64 transfer_size;
+ u64 transfer_stat;
struct bteinfo_s *bte;
bte_result_t bte_status;
unsigned long irq_flags;
+ struct bteinfo_s *btes_to_try[MAX_INTERFACES_TO_TRY];
+ int bte_if_index;
BTE_PRINTK(("bte_copy(0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%p)\n",
(src & L1_CACHE_MASK) || (dest & L1_CACHE_MASK)));
ASSERT(len < ((BTE_LEN_MASK + 1) << L1_CACHE_SHIFT));
+ if (mode & BTE_USE_DEST) {
+ /* try remote then local */
+ btes_to_try[0] = bte_if_on_node(NASID_GET(dest), 0);
+ btes_to_try[1] = bte_if_on_node(NASID_GET(dest), 1);
+ if (mode & BTE_USE_ANY) {
+ btes_to_try[2] = bte_if_on_node(get_nasid(), 0);
+ btes_to_try[3] = bte_if_on_node(get_nasid(), 1);
+ } else {
+ btes_to_try[2] = NULL;
+ btes_to_try[3] = NULL;
+ }
+ } else {
+ /* try local then remote */
+ btes_to_try[0] = bte_if_on_node(get_nasid(), 0);
+ btes_to_try[1] = bte_if_on_node(get_nasid(), 1);
+ if (mode & BTE_USE_ANY) {
+ btes_to_try[2] = bte_if_on_node(NASID_GET(dest), 0);
+ btes_to_try[3] = bte_if_on_node(NASID_GET(dest), 1);
+ } else {
+ btes_to_try[2] = NULL;
+ btes_to_try[3] = NULL;
+ }
+ }
+
do {
local_irq_save(irq_flags);
- bte_to_use = 0;
+ bte_if_index = 0;
+
/* Attempt to lock one of the BTE interfaces. */
- while ((bte_to_use < BTES_PER_NODE) &&
- BTE_LOCK_IF_AVAIL(bte_to_use)) {
- bte_to_use++;
+ while (bte_if_index < MAX_INTERFACES_TO_TRY) {
+ bte = btes_to_try[bte_if_index++];
+
+ if (bte == NULL) {
+ continue;
+ }
+
+ if (spin_trylock(&bte->spinlock)) {
+ if ((*bte->most_rcnt_na & BTE_ACTIVE) ||
+ (BTE_LNSTAT_LOAD(bte) & BTE_ACTIVE)) {
+ /* Got the lock but BTE still busy */
+ spin_unlock(&bte->spinlock);
+ bte = NULL;
+ } else {
+ /* we got the lock and it's not busy */
+ break;
+ }
+ }
}
- if (bte_to_use < BTES_PER_NODE) {
+ if (bte != NULL) {
break;
}
if (!(mode & BTE_WACQUIRE)) {
return BTEFAIL_NOTAVAIL;
}
-
- /* Wait until a bte is available. */
- udelay(10);
} while (1);
- bte = pda->cpu_bte_if[bte_to_use];
- BTE_PRINTKV(("Got a lock on bte %d\n", bte_to_use));
-
if (notification == NULL) {
/* User does not want to be notified. */
*bte->most_rcnt_na = -1L;
/* Set the status reg busy bit and transfer length */
- BTE_PRINTKV(("IBLS - HUB_S(0x%p, 0x%lx)\n",
- BTEREG_LNSTAT_ADDR, IBLS_BUSY | transfer_size));
- HUB_S(BTEREG_LNSTAT_ADDR, (IBLS_BUSY | transfer_size));
+ BTE_PRINTKV(("IBLS = 0x%lx\n", IBLS_BUSY | transfer_size));
+ BTE_LNSTAT_STORE(bte, IBLS_BUSY | transfer_size);
/* Set the source and destination registers */
- BTE_PRINTKV(("IBSA - HUB_S(0x%p, 0x%lx)\n", BTEREG_SRC_ADDR,
- (TO_PHYS(src))));
- HUB_S(BTEREG_SRC_ADDR, (TO_PHYS(src)));
- BTE_PRINTKV(("IBDA - HUB_S(0x%p, 0x%lx)\n", BTEREG_DEST_ADDR,
- (TO_PHYS(dest))));
- HUB_S(BTEREG_DEST_ADDR, (TO_PHYS(dest)));
+ BTE_PRINTKV(("IBSA = 0x%lx)\n", (TO_PHYS(src))));
+ BTE_SRC_STORE(bte, TO_PHYS(src));
+ BTE_PRINTKV(("IBDA = 0x%lx)\n", (TO_PHYS(dest))));
+ BTE_DEST_STORE(bte, TO_PHYS(dest));
/* Set the notification register */
- BTE_PRINTKV(("IBNA - HUB_S(0x%p, 0x%lx)\n", BTEREG_NOTIF_ADDR,
- (TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na)))));
- HUB_S(BTEREG_NOTIF_ADDR, (TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na))));
+ BTE_PRINTKV(("IBNA = 0x%lx)\n",
+ TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na))));
+ BTE_NOTIF_STORE(bte, TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na)));
/* Initiate the transfer */
- BTE_PRINTK(("IBCT - HUB_S(0x%p, 0x%lx)\n", BTEREG_CTRL_ADDR,
- BTE_VALID_MODE(mode)));
- HUB_S(BTEREG_CTRL_ADDR, BTE_VALID_MODE(mode));
+ BTE_PRINTK(("IBCT = 0x%lx)\n", BTE_VALID_MODE(mode)));
+ BTE_CTRL_STORE(bte, BTE_VALID_MODE(mode));
spin_unlock_irqrestore(&bte->spinlock, irq_flags);
return BTE_SUCCESS;
}
- while (*bte->most_rcnt_na == -1UL) {
+ while ((transfer_stat = *bte->most_rcnt_na) == -1UL) {
}
BTE_PRINTKV((" Delay Done. IBLS = 0x%lx, most_rcnt_na = 0x%lx\n",
- HUB_L(BTEREG_LNSTAT_ADDR), *bte->most_rcnt_na));
+ BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na));
- if (*bte->most_rcnt_na & IBLS_ERROR) {
- bte_status = *bte->most_rcnt_na & ~IBLS_ERROR;
+ if (transfer_stat & IBLS_ERROR) {
+ bte_status = transfer_stat & ~IBLS_ERROR;
*bte->most_rcnt_na = 0L;
} else {
bte_status = BTE_SUCCESS;
}
BTE_PRINTK(("Returning status is 0x%lx and most_rcnt_na is 0x%lx\n",
- HUB_L(BTEREG_LNSTAT_ADDR), *bte->most_rcnt_na));
+ BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na));
return bte_status;
}
+EXPORT_SYMBOL(bte_copy);
/*
u64 footBcopyDest;
u64 footBcopyLen;
bte_result_t rv;
- char *bteBlock;
+ char *bteBlock, *bteBlock_unaligned;
if (len == 0) {
return BTE_SUCCESS;
}
/* temporary buffer used during unaligned transfers */
- bteBlock = pda->cpu_bte_if[0]->scratch_buf;
+ bteBlock_unaligned = kmalloc(len + 3 * L1_CACHE_BYTES,
+ GFP_KERNEL | GFP_DMA);
+ if (bteBlock_unaligned == NULL) {
+ return BTEFAIL_NOTAVAIL;
+ }
+ bteBlock = (char *) L1_CACHE_ALIGN((u64) bteBlock_unaligned);
headBcopySrcOffset = src & L1_CACHE_MASK;
destFirstCacheOffset = dest & L1_CACHE_MASK;
ia64_tpa((unsigned long)bteBlock),
footBteLen, mode, NULL);
if (rv != BTE_SUCCESS) {
+ kfree(bteBlock_unaligned);
return rv;
}
(len - headBcopyLen -
footBcopyLen), mode, NULL);
if (rv != BTE_SUCCESS) {
+ kfree(bteBlock_unaligned);
return rv;
}
rv = bte_copy(headBteSource,
ia64_tpa((unsigned long)bteBlock), headBteLen, mode, NULL);
if (rv != BTE_SUCCESS) {
+ kfree(bteBlock_unaligned);
return rv;
}
headBcopySrcOffset),
headBcopyLen);
}
+ kfree(bteBlock_unaligned);
return BTE_SUCCESS;
}
+EXPORT_SYMBOL(bte_unaligned_copy);
/************************************************************************
mynodepda->bte_recovery_timer.data = (unsigned long) mynodepda;
for (i = 0; i < BTES_PER_NODE; i++) {
- /* >>> Don't know why the 0x1800000L is here. Robin */
- mynodepda->bte_if[i].bte_base_addr =
- (char *) LOCAL_MMR_ADDR(bte_offsets[i] | 0x1800000L);
+ /* Which link status register should we use? */
+ unsigned long link_status = (i == 0 ? IIO_IBLS0 : IIO_IBLS1);
+ mynodepda->bte_if[i].bte_base_addr = (u64 *)
+ REMOTE_HUB_ADDR(cnodeid_to_nasid(cnode), link_status);
/*
* Initialize the notification and spinlock
mynodepda->bte_if[i].notify = 0L;
spin_lock_init(&mynodepda->bte_if[i].spinlock);
- mynodepda->bte_if[i].scratch_buf =
- alloc_bootmem_node(NODE_DATA(cnode), BTE_MAX_XFER);
mynodepda->bte_if[i].bte_cnode = cnode;
mynodepda->bte_if[i].bte_error_count = 0;
mynodepda->bte_if[i].bte_num = i;
}
}
-
-/*
- * bte_init_cpu()
- *
- * Initialize the cpupda structure with pointers to the
- * nodepda bte blocks.
- *
- */
-void
-bte_init_cpu(void)
-{
- /* Called by setup.c as each cpu is being added to the nodepda */
- if (local_node_data->active_cpu_count & 0x1) {
- pda->cpu_bte_if[0] = &(nodepda->bte_if[0]);
- pda->cpu_bte_if[1] = &(nodepda->bte_if[1]);
- } else {
- pda->cpu_bte_if[0] = &(nodepda->bte_if[1]);
- pda->cpu_bte_if[1] = &(nodepda->bte_if[0]);
- }
-}