2 ** IA64 System Bus Adapter (SBA) I/O MMU manager
4 ** (c) Copyright 2002-2004 Alex Williamson
5 ** (c) Copyright 2002-2003 Grant Grundler
6 ** (c) Copyright 2002-2004 Hewlett-Packard Company
8 ** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)
9 ** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
11 ** This program is free software; you can redistribute it and/or modify
12 ** it under the terms of the GNU General Public License as published by
13 ** the Free Software Foundation; either version 2 of the License, or
14 ** (at your option) any later version.
17 ** This module initializes the IOC (I/O Controller) found on HP
18 ** McKinley machines and their successors.
22 #include <linux/config.h>
23 #include <linux/types.h>
24 #include <linux/kernel.h>
25 #include <linux/module.h>
26 #include <linux/spinlock.h>
27 #include <linux/slab.h>
28 #include <linux/init.h>
30 #include <linux/string.h>
31 #include <linux/pci.h>
32 #include <linux/proc_fs.h>
33 #include <linux/seq_file.h>
34 #include <linux/acpi.h>
35 #include <linux/efi.h>
37 #include <asm/delay.h> /* ia64_get_itc() */
39 #include <asm/page.h> /* PAGE_OFFSET */
41 #include <asm/system.h> /* wmb() */
42 #include <asm/bitops.h> /* hweight64() */
44 #include <asm/acpi-ext.h>
49 ** Enables timing of pdir resource-map searches. Results show up in /proc.
50 ** Disabled by default to avoid the performance cost.
52 #undef PDIR_SEARCH_TIMING
55 ** This option allows cards capable of 64bit DMA to bypass the IOMMU. If
56 ** not defined, all DMA will be 32bit and go through the TLB.
57 ** There's potentially a conflict in the bio merge code with us
58 ** advertising an iommu, but then bypassing it. Since I/O MMU bypassing
59 ** appears to give more performance than bio-level virtual merging, we'll
60 ** do the former for now. NOTE: BYPASS_SG also needs to be undef'd to
61 ** completely restrict DMA to the IOMMU.
63 #define ALLOW_IOV_BYPASS
66 ** This option specifically allows/disallows bypassing scatterlists with
67 ** multiple entries. Coalescing these entries can allow better DMA streaming
68 ** and in some cases shows better performance than entirely bypassing the
69 ** IOMMU. Performance increase on the order of 1-2% sequential output/input
70 ** using bonnie++ on a RAID0 MD device (sym2 & mpt).
72 #undef ALLOW_IOV_BYPASS_SG
75 ** If a device prefetches beyond the end of a valid pdir entry, it will cause
76 ** a hard failure, i.e. an MCA. Version 3.0 and later of the zx1 LBA should
77 ** disconnect on 4k boundaries and prevent such issues. If the device is
78 ** particularly aggressive, this option will keep the entire pdir valid such
79 ** that prefetching will hit a valid address. This could severely impact
80 ** error containment, and is therefore off by default. The page that is
81 ** used for spill-over is poisoned, so that should help debugging somewhat.
83 #undef FULL_VALID_PDIR
85 #define ENABLE_MARK_CLEAN
88 ** The number of debug flags is a clue - this code is fragile. NOTE: since
89 ** tightening the use of res_lock the resource bitmap and actual pdir are no
90 ** longer guaranteed to stay in sync. The sanity checking code isn't going to
95 #undef DEBUG_SBA_RUN_SG
96 #undef DEBUG_SBA_RESOURCE
97 #undef ASSERT_PDIR_SANITY
98 #undef DEBUG_LARGE_SG_ENTRIES
101 #if defined(FULL_VALID_PDIR) && defined(ASSERT_PDIR_SANITY)
102 #error FULL_VALID_PDIR and ASSERT_PDIR_SANITY are mutually exclusive
105 #define SBA_INLINE __inline__
106 /* #define SBA_INLINE */
108 #ifdef DEBUG_SBA_INIT
109 #define DBG_INIT(x...) printk(x)
111 #define DBG_INIT(x...)
115 #define DBG_RUN(x...) printk(x)
117 #define DBG_RUN(x...)
120 #ifdef DEBUG_SBA_RUN_SG
121 #define DBG_RUN_SG(x...) printk(x)
123 #define DBG_RUN_SG(x...)
127 #ifdef DEBUG_SBA_RESOURCE
128 #define DBG_RES(x...) printk(x)
130 #define DBG_RES(x...)
134 #define DBG_BYPASS(x...) printk(x)
136 #define DBG_BYPASS(x...)
139 #ifdef ASSERT_PDIR_SANITY
140 #define ASSERT(expr) \
142 printk( "\n" __FILE__ ":%d: Assertion " #expr " failed!\n",__LINE__); \
150 ** The number of pdir entries to "free" before issuing
151 ** a read to the PCOM register to flush out PCOM writes.
152 ** Interacts with allocation granularity (i.e. 4 or 8 entries
153 ** allocated and freed/purged at a time might make this
154 ** less interesting).
156 #define DELAYED_RESOURCE_CNT 64
158 #define ZX1_IOC_ID ((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP)
159 #define REO_IOC_ID ((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP)
160 #define SX1000_IOC_ID ((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP)
162 #define ZX1_IOC_OFFSET 0x1000 /* ACPI reports SBA, we want IOC */
164 #define IOC_FUNC_ID 0x000
165 #define IOC_FCLASS 0x008 /* function class, bist, header, rev... */
166 #define IOC_IBASE 0x300 /* IO TLB */
167 #define IOC_IMASK 0x308
168 #define IOC_PCOM 0x310
169 #define IOC_TCNFG 0x318
170 #define IOC_PDIR_BASE 0x320
172 #define IOC_ROPE0_CFG 0x500
173 #define IOC_ROPE_AO 0x10 /* Allow "Relaxed Ordering" */
176 /* AGP GART driver looks for this */
177 #define ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL
180 ** The zx1 IOC supports 4/8/16/64KB page sizes (see TCNFG register)
182 ** Some IOCs (sx1000) can run at the above page sizes, but are
183 ** really only supported using the IOC at a 4k page size.
185 ** iovp_size could only be greater than PAGE_SIZE if we are
186 ** confident the drivers really only touch the next physical
187 ** page iff that driver instance owns it.
189 static unsigned long iovp_size;
190 static unsigned long iovp_shift;
191 static unsigned long iovp_mask;
194 void __iomem *ioc_hpa; /* I/O MMU base address */
195 char *res_map; /* resource map, bit == pdir entry */
196 u64 *pdir_base; /* virtual base address of the I/O pdir */
197 unsigned long ibase; /* pdir IOV Space base */
198 unsigned long imask; /* pdir IOV Space mask */
200 unsigned long *res_hint; /* next avail IOVP - circular search */
201 unsigned long dma_mask;
202 spinlock_t res_lock; /* protects the resource bitmap, but must be held when */
203 /* clearing pdir to prevent races with allocations. */
204 unsigned int res_bitshift; /* from the RIGHT! */
205 unsigned int res_size; /* size of resource map in bytes */
207 unsigned int node; /* node where this IOC lives */
209 #if DELAYED_RESOURCE_CNT > 0
210 spinlock_t saved_lock; /* may want to try to get this on a separate cacheline */
211 /* than res_lock for bigger systems. */
213 struct sba_dma_pair {
216 } saved[DELAYED_RESOURCE_CNT];
219 #ifdef PDIR_SEARCH_TIMING
220 #define SBA_SEARCH_SAMPLE 0x100
221 unsigned long avg_search[SBA_SEARCH_SAMPLE];
222 unsigned long avg_idx; /* current index into avg_search */
225 /* Stuff we don't need in performance path */
226 struct ioc *next; /* list of IOC's in system */
227 acpi_handle handle; /* for multiple IOC's */
229 unsigned int func_id;
230 unsigned int rev; /* HW revision of chip */
232 unsigned int pdir_size; /* in bytes, determined by IOV Space size */
233 struct pci_dev *sac_only_dev;
236 static struct ioc *ioc_list;
237 static int reserve_sba_gart = 1;
239 static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
240 static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t);
242 #define sba_sg_address(sg) (page_address((sg)->page) + (sg)->offset)
244 #ifdef FULL_VALID_PDIR
245 static u64 prefetch_spill_page;
249 # define GET_IOC(dev) (((dev)->bus == &pci_bus_type) \
250 ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)
252 # define GET_IOC(dev) NULL
256 ** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
257 ** (or rather not merge) DMA's into manageable chunks.
258 ** On parisc, this is more of a software/tuning constraint
259 ** than a HW one. I/O MMU allocation algorithms can be
260 ** faster with smaller sizes (to some degree).
262 #define DMA_CHUNK_SIZE (BITS_PER_LONG*iovp_size)
264 #define ROUNDUP(x,y) (((x) + ((y)-1)) & ~((y)-1))
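/*
** Illustrative arithmetic for the two macros above, assuming the common case
** of iovp_size == 4KB and BITS_PER_LONG == 64 (values are made up):
**
**   DMA_CHUNK_SIZE          == 64 * 4096 == 256KB
**   ROUNDUP(0x2345, 0x1000) == (0x2345 + 0xFFF) & ~0xFFF == 0x3000
**
** i.e. ROUNDUP() rounds x up to the next multiple of the power-of-two y.
*/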
266 /************************************
267 ** SBA register read and write support
269 ** BE WARNED: register writes are posted.
270 ** (i.e. follow writes which must reach HW with a read)
273 #define READ_REG(addr) __raw_readq(addr)
274 #define WRITE_REG(val, addr) __raw_writeq(val, addr)
276 #ifdef DEBUG_SBA_INIT
279 * sba_dump_tlb - debugging only - print IOMMU operating parameters
280 * @hpa: base address of the IOMMU
282 * Print the size/location of the IO MMU PDIR.
285 sba_dump_tlb(char *hpa)
287 DBG_INIT("IO TLB at 0x%p\n", (void *)hpa);
288 DBG_INIT("IOC_IBASE : %016lx\n", READ_REG(hpa+IOC_IBASE));
289 DBG_INIT("IOC_IMASK : %016lx\n", READ_REG(hpa+IOC_IMASK));
290 DBG_INIT("IOC_TCNFG : %016lx\n", READ_REG(hpa+IOC_TCNFG));
291 DBG_INIT("IOC_PDIR_BASE: %016lx\n", READ_REG(hpa+IOC_PDIR_BASE));
297 #ifdef ASSERT_PDIR_SANITY
300 * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
301 * @ioc: IO MMU structure which owns the pdir we are interested in.
302 * @msg: text to print on the output line.
305 * Print one entry of the IO MMU PDIR in human readable form.
308 sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
310 /* start printing from lowest pde in rval */
311 u64 *ptr = &ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)];
312 unsigned long *rptr = (unsigned long *) &ioc->res_map[(pide >>3) & -sizeof(unsigned long)];
315 printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
316 msg, rptr, pide & (BITS_PER_LONG - 1), *rptr);
319 while (rcnt < BITS_PER_LONG) {
320 printk(KERN_DEBUG "%s %2d %p %016Lx\n",
321 (rcnt == (pide & (BITS_PER_LONG - 1)))
323 rcnt, ptr, (unsigned long long) *ptr );
327 printk(KERN_DEBUG "%s", msg);
332 * sba_check_pdir - debugging only - consistency checker
333 * @ioc: IO MMU structure which owns the pdir we are interested in.
334 * @msg: text to print on the output line.
336 * Verify the resource map and pdir state is consistent
339 sba_check_pdir(struct ioc *ioc, char *msg)
341 u64 *rptr_end = (u64 *) &(ioc->res_map[ioc->res_size]);
342 u64 *rptr = (u64 *) ioc->res_map; /* resource map ptr */
343 u64 *pptr = ioc->pdir_base; /* pdir ptr */
346 while (rptr < rptr_end) {
348 int rcnt; /* number of bits we might check */
354 /* Get last byte and highest bit from that */
355 u32 pde = ((u32)((*pptr >> (63)) & 0x1));
356 if ((rval & 0x1) ^ pde)
359 ** BUMMER! -- res_map != pdir --
360 ** Dump rval and matching pdir entries
362 sba_dump_pdir_entry(ioc, msg, pide);
366 rval >>= 1; /* try the next bit */
370 rptr++; /* look at next word of res_map */
372 /* It'd be nice if we always got here :^) */
378 * sba_dump_sg - debugging only - print Scatter-Gather list
379 * @ioc: IO MMU structure which owns the pdir we are interested in.
380 * @startsg: head of the SG list
381 * @nents: number of entries in SG list
383 * print the SG list so we can verify it's correct by hand.
386 sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
388 while (nents-- > 0) {
389 printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
390 startsg->dma_address, startsg->dma_length,
391 sba_sg_address(startsg));
397 sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
399 struct scatterlist *the_sg = startsg;
400 int the_nents = nents;
402 while (the_nents-- > 0) {
403 if (sba_sg_address(the_sg) == 0x0UL)
404 sba_dump_sg(NULL, startsg, nents);
409 #endif /* ASSERT_PDIR_SANITY */
414 /**************************************************************
416 * I/O Pdir Resource Management
418 * Bits set in the resource map are in use.
419 * Each bit can represent a number of pages.
420 * LSbs represent lower addresses (IOVA's).
422 ***************************************************************/
423 #define PAGES_PER_RANGE 1 /* could increase this to 4 or 8 if needed */
425 /* Convert from IOVP to IOVA and vice versa. */
426 #define SBA_IOVA(ioc,iovp,offset) ((ioc->ibase) | (iovp) | (offset))
427 #define SBA_IOVP(ioc,iova) ((iova) & ~(ioc->ibase))
429 #define PDIR_ENTRY_SIZE sizeof(u64)
431 #define PDIR_INDEX(iovp) ((iovp)>>iovp_shift)
433 #define RESMAP_MASK(n) ~(~0UL << (n))
434 #define RESMAP_IDX_MASK (sizeof(unsigned long) - 1)
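/*
** Sketch of the address arithmetic above, with made-up values
** (ibase == 0xC0000000, iovp_shift == 12, pide == 0x1500):
**
**   iovp == pide << iovp_shift                        == 0x01500000
**   SBA_IOVA(ioc, iovp, 0x234)                        == 0xC1500234
**   SBA_IOVP(ioc, 0xC1500234)                         == 0x01500234
**   PDIR_INDEX(0x01500234) == 0x01500234 >> 12        == 0x1500 (back to pide)
**   RESMAP_MASK(4)         == ~(~0UL << 4)            == 0xF
**   RESMAP_IDX_MASK        == sizeof(unsigned long)-1 == 7
**                             (aligns a res_map byte index down to an
**                              8-byte word boundary)
*/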
438 * For most cases the normal get_order is sufficient, however it limits us
439 * to PAGE_SIZE being the minimum mapping alignment and TC flush granularity.
440 * It only incurs about 1 clock cycle to use this one with the static variable
441 * and makes the code more intuitive.
443 static SBA_INLINE int
444 get_iovp_order (unsigned long size)
446 long double d = size - 1;
449 order = ia64_getf_exp(d);
450 order = order - iovp_shift - 0xffff + 1;
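/*
** Worked example (illustrative, assuming iovp_shift == 12): for size == 20KB,
** d == 20479 and ia64_getf_exp() returns the biased exponent 0xffff + 14, so
** order == 14 - 12 + 1 == 3, i.e. the request is treated as 2^3 == 8 IOMMU
** pages (32KB). For size == 4KB the result is 0 (a single page).
*/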
457 * sba_search_bitmap - find free space in IO PDIR resource bitmap
458 * @ioc: IO MMU structure which owns the pdir we are interested in.
459 * @bits_wanted: number of entries we need.
461 * Find consecutive free bits in resource bitmap.
462 * Each bit represents one entry in the IO Pdir.
463 * Cool perf optimization: search for log2(size) bits at a time.
465 static SBA_INLINE unsigned long
466 sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
468 unsigned long *res_ptr = ioc->res_hint;
469 unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
470 unsigned long pide = ~0UL;
472 ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
473 ASSERT(res_ptr < res_end);
476 * N.B. REO/Grande defect AR2305 can cause TLB fetch timeouts
477 * if a TLB entry is purged while in use. sba_mark_invalid()
478 * purges IOTLB entries in power-of-two sizes, so we also
479 * allocate IOVA space in power-of-two sizes.
481 bits_wanted = 1UL << get_iovp_order(bits_wanted << iovp_shift);
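/*
** e.g. (illustrative) a 5-page request is rounded up to 8 pages here;
** 1-, 2-, and 4-page requests are left unchanged.
*/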
483 if (likely(bits_wanted == 1)) {
484 unsigned int bitshiftcnt;
485 for(; res_ptr < res_end ; res_ptr++) {
486 if (likely(*res_ptr != ~0UL)) {
487 bitshiftcnt = ffz(*res_ptr);
488 *res_ptr |= (1UL << bitshiftcnt);
489 pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
490 pide <<= 3; /* convert to bit address */
492 ioc->res_bitshift = bitshiftcnt + bits_wanted;
500 if (likely(bits_wanted <= BITS_PER_LONG/2)) {
502 ** Search the resource bit map on well-aligned values.
503 ** "o" is the alignment.
504 ** We need the alignment to invalidate I/O TLB using
505 ** SBA HW features in the unmap path.
507 unsigned long o = 1 << get_iovp_order(bits_wanted << iovp_shift);
508 uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
509 unsigned long mask, base_mask;
511 base_mask = RESMAP_MASK(bits_wanted);
512 mask = base_mask << bitshiftcnt;
514 DBG_RES("%s() o %ld %p", __FUNCTION__, o, res_ptr);
515 for(; res_ptr < res_end ; res_ptr++)
517 DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr);
519 for (; mask ; mask <<= o, bitshiftcnt += o) {
520 if(0 == ((*res_ptr) & mask)) {
521 *res_ptr |= mask; /* mark resources busy! */
522 pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
523 pide <<= 3; /* convert to bit address */
525 ioc->res_bitshift = bitshiftcnt + bits_wanted;
539 qwords = bits_wanted >> 6; /* /64 */
540 bits = bits_wanted - (qwords * BITS_PER_LONG);
542 end = res_end - qwords;
544 for (; res_ptr < end; res_ptr++) {
545 for (i = 0 ; i < qwords ; i++) {
549 if (bits && res_ptr[i] && (__ffs(res_ptr[i]) < bits))
552 /* Found it, mark it */
553 for (i = 0 ; i < qwords ; i++)
555 res_ptr[i] |= RESMAP_MASK(bits);
557 pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
558 pide <<= 3; /* convert to bit address */
560 ioc->res_bitshift = bits;
568 prefetch(ioc->res_map);
569 ioc->res_hint = (unsigned long *) ioc->res_map;
570 ioc->res_bitshift = 0;
574 ioc->res_hint = res_ptr;
580 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
581 * @ioc: IO MMU structure which owns the pdir we are interested in.
582 * @size: number of bytes to create a mapping for
584 * Given a size, find consecutive unmarked and then mark those bits in the
588 sba_alloc_range(struct ioc *ioc, size_t size)
590 unsigned int pages_needed = size >> iovp_shift;
591 #ifdef PDIR_SEARCH_TIMING
592 unsigned long itc_start;
597 ASSERT(pages_needed);
598 ASSERT(0 == (size & ~iovp_mask));
600 spin_lock_irqsave(&ioc->res_lock, flags);
602 #ifdef PDIR_SEARCH_TIMING
603 itc_start = ia64_get_itc();
606 ** "seek and ye shall find"...praying never hurts either...
608 pide = sba_search_bitmap(ioc, pages_needed);
609 if (unlikely(pide >= (ioc->res_size << 3))) {
610 pide = sba_search_bitmap(ioc, pages_needed);
611 if (unlikely(pide >= (ioc->res_size << 3))) {
612 #if DELAYED_RESOURCE_CNT > 0
614 ** With delayed resource freeing, we can give this one more shot. We're
615 ** getting close to being in trouble here, so do what we can to make this
618 spin_lock(&ioc->saved_lock);
619 if (ioc->saved_cnt > 0) {
620 struct sba_dma_pair *d;
621 int cnt = ioc->saved_cnt;
623 d = &(ioc->saved[ioc->saved_cnt - 1]);
626 sba_mark_invalid(ioc, d->iova, d->size);
627 sba_free_range(ioc, d->iova, d->size);
631 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
633 spin_unlock(&ioc->saved_lock);
635 pide = sba_search_bitmap(ioc, pages_needed);
636 if (unlikely(pide >= (ioc->res_size << 3)))
637 panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
640 panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
646 #ifdef PDIR_SEARCH_TIMING
647 ioc->avg_search[ioc->avg_idx++] = (ia64_get_itc() - itc_start) / pages_needed;
648 ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
651 prefetchw(&(ioc->pdir_base[pide]));
653 #ifdef ASSERT_PDIR_SANITY
654 /* verify the first enable bit is clear */
655 if(0x00 != ((u8 *) ioc->pdir_base)[pide*PDIR_ENTRY_SIZE + 7]) {
656 sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
660 DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
661 __FUNCTION__, size, pages_needed, pide,
662 (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
665 spin_unlock_irqrestore(&ioc->res_lock, flags);
672 * sba_free_range - unmark bits in IO PDIR resource bitmap
673 * @ioc: IO MMU structure which owns the pdir we are interested in.
674 * @iova: IO virtual address which was previously allocated.
675 * @size: number of bytes to create a mapping for
677 * clear bits in the ioc's resource map
679 static SBA_INLINE void
680 sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
682 unsigned long iovp = SBA_IOVP(ioc, iova);
683 unsigned int pide = PDIR_INDEX(iovp);
684 unsigned int ridx = pide >> 3; /* convert bit to byte address */
685 unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
686 int bits_not_wanted = size >> iovp_shift;
689 /* Round up to power-of-two size: see AR2305 note above */
690 bits_not_wanted = 1UL << get_iovp_order(bits_not_wanted << iovp_shift);
691 for (; bits_not_wanted > 0 ; res_ptr++) {
693 if (unlikely(bits_not_wanted > BITS_PER_LONG)) {
695 /* these mappings start 64bit aligned */
697 bits_not_wanted -= BITS_PER_LONG;
698 pide += BITS_PER_LONG;
702 /* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
703 m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1));
706 DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __FUNCTION__, (uint) iova, size,
707 bits_not_wanted, m, pide, res_ptr, *res_ptr);
710 ASSERT(bits_not_wanted);
711 ASSERT((*res_ptr & m) == m); /* verify same bits are set */
718 /**************************************************************
720 * "Dynamic DMA Mapping" support (aka "Coherent I/O")
722 ***************************************************************/
725 * sba_io_pdir_entry - fill in one IO PDIR entry
726 * @pdir_ptr: pointer to IO PDIR entry
727 * @vba: Virtual CPU address of buffer to map
729 * SBA Mapping Routine
731 * Given a virtual address (vba, arg1) sba_io_pdir_entry()
732 * loads the I/O PDIR entry pointed to by pdir_ptr (arg0).
733 * Each IO Pdir entry consists of 8 bytes as shown below
737 * +-+---------------------+----------------------------------+----+--------+
738 * |V|          U          |            PPN[39:12]            | U  |   FF   |
739 * +-+---------------------+----------------------------------+----+--------+
743 * PPN == Physical Page Number
745 * The physical address fields are filled with the results of virt_to_phys()
750 #define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL) \
751 | 0x8000000000000000ULL)
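/*
** Illustrative encoding (made-up region-7 address): for
** vba == 0xE000000012345678,
**   vba & ~0xE000000000000FFF == 0x0000000012345000  (drops region bits 63:61
**                                                      and offset bits 11:0,
**                                                      i.e. virt_to_phys())
**   | 0x8000000000000000      == 0x8000000012345000  (sets the Valid bit)
*/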
754 sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
756 *pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL);
760 #ifdef ENABLE_MARK_CLEAN
762 * Since DMA is i-cache coherent, any (complete) pages that were written via
763 * DMA can be marked as "clean" so that update_mmu_cache() doesn't have to
764 * flush them when they get mapped into an executable vm-area.
767 mark_clean (void *addr, size_t size)
769 unsigned long pg_addr, end;
771 pg_addr = PAGE_ALIGN((unsigned long) addr);
772 end = (unsigned long) addr + size;
773 while (pg_addr + PAGE_SIZE <= end) {
774 struct page *page = virt_to_page((void *)pg_addr);
775 set_bit(PG_arch_1, &page->flags);
776 pg_addr += PAGE_SIZE;
782 * sba_mark_invalid - invalidate one or more IO PDIR entries
783 * @ioc: IO MMU structure which owns the pdir we are interested in.
784 * @iova: IO Virtual Address mapped earlier
785 * @byte_cnt: number of bytes this mapping covers.
787 * Marks the IO PDIR entry(ies) as Invalid and invalidates the
788 * corresponding IO TLB entry. The PCOM (Purge Command Register)
789 * is used to purge stale entries in the IO TLB when unmapping entries.
791 * The PCOM register supports purging of multiple pages, with a minimum
792 * of 1 page and a maximum of 2GB. Hardware requires the address be
793 * aligned to the size of the range being purged. The size of the range
794 * must be a power of 2. The "Cool perf optimization" in the
795 * allocation routine helps keep that true.
797 static SBA_INLINE void
798 sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
800 u32 iovp = (u32) SBA_IOVP(ioc,iova);
802 int off = PDIR_INDEX(iovp);
804 /* Must be non-zero and rounded up */
805 ASSERT(byte_cnt > 0);
806 ASSERT(0 == (byte_cnt & ~iovp_mask));
808 #ifdef ASSERT_PDIR_SANITY
809 /* Assert first pdir entry is set */
810 if (!(ioc->pdir_base[off] >> 60)) {
811 sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
815 if (byte_cnt <= iovp_size)
817 ASSERT(off < ioc->pdir_size);
819 iovp |= iovp_shift; /* set "size" field for PCOM */
821 #ifndef FULL_VALID_PDIR
823 ** clear I/O PDIR entry "valid" bit
824 ** Do NOT clear the rest - save it for debugging.
825 ** We should only clear bits that have previously
828 ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
831 ** If we want to maintain the PDIR as valid, put in
832 ** the spill page so devices prefetching won't
833 ** cause a hard fail.
835 ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
838 u32 t = get_iovp_order(byte_cnt) + iovp_shift;
841 ASSERT(t <= 31); /* 2GB! Max value of "size" field */
844 /* verify this pdir entry is enabled */
845 ASSERT(ioc->pdir_base[off] >> 63);
846 #ifndef FULL_VALID_PDIR
847 /* clear I/O Pdir entry "valid" bit first */
848 ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
850 ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
853 byte_cnt -= iovp_size;
854 } while (byte_cnt > 0);
857 WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa+IOC_PCOM);
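/*
** The write above encodes both the purge address and size. Illustrative
** values (made-up ibase 0xC0000000, iovp_shift == 12): purging a single 4KB
** page at iovp 0x01500000 writes 0xC150000C (the low bits encode
** log2(4KB) == 12); purging a 32KB range writes 0xC150000F (log2(32KB) == 15).
*/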
861 * sba_map_single - map one buffer and return IOVA for DMA
862 * @dev: instance of PCI owned by the driver that's asking.
863 * @addr: driver buffer to map.
864 * @size: number of bytes to map in driver buffer.
867 * See Documentation/DMA-mapping.txt
870 sba_map_single(struct device *dev, void *addr, size_t size, int dir)
877 #ifdef ASSERT_PDIR_SANITY
880 #ifdef ALLOW_IOV_BYPASS
881 unsigned long pci_addr = virt_to_phys(addr);
884 #ifdef ALLOW_IOV_BYPASS
885 ASSERT(to_pci_dev(dev)->dma_mask);
887 ** Check if the PCI device can DMA to ptr... if so, just return ptr
889 if (likely((pci_addr & ~to_pci_dev(dev)->dma_mask) == 0)) {
891 ** Device is capable of DMA'ing directly to the buffer...
892 ** just return the PCI address of ptr
894 DBG_BYPASS("sba_map_single() bypass mask/addr: 0x%lx/0x%lx\n",
895 to_pci_dev(dev)->dma_mask, pci_addr);
902 prefetch(ioc->res_hint);
905 ASSERT(size <= DMA_CHUNK_SIZE);
907 /* save offset bits */
908 offset = ((dma_addr_t) (long) addr) & ~iovp_mask;
910 /* round up to nearest iovp_size */
911 size = (size + offset + ~iovp_mask) & iovp_mask;
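/*
** e.g. (illustrative, iovp_size == 4KB): an addr ending in 0x234 with
** size 0x1800 gives offset == 0x234 and a rounded size of 0x2000,
** i.e. two pdir entries.
*/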
913 #ifdef ASSERT_PDIR_SANITY
914 spin_lock_irqsave(&ioc->res_lock, flags);
915 if (sba_check_pdir(ioc,"Check before sba_map_single()"))
916 panic("Sanity check failed");
917 spin_unlock_irqrestore(&ioc->res_lock, flags);
920 pide = sba_alloc_range(ioc, size);
922 iovp = (dma_addr_t) pide << iovp_shift;
924 DBG_RUN("%s() 0x%p -> 0x%lx\n",
925 __FUNCTION__, addr, (long) iovp | offset);
927 pdir_start = &(ioc->pdir_base[pide]);
930 ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
931 sba_io_pdir_entry(pdir_start, (unsigned long) addr);
933 DBG_RUN(" pdir 0x%p %lx\n", pdir_start, *pdir_start);
939 /* force pdir update */
942 /* form complete address */
943 #ifdef ASSERT_PDIR_SANITY
944 spin_lock_irqsave(&ioc->res_lock, flags);
945 sba_check_pdir(ioc,"Check after sba_map_single()");
946 spin_unlock_irqrestore(&ioc->res_lock, flags);
948 return SBA_IOVA(ioc, iovp, offset);
952 * sba_unmap_single - unmap one IOVA and free resources
953 * @dev: instance of PCI owned by the driver that's asking.
954 * @iova: IOVA of driver buffer previously mapped.
955 * @size: number of bytes mapped in driver buffer.
958 * See Documentation/DMA-mapping.txt
960 void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
963 #if DELAYED_RESOURCE_CNT > 0
964 struct sba_dma_pair *d;
972 #ifdef ALLOW_IOV_BYPASS
973 if (likely((iova & ioc->imask) != ioc->ibase)) {
975 ** Address does not fall w/in IOVA, must be bypassing
977 DBG_BYPASS("sba_unmap_single() bypass addr: 0x%lx\n", iova);
979 #ifdef ENABLE_MARK_CLEAN
980 if (dir == DMA_FROM_DEVICE) {
981 mark_clean(phys_to_virt(iova), size);
987 offset = iova & ~iovp_mask;
989 DBG_RUN("%s() iovp 0x%lx/%x\n",
990 __FUNCTION__, (long) iova, size);
992 iova ^= offset; /* clear offset bits */
994 size = ROUNDUP(size, iovp_size);
997 #if DELAYED_RESOURCE_CNT > 0
998 spin_lock_irqsave(&ioc->saved_lock, flags);
999 d = &(ioc->saved[ioc->saved_cnt]);
1002 if (unlikely(++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT)) {
1003 int cnt = ioc->saved_cnt;
1004 spin_lock(&ioc->res_lock);
1006 sba_mark_invalid(ioc, d->iova, d->size);
1007 sba_free_range(ioc, d->iova, d->size);
1011 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
1012 spin_unlock(&ioc->res_lock);
1014 spin_unlock_irqrestore(&ioc->saved_lock, flags);
1015 #else /* DELAYED_RESOURCE_CNT == 0 */
1016 spin_lock_irqsave(&ioc->res_lock, flags);
1017 sba_mark_invalid(ioc, iova, size);
1018 sba_free_range(ioc, iova, size);
1019 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
1020 spin_unlock_irqrestore(&ioc->res_lock, flags);
1021 #endif /* DELAYED_RESOURCE_CNT == 0 */
1022 #ifdef ENABLE_MARK_CLEAN
1023 if (dir == DMA_FROM_DEVICE) {
1024 u32 iovp = (u32) SBA_IOVP(ioc,iova);
1025 int off = PDIR_INDEX(iovp);
1028 if (size <= iovp_size) {
1029 addr = phys_to_virt(ioc->pdir_base[off] &
1030 ~0xE000000000000FFFULL);
1031 mark_clean(addr, size);
1033 size_t byte_cnt = size;
1036 addr = phys_to_virt(ioc->pdir_base[off] &
1037 ~0xE000000000000FFFULL);
1038 mark_clean(addr, min(byte_cnt, iovp_size));
1040 byte_cnt -= iovp_size;
1042 } while (byte_cnt > 0);
1050 * sba_alloc_coherent - allocate/map shared mem for DMA
1051 * @dev: instance of PCI owned by the driver that's asking.
1052 * @size: number of bytes mapped in driver buffer.
1053 * @dma_handle: IOVA of new buffer.
1055 * See Documentation/DMA-mapping.txt
1058 sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, int flags)
1069 page = alloc_pages_node(ioc->node == MAX_NUMNODES ?
1070 numa_node_id() : ioc->node, flags,
1073 if (unlikely(!page))
1076 addr = page_address(page);
1079 addr = (void *) __get_free_pages(flags, get_order(size));
1081 if (unlikely(!addr))
1084 memset(addr, 0, size);
1085 *dma_handle = virt_to_phys(addr);
1087 #ifdef ALLOW_IOV_BYPASS
1088 ASSERT(dev->coherent_dma_mask);
1090 ** Check if the PCI device can DMA to ptr... if so, just return ptr
1092 if (likely((*dma_handle & ~dev->coherent_dma_mask) == 0)) {
1093 DBG_BYPASS("sba_alloc_coherent() bypass mask/addr: 0x%lx/0x%lx\n",
1094 dev->coherent_dma_mask, *dma_handle);
1101 * If device can't bypass or bypass is disabled, pass the 32bit fake
1102 * device to map single to get an iova mapping.
1104 *dma_handle = sba_map_single(&ioc->sac_only_dev->dev, addr, size, 0);
1111 * sba_free_coherent - free/unmap shared mem for DMA
1112 * @dev: instance of PCI owned by the driver that's asking.
1113 * @size: number of bytes mapped in driver buffer.
1114 * @vaddr: virtual address IOVA of "consistent" buffer.
1115 * @dma_handle: IO virtual address of "consistent" buffer.
1117 * See Documentation/DMA-mapping.txt
1119 void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
1121 sba_unmap_single(dev, dma_handle, size, 0);
1122 free_pages((unsigned long) vaddr, get_order(size));
1127 ** Since 0 is a valid pdir_base index value, can't use that
1128 ** to determine if a value is valid or not. Use a flag to indicate
1129 ** the SG list entry contains a valid pdir index.
1131 #define PIDE_FLAG 0x1UL
1133 #ifdef DEBUG_LARGE_SG_ENTRIES
1134 int dump_run_sg = 0;
1139 * sba_fill_pdir - write allocated SG entries into IO PDIR
1140 * @ioc: IO MMU structure which owns the pdir we are interested in.
1141 * @startsg: list of IOVA/size pairs
1142 * @nents: number of entries in startsg list
1144 * Take preprocessed SG list and write corresponding entries
1148 static SBA_INLINE int
1151 struct scatterlist *startsg,
1154 struct scatterlist *dma_sg = startsg; /* pointer to current DMA */
1157 unsigned long dma_offset = 0;
1160 while (nents-- > 0) {
1161 int cnt = startsg->dma_length;
1162 startsg->dma_length = 0;
1164 #ifdef DEBUG_LARGE_SG_ENTRIES
1166 printk(" %2d : %08lx/%05x %p\n",
1167 nents, startsg->dma_address, cnt,
1168 sba_sg_address(startsg));
1170 DBG_RUN_SG(" %d : %08lx/%05x %p\n",
1171 nents, startsg->dma_address, cnt,
1172 sba_sg_address(startsg));
1175 ** Look for the start of a new DMA stream
1177 if (startsg->dma_address & PIDE_FLAG) {
1178 u32 pide = startsg->dma_address & ~PIDE_FLAG;
1179 dma_offset = (unsigned long) pide & ~iovp_mask;
1180 startsg->dma_address = 0;
1182 dma_sg->dma_address = pide | ioc->ibase;
1183 pdirp = &(ioc->pdir_base[pide >> iovp_shift]);
1188 ** Look for a VCONTIG chunk
1191 unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
1194 /* Since multiple Vcontig blocks could make up
1195 ** one DMA stream, *add* cnt to dma_len.
1197 dma_sg->dma_length += cnt;
1199 dma_offset=0; /* only want offset on first chunk */
1200 cnt = ROUNDUP(cnt, iovp_size);
1202 sba_io_pdir_entry(pdirp, vaddr);
1210 /* force pdir update */
1213 #ifdef DEBUG_LARGE_SG_ENTRIES
1221 ** Two address ranges are DMA contiguous *iff* "end of prev" and
1222 ** "start of next" are both on an IOV page boundary.
1224 ** (shift left is a quick trick to mask off upper bits)
1226 #define DMA_CONTIG(__X, __Y) \
1227 (((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - iovp_shift)) == 0UL)
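/*
** Illustrative check (iovp_shift == 12, made-up addresses): vcontig_end ==
** 0xe000000000408000 and vaddr == 0xe000000000720000 both have zero low
** 12 bits, so (__X | __Y) << 52 == 0 and DMA_CONTIG() is true - the chunks
** can occupy consecutive pdir entries even though they aren't virtually
** adjacent.
*/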
1231 * sba_coalesce_chunks - preprocess the SG list
1232 * @ioc: IO MMU structure which owns the pdir we are interested in.
1233 * @startsg: list of IOVA/size pairs
1234 * @nents: number of entries in startsg list
1236 * First pass is to walk the SG list and determine where the breaks are
1237 * in the DMA stream. Allocates PDIR entries but does not fill them.
1238 * Returns the number of DMA chunks.
1240 * Doing the fill separate from the coalescing/allocation keeps the
1241 * code simpler. Future enhancement could make one pass through
1242 * the sglist do both.
1244 static SBA_INLINE int
1245 sba_coalesce_chunks( struct ioc *ioc,
1246 struct scatterlist *startsg,
1249 struct scatterlist *vcontig_sg; /* VCONTIG chunk head */
1250 unsigned long vcontig_len; /* len of VCONTIG chunk */
1251 unsigned long vcontig_end;
1252 struct scatterlist *dma_sg; /* next DMA stream head */
1253 unsigned long dma_offset, dma_len; /* start/len of DMA stream */
1257 unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
1260 ** Prepare for first/next DMA stream
1262 dma_sg = vcontig_sg = startsg;
1263 dma_len = vcontig_len = vcontig_end = startsg->length;
1264 vcontig_end += vaddr;
1265 dma_offset = vaddr & ~iovp_mask;
1267 /* PARANOID: clear entries */
1268 startsg->dma_address = startsg->dma_length = 0;
1271 ** This loop terminates one iteration "early" since
1272 ** it's always looking one "ahead".
1274 while (--nents > 0) {
1275 unsigned long vaddr; /* tmp */
1280 startsg->dma_address = startsg->dma_length = 0;
1282 /* catch brokenness in SCSI layer */
1283 ASSERT(startsg->length <= DMA_CHUNK_SIZE);
1286 ** First make sure current dma stream won't
1287 ** exceed DMA_CHUNK_SIZE if we coalesce the
1290 if (((dma_len + dma_offset + startsg->length + ~iovp_mask) & iovp_mask)
1295 ** Then look for virtually contiguous blocks.
1297 ** append the next transaction?
1299 vaddr = (unsigned long) sba_sg_address(startsg);
1300 if (vcontig_end == vaddr)
1302 vcontig_len += startsg->length;
1303 vcontig_end += startsg->length;
1304 dma_len += startsg->length;
1308 #ifdef DEBUG_LARGE_SG_ENTRIES
1309 dump_run_sg = (vcontig_len > iovp_size);
1313 ** Not virtually contiguous.
1314 ** Terminate prev chunk.
1315 ** Start a new chunk.
1317 ** Once we start a new VCONTIG chunk, dma_offset
1318 ** can't change. And we need the offset from the first
1319 ** chunk - not the last one. Ergo, successive chunks
1320 ** must start on page boundaries and dovetail
1321 ** with their predecessor.
1323 vcontig_sg->dma_length = vcontig_len;
1325 vcontig_sg = startsg;
1326 vcontig_len = startsg->length;
1329 ** 3) do the entries end/start on page boundaries?
1330 ** Don't update vcontig_end until we've checked.
1332 if (DMA_CONTIG(vcontig_end, vaddr))
1334 vcontig_end = vcontig_len + vaddr;
1335 dma_len += vcontig_len;
1343 ** End of DMA Stream
1344 ** Terminate last VCONTIG block.
1345 ** Allocate space for DMA stream.
1347 vcontig_sg->dma_length = vcontig_len;
1348 dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
1349 ASSERT(dma_len <= DMA_CHUNK_SIZE);
1350 dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG
1351 | (sba_alloc_range(ioc, dma_len) << iovp_shift)
1361 * sba_map_sg - map Scatter/Gather list
1362 * @dev: instance of PCI owned by the driver that's asking.
1363 * @sglist: array of buffer/length pairs
1364 * @nents: number of entries in list
1365 * @dir: R/W or both.
1367 * See Documentation/DMA-mapping.txt
1369 int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int dir)
1372 int coalesced, filled = 0;
1373 #ifdef ASSERT_PDIR_SANITY
1374 unsigned long flags;
1376 #ifdef ALLOW_IOV_BYPASS_SG
1377 struct scatterlist *sg;
1380 DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);
1384 #ifdef ALLOW_IOV_BYPASS_SG
1385 ASSERT(to_pci_dev(dev)->dma_mask);
1386 if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
1387 for (sg = sglist ; filled < nents ; filled++, sg++){
1388 sg->dma_length = sg->length;
1389 sg->dma_address = virt_to_phys(sba_sg_address(sg));
1394 /* Fast path single entry scatterlists. */
1396 sglist->dma_length = sglist->length;
1397 sglist->dma_address = sba_map_single(dev, sba_sg_address(sglist), sglist->length, dir);
1401 #ifdef ASSERT_PDIR_SANITY
1402 spin_lock_irqsave(&ioc->res_lock, flags);
1403 if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
1405 sba_dump_sg(ioc, sglist, nents);
1406 panic("Check before sba_map_sg()");
1408 spin_unlock_irqrestore(&ioc->res_lock, flags);
1411 prefetch(ioc->res_hint);
1414 ** First coalesce the chunks and allocate I/O pdir space
1416 ** If this is one DMA stream, we can properly map using the
1417 ** correct virtual address associated with each DMA page.
1418 ** w/o this association, we wouldn't have coherent DMA!
1419 ** Access to the virtual address is what forces a two pass algorithm.
1421 coalesced = sba_coalesce_chunks(ioc, sglist, nents);
1424 ** Program the I/O Pdir
1426 ** map the virtual addresses to the I/O Pdir
1427 ** o dma_address will contain the pdir index
1428 ** o dma_len will contain the number of bytes to map
1429 ** o address contains the virtual address.
1431 filled = sba_fill_pdir(ioc, sglist, nents);
1433 #ifdef ASSERT_PDIR_SANITY
1434 spin_lock_irqsave(&ioc->res_lock, flags);
1435 if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
1437 sba_dump_sg(ioc, sglist, nents);
1438 panic("Check after sba_map_sg()\n");
1440 spin_unlock_irqrestore(&ioc->res_lock, flags);
1443 ASSERT(coalesced == filled);
1444 DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);
1451 * sba_unmap_sg - unmap Scatter/Gather list
1452 * @dev: instance of PCI owned by the driver that's asking.
1453 * @sglist: array of buffer/length pairs
1454 * @nents: number of entries in list
1455 * @dir: R/W or both.
1457 * See Documentation/DMA-mapping.txt
1459 void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir)
1461 #ifdef ASSERT_PDIR_SANITY
1463 unsigned long flags;
1466 DBG_RUN_SG("%s() START %d entries, %p,%x\n",
1467 __FUNCTION__, nents, sba_sg_address(sglist), sglist->length);
1469 #ifdef ASSERT_PDIR_SANITY
1473 spin_lock_irqsave(&ioc->res_lock, flags);
1474 sba_check_pdir(ioc,"Check before sba_unmap_sg()");
1475 spin_unlock_irqrestore(&ioc->res_lock, flags);
1478 while (nents && sglist->dma_length) {
1480 sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir);
1485 DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents);
1487 #ifdef ASSERT_PDIR_SANITY
1488 spin_lock_irqsave(&ioc->res_lock, flags);
1489 sba_check_pdir(ioc,"Check after sba_unmap_sg()");
1490 spin_unlock_irqrestore(&ioc->res_lock, flags);
1495 /**************************************************************
1497 * Initialization and claim
1499 ***************************************************************/
1502 ioc_iova_init(struct ioc *ioc)
1506 struct pci_dev *device = NULL;
1507 #ifdef FULL_VALID_PDIR
1508 unsigned long index;
1512 ** Firmware programs the base and size of a "safe IOVA space"
1513 ** (one that doesn't overlap memory or LMMIO space) in the
1514 ** IBASE and IMASK registers.
1516 ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1UL;
1517 ioc->imask = READ_REG(ioc->ioc_hpa + IOC_IMASK) | 0xFFFFFFFF00000000UL;
1519 ioc->iov_size = ~ioc->imask + 1;
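/*
** e.g. (illustrative): an IMASK register value of 0xF0000000 yields
** imask == 0xFFFFFFFFF0000000 and iov_size == ~imask + 1 == 0x10000000,
** i.e. a 256MB IOVA space.
*/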
1521 DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n",
1522 __FUNCTION__, ioc->ioc_hpa, ioc->ibase, ioc->imask,
1523 ioc->iov_size >> 20);
1525 switch (iovp_size) {
1526 case 4*1024: tcnfg = 0; break;
1527 case 8*1024: tcnfg = 1; break;
1528 case 16*1024: tcnfg = 2; break;
1529 case 64*1024: tcnfg = 3; break;
1531 panic(PFX "Unsupported IOTLB page size %ldK",
1535 WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);
1537 ioc->pdir_size = (ioc->iov_size / iovp_size) * PDIR_ENTRY_SIZE;
1538 ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
1539 get_order(ioc->pdir_size));
1540 if (!ioc->pdir_base)
1541 panic(PFX "Couldn't allocate I/O Page Table\n");
1543 memset(ioc->pdir_base, 0, ioc->pdir_size);
1545 DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __FUNCTION__,
1546 iovp_size >> 10, ioc->pdir_base, ioc->pdir_size);
1548 ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base);
1549 WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
1552 ** If an AGP device is present, only use half of the IOV space
1553 ** for PCI DMA. Unfortunately we can't know ahead of time
1554 ** whether GART support will actually be used, for now we
1555 ** can just key on an AGP device found in the system.
1556 ** We program the next pdir index after we stop w/ a key for
1557 ** the GART code to handshake on.
1559 while ((device = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, device)) != NULL)
1560 agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP);
1562 if (agp_found && reserve_sba_gart) {
1563 printk(KERN_INFO PFX "reserving %dMb of IOVA space at 0x%lx for agpgart\n",
1564 ioc->iov_size/2 >> 20, ioc->ibase + ioc->iov_size/2);
1565 ioc->pdir_size /= 2;
1566 ((u64 *)ioc->pdir_base)[PDIR_INDEX(ioc->iov_size/2)] = ZX1_SBA_IOMMU_COOKIE;
1568 #ifdef FULL_VALID_PDIR
1570 ** Check to see if the spill page has already been allocated; we don't need
1571 ** more than one across multiple SBAs.
1573 if (!prefetch_spill_page) {
1574 char *spill_poison = "SBAIOMMU POISON";
1575 int poison_size = 16;
1576 void *poison_addr, *addr;
1578 addr = (void *)__get_free_pages(GFP_KERNEL, get_order(iovp_size));
1580 panic(PFX "Couldn't allocate PDIR spill page\n");
1583 for ( ; (u64) poison_addr < addr + iovp_size; poison_addr += poison_size)
1584 memcpy(poison_addr, spill_poison, poison_size);
1586 prefetch_spill_page = virt_to_phys(addr);
1588 DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __FUNCTION__, prefetch_spill_page);
1591 ** Set all the PDIR entries valid w/ the spill page as the target
1593 for (index = 0 ; index < (ioc->pdir_size / PDIR_ENTRY_SIZE) ; index++)
1594 ((u64 *)ioc->pdir_base)[index] = (0x80000000000000FF | prefetch_spill_page);
1597 /* Clear I/O TLB of any possible entries */
1598 WRITE_REG(ioc->ibase | (get_iovp_order(ioc->iov_size) + iovp_shift), ioc->ioc_hpa + IOC_PCOM);
1599 READ_REG(ioc->ioc_hpa + IOC_PCOM);
1601 /* Enable IOVA translation */
1602 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
1603 READ_REG(ioc->ioc_hpa + IOC_IBASE);
1607 ioc_resource_init(struct ioc *ioc)
1609 spin_lock_init(&ioc->res_lock);
1610 #if DELAYED_RESOURCE_CNT > 0
1611 spin_lock_init(&ioc->saved_lock);
1614 /* resource map size dictated by pdir_size */
1615 ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */
1616 ioc->res_size >>= 3; /* convert bit count to byte count */
1617 DBG_INIT("%s() res_size 0x%x\n", __FUNCTION__, ioc->res_size);
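/*
** Illustrative sizing (1GB IOV space, 4KB IOMMU pages): 256K pdir entries,
** so pdir_size == 2MB and res_size == 256K / 8 == 32KB of bitmap.
*/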
1619 ioc->res_map = (char *) __get_free_pages(GFP_KERNEL,
1620 get_order(ioc->res_size));
1622 panic(PFX "Couldn't allocate resource map\n");
1624 memset(ioc->res_map, 0, ioc->res_size);
1625 /* next available IOVP - circular search */
1626 ioc->res_hint = (unsigned long *) ioc->res_map;
1628 #ifdef ASSERT_PDIR_SANITY
1629 /* Mark first bit busy - ie no IOVA 0 */
1630 ioc->res_map[0] = 0x1;
1631 ioc->pdir_base[0] = 0x8000000000000000ULL | ZX1_SBA_IOMMU_COOKIE;
1633 #ifdef FULL_VALID_PDIR
1634 /* Mark the last resource used so we don't prefetch beyond IOVA space */
1635 ioc->res_map[ioc->res_size - 1] |= 0x80UL; /* res_map is chars */
1636 ioc->pdir_base[(ioc->pdir_size / PDIR_ENTRY_SIZE) - 1] = (0x80000000000000FF
1637 | prefetch_spill_page);
1640 DBG_INIT("%s() res_map %x %p\n", __FUNCTION__,
1641 ioc->res_size, (void *) ioc->res_map);
1645 ioc_sac_init(struct ioc *ioc)
1647 struct pci_dev *sac = NULL;
1648 struct pci_controller *controller = NULL;
1651 * pci_alloc_coherent() must return a DMA address which is
1652 * SAC (single address cycle) addressable, so allocate a
1653 * pseudo-device to enforce that.
1655 sac = kmalloc(sizeof(*sac), GFP_KERNEL);
1657 panic(PFX "Couldn't allocate struct pci_dev");
1658 memset(sac, 0, sizeof(*sac));
1660 controller = kmalloc(sizeof(*controller), GFP_KERNEL);
1662 panic(PFX "Couldn't allocate struct pci_controller");
1663 memset(controller, 0, sizeof(*controller));
1665 controller->iommu = ioc;
1666 sac->sysdata = controller;
1667 sac->dma_mask = 0xFFFFFFFFUL;
1669 sac->dev.bus = &pci_bus_type;
1671 ioc->sac_only_dev = sac;
1675 ioc_zx1_init(struct ioc *ioc)
1677 unsigned long rope_config;
1680 if (ioc->rev < 0x20)
1681 panic(PFX "IOC 2.0 or later required for IOMMU support\n");
1683 /* 38 bit memory controller + extra bit for range displaced by MMIO */
1684 ioc->dma_mask = (0x1UL << 39) - 1;
1687 ** Clear ROPE(N)_CONFIG AO bit.
1688 ** Disables "NT Ordering" (~= !"Relaxed Ordering")
1689 ** Overrides bit 1 in DMA Hint Sets.
1690 ** Improves netperf UDP_STREAM by ~10% for tg3 on bcm5701.
1692 for (i=0; i<(8*8); i+=8) {
1693 rope_config = READ_REG(ioc->ioc_hpa + IOC_ROPE0_CFG + i);
1694 rope_config &= ~IOC_ROPE_AO;
1695 WRITE_REG(rope_config, ioc->ioc_hpa + IOC_ROPE0_CFG + i);
1699 typedef void (initfunc)(struct ioc *);
1707 static struct ioc_iommu ioc_iommu_info[] __initdata = {
1708 { ZX1_IOC_ID, "zx1", ioc_zx1_init },
1709 { SX1000_IOC_ID, "sx1000", NULL },
1712 static struct ioc * __init
1713 ioc_init(u64 hpa, void *handle)
1716 struct ioc_iommu *info;
1718 ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
1722 memset(ioc, 0, sizeof(*ioc));
1724 ioc->next = ioc_list;
1727 ioc->handle = handle;
1728 ioc->ioc_hpa = ioremap(hpa, 0x1000);
1730 ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID);
1731 ioc->rev = READ_REG(ioc->ioc_hpa + IOC_FCLASS) & 0xFFUL;
1732 ioc->dma_mask = 0xFFFFFFFFFFFFFFFFUL; /* conservative */
1734 for (info = ioc_iommu_info; info < ioc_iommu_info + ARRAY_SIZE(ioc_iommu_info); info++) {
1735 if (ioc->func_id == info->func_id) {
1736 ioc->name = info->name;
1742 iovp_size = (1 << iovp_shift);
1743 iovp_mask = ~(iovp_size - 1);
1745 DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __FUNCTION__,
1746 PAGE_SIZE >> 10, iovp_size >> 10);
1749 ioc->name = kmalloc(24, GFP_KERNEL);
1751 sprintf((char *) ioc->name, "Unknown (%04x:%04x)",
1752 ioc->func_id & 0xFFFF, (ioc->func_id >> 16) & 0xFFFF);
1754 ioc->name = "Unknown";
1758 ioc_resource_init(ioc);
1761 if ((long) ~iovp_mask > (long) ia64_max_iommu_merge_mask)
1762 ia64_max_iommu_merge_mask = ~iovp_mask;
1764 printk(KERN_INFO PFX
1765 "%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n",
1766 ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
1767 hpa, ioc->iov_size >> 20, ioc->ibase);
1774 /**************************************************************************
1776 ** SBA initialization code (HW and SW)
1778 ** o identify SBA chip itself
1779 ** o FIXME: initialize DMA hints for reasonable defaults
1781 **************************************************************************/
1783 #ifdef CONFIG_PROC_FS
1785 ioc_start(struct seq_file *s, loff_t *pos)
1790 for (ioc = ioc_list; ioc; ioc = ioc->next)
1798 ioc_next(struct seq_file *s, void *v, loff_t *pos)
1800 struct ioc *ioc = v;
1807 ioc_stop(struct seq_file *s, void *v)
1812 ioc_show(struct seq_file *s, void *v)
1814 struct ioc *ioc = v;
1815 unsigned long *res_ptr = (unsigned long *)ioc->res_map;
1818 seq_printf(s, "Hewlett Packard %s IOC rev %d.%d\n",
1819 ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF));
1821 if (ioc->node != MAX_NUMNODES)
1822 seq_printf(s, "NUMA node : %d\n", ioc->node);
1824 seq_printf(s, "IOVA size : %ld MB\n", ((ioc->pdir_size >> 3) * iovp_size)/(1024*1024));
1825 seq_printf(s, "IOVA page size : %ld kb\n", iovp_size/1024);
1827 for (i = 0; i < (ioc->res_size / sizeof(unsigned long)); ++i, ++res_ptr)
1828 used += hweight64(*res_ptr);
1830 seq_printf(s, "PDIR size : %d entries\n", ioc->pdir_size >> 3);
1831 seq_printf(s, "PDIR used : %d entries\n", used);
1833 #ifdef PDIR_SEARCH_TIMING
1835 unsigned long i = 0, avg = 0, min, max;
1836 min = max = ioc->avg_search[0];
1837 for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
1838 avg += ioc->avg_search[i];
1839 if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
1840 if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
1842 avg /= SBA_SEARCH_SAMPLE;
1843 seq_printf(s, "Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles/IOVA page)\n",
1847 #ifndef ALLOW_IOV_BYPASS
1848 seq_printf(s, "IOVA bypass disabled\n");
1853 static struct seq_operations ioc_seq_ops = {
1861 ioc_open(struct inode *inode, struct file *file)
1863 return seq_open(file, &ioc_seq_ops);
1866 static struct file_operations ioc_fops = {
1869 .llseek = seq_lseek,
1870 .release = seq_release
1876 struct proc_dir_entry *dir, *entry;
1878 dir = proc_mkdir("bus/mckinley", NULL);
1882 entry = create_proc_entry(ioc_list->name, 0, dir);
1884 entry->proc_fops = &ioc_fops;
1889 sba_connect_bus(struct pci_bus *bus)
1891 acpi_handle handle, parent;
1895 if (!PCI_CONTROLLER(bus))
1896 panic(PFX "no sysdata on bus %d!\n", bus->number);
1898 if (PCI_CONTROLLER(bus)->iommu)
1901 handle = PCI_CONTROLLER(bus)->acpi_handle;
1906 * The IOC scope encloses PCI root bridges in the ACPI
1907 * namespace, so work our way out until we find an IOC we
1908 * claimed previously.
1911 for (ioc = ioc_list; ioc; ioc = ioc->next)
1912 if (ioc->handle == handle) {
1913 PCI_CONTROLLER(bus)->iommu = ioc;
1917 status = acpi_get_parent(handle, &parent);
1919 } while (ACPI_SUCCESS(status));
1921 printk(KERN_WARNING "No IOC for PCI Bus %04x:%02x in ACPI\n", pci_domain_nr(bus), bus->number);
1926 sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
1928 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
1929 union acpi_object *obj;
1930 acpi_handle phandle;
1933 ioc->node = MAX_NUMNODES;
1936 * Check for a _PXM on this node first. We don't typically see
1937 * one here, so we'll end up getting it from the parent.
1939 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PXM", NULL, &buffer))) {
1940 if (ACPI_FAILURE(acpi_get_parent(handle, &phandle)))
1943 /* Reset the acpi buffer */
1944 buffer.length = ACPI_ALLOCATE_BUFFER;
1945 buffer.pointer = NULL;
1947 if (ACPI_FAILURE(acpi_evaluate_object(phandle, "_PXM", NULL,
1952 if (!buffer.length || !buffer.pointer)
1955 obj = buffer.pointer;
1957 if (obj->type != ACPI_TYPE_INTEGER ||
1958 obj->integer.value >= MAX_PXM_DOMAINS) {
1959 acpi_os_free(buffer.pointer);
1963 node = pxm_to_nid_map[obj->integer.value];
1964 acpi_os_free(buffer.pointer);
1966 if (node >= MAX_NUMNODES || !node_online(node))
1973 #define sba_map_ioc_to_node(ioc, handle)
1977 acpi_sba_ioc_add(struct acpi_device *device)
1982 struct acpi_buffer buffer;
1983 struct acpi_device_info *dev_info;
1985 status = hp_acpi_csr_space(device->handle, &hpa, &length);
1986 if (ACPI_FAILURE(status))
1989 buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
1990 status = acpi_get_object_info(device->handle, &buffer);
1991 if (ACPI_FAILURE(status))
1993 dev_info = buffer.pointer;
1996 * For HWP0001, only SBA appears in ACPI namespace. It encloses the PCI
1997 * root bridges, and its CSR space includes the IOC function.
1999 if (strncmp("HWP0001", dev_info->hardware_id.value, 7) == 0) {
2000 hpa += ZX1_IOC_OFFSET;
2001 /* zx1 based systems default to kernel page size iommu pages */
2003 iovp_shift = min(PAGE_SHIFT, 16);
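/*
** e.g. a kernel built with 64KB pages gets 64KB IOMMU pages here, while
** 4KB/16KB kernels get matching 4KB/16KB IOMMU pages; 64KB is the largest
** size TCNFG supports, hence the min() with 16.
*/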
2005 ACPI_MEM_FREE(dev_info);
2008 * default anything not caught above or specified on cmdline to 4k
2014 ioc = ioc_init(hpa, device->handle);
2018 /* setup NUMA node association */
2019 sba_map_ioc_to_node(ioc, device->handle);
2023 static struct acpi_driver acpi_sba_ioc_driver = {
2024 .name = "IOC IOMMU Driver",
2025 .ids = "HWP0001,HWP0004",
2027 .add = acpi_sba_ioc_add,
2034 acpi_bus_register_driver(&acpi_sba_ioc_driver);
2040 struct pci_bus *b = NULL;
2041 while ((b = pci_find_next_bus(b)) != NULL)
2046 #ifdef CONFIG_PROC_FS
2052 subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */
2054 extern void dig_setup(char**);
2056 * MAX_DMA_ADDRESS needs to be set up prior to paging_init to do any good,
2057 * so we use the platform_setup hook to fix it up.
2060 sba_setup(char **cmdline_p)
2062 MAX_DMA_ADDRESS = ~0UL;
2063 dig_setup(cmdline_p);
2067 nosbagart(char *str)
2069 reserve_sba_gart = 0;
2074 sba_dma_supported (struct device *dev, u64 mask)
2076 /* make sure it's at least 32bit capable */
2077 return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
2081 sba_dma_mapping_error (dma_addr_t dma_addr)
2086 __setup("nosbagart", nosbagart);
2089 sba_page_override(char *str)
2091 unsigned long page_size;
2093 page_size = memparse(str, &str);
2094 switch (page_size) {
2099 iovp_shift = ffs(page_size) - 1;
2102 printk("%s: unknown/unsupported iommu page size %ld\n",
2103 __FUNCTION__, page_size);
2109 __setup("sbapagesize=",sba_page_override);
2111 EXPORT_SYMBOL(sba_dma_mapping_error);
2112 EXPORT_SYMBOL(sba_map_single);
2113 EXPORT_SYMBOL(sba_unmap_single);
2114 EXPORT_SYMBOL(sba_map_sg);
2115 EXPORT_SYMBOL(sba_unmap_sg);
2116 EXPORT_SYMBOL(sba_dma_supported);
2117 EXPORT_SYMBOL(sba_alloc_coherent);
2118 EXPORT_SYMBOL(sba_free_coherent);