/* $Id: pci_iommu.c,v 1.17 2001/12/17 07:05:09 davem Exp $
 * pci_iommu.c: UltraSparc PCI controller IOM/STC support.
 *
 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/pbm.h>

#include "iommu_common.h"

#define PCI_STC_CTXMATCH_ADDR(STC, CTX) \
        ((STC)->strbuf_ctxmatch_base + ((CTX) << 3))

/* Accessing IOMMU and Streaming Buffer registers.
 * REG parameter is a physical address.  All registers
 * are 64-bits in size.
 */
#define pci_iommu_read(__reg) \
({      u64 __ret; \
        __asm__ __volatile__("ldxa [%1] %2, %0" \
                             : "=r" (__ret) \
                             : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
                             : "memory"); \
        __ret; \
})
#define pci_iommu_write(__reg, __val) \
        __asm__ __volatile__("stxa %0, [%1] %2" \
                             : /* no outputs */ \
                             : "r" (__val), "r" (__reg), \
                               "i" (ASI_PHYS_BYPASS_EC_E))

/* Must be invoked under the IOMMU lock. */
static void __iommu_flushall(struct pci_iommu *iommu)
{
        unsigned long tag;
        int entry;

        tag = iommu->iommu_flush + (0xa580UL - 0x0210UL);
        for (entry = 0; entry < 16; entry++) {
                pci_iommu_write(tag, 0);
                tag += 8UL;
        }

        /* Ensure completion of previous PIO writes. */
        (void) pci_iommu_read(iommu->write_complete_reg);

        /* Now update everyone's flush point. */
        for (entry = 0; entry < PBM_NCLUSTERS; entry++) {
                iommu->alloc_info[entry].flush =
                        iommu->alloc_info[entry].next;
        }
}

static iopte_t *alloc_streaming_cluster(struct pci_iommu *iommu, unsigned long npages)
{
        iopte_t *iopte, *limit, *first;
        unsigned long cnum, ent, flush_point;

        cnum = 0;
        while ((1UL << cnum) < npages)
                cnum++;
        iopte = (iommu->page_table +
                 (cnum << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));

        if (cnum == 0)
                limit = (iommu->page_table +
                         iommu->lowest_consistent_map);
        else
                limit = (iopte +
                         (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));

        iopte += ((ent = iommu->alloc_info[cnum].next) << cnum);
        flush_point = iommu->alloc_info[cnum].flush;

        first = iopte;
        for (;;) {
                if (iopte_val(*iopte) == 0UL) {
                        if ((iopte + (1 << cnum)) >= limit)
                                ent = 0;
                        else
                                ent = ent + 1;
                        iommu->alloc_info[cnum].next = ent;
                        if (ent == flush_point)
                                __iommu_flushall(iommu);
                        break;
                }
                iopte += (1 << cnum);
                ent++;
                if (iopte >= limit) {
                        iopte = (iommu->page_table +
                                 (cnum <<
                                  (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
                        ent = 0;
                }
                if (ent == flush_point)
                        __iommu_flushall(iommu);
                if (iopte == first)
                        goto bad;
        }

        /* I've got your streaming cluster right here buddy boy... */
        return iopte;

bad:
        printk(KERN_EMERG "pci_iommu: alloc_streaming_cluster of npages(%ld) failed!\n",
               npages);
        return NULL;
}

static void free_streaming_cluster(struct pci_iommu *iommu, dma_addr_t base,
                                   unsigned long npages, unsigned long ctx)
{
        unsigned long cnum, ent;

        cnum = 0;
        while ((1UL << cnum) < npages)
                cnum++;

        ent = (base << (32 - IO_PAGE_SHIFT + PBM_LOGCLUSTERS - iommu->page_table_sz_bits))
                >> (32 + PBM_LOGCLUSTERS + cnum - iommu->page_table_sz_bits);

        /* If the global flush might not have caught this entry,
         * adjust the flush point such that we will flush before
         * ever trying to reuse it.
         */
#define between(X,Y,Z)  (((Z) - (Y)) >= ((X) - (Y)))
        if (between(ent, iommu->alloc_info[cnum].next, iommu->alloc_info[cnum].flush))
                iommu->alloc_info[cnum].flush = ent;
#undef between
}

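/* Worked example (illustrative, not part of the original file): with
 * next = 14 and flush = 2 in a 16-entry cluster, the not-yet-flushed
 * window wraps as 14, 15, 0, 1, 2.  Freeing ent = 15 gives
 * (2 - 14) >= (15 - 14) in unsigned arithmetic, so between() is true
 * and the flush point is moved to 15, guaranteeing an IOMMU flush
 * before that slot can be handed out again.
 */
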
/* We allocate consistent mappings from the end of cluster zero. */
static iopte_t *alloc_consistent_cluster(struct pci_iommu *iommu, unsigned long npages)
{
        iopte_t *iopte;

        iopte = iommu->page_table + (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS));
        while (iopte > iommu->page_table) {
                iopte--;
                if (!(iopte_val(*iopte) & IOPTE_VALID)) {
                        unsigned long tmp = npages;

                        while (--tmp) {
                                iopte--;
                                if (iopte_val(*iopte) & IOPTE_VALID)
                                        break;
                        }
                        if (tmp == 0) {
                                u32 entry = (iopte - iommu->page_table);

                                if (entry < iommu->lowest_consistent_map)
                                        iommu->lowest_consistent_map = entry;
                                return iopte;
                        }
                }
        }
        return NULL;
}

#define IOPTE_CONSISTENT(CTX) \
        (IOPTE_VALID | IOPTE_CACHE | \
         (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
        (IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

#define IOPTE_INVALID   0UL

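/* Illustrative example (not part of the original file): a streaming
 * IOPTE for context 5 covering IO page frame 0x1234000 would be built
 * the same way the mapping routines below do it:
 *
 *      iopte_val(*iopte) = IOPTE_STREAMING(5) | (0x1234000UL & IOPTE_PAGE);
 *
 * i.e. valid + cacheable + streaming-buffer bits, the context number
 * shifted into bits 47 and up, and the page frame in the low bits.
 */
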
/* Allocate and map kernel buffer of size SIZE using consistent mode
 * DMA for PCI device PDEV.  Return non-NULL cpu-side address if
 * successful and set *DMA_ADDRP to the PCI side dma address.
 */
void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        iopte_t *iopte;
        unsigned long flags, order, first_page, ctx;
        void *ret;
        int npages;

        size = IO_PAGE_ALIGN(size);
        order = get_order(size);
        if (order >= 10)
                return NULL;

        first_page = __get_free_pages(GFP_ATOMIC, order);
        if (first_page == 0UL)
                return NULL;
        memset((char *)first_page, 0, PAGE_SIZE << order);

        pcp = pdev->sysdata;
        iommu = pcp->pbm->iommu;

        spin_lock_irqsave(&iommu->lock, flags);
        iopte = alloc_consistent_cluster(iommu, size >> IO_PAGE_SHIFT);
        if (iopte == NULL) {
                spin_unlock_irqrestore(&iommu->lock, flags);
                free_pages(first_page, order);
                return NULL;
        }

        *dma_addrp = (iommu->page_table_map_base +
                      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
        ret = (void *) first_page;
        npages = size >> IO_PAGE_SHIFT;
        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = iommu->iommu_cur_ctx++;
        first_page = __pa(first_page);
        while (npages--) {
                iopte_val(*iopte) = (IOPTE_CONSISTENT(ctx) |
                                     IOPTE_WRITE |
                                     (first_page & IOPTE_PAGE));
                iopte++;
                first_page += IO_PAGE_SIZE;
        }

        {
                int i;
                u32 daddr = *dma_addrp;

                npages = size >> IO_PAGE_SHIFT;
                for (i = 0; i < npages; i++) {
                        pci_iommu_write(iommu->iommu_flush, daddr);
                        daddr += IO_PAGE_SIZE;
                }
        }

        spin_unlock_irqrestore(&iommu->lock, flags);

        return ret;
}

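/* Illustrative usage (not part of the original file): a driver would
 * allocate and later release a coherent descriptor ring roughly like
 *
 *      dma_addr_t ring_dma;
 *      void *ring = pci_alloc_consistent(pdev, ring_bytes, &ring_dma);
 *      if (ring == NULL)
 *              return -ENOMEM;
 *      ...
 *      pci_free_consistent(pdev, ring_bytes, ring, ring_dma);
 *
 * where ring_bytes is a made-up size; the CPU dereferences "ring" while
 * the device is programmed with "ring_dma".
 */
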
/* Free and unmap a consistent DMA translation. */
void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        iopte_t *iopte;
        unsigned long flags, order, npages, i, ctx;

        npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
        pcp = pdev->sysdata;
        iommu = pcp->pbm->iommu;
        iopte = iommu->page_table +
                ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

        spin_lock_irqsave(&iommu->lock, flags);

        if ((iopte - iommu->page_table) ==
            iommu->lowest_consistent_map) {
                iopte_t *walk = iopte + npages;
                iopte_t *limit;

                limit = (iommu->page_table +
                         (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
                while (walk < limit) {
                        if (iopte_val(*walk) != IOPTE_INVALID)
                                break;
                        walk++;
                }
                iommu->lowest_consistent_map =
                        (walk - iommu->page_table);
        }

        /* Data for consistent mappings cannot enter the streaming
         * buffers, so we only need to update the TSB.  We flush
         * the IOMMU here as well to prevent conflicts with the
         * streaming mapping deferred tlb flush scheme.
         */

        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;

        for (i = 0; i < npages; i++, iopte++)
                iopte_val(*iopte) = IOPTE_INVALID;

        if (iommu->iommu_ctxflush) {
                pci_iommu_write(iommu->iommu_ctxflush, ctx);
        } else {
                for (i = 0; i < npages; i++) {
                        u32 daddr = dvma + (i << IO_PAGE_SHIFT);

                        pci_iommu_write(iommu->iommu_flush, daddr);
                }
        }

        spin_unlock_irqrestore(&iommu->lock, flags);

        order = get_order(size);
        if (order < 10)
                free_pages((unsigned long)cpu, order);
}

/* Map a single buffer at PTR of SZ bytes for PCI DMA
 * in streaming mode.
 */
dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        struct pci_strbuf *strbuf;
        iopte_t *base;
        unsigned long flags, npages, oaddr;
        unsigned long i, base_paddr, ctx;
        u32 bus_addr, ret;
        unsigned long iopte_protection;

        pcp = pdev->sysdata;
        iommu = pcp->pbm->iommu;
        strbuf = &pcp->pbm->stc;

        if (direction == PCI_DMA_NONE)
                BUG();

        oaddr = (unsigned long)ptr;
        npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;

        spin_lock_irqsave(&iommu->lock, flags);

        base = alloc_streaming_cluster(iommu, npages);
        if (base == NULL)
                goto bad;
        bus_addr = (iommu->page_table_map_base +
                    ((base - iommu->page_table) << IO_PAGE_SHIFT));
        ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
        base_paddr = __pa(oaddr & IO_PAGE_MASK);
        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = iommu->iommu_cur_ctx++;
        if (strbuf->strbuf_enabled)
                iopte_protection = IOPTE_STREAMING(ctx);
        else
                iopte_protection = IOPTE_CONSISTENT(ctx);
        if (direction != PCI_DMA_TODEVICE)
                iopte_protection |= IOPTE_WRITE;

        for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
                iopte_val(*base) = iopte_protection | base_paddr;

        spin_unlock_irqrestore(&iommu->lock, flags);

        return ret;

bad:
        spin_unlock_irqrestore(&iommu->lock, flags);
        return PCI_DMA_ERROR_CODE;
}

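/* Illustrative usage (not part of the original file): a streaming
 * mapping is checked against PCI_DMA_ERROR_CODE and torn down once the
 * transfer has completed, e.g. for a hypothetical transmit buffer:
 *
 *      dma_addr_t mapping = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
 *      if (mapping == PCI_DMA_ERROR_CODE)
 *              return -ENOMEM;
 *      ...
 *      pci_unmap_single(pdev, mapping, len, PCI_DMA_TODEVICE);
 */
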
/* Unmap a single streaming mode DMA translation. */
void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        struct pci_strbuf *strbuf;
        iopte_t *base;
        unsigned long flags, npages, i, ctx;

        if (direction == PCI_DMA_NONE)
                BUG();

        pcp = pdev->sysdata;
        iommu = pcp->pbm->iommu;
        strbuf = &pcp->pbm->stc;

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        base = iommu->page_table +
                ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
#ifdef DEBUG_PCI_IOMMU
        if (iopte_val(*base) == IOPTE_INVALID)
                printk("pci_unmap_single called on non-mapped region %08x,%08x from %016lx\n",
                       bus_addr, sz, __builtin_return_address(0));
#endif
        bus_addr &= IO_PAGE_MASK;

        spin_lock_irqsave(&iommu->lock, flags);

        /* Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

        /* Step 1: Kick data out of streaming buffers if necessary. */
        if (strbuf->strbuf_enabled) {
                u32 vaddr = bus_addr;

                PCI_STC_FLUSHFLAG_INIT(strbuf);
                if (strbuf->strbuf_ctxflush &&
                    iommu->iommu_ctxflush) {
                        unsigned long matchreg, flushreg;

                        flushreg = strbuf->strbuf_ctxflush;
                        matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
                        do {
                                pci_iommu_write(flushreg, ctx);
                        } while(((long)pci_iommu_read(matchreg)) < 0L);
                } else {
                        for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
                                pci_iommu_write(strbuf->strbuf_pflush, vaddr);
                }

                pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
                (void) pci_iommu_read(iommu->write_complete_reg);
                while (!PCI_STC_FLUSHFLAG_SET(strbuf))
                        membar("#LoadLoad");
        }

        /* Step 2: Clear out first TSB entry. */
        iopte_val(*base) = IOPTE_INVALID;

        free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
                               npages, ctx);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

#define SG_ENT_PHYS_ADDRESS(SG) \
        (__pa(page_address((SG)->page)) + (SG)->offset)

static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
                           int nused, int nelems, unsigned long iopte_protection)
{
        struct scatterlist *dma_sg = sg;
        struct scatterlist *sg_end = sg + nelems;
        int i;

        for (i = 0; i < nused; i++) {
                unsigned long pteval = ~0UL;
                u32 dma_npages;

                dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
                              dma_sg->dma_length +
                              ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
                do {
                        unsigned long offset;
                        signed int len;

                        /* If we are here, we know we have at least one
                         * more page to map.  So walk forward until we
                         * hit a page crossing, and begin creating new
                         * mappings from that spot.
                         */
                        for (;;) {
                                unsigned long tmp;

                                tmp = SG_ENT_PHYS_ADDRESS(sg);
                                len = sg->length;
                                if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
                                        pteval = tmp & IO_PAGE_MASK;
                                        offset = tmp & (IO_PAGE_SIZE - 1UL);
                                        break;
                                }
                                if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
                                        pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
                                        offset = 0UL;
                                        len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
                                        break;
                                }
                                sg++;
                        }

                        pteval = iopte_protection | (pteval & IOPTE_PAGE);
                        while (len > 0) {
                                *iopte++ = __iopte(pteval);
                                pteval += IO_PAGE_SIZE;
                                len -= (IO_PAGE_SIZE - offset);
                                offset = 0;
                                dma_npages--;
                        }

                        pteval = (pteval & IOPTE_PAGE) + len;
                        sg++;

                        /* Skip over any tail mappings we've fully mapped,
                         * adjusting pteval along the way.  Stop when we
                         * detect a page crossing event.
                         */
                        while (sg < sg_end &&
                               (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
                               (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
                               ((pteval ^
                                 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
                                pteval += sg->length;
                                sg++;
                        }
                        if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
                                pteval = ~0UL;
                } while (dma_npages != 0);
                dma_sg++;
        }
}

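/* Note (illustrative, not part of the original file): pci_map_sg() runs
 * prepare_sg() first, which coalesces physically adjacent entries into
 * fewer DMA segments; fill_sg() then walks those "nused" segments, and
 * the do/while loop above emits one IOPTE per IO_PAGE_SIZE chunk of
 * each segment, advancing the page frame by IO_PAGE_SIZE per entry.
 */
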
/* Map a set of buffers described by SGLIST with NELEMS array
 * elements in streaming mode for PCI DMA.
 * When making changes here, inspect the assembly output; I had a hard
 * time keeping this routine from using stack slots to hold variables.
 */
int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        struct pci_strbuf *strbuf;
        unsigned long flags, ctx, npages, iopte_protection;
        iopte_t *base;
        u32 dma_base;
        struct scatterlist *sgtmp;
        int used;

        /* Fast path single entry scatterlists. */
        if (nelems == 1) {
                sglist->dma_address =
                        pci_map_single(pdev,
                                       (page_address(sglist->page) + sglist->offset),
                                       sglist->length, direction);
                sglist->dma_length = sglist->length;
                return 1;
        }

        pcp = pdev->sysdata;
        iommu = pcp->pbm->iommu;
        strbuf = &pcp->pbm->stc;

        if (direction == PCI_DMA_NONE)
                BUG();

        /* Step 1: Prepare scatter list. */

        npages = prepare_sg(sglist, nelems);

        /* Step 2: Allocate a cluster. */

        spin_lock_irqsave(&iommu->lock, flags);

        base = alloc_streaming_cluster(iommu, npages);
        if (base == NULL)
                goto bad;
        dma_base = iommu->page_table_map_base + ((base - iommu->page_table) << IO_PAGE_SHIFT);

        /* Step 3: Normalize DMA addresses. */
        used = nelems;

        sgtmp = sglist;
        while (used && sgtmp->dma_length) {
                sgtmp->dma_address += dma_base;
                sgtmp++;
                used--;
        }
        used = nelems - used;

        /* Step 4: Choose a context if necessary. */
        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = iommu->iommu_cur_ctx++;

        /* Step 5: Create the mappings. */
        if (strbuf->strbuf_enabled)
                iopte_protection = IOPTE_STREAMING(ctx);
        else
                iopte_protection = IOPTE_CONSISTENT(ctx);
        if (direction != PCI_DMA_TODEVICE)
                iopte_protection |= IOPTE_WRITE;
        fill_sg (base, sglist, used, nelems, iopte_protection);
#ifdef VERIFY_SG
        verify_sglist(sglist, nelems, base, npages);
#endif

        spin_unlock_irqrestore(&iommu->lock, flags);

        return used;

bad:
        spin_unlock_irqrestore(&iommu->lock, flags);
        return PCI_DMA_ERROR_CODE;
}

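/* Illustrative usage (not part of the original file): the return value
 * is the number of coalesced DMA segments, so a caller walks only that
 * many entries:
 *
 *      int count = pci_map_sg(pdev, sglist, nelems, PCI_DMA_FROMDEVICE);
 *      for (i = 0; i < count; i++)
 *              program_device(sglist[i].dma_address, sglist[i].dma_length);
 *
 * program_device() is a stand-in for whatever hands the addresses to
 * the hardware.
 */
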
/* Unmap a set of streaming mode DMA translations. */
void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        struct pci_strbuf *strbuf;
        iopte_t *base;
        unsigned long flags, ctx, i, npages;
        u32 bus_addr;

        if (direction == PCI_DMA_NONE)
                BUG();

        pcp = pdev->sysdata;
        iommu = pcp->pbm->iommu;
        strbuf = &pcp->pbm->stc;

        bus_addr = sglist->dma_address & IO_PAGE_MASK;

        for (i = 1; i < nelems; i++)
                if (sglist[i].dma_length == 0)
                        break;
        i--;
        npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT;

        base = iommu->page_table +
                ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

#ifdef DEBUG_PCI_IOMMU
        if (iopte_val(*base) == IOPTE_INVALID)
                printk("pci_unmap_sg called on non-mapped region %016lx,%d from %016lx\n", sglist->dma_address, nelems, __builtin_return_address(0));
#endif

        spin_lock_irqsave(&iommu->lock, flags);

        /* Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

        /* Step 1: Kick data out of streaming buffers if necessary. */
        if (strbuf->strbuf_enabled) {
                u32 vaddr = (u32) bus_addr;

                PCI_STC_FLUSHFLAG_INIT(strbuf);
                if (strbuf->strbuf_ctxflush &&
                    iommu->iommu_ctxflush) {
                        unsigned long matchreg, flushreg;

                        flushreg = strbuf->strbuf_ctxflush;
                        matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
                        do {
                                pci_iommu_write(flushreg, ctx);
                        } while(((long)pci_iommu_read(matchreg)) < 0L);
                } else {
                        for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
                                pci_iommu_write(strbuf->strbuf_pflush, vaddr);
                }

                pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
                (void) pci_iommu_read(iommu->write_complete_reg);
                while (!PCI_STC_FLUSHFLAG_SET(strbuf))
                        membar("#LoadLoad");
        }

        /* Step 2: Clear out first TSB entry. */
        iopte_val(*base) = IOPTE_INVALID;

        free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
                               npages, ctx);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

/* Make physical memory consistent for a single
 * streaming mode DMA translation after a transfer.
 */
void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        struct pci_strbuf *strbuf;
        unsigned long flags, ctx, npages;

        pcp = pdev->sysdata;
        iommu = pcp->pbm->iommu;
        strbuf = &pcp->pbm->stc;

        if (!strbuf->strbuf_enabled)
                return;

        spin_lock_irqsave(&iommu->lock, flags);

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        bus_addr &= IO_PAGE_MASK;

        /* Step 1: Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_ctxflush &&
            strbuf->strbuf_ctxflush) {
                iopte_t *iopte;

                iopte = iommu->page_table +
                        ((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
                ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
        }

        /* Step 2: Kick data out of streaming buffers. */
        PCI_STC_FLUSHFLAG_INIT(strbuf);
        if (iommu->iommu_ctxflush &&
            strbuf->strbuf_ctxflush) {
                unsigned long matchreg, flushreg;

                flushreg = strbuf->strbuf_ctxflush;
                matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
                do {
                        pci_iommu_write(flushreg, ctx);
                } while(((long)pci_iommu_read(matchreg)) < 0L);
        } else {
                unsigned long i;

                for (i = 0; i < npages; i++, bus_addr += IO_PAGE_SIZE)
                        pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
        }

        /* Step 3: Perform flush synchronization sequence. */
        pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
        (void) pci_iommu_read(iommu->write_complete_reg);
        while (!PCI_STC_FLUSHFLAG_SET(strbuf))
                membar("#LoadLoad");

        spin_unlock_irqrestore(&iommu->lock, flags);
}

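/* Illustrative usage (not part of the original file): for a long-lived
 * PCI_DMA_FROMDEVICE mapping, the driver syncs before the CPU looks at
 * freshly DMA'd data:
 *
 *      pci_dma_sync_single_for_cpu(pdev, mapping, len, PCI_DMA_FROMDEVICE);
 *      examine_rx_data(buf, len);
 *
 * examine_rx_data() is a stand-in for the driver's own processing.
 */
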
/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 */
void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        struct pci_strbuf *strbuf;
        unsigned long flags, ctx;

        pcp = pdev->sysdata;
        iommu = pcp->pbm->iommu;
        strbuf = &pcp->pbm->stc;

        if (!strbuf->strbuf_enabled)
                return;

        spin_lock_irqsave(&iommu->lock, flags);

        /* Step 1: Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_ctxflush &&
            strbuf->strbuf_ctxflush) {
                iopte_t *iopte;

                iopte = iommu->page_table +
                        ((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
                ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
        }

        /* Step 2: Kick data out of streaming buffers. */
        PCI_STC_FLUSHFLAG_INIT(strbuf);
        if (iommu->iommu_ctxflush &&
            strbuf->strbuf_ctxflush) {
                unsigned long matchreg, flushreg;

                flushreg = strbuf->strbuf_ctxflush;
                matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
                do {
                        pci_iommu_write(flushreg, ctx);
                } while (((long)pci_iommu_read(matchreg)) < 0L);
        } else {
                unsigned long i, npages;
                u32 bus_addr;

                bus_addr = sglist[0].dma_address & IO_PAGE_MASK;

                for(i = 1; i < nelems; i++)
                        if (!sglist[i].dma_length)
                                break;
                i--;
                npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT;
                for (i = 0; i < npages; i++, bus_addr += IO_PAGE_SIZE)
                        pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
        }

        /* Step 3: Perform flush synchronization sequence. */
        pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
        (void) pci_iommu_read(iommu->write_complete_reg);
        while (!PCI_STC_FLUSHFLAG_SET(strbuf))
                membar("#LoadLoad");

        spin_unlock_irqrestore(&iommu->lock, flags);
}

static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
{
        struct pci_dev *ali_isa_bridge;
        u8 val;

        /* ALI sound chips generate 31-bits of DMA, a special register
         * determines what bit 31 is emitted as.
         */
        ali_isa_bridge = pci_find_device(PCI_VENDOR_ID_AL,
                                         PCI_DEVICE_ID_AL_M1533,
                                         NULL);

        pci_read_config_byte(ali_isa_bridge, 0x7e, &val);
        if (set_bit)
                val |= 0x01;
        else
                val &= ~0x01;
        pci_write_config_byte(ali_isa_bridge, 0x7e, val);
}

int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
{
        struct pcidev_cookie *pcp = pdev->sysdata;
        u64 dma_addr_mask;

        if (pdev == NULL) {
                dma_addr_mask = 0xffffffff;
        } else {
                struct pci_iommu *iommu = pcp->pbm->iommu;

                dma_addr_mask = iommu->dma_addr_mask;

                if (pdev->vendor == PCI_VENDOR_ID_AL &&
                    pdev->device == PCI_DEVICE_ID_AL_M5451 &&
                    device_mask == 0x7fffffff) {
                        ali_sound_dma_hack(pdev,
                                           (dma_addr_mask & 0x80000000) != 0);
                        return 1;
                }
        }

        if (device_mask >= (1UL << 32UL))
                return 0;

        return (device_mask & dma_addr_mask) == dma_addr_mask;
}

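/* Illustrative usage (not part of the original file): drivers normally
 * probe a mask before committing to it, e.g.
 *
 *      if (pci_dma_supported(pdev, 0xffffffffUL))
 *              pci_set_dma_mask(pdev, 0xffffffffUL);
 *
 * a device like the ALI M5451 would instead pass 0x7fffffff here, which
 * is what triggers ali_sound_dma_hack() above.
 */
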