1 /* $Id: sbus.c,v 1.19 2002/01/23 11:27:32 davem Exp $
2 * sbus.c: UltraSparc SBUS controller support.
4 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
7 #include <linux/kernel.h>
8 #include <linux/types.h>
10 #include <linux/spinlock.h>
11 #include <linux/slab.h>
12 #include <linux/init.h>
13 #include <linux/interrupt.h>
19 #include <asm/cache.h>
22 #include <asm/starfire.h>
24 #include "iommu_common.h"
26 /* These should be allocated on an SMP_CACHE_BYTES
27 * aligned boundary for optimal performance.
29 * On SYSIO, using an 8K page size we have 1GB of SBUS
30 * DMA space mapped. We divide this space into equally
31 * sized clusters. Currently we allow clusters up to a
32 * size of 1MB. If anything begins to generate DMA
33 * mapping requests larger than this we will need to
34 * increase things a bit.
38 #define ONE_GIG (1UL * 1024UL * 1024UL * 1024UL)
39 #define CLUSTER_SIZE (ONE_GIG / NCLUSTERS)
40 #define CLUSTER_MASK (CLUSTER_SIZE - 1)
41 #define CLUSTER_NPAGES (CLUSTER_SIZE >> IO_PAGE_SHIFT)
42 #define MAP_BASE ((u32)0xc0000000)
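/* Illustrative sketch, not part of the driver: streaming cluster N hands
 * out mappings in chunks of 2^N IO pages, and a DVMA address handed out
 * from this map can be turned back into its page table entry and owning
 * cluster with the macros above, e.g. (local names hypothetical):
 *
 *	unsigned long pgidx   = (dvma - MAP_BASE) >> IO_PAGE_SHIFT;
 *	unsigned long cluster = pgidx / CLUSTER_NPAGES;
 *	iopte_t *pte          = iommu->page_table + pgidx;
 *
 * which mirrors the lookup free_streaming_cluster() and
 * free_consistent_cluster() perform below.
 */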
45 /*0x00*/spinlock_t lock;
47 /*0x08*/iopte_t *page_table;
48 /*0x10*/unsigned long strbuf_regs;
49 /*0x18*/unsigned long iommu_regs;
50 /*0x20*/unsigned long sbus_control_reg;
52 /*0x28*/volatile unsigned long strbuf_flushflag;
54 /* If NCLUSTERS is ever decreased to 4 or lower,
55 * you must increase the size of the type of
56 * these counters. You have been duly warned. -DaveM
61 } alloc_info[NCLUSTERS];
63 /* The lowest used consistent mapping entry. Since
64 * we allocate consistent maps out of cluster 0 this
65 * is relative to the beginning of cluster 0.
67 /*0x50*/u32 lowest_consistent_map;
70 /* Offsets from iommu_regs */
71 #define SYSIO_IOMMUREG_BASE 0x2400UL
72 #define IOMMU_CONTROL (0x2400UL - 0x2400UL) /* IOMMU control register */
73 #define IOMMU_TSBBASE (0x2408UL - 0x2400UL) /* TSB base address register */
74 #define IOMMU_FLUSH (0x2410UL - 0x2400UL) /* IOMMU flush register */
75 #define IOMMU_VADIAG (0x4400UL - 0x2400UL) /* SBUS virtual address diagnostic */
76 #define IOMMU_TAGCMP (0x4408UL - 0x2400UL) /* TLB tag compare diagnostics */
77 #define IOMMU_LRUDIAG (0x4500UL - 0x2400UL) /* IOMMU LRU queue diagnostics */
78 #define IOMMU_TAGDIAG (0x4580UL - 0x2400UL) /* TLB tag diagnostics */
79 #define IOMMU_DRAMDIAG (0x4600UL - 0x2400UL) /* TLB data RAM diagnostics */
81 #define IOMMU_DRAM_VALID (1UL << 30UL)
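/* Invalidate the entire IOMMU TLB: write each of the 16 tag diagnostic
 * entries, post the writes with a read of the SBUS control register,
 * then pull every cluster's flush point up to its allocation pointer so
 * the allocator treats everything handed out so far as flushed.
 */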
83 static void __iommu_flushall(struct sbus_iommu *iommu)
85 unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;
88 for (entry = 0; entry < 16; entry++) {
92 upa_readq(iommu->sbus_control_reg);
94 for (entry = 0; entry < NCLUSTERS; entry++) {
95 iommu->alloc_info[entry].flush =
96 iommu->alloc_info[entry].next;
100 static void iommu_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
103 upa_writeq(base + (npages << IO_PAGE_SHIFT),
104 iommu->iommu_regs + IOMMU_FLUSH);
105 upa_readq(iommu->sbus_control_reg);
108 /* Offsets from strbuf_regs */
109 #define SYSIO_STRBUFREG_BASE 0x2800UL
110 #define STRBUF_CONTROL (0x2800UL - 0x2800UL) /* Control */
111 #define STRBUF_PFLUSH (0x2808UL - 0x2800UL) /* Page flush/invalidate */
112 #define STRBUF_FSYNC (0x2810UL - 0x2800UL) /* Flush synchronization */
113 #define STRBUF_DRAMDIAG (0x5000UL - 0x2800UL) /* data RAM diagnostic */
114 #define STRBUF_ERRDIAG (0x5400UL - 0x2800UL) /* error status diagnostics */
115 #define STRBUF_PTAGDIAG (0x5800UL - 0x2800UL) /* Page tag diagnostics */
116 #define STRBUF_LTAGDIAG (0x5900UL - 0x2800UL) /* Line tag diagnostics */
118 #define STRBUF_TAG_VALID 0x02UL
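/* Streaming buffer flush handshake: each IO page in the range is written
 * to the PFLUSH register, then the physical address of strbuf_flushflag
 * is handed to FSYNC.  The hardware stores a non-zero value to that flag
 * once all outstanding flushes have completed, so after posting the
 * writes (via the dummy control register read) we simply poll the flag.
 */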
120 static void strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
122 iommu->strbuf_flushflag = 0UL;
124 upa_writeq(base + (npages << IO_PAGE_SHIFT),
125 iommu->strbuf_regs + STRBUF_PFLUSH);
127 /* Whoopee cushion! */
128 upa_writeq(__pa(&iommu->strbuf_flushflag),
129 iommu->strbuf_regs + STRBUF_FSYNC);
130 upa_readq(iommu->sbus_control_reg);
131 while (iommu->strbuf_flushflag == 0UL)
135 static iopte_t *alloc_streaming_cluster(struct sbus_iommu *iommu, unsigned long npages)
137 iopte_t *iopte, *limit, *first;
138 unsigned long cnum, ent, flush_point;
141 while ((1UL << cnum) < npages)
143 iopte = iommu->page_table + (cnum * CLUSTER_NPAGES);
146 limit = (iommu->page_table +
147 iommu->lowest_consistent_map);
149 limit = (iopte + CLUSTER_NPAGES);
151 iopte += ((ent = iommu->alloc_info[cnum].next) << cnum);
152 flush_point = iommu->alloc_info[cnum].flush;
156 if (iopte_val(*iopte) == 0UL) {
157 if ((iopte + (1 << cnum)) >= limit)
161 iommu->alloc_info[cnum].next = ent;
162 if (ent == flush_point)
163 __iommu_flushall(iommu);
166 iopte += (1 << cnum);
168 if (iopte >= limit) {
169 iopte = (iommu->page_table + (cnum * CLUSTER_NPAGES));
172 if (ent == flush_point)
173 __iommu_flushall(iommu);
178 /* I've got your streaming cluster right here buddy boy... */
182 printk(KERN_EMERG "sbus: alloc_streaming_cluster of npages(%ld) failed!\n",
187 static void free_streaming_cluster(struct sbus_iommu *iommu, u32 base, unsigned long npages)
189 unsigned long cnum, ent;
193 while ((1UL << cnum) < npages)
195 ent = (base & CLUSTER_MASK) >> (IO_PAGE_SHIFT + cnum);
196 iopte = iommu->page_table + ((base - MAP_BASE) >> IO_PAGE_SHIFT);
197 iopte_val(*iopte) = 0UL;
199 /* If the global flush might not have caught this entry,
200 * adjust the flush point such that we will flush before
201 * ever trying to reuse it.
203 #define between(X,Y,Z) (((Z) - (Y)) >= ((X) - (Y)))
204 if (between(ent, iommu->alloc_info[cnum].next, iommu->alloc_info[cnum].flush))
205 iommu->alloc_info[cnum].flush = ent;
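/* A quick example of the wrap-safe range check above: between(X, Y, Z)
 * is true exactly when X lies in the circular interval [Y, Z] under
 * unsigned arithmetic.  If next = 30 and flush = 2 (the flush point has
 * wrapped around the cluster), freeing ent = 31 gives
 * (2 - 30) >= (31 - 30), which holds for unsigned longs, so the flush
 * point is pulled back to 31 and the entry is flushed before reuse.
 */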
209 /* We allocate consistent mappings from the end of cluster zero. */
210 static iopte_t *alloc_consistent_cluster(struct sbus_iommu *iommu, unsigned long npages)
214 iopte = iommu->page_table + (1 * CLUSTER_NPAGES);
215 while (iopte > iommu->page_table) {
217 if (!(iopte_val(*iopte) & IOPTE_VALID)) {
218 unsigned long tmp = npages;
222 if (iopte_val(*iopte) & IOPTE_VALID)
226 u32 entry = (iopte - iommu->page_table);
228 if (entry < iommu->lowest_consistent_map)
229 iommu->lowest_consistent_map = entry;
237 static void free_consistent_cluster(struct sbus_iommu *iommu, u32 base, unsigned long npages)
239 iopte_t *iopte = iommu->page_table + ((base - MAP_BASE) >> IO_PAGE_SHIFT);
241 if ((iopte - iommu->page_table) == iommu->lowest_consistent_map) {
242 iopte_t *walk = iopte + npages;
245 limit = iommu->page_table + CLUSTER_NPAGES;
246 while (walk < limit) {
247 if (iopte_val(*walk) != 0UL)
251 iommu->lowest_consistent_map =
252 (walk - iommu->page_table);
256 *iopte++ = __iopte(0UL);
259 void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma_addr)
261 unsigned long order, first_page, flags;
262 struct sbus_iommu *iommu;
267 if (size <= 0 || sdev == NULL || dvma_addr == NULL)
270 size = IO_PAGE_ALIGN(size);
271 order = get_order(size);
274 first_page = __get_free_pages(GFP_KERNEL, order);
275 if (first_page == 0UL)
277 memset((char *)first_page, 0, PAGE_SIZE << order);
279 iommu = sdev->bus->iommu;
281 spin_lock_irqsave(&iommu->lock, flags);
282 iopte = alloc_consistent_cluster(iommu, size >> IO_PAGE_SHIFT);
284 spin_unlock_irqrestore(&iommu->lock, flags);
285 free_pages(first_page, order);
289 /* Ok, we're committed at this point. */
290 *dvma_addr = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);
291 ret = (void *) first_page;
292 npages = size >> IO_PAGE_SHIFT;
294 *iopte++ = __iopte(IOPTE_VALID | IOPTE_CACHE | IOPTE_WRITE |
295 (__pa(first_page) & IOPTE_PAGE));
296 first_page += IO_PAGE_SIZE;
298 iommu_flush(iommu, *dvma_addr, size >> IO_PAGE_SHIFT);
299 spin_unlock_irqrestore(&iommu->lock, flags);
304 void sbus_free_consistent(struct sbus_dev *sdev, size_t size, void *cpu, dma_addr_t dvma)
306 unsigned long order, npages;
307 struct sbus_iommu *iommu;
309 if (size <= 0 || sdev == NULL || cpu == NULL)
312 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
313 iommu = sdev->bus->iommu;
315 spin_lock_irq(&iommu->lock);
316 free_consistent_cluster(iommu, dvma, npages);
317 iommu_flush(iommu, dvma, npages);
318 spin_unlock_irq(&iommu->lock);
320 order = get_order(size);
322 free_pages((unsigned long)cpu, order);
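/* Illustrative sketch, not from the original driver: a device driver
 * would pair the two routines above roughly like this, where 'sdev' is
 * its struct sbus_dev and RING_BYTES is a hypothetical descriptor ring
 * size:
 *
 *	dma_addr_t ring_dvma;
 *	void *ring = sbus_alloc_consistent(sdev, RING_BYTES, &ring_dvma);
 *
 *	if (ring != NULL) {
 *		... hand ring_dvma to the device, touch 'ring' from the CPU ...
 *		sbus_free_consistent(sdev, RING_BYTES, ring, ring_dvma);
 *	}
 */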
325 dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t size, int dir)
327 struct sbus_iommu *iommu = sdev->bus->iommu;
328 unsigned long npages, pbase, flags;
330 u32 dma_base, offset;
331 unsigned long iopte_bits;
333 if (dir == SBUS_DMA_NONE)
336 pbase = (unsigned long) ptr;
337 offset = (u32) (pbase & ~IO_PAGE_MASK);
338 size = (IO_PAGE_ALIGN(pbase + size) - (pbase & IO_PAGE_MASK));
339 pbase = (unsigned long) __pa(pbase & IO_PAGE_MASK);
341 spin_lock_irqsave(&iommu->lock, flags);
342 npages = size >> IO_PAGE_SHIFT;
343 iopte = alloc_streaming_cluster(iommu, npages);
346 dma_base = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);
347 npages = size >> IO_PAGE_SHIFT;
348 iopte_bits = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
349 if (dir != SBUS_DMA_TODEVICE)
350 iopte_bits |= IOPTE_WRITE;
352 *iopte++ = __iopte(iopte_bits | (pbase & IOPTE_PAGE));
353 pbase += IO_PAGE_SIZE;
355 npages = size >> IO_PAGE_SHIFT;
356 spin_unlock_irqrestore(&iommu->lock, flags);
358 return (dma_base | offset);
361 spin_unlock_irqrestore(&iommu->lock, flags);
366 void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t dma_addr, size_t size, int direction)
368 struct sbus_iommu *iommu = sdev->bus->iommu;
369 u32 dma_base = dma_addr & IO_PAGE_MASK;
372 size = (IO_PAGE_ALIGN(dma_addr + size) - dma_base);
374 spin_lock_irqsave(&iommu->lock, flags);
375 free_streaming_cluster(iommu, dma_base, size >> IO_PAGE_SHIFT);
376 strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT);
377 spin_unlock_irqrestore(&iommu->lock, flags);
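/* Illustrative sketch, not from the original driver: a one-shot streaming
 * transfer to a device using the pair above, with 'buf' and 'len' being
 * hypothetical driver state:
 *
 *	dma_addr_t dvma = sbus_map_single(sdev, buf, len, SBUS_DMA_TODEVICE);
 *
 *	... start the device transfer using 'dvma' ...
 *	sbus_unmap_single(sdev, dvma, len, SBUS_DMA_TODEVICE);
 *
 * A driver that keeps a SBUS_DMA_FROMDEVICE mapping alive across
 * transfers would call sbus_dma_sync_single_for_cpu() (below) before
 * looking at the data.
 */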
380 #define SG_ENT_PHYS_ADDRESS(SG) \
381 (__pa(page_address((SG)->page)) + (SG)->offset)
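/* SG_ENT_PHYS_ADDRESS() is simply the physical address of the data an
 * sg entry describes: the physical address of its page plus the offset
 * into that page.  fill_sg() below walks the scatterlist and lays down
 * one IOPTE for every IO page spanned by each of the 'nused' coalesced
 * DMA segments, merging physically contiguous entries and splitting
 * entries that cross IO page boundaries.
 */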
383 static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg, int nused, int nelems, unsigned long iopte_bits)
385 struct scatterlist *dma_sg = sg;
386 struct scatterlist *sg_end = sg + nelems;
389 for (i = 0; i < nused; i++) {
390 unsigned long pteval = ~0UL;
393 dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
395 ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
397 unsigned long offset;
400 /* If we are here, we know we have at least one
401 * more page to map. So walk forward until we
402 * hit a page crossing, and begin creating new
403 * mappings from that spot.
408 tmp = (unsigned long) SG_ENT_PHYS_ADDRESS(sg);
410 if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
411 pteval = tmp & IO_PAGE_MASK;
412 offset = tmp & (IO_PAGE_SIZE - 1UL);
415 if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
416 pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
418 len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
424 pteval = ((pteval & IOPTE_PAGE) | iopte_bits);
426 *iopte++ = __iopte(pteval);
427 pteval += IO_PAGE_SIZE;
428 len -= (IO_PAGE_SIZE - offset);
433 pteval = (pteval & IOPTE_PAGE) + len;
436 /* Skip over any tail mappings we've fully mapped,
437 * adjusting pteval along the way. Stop when we
438 * detect a page crossing event.
440 while (sg < sg_end &&
441 (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
442 (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
444 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
445 pteval += sg->length;
448 if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
450 } while (dma_npages != 0);
455 int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int dir)
457 struct sbus_iommu *iommu = sdev->bus->iommu;
458 unsigned long flags, npages;
461 struct scatterlist *sgtmp;
463 unsigned long iopte_bits;
465 if (dir == SBUS_DMA_NONE)
468 /* Fast path single entry scatterlists. */
471 sbus_map_single(sdev,
472 (page_address(sg->page) + sg->offset),
474 sg->dma_length = sg->length;
478 npages = prepare_sg(sg, nents);
480 spin_lock_irqsave(&iommu->lock, flags);
481 iopte = alloc_streaming_cluster(iommu, npages);
484 dma_base = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);
486 /* Normalize DVMA addresses. */
490 while (used && sgtmp->dma_length) {
491 sgtmp->dma_address += dma_base;
497 iopte_bits = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
498 if (dir != SBUS_DMA_TODEVICE)
499 iopte_bits |= IOPTE_WRITE;
501 fill_sg(iopte, sg, used, nents, iopte_bits);
503 verify_sglist(sg, nents, iopte, npages);
505 spin_unlock_irqrestore(&iommu->lock, flags);
510 spin_unlock_irqrestore(&iommu->lock, flags);
515 void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
517 unsigned long size, flags;
518 struct sbus_iommu *iommu;
522 /* Fast path single entry scatterlists. */
524 sbus_unmap_single(sdev, sg->dma_address, sg->dma_length, direction);
528 dvma_base = sg[0].dma_address & IO_PAGE_MASK;
529 for (i = 0; i < nents; i++) {
530 if (sg[i].dma_length == 0)
534 size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - dvma_base;
536 iommu = sdev->bus->iommu;
537 spin_lock_irqsave(&iommu->lock, flags);
538 free_streaming_cluster(iommu, dvma_base, size >> IO_PAGE_SHIFT);
539 strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT);
540 spin_unlock_irqrestore(&iommu->lock, flags);
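/* Illustrative sketch, not from the original driver: scatter-gather use
 * of the pair above, with 'sglist' and 'nents' being hypothetical driver
 * state.  The return value is the number of coalesced DMA segments,
 * which may be smaller than nents:
 *
 *	int count = sbus_map_sg(sdev, sglist, nents, SBUS_DMA_TODEVICE);
 *
 *	... program 'count' descriptors from sglist[i].dma_address and
 *	    sglist[i].dma_length into the device ...
 *
 *	sbus_unmap_sg(sdev, sglist, nents, SBUS_DMA_TODEVICE);
 */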
543 void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t base, size_t size, int direction)
545 struct sbus_iommu *iommu = sdev->bus->iommu;
548 size = (IO_PAGE_ALIGN(base + size) - (base & IO_PAGE_MASK));
550 spin_lock_irqsave(&iommu->lock, flags);
551 strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT);
552 spin_unlock_irqrestore(&iommu->lock, flags);
555 void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t base, size_t size, int direction)
559 void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
561 struct sbus_iommu *iommu = sdev->bus->iommu;
562 unsigned long flags, size;
566 base = sg[0].dma_address & IO_PAGE_MASK;
567 for (i = 0; i < nents; i++) {
568 if (sg[i].dma_length == 0)
572 size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - base;
574 spin_lock_irqsave(&iommu->lock, flags);
575 strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT);
576 spin_unlock_irqrestore(&iommu->lock, flags);
579 void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
583 /* Enable 64-bit DVMA mode for the given device. */
584 void sbus_set_sbus64(struct sbus_dev *sdev, int bursts)
586 struct sbus_iommu *iommu = sdev->bus->iommu;
587 int slot = sdev->slot;
588 unsigned long cfg_reg;
591 cfg_reg = iommu->sbus_control_reg;
619 val = upa_readq(cfg_reg);
620 if (val & (1UL << 14UL)) {
621 /* Extended transfer mode already enabled. */
625 val |= (1UL << 14UL);
627 if (bursts & DMA_BURST8)
629 if (bursts & DMA_BURST16)
631 if (bursts & DMA_BURST32)
633 if (bursts & DMA_BURST64)
635 upa_writeq(val, cfg_reg);
638 /* SBUS SYSIO INO number to Sparc PIL level. */
639 static unsigned char sysio_ino_to_pil[] = {
640 0, 4, 4, 7, 5, 7, 8, 9, /* SBUS slot 0 */
641 0, 4, 4, 7, 5, 7, 8, 9, /* SBUS slot 1 */
642 0, 4, 4, 7, 5, 7, 8, 9, /* SBUS slot 2 */
643 0, 4, 4, 7, 5, 7, 8, 9, /* SBUS slot 3 */
644 4, /* Onboard SCSI */
645 5, /* Onboard Ethernet */
646 /*XXX*/ 8, /* Onboard BPP */
649 /*XXX*/15, /* PowerFail */
652 12, /* Zilog Serial Channels (incl. Keyboard/Mouse lines) */
654 0, /* Spare Hardware (bogon for now) */
655 0, /* Keyboard (bogon for now) */
656 0, /* Mouse (bogon for now) */
657 0, /* Serial (bogon for now) */
658 0, 0, /* Bogon, Bogon */
661 0, 0, /* Bogon, Bogon */
662 15, /* Uncorrectable SBUS Error */
663 15, /* Correctable SBUS Error */
665 /*XXX*/ 0, /* Power Management (bogon for now) */
668 /* INO number to IMAP register offset for SYSIO external IRQs.
669 * This should conform to both Sunfire/Wildfire server and Fusion
672 #define SYSIO_IMAP_SLOT0 0x2c04UL
673 #define SYSIO_IMAP_SLOT1 0x2c0cUL
674 #define SYSIO_IMAP_SLOT2 0x2c14UL
675 #define SYSIO_IMAP_SLOT3 0x2c1cUL
676 #define SYSIO_IMAP_SCSI 0x3004UL
677 #define SYSIO_IMAP_ETH 0x300cUL
678 #define SYSIO_IMAP_BPP 0x3014UL
679 #define SYSIO_IMAP_AUDIO 0x301cUL
680 #define SYSIO_IMAP_PFAIL 0x3024UL
681 #define SYSIO_IMAP_KMS 0x302cUL
682 #define SYSIO_IMAP_FLPY 0x3034UL
683 #define SYSIO_IMAP_SHW 0x303cUL
684 #define SYSIO_IMAP_KBD 0x3044UL
685 #define SYSIO_IMAP_MS 0x304cUL
686 #define SYSIO_IMAP_SER 0x3054UL
687 #define SYSIO_IMAP_TIM0 0x3064UL
688 #define SYSIO_IMAP_TIM1 0x306cUL
689 #define SYSIO_IMAP_UE 0x3074UL
690 #define SYSIO_IMAP_CE 0x307cUL
691 #define SYSIO_IMAP_SBERR 0x3084UL
692 #define SYSIO_IMAP_PMGMT 0x308cUL
693 #define SYSIO_IMAP_GFX 0x3094UL
694 #define SYSIO_IMAP_EUPA 0x309cUL
696 #define bogon ((unsigned long) -1)
697 static unsigned long sysio_irq_offsets[] = {
698 /* SBUS Slot 0 --> 3, level 1 --> 7 */
699 SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
700 SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
701 SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
702 SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
703 SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
704 SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
705 SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
706 SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
708 /* Onboard devices (not relevant/used on SunFire). */
737 #define NUM_SYSIO_OFFSETS (sizeof(sysio_irq_offsets) / sizeof(sysio_irq_offsets[0]))
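/* For the external SBUS slots an INO encodes both the slot and the SBUS
 * interrupt level: slot = (ino >> 3) & 3, level = ino & 7.  For example
 * INO 0x0d is slot 1, level 5, which selects SYSIO_IMAP_SLOT1 in the
 * table above and PIL 7 in sysio_ino_to_pil[].
 */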
739 /* Convert Interrupt Mapping register pointer to associated
740 * Interrupt Clear register pointer, SYSIO specific version.
742 #define SYSIO_ICLR_UNUSED0 0x3400UL
743 #define SYSIO_ICLR_SLOT0 0x340cUL
744 #define SYSIO_ICLR_SLOT1 0x344cUL
745 #define SYSIO_ICLR_SLOT2 0x348cUL
746 #define SYSIO_ICLR_SLOT3 0x34ccUL
747 static unsigned long sysio_imap_to_iclr(unsigned long imap)
749 unsigned long diff = SYSIO_ICLR_UNUSED0 - SYSIO_IMAP_SLOT0;
753 unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
755 struct sbus_bus *sbus = (struct sbus_bus *)buscookie;
756 struct sbus_iommu *iommu = sbus->iommu;
757 unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
758 unsigned long imap, iclr;
759 int pil, sbus_level = 0;
761 pil = sysio_ino_to_pil[ino];
763 printk("sbus_irq_build: Bad SYSIO INO[%x]\n", ino);
764 panic("Bad SYSIO IRQ translations...");
767 if (PIL_RESERVED(pil))
770 imap = sysio_irq_offsets[ino];
771 if (imap == ((unsigned long)-1)) {
772 prom_printf("get_irq_translations: Bad SYSIO INO[%x] cpu[%d]\n",
778 /* SYSIO inconsistency. For external SLOTS, we have to select
779 * the right ICLR register based upon the lower SBUS irq level
783 iclr = sysio_imap_to_iclr(imap);
785 int sbus_slot = (ino & 0x18)>>3;
787 sbus_level = ino & 0x7;
791 iclr = reg_base + SYSIO_ICLR_SLOT0;
794 iclr = reg_base + SYSIO_ICLR_SLOT1;
797 iclr = reg_base + SYSIO_ICLR_SLOT2;
801 iclr = reg_base + SYSIO_ICLR_SLOT3;
805 iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
807 return build_irq(pil, sbus_level, iclr, imap);
810 /* Error interrupt handling. */
811 #define SYSIO_UE_AFSR 0x0030UL
812 #define SYSIO_UE_AFAR 0x0038UL
813 #define SYSIO_UEAFSR_PPIO 0x8000000000000000UL /* Primary PIO cause */
814 #define SYSIO_UEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read cause */
815 #define SYSIO_UEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write cause */
816 #define SYSIO_UEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO is cause */
817 #define SYSIO_UEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read cause */
818 #define SYSIO_UEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write cause*/
819 #define SYSIO_UEAFSR_RESV1 0x03ff000000000000UL /* Reserved */
820 #define SYSIO_UEAFSR_DOFF 0x0000e00000000000UL /* Doubleword Offset */
821 #define SYSIO_UEAFSR_SIZE 0x00001c0000000000UL /* Bad transfer size 2^SIZE */
822 #define SYSIO_UEAFSR_MID 0x000003e000000000UL /* UPA MID causing the fault */
823 #define SYSIO_UEAFSR_RESV2 0x0000001fffffffffUL /* Reserved */
824 static irqreturn_t sysio_ue_handler(int irq, void *dev_id, struct pt_regs *regs)
826 struct sbus_bus *sbus = dev_id;
827 struct sbus_iommu *iommu = sbus->iommu;
828 unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
829 unsigned long afsr_reg, afar_reg;
830 unsigned long afsr, afar, error_bits;
833 afsr_reg = reg_base + SYSIO_UE_AFSR;
834 afar_reg = reg_base + SYSIO_UE_AFAR;
836 /* Latch error status. */
837 afsr = upa_readq(afsr_reg);
838 afar = upa_readq(afar_reg);
840 /* Clear primary/secondary error status bits. */
842 (SYSIO_UEAFSR_PPIO | SYSIO_UEAFSR_PDRD | SYSIO_UEAFSR_PDWR |
843 SYSIO_UEAFSR_SPIO | SYSIO_UEAFSR_SDRD | SYSIO_UEAFSR_SDWR);
844 upa_writeq(error_bits, afsr_reg);
847 printk("SYSIO[%x]: Uncorrectable ECC Error, primary error type[%s]\n",
849 (((error_bits & SYSIO_UEAFSR_PPIO) ?
851 ((error_bits & SYSIO_UEAFSR_PDRD) ?
853 ((error_bits & SYSIO_UEAFSR_PDWR) ?
854 "DVMA Write" : "???")))));
855 printk("SYSIO[%x]: DOFF[%lx] SIZE[%lx] MID[%lx]\n",
857 (afsr & SYSIO_UEAFSR_DOFF) >> 45UL,
858 (afsr & SYSIO_UEAFSR_SIZE) >> 42UL,
859 (afsr & SYSIO_UEAFSR_MID) >> 37UL);
860 printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
861 printk("SYSIO[%x]: Secondary UE errors [", sbus->portid);
863 if (afsr & SYSIO_UEAFSR_SPIO) {
867 if (afsr & SYSIO_UEAFSR_SDRD) {
869 printk("(DVMA Read)");
871 if (afsr & SYSIO_UEAFSR_SDWR) {
873 printk("(DVMA Write)");
882 #define SYSIO_CE_AFSR 0x0040UL
883 #define SYSIO_CE_AFAR 0x0048UL
884 #define SYSIO_CEAFSR_PPIO 0x8000000000000000UL /* Primary PIO cause */
885 #define SYSIO_CEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read cause */
886 #define SYSIO_CEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write cause */
887 #define SYSIO_CEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO cause */
888 #define SYSIO_CEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read cause */
889 #define SYSIO_CEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write cause*/
890 #define SYSIO_CEAFSR_RESV1 0x0300000000000000UL /* Reserved */
891 #define SYSIO_CEAFSR_ESYND 0x00ff000000000000UL /* Syndrome Bits */
892 #define SYSIO_CEAFSR_DOFF 0x0000e00000000000UL /* Doubleword Offset */
893 #define SYSIO_CEAFSR_SIZE 0x00001c0000000000UL /* Bad transfer size 2^SIZE */
894 #define SYSIO_CEAFSR_MID 0x000003e000000000UL /* UPA MID causing the fault */
895 #define SYSIO_CEAFSR_RESV2 0x0000001fffffffffUL /* Reserved */
896 static irqreturn_t sysio_ce_handler(int irq, void *dev_id, struct pt_regs *regs)
898 struct sbus_bus *sbus = dev_id;
899 struct sbus_iommu *iommu = sbus->iommu;
900 unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
901 unsigned long afsr_reg, afar_reg;
902 unsigned long afsr, afar, error_bits;
905 afsr_reg = reg_base + SYSIO_CE_AFSR;
906 afar_reg = reg_base + SYSIO_CE_AFAR;
908 /* Latch error status. */
909 afsr = upa_readq(afsr_reg);
910 afar = upa_readq(afar_reg);
912 /* Clear primary/secondary error status bits. */
914 (SYSIO_CEAFSR_PPIO | SYSIO_CEAFSR_PDRD | SYSIO_CEAFSR_PDWR |
915 SYSIO_CEAFSR_SPIO | SYSIO_CEAFSR_SDRD | SYSIO_CEAFSR_SDWR);
916 upa_writeq(error_bits, afsr_reg);
918 printk("SYSIO[%x]: Correctable ECC Error, primary error type[%s]\n",
920 (((error_bits & SYSIO_CEAFSR_PPIO) ?
922 ((error_bits & SYSIO_CEAFSR_PDRD) ?
924 ((error_bits & SYSIO_CEAFSR_PDWR) ?
925 "DVMA Write" : "???")))));
927 /* XXX Use syndrome and afar to print out module string just like
928 * XXX UDB CE trap handler does... -DaveM
930 printk("SYSIO[%x]: DOFF[%lx] ECC Syndrome[%lx] Size[%lx] MID[%lx]\n",
932 (afsr & SYSIO_CEAFSR_DOFF) >> 45UL,
933 (afsr & SYSIO_CEAFSR_ESYND) >> 48UL,
934 (afsr & SYSIO_CEAFSR_SIZE) >> 42UL,
935 (afsr & SYSIO_CEAFSR_MID) >> 37UL);
936 printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
938 printk("SYSIO[%x]: Secondary CE errors [", sbus->portid);
940 if (afsr & SYSIO_CEAFSR_SPIO) {
944 if (afsr & SYSIO_CEAFSR_SDRD) {
946 printk("(DVMA Read)");
948 if (afsr & SYSIO_CEAFSR_SDWR) {
950 printk("(DVMA Write)");
959 #define SYSIO_SBUS_AFSR 0x2010UL
960 #define SYSIO_SBUS_AFAR 0x2018UL
961 #define SYSIO_SBAFSR_PLE 0x8000000000000000UL /* Primary Late PIO Error */
962 #define SYSIO_SBAFSR_PTO 0x4000000000000000UL /* Primary SBUS Timeout */
963 #define SYSIO_SBAFSR_PBERR 0x2000000000000000UL /* Primary SBUS Error ACK */
964 #define SYSIO_SBAFSR_SLE 0x1000000000000000UL /* Secondary Late PIO Error */
965 #define SYSIO_SBAFSR_STO 0x0800000000000000UL /* Secondary SBUS Timeout */
966 #define SYSIO_SBAFSR_SBERR 0x0400000000000000UL /* Secondary SBUS Error ACK */
967 #define SYSIO_SBAFSR_RESV1 0x03ff000000000000UL /* Reserved */
968 #define SYSIO_SBAFSR_RD 0x0000800000000000UL /* Primary was late PIO read */
969 #define SYSIO_SBAFSR_RESV2 0x0000600000000000UL /* Reserved */
970 #define SYSIO_SBAFSR_SIZE 0x00001c0000000000UL /* Size of transfer */
971 #define SYSIO_SBAFSR_MID 0x000003e000000000UL /* MID causing the error */
972 #define SYSIO_SBAFSR_RESV3 0x0000001fffffffffUL /* Reserved */
973 static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id, struct pt_regs *regs)
975 struct sbus_bus *sbus = dev_id;
976 struct sbus_iommu *iommu = sbus->iommu;
977 unsigned long afsr_reg, afar_reg, reg_base;
978 unsigned long afsr, afar, error_bits;
981 reg_base = iommu->sbus_control_reg - 0x2000UL;
982 afsr_reg = reg_base + SYSIO_SBUS_AFSR;
983 afar_reg = reg_base + SYSIO_SBUS_AFAR;
985 afsr = upa_readq(afsr_reg);
986 afar = upa_readq(afar_reg);
988 /* Clear primary/secondary error status bits. */
990 (SYSIO_SBAFSR_PLE | SYSIO_SBAFSR_PTO | SYSIO_SBAFSR_PBERR |
991 SYSIO_SBAFSR_SLE | SYSIO_SBAFSR_STO | SYSIO_SBAFSR_SBERR);
992 upa_writeq(error_bits, afsr_reg);
995 printk("SYSIO[%x]: SBUS Error, primary error type[%s] read(%d)\n",
997 (((error_bits & SYSIO_SBAFSR_PLE) ?
999 ((error_bits & SYSIO_SBAFSR_PTO) ?
1001 ((error_bits & SYSIO_SBAFSR_PBERR) ?
1002 "Error Ack" : "???")))),
1003 (afsr & SYSIO_SBAFSR_RD) ? 1 : 0);
1004 printk("SYSIO[%x]: size[%lx] MID[%lx]\n",
1006 (afsr & SYSIO_SBAFSR_SIZE) >> 42UL,
1007 (afsr & SYSIO_SBAFSR_MID) >> 37UL);
1008 printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
1009 printk("SYSIO[%x]: Secondary SBUS errors [", sbus->portid);
1011 if (afsr & SYSIO_SBAFSR_SLE) {
1013 printk("(Late PIO Error)");
1015 if (afsr & SYSIO_SBAFSR_STO) {
1017 printk("(Time Out)");
1019 if (afsr & SYSIO_SBAFSR_SBERR) {
1021 printk("(Error Ack)");
1027 /* XXX check iommu/strbuf for further error status XXX */
1032 #define ECC_CONTROL 0x0020UL
1033 #define SYSIO_ECNTRL_ECCEN 0x8000000000000000UL /* Enable ECC Checking */
1034 #define SYSIO_ECNTRL_UEEN 0x4000000000000000UL /* Enable UE Interrupts */
1035 #define SYSIO_ECNTRL_CEEN 0x2000000000000000UL /* Enable CE Interrupts */
1037 #define SYSIO_UE_INO 0x34
1038 #define SYSIO_CE_INO 0x35
1039 #define SYSIO_SBUSERR_INO 0x36
1041 static void __init sysio_register_error_handlers(struct sbus_bus *sbus)
1043 struct sbus_iommu *iommu = sbus->iommu;
1044 unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
1048 irq = sbus_build_irq(sbus, SYSIO_UE_INO);
1049 if (request_irq(irq, sysio_ue_handler,
1050 SA_SHIRQ, "SYSIO UE", sbus) < 0) {
1051 prom_printf("SYSIO[%x]: Cannot register UE interrupt.\n",
1056 irq = sbus_build_irq(sbus, SYSIO_CE_INO);
1057 if (request_irq(irq, sysio_ce_handler,
1058 SA_SHIRQ, "SYSIO CE", sbus) < 0) {
1059 prom_printf("SYSIO[%x]: Cannot register CE interrupt.\n",
1064 irq = sbus_build_irq(sbus, SYSIO_SBUSERR_INO);
1065 if (request_irq(irq, sysio_sbus_error_handler,
1066 SA_SHIRQ, "SYSIO SBUS Error", sbus) < 0) {
1067 prom_printf("SYSIO[%x]: Cannot register SBUS Error interrupt.\n",
1072 /* Now turn the error interrupts on and also enable ECC checking. */
1073 upa_writeq((SYSIO_ECNTRL_ECCEN |
1076 reg_base + ECC_CONTROL);
1078 control = upa_readq(iommu->sbus_control_reg);
1079 control |= 0x100UL; /* SBUS Error Interrupt Enable */
1080 upa_writeq(control, iommu->sbus_control_reg);
1083 /* Boot time initialization. */
1084 void __init sbus_iommu_init(int prom_node, struct sbus_bus *sbus)
1086 struct linux_prom64_registers rprop;
1087 struct sbus_iommu *iommu;
1088 unsigned long regs, tsb_base;
1092 sbus->portid = prom_getintdefault(sbus->prom_node,
1095 err = prom_getproperty(prom_node, "reg",
1096 (char *)&rprop, sizeof(rprop));
1098 prom_printf("sbus_iommu_init: Cannot map SYSIO control registers.\n");
1101 regs = rprop.phys_addr;
1103 iommu = kmalloc(sizeof(*iommu) + SMP_CACHE_BYTES, GFP_ATOMIC);
1104 if (iommu == NULL) {
1105 prom_printf("sbus_iommu_init: Fatal error, kmalloc(iommu) failed\n");
1109 /* Align on E$ line boundary. */
1110 iommu = (struct sbus_iommu *)
1111 (((unsigned long)iommu + (SMP_CACHE_BYTES - 1UL)) &
1112 ~(SMP_CACHE_BYTES - 1UL));
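/* (The extra SMP_CACHE_BYTES requested from kmalloc() above is what
 *  makes this round-up safe.  The unaligned original pointer is simply
 *  dropped; the structure is set up once at boot and never freed.)
 */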
1114 memset(iommu, 0, sizeof(*iommu));
1116 /* We start with no consistent mappings. */
1117 iommu->lowest_consistent_map = CLUSTER_NPAGES;
1119 for (i = 0; i < NCLUSTERS; i++) {
1120 iommu->alloc_info[i].flush = 0;
1121 iommu->alloc_info[i].next = 0;
1124 /* Setup spinlock. */
1125 spin_lock_init(&iommu->lock);
1127 /* Init register offsets. */
1128 iommu->iommu_regs = regs + SYSIO_IOMMUREG_BASE;
1129 iommu->strbuf_regs = regs + SYSIO_STRBUFREG_BASE;
1131 /* The SYSIO SBUS control register is used for dummy reads
1132 * in order to ensure write completion.
1134 iommu->sbus_control_reg = regs + 0x2000UL;
1136 /* Link into SYSIO software state. */
1137 sbus->iommu = iommu;
1139 printk("SYSIO: UPA portID %x, at %016lx\n",
1140 sbus->portid, regs);
1142 /* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */
1143 control = upa_readq(iommu->iommu_regs + IOMMU_CONTROL);
1144 control = ((7UL << 16UL) |
1149 /* Using the above configuration we need a 1MB iommu page
1150 * table (128K ioptes * 8 bytes per iopte). This is
1151 * page order 7 on UltraSparc.
1153 tsb_base = __get_free_pages(GFP_ATOMIC, get_order(IO_TSB_SIZE));
1154 if (tsb_base == 0UL) {
1155 prom_printf("sbus_iommu_init: Fatal error, cannot alloc TSB table.\n");
1159 iommu->page_table = (iopte_t *) tsb_base;
1160 memset(iommu->page_table, 0, IO_TSB_SIZE);
1162 upa_writeq(control, iommu->iommu_regs + IOMMU_CONTROL);
1164 /* Clean out any cruft in the IOMMU using
1165 * diagnostic accesses.
1167 for (i = 0; i < 16; i++) {
1168 unsigned long dram = iommu->iommu_regs + IOMMU_DRAMDIAG;
1169 unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;
1171 dram += (unsigned long)i * 8UL;
1172 tag += (unsigned long)i * 8UL;
1173 upa_writeq(0, dram);
1176 upa_readq(iommu->sbus_control_reg);
1178 /* Give the TSB to SYSIO. */
1179 upa_writeq(__pa(tsb_base), iommu->iommu_regs + IOMMU_TSBBASE);
1181 /* Setup streaming buffer, DE=1 SB_EN=1 */
1182 control = (1UL << 1UL) | (1UL << 0UL);
1183 upa_writeq(control, iommu->strbuf_regs + STRBUF_CONTROL);
1185 /* Clear out the tags using diagnostics. */
1186 for (i = 0; i < 16; i++) {
1187 unsigned long ptag, ltag;
1189 ptag = iommu->strbuf_regs + STRBUF_PTAGDIAG;
1190 ltag = iommu->strbuf_regs + STRBUF_LTAGDIAG;
1191 ptag += (unsigned long)i * 8UL;
1192 ltag += (unsigned long)i * 8UL;
1194 upa_writeq(0UL, ptag);
1195 upa_writeq(0UL, ltag);
1198 /* Enable DVMA arbitration for all devices/slots. */
1199 control = upa_readq(iommu->sbus_control_reg);
1201 upa_writeq(control, iommu->sbus_control_reg);
1203 /* Now some Starfire specific grot... */
1204 if (this_is_starfire)
1205 sbus->starfire_cookie = starfire_hookup(sbus->portid);
1207 sbus->starfire_cookie = NULL;
1209 sysio_register_error_handlers(sbus);