/*
+ * arch/ppc64/kernel/iommu.c
* Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
*
* Rewrite, cleanup, new allocation schemes, virtual merging:
*/
+#include <linux/config.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
-#include <asm/kdump.h>
#define DBG(...)
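/*
 * Find a run of 'npages' free entries in the TCE allocation bitmap,
 * reserve them and return the index of the first one, or DMA_ERROR_CODE
 * if no suitable range exists.  Callers serialize on tbl->it_lock (see
 * iommu_alloc() and iommu_map_sg() below).
 */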
static unsigned long iommu_range_alloc(struct iommu_table *tbl,
unsigned long npages,
unsigned long *handle,
- unsigned long mask,
unsigned int align_order)
{
unsigned long n, end, i, start;
	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
if (start >= limit)
start = largealloc ? tbl->it_largehint : tbl->it_hint;
-
+
again:
- if (limit + tbl->it_offset > mask) {
- limit = mask - tbl->it_offset + 1;
- /* If we're constrained on address range, first try
- * at the masked hint to avoid O(n) search complexity,
- * but on second pass, start at 0.
- */
- if ((start & mask) >= limit || pass > 0)
- start = 0;
- else
- start &= mask;
- }
-
n = find_next_zero_bit(tbl->it_map, limit, start);
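	/* 'n' is the first free entry at or after 'start'; it may still be
	 * rounded up below to satisfy align_order. */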
/* Align allocation */
static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
unsigned int npages, enum dma_data_direction direction,
- unsigned long mask, unsigned int align_order)
+ unsigned int align_order)
{
unsigned long entry, flags;
dma_addr_t ret = DMA_ERROR_CODE;
-
+
spin_lock_irqsave(&(tbl->it_lock), flags);
- entry = iommu_range_alloc(tbl, npages, NULL, mask, align_order);
+ entry = iommu_range_alloc(tbl, npages, NULL, align_order);
if (unlikely(entry == DMA_ERROR_CODE)) {
spin_unlock_irqrestore(&(tbl->it_lock), flags);
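/*
 * Map a scatter/gather list.  Each entry gets its own run of TCEs, and
 * entries that end up contiguous in DMA space are merged into a single
 * segment ("virtual merging").
 */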
int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
struct scatterlist *sglist, int nelems,
- unsigned long mask, enum dma_data_direction direction)
+ enum dma_data_direction direction)
{
dma_addr_t dma_next = 0, dma_addr;
unsigned long flags;
vaddr = (unsigned long)page_address(s->page) + s->offset;
npages = PAGE_ALIGN(vaddr + slen) - (vaddr & PAGE_MASK);
npages >>= PAGE_SHIFT;
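		/* Example, assuming 4K pages: an entry at page offset 0xf00
		 * with slen 0x300 crosses a page boundary, so npages is 2
		 * even though slen is smaller than PAGE_SIZE. */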
- entry = iommu_range_alloc(tbl, npages, &handle, mask >> PAGE_SHIFT, 0);
+ entry = iommu_range_alloc(tbl, npages, &handle, 0);
DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
 * Build an iommu_table structure. This contains a bitmap which
* is used to manage allocation of the tce space.
*/
-struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
+struct iommu_table *iommu_init_table(struct iommu_table *tbl)
{
unsigned long sz;
static int welcomed = 0;
- struct page *page;
/* Set aside 1/4 of the table for large allocations. */
tbl->it_halfpoint = tbl->it_size * 3 / 4;
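	/* Small allocations are normally confined below it_halfpoint and
	 * scan from it_hint; large ones scan from it_largehint in the top
	 * quarter, so a burst of either kind is less likely to starve the
	 * other. */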
/* number of bytes needed for the bitmap */
sz = (tbl->it_size + 7) >> 3;
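	/* One bit per TCE entry, rounded up to a whole number of bytes. */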
- page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
- if (!page)
+ tbl->it_map = (unsigned long *)__get_free_pages(GFP_ATOMIC, get_order(sz));
+ if (!tbl->it_map)
panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
- tbl->it_map = page_address(page);
+
memset(tbl->it_map, 0, sz);
tbl->it_hint = 0;
tbl->it_largehint = tbl->it_halfpoint;
spin_lock_init(&tbl->it_lock);
-#ifdef CONFIG_CRASH_DUMP
- if (ppc_md.tce_get) {
- unsigned long index, tceval;
- unsigned long tcecount = 0;
-
- /*
- * Reserve the existing mappings left by the first kernel.
- */
- for (index = 0; index < tbl->it_size; index++) {
- tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
- /*
- * Freed TCE entry contains 0x7fffffffffffffff on JS20
- */
- if (tceval && (tceval != 0x7fffffffffffffffUL)) {
- __set_bit(index, tbl->it_map);
- tcecount++;
- }
- }
- if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
- printk(KERN_WARNING "TCE table is full; ");
- printk(KERN_WARNING "freeing %d entries for the kdump boot\n",
- KDUMP_MIN_TCE_ENTRIES);
- for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
- index < tbl->it_size; index++)
- __clear_bit(index, tbl->it_map);
- }
- }
-#else
/* Clear the hardware table in case firmware left allocations in it */
ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
-#endif
if (!welcomed) {
printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
* byte within the page as vaddr.
*/
dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
- size_t size, unsigned long mask,
- enum dma_data_direction direction)
+ size_t size, enum dma_data_direction direction)
{
dma_addr_t dma_handle = DMA_ERROR_CODE;
unsigned long uaddr;
npages >>= PAGE_SHIFT;
if (tbl) {
- dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
- mask >> PAGE_SHIFT, 0);
+ dma_handle = iommu_alloc(tbl, vaddr, npages, direction, 0);
if (dma_handle == DMA_ERROR_CODE) {
if (printk_ratelimit()) {
printk(KERN_INFO "iommu_alloc failed, "
* to the dma address (mapping) of the first page.
*/
void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
- dma_addr_t *dma_handle, unsigned long mask, gfp_t flag, int node)
+ dma_addr_t *dma_handle, gfp_t flag)
{
void *ret = NULL;
dma_addr_t mapping;
unsigned int npages, order;
- struct page *page;
size = PAGE_ALIGN(size);
npages = size >> PAGE_SHIFT;
return NULL;
/* Alloc enough pages (and possibly more) */
- page = alloc_pages_node(node, flag, order);
- if (!page)
+ ret = (void *)__get_free_pages(flag, order);
+ if (!ret)
return NULL;
- ret = page_address(page);
memset(ret, 0, size);
/* Set up tces to cover the allocated range */
- mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL,
- mask >> PAGE_SHIFT, order);
+ mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL, order);
if (mapping == DMA_ERROR_CODE) {
free_pages((unsigned long)ret, order);
- return NULL;
- }
- *dma_handle = mapping;
+ ret = NULL;
+ } else
+ *dma_handle = mapping;
return ret;
}
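/*
 * Minimal sketch of a caller of the simplified iommu_alloc_coherent()
 * above; the lookup helper mydev_iommu_table() and MY_BUF_SIZE are
 * hypothetical placeholders, not part of this file.
 */
static void *mydev_alloc_dma_buffer(struct device *dev, dma_addr_t *dma)
{
	struct iommu_table *tbl = mydev_iommu_table(dev);	/* hypothetical lookup */

	if (!tbl)
		return NULL;

	/* Allocates zeroed pages and installs TCEs for them; *dma is only
	 * written on success, NULL is returned on failure. */
	return iommu_alloc_coherent(tbl, MY_BUF_SIZE, dma, GFP_ATOMIC);
}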