+/* Fallback functions when the main IOMMU code is not compiled in. This
+ code is roughly equivalent to i386. */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/string.h>
+#include <linux/dma-mapping.h>
#include <asm/proto.h>
+#include <asm/processor.h>
+#include <asm/dma.h>
-int iommu_merge = 0;
-EXPORT_SYMBOL(iommu_merge);
-
-dma_addr_t bad_dma_address;
-EXPORT_SYMBOL(bad_dma_address);
-
-/*
- * Dummy IO MMU functions
- */
-
-void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
- dma_addr_t *dma_handle)
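+
+/*
+ * Reject buffers the device cannot address: with no IOMMU to remap,
+ * a mapping is only valid if bus address + size stays below the
+ * device's DMA mask.
+ */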
+static int
+check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
{
- void *ret;
- int gfp = GFP_ATOMIC;
-
- if (hwdev == NULL ||
- end_pfn > (hwdev->dma_mask>>PAGE_SHIFT) || /* XXX */
- (u32)hwdev->dma_mask < 0xffffffff)
- gfp |= GFP_DMA;
- ret = (void *)__get_free_pages(gfp, get_order(size));
-
- if (ret != NULL) {
- memset(ret, 0, size);
- *dma_handle = virt_to_bus(ret);
+ if (hwdev && bus + size > *hwdev->dma_mask) {
+ printk(KERN_ERR
+ "nommu_%s: overflow %Lx+%lu of device mask %Lx\n",
+ name, (long long)bus, size, (long long)*hwdev->dma_mask);
+ return 0;
}
- return ret;
+ return 1;
}
-void pci_free_consistent(struct pci_dev *hwdev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
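+
+/*
+ * With no IOMMU the mapping is the identity transform: the bus
+ * address is just the buffer's physical address from virt_to_bus().
+ */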
+static dma_addr_t
+nommu_map_single(struct device *hwdev, void *ptr, size_t size,
+ int direction)
{
- free_pages((unsigned long)vaddr, get_order(size));
+	dma_addr_t bus = virt_to_bus(ptr);
+
+ if (!check_addr("map_single", hwdev, bus, size))
+ return bad_dma_address;
+ return bus;
}
-int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
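+
+/*
+ * map_single above did not allocate or remap anything, so there is
+ * nothing to tear down here.
+ */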
+void nommu_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
+			int direction)
{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- * RED-PEN this won't work for pci_map_single. Caller has to
- * use GFP_DMA in the first place.
- */
- if (mask < 0x00ffffff)
- return 0;
+}
- return 1;
-}
+/* Map a set of buffers described by a scatterlist in streaming
+ * mode for DMA.  This is the scatter-gather version of the
+ * nommu_map_single() interface above.  Here the scatterlist
+ * elements are each tagged with the appropriate dma address
+ * and length.  They are obtained via sg_dma_{address,length}(SG).
+ *
+ * NOTE: An implementation may be able to use a smaller number of
+ * DMA address/length pairs than there are SG table elements
+ * (for example via virtual mapping capabilities).
+ * The routine returns the number of addr/length pairs actually
+ * used, at most nents.
+ *
+ * Device ownership issues as mentioned above for map_single are
+ * the same here.
+ */
+int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
+ int nents, int direction)
+{
+	int i;
+
-EXPORT_SYMBOL(pci_dma_supported);
+ BUG_ON(direction == DMA_NONE);
+	for (i = 0; i < nents; i++) {
+ struct scatterlist *s = &sg[i];
+ BUG_ON(!s->page);
+		s->dma_address = virt_to_bus(page_address(s->page) + s->offset);
+ if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
+ return 0;
+ s->dma_length = s->length;
+ }
+ return nents;
+}
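+
+/*
+ * Hypothetical driver-side sketch (not part of this file): after a
+ * successful dma_map_sg(), a driver typically walks the list with
+ * sg_dma_address()/sg_dma_len() to feed its hardware:
+ *
+ *	n = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
+ *	for (i = 0; i < n; i++)
+ *		hw_fill_desc(hw, sg_dma_address(&sglist[i]),
+ *			     sg_dma_len(&sglist[i]));
+ *
+ * hw_fill_desc() is a made-up helper standing in for however the
+ * device consumes bus address/length pairs.
+ */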
-static int __init check_ram(void)
-{
- if (end_pfn >= 0xffffffff>>PAGE_SHIFT) {
- printk(KERN_ERR "WARNING more than 4GB of memory but no IOMMU.\n"
- KERN_ERR "WARNING 32bit PCI may malfunction.\n");
- }
- return 0;
-}
-__initcall(check_ram);
+/* Unmap a set of streaming mode DMA translations.
+ * Again, CPU read rules concerning calls here are the same as for
+ * nommu_unmap_single() above.
+ */
+void nommu_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, int dir)
+{
+}
+
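+/*
+ * The generic dma_* entry points on x86-64 dispatch through this
+ * operations table; installing it makes the direct-mapped functions
+ * above the system-wide DMA implementation.  .is_phys flags that bus
+ * addresses here are plain physical addresses.
+ */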
+struct dma_mapping_ops nommu_dma_ops = {
+ .map_single = nommu_map_single,
+ .unmap_single = nommu_unmap_single,
+ .map_sg = nommu_map_sg,
+ .unmap_sg = nommu_unmap_sg,
+ .is_phys = 1,
+};
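+
+/*
+ * Called during boot when no hardware IOMMU driver has installed its
+ * own dma_ops; fall back to the direct mapping implemented above.
+ */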
+void __init no_iommu_init(void)
+{
+ if (dma_ops)
+ return;
+ dma_ops = &nommu_dma_ops;
+}
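+
+/*
+ * For reference, the x86-64 wrappers dispatch through dma_ops roughly
+ * like this (simplified sketch of the asm/dma-mapping.h glue, not a
+ * verbatim copy):
+ *
+ *	static inline dma_addr_t
+ *	dma_map_single(struct device *hwdev, void *ptr, size_t size,
+ *		       int direction)
+ *	{
+ *		return dma_ops->map_single(hwdev, ptr, size, direction);
+ *	}
+ */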