Fedora kernel-2.6.17-1.2142_FC4 patched with stable patch-2.6.17.4-vs2.0.2-rc26.diff
diff --git a/arch/x86_64/kernel/pci-nommu.c b/arch/x86_64/kernel/pci-nommu.c
index 67d90b8..1f6ecc6 100644
--- a/arch/x86_64/kernel/pci-nommu.c
+++ b/arch/x86_64/kernel/pci-nommu.c
@@ -6,89 +6,88 @@
 #include <linux/string.h>
 #include <asm/proto.h>
 #include <asm/processor.h>
+#include <asm/dma.h>
 
-int iommu_merge = 0;
-EXPORT_SYMBOL(iommu_merge);
-
-dma_addr_t bad_dma_address;
-EXPORT_SYMBOL(bad_dma_address);
-
-int iommu_bio_merge = 0;
-EXPORT_SYMBOL(iommu_bio_merge);
-
-int iommu_sac_force = 0;
-EXPORT_SYMBOL(iommu_sac_force);
-
-/* 
- * Dummy IO MMU functions
- */
-
-void *dma_alloc_coherent(struct device *hwdev, size_t size,
-                        dma_addr_t *dma_handle, unsigned gfp)
+static int
+check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
 {
-       void *ret;
-       u64 mask;
-       int order = get_order(size);
-
-       if (hwdev)
-               mask = hwdev->coherent_dma_mask & *hwdev->dma_mask;
-       else
-               mask = 0xffffffff;
-       for (;;) {
-               ret = (void *)__get_free_pages(gfp, order);
-               if (ret == NULL)
-                       return NULL;
-               *dma_handle = virt_to_bus(ret);
-               if ((*dma_handle & ~mask) == 0)
-                       break;
-               free_pages((unsigned long)ret, order);
-               if (gfp & GFP_DMA)
-                       return NULL;
-               gfp |= GFP_DMA;
+       if (hwdev && bus + size > *hwdev->dma_mask) {
+               if (*hwdev->dma_mask >= 0xffffffffULL)
+                       printk(KERN_ERR
+                           "nommu_%s: overflow %Lx+%lu of device mask %Lx\n",
+                               name, (long long)bus, size, (long long)*hwdev->dma_mask);
+               return 0;
        }
+       return 1;
+}
 
-       memset(ret, 0, size);
-       return ret;
+static dma_addr_t
+nommu_map_single(struct device *hwdev, void *ptr, size_t size,
+              int direction)
+{
+       dma_addr_t bus = virt_to_bus(ptr);
+       if (!check_addr("map_single", hwdev, bus, size))
+               return bad_dma_address;
+       return bus;
 }
-EXPORT_SYMBOL(dma_alloc_coherent);
 
-void dma_free_coherent(struct device *hwdev, size_t size,
-                        void *vaddr, dma_addr_t dma_handle)
+void nommu_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
+                       int direction)
 {
-       free_pages((unsigned long)vaddr, get_order(size));
 }
-EXPORT_SYMBOL(dma_free_coherent);
 
-int dma_supported(struct device *hwdev, u64 mask)
+/* Map a set of buffers described by a scatterlist in streaming
+ * mode for DMA.  This is the scatter-gather version of the
+ * nommu_map_single() interface above.  Each scatter-gather list
+ * element is tagged with its DMA address and length, which are
+ * then obtained via sg_dma_{address,length}(SG).
+ *
+ * NOTE: An implementation may be able to use a smaller number of
+ *       DMA address/length pairs than there are SG table elements
+ *       (for example via virtual mapping capabilities).
+ *       The routine returns the number of addr/length pairs actually
+ *       used, at most nents.
+ *
+ * Device ownership issues, as mentioned above for map_single, are
+ * the same here.
+ */
+int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
+              int nents, int direction)
 {
-        /*
-         * we fall back to GFP_DMA when the mask isn't all 1s,
-         * so we can't guarantee allocations that must be
-         * within a tighter range than GFP_DMA..
-        * RED-PEN this won't work for pci_map_single. Caller has to
-        * use GFP_DMA in the first place.
-         */
-        if (mask < 0x00ffffff)
-                return 0;
+       int i;
 
-       return 1;
-} 
-EXPORT_SYMBOL(dma_supported);
+       BUG_ON(direction == DMA_NONE);
+       for (i = 0; i < nents; i++) {
+               struct scatterlist *s = &sg[i];
+               BUG_ON(!s->page);
+               s->dma_address = virt_to_bus(page_address(s->page) + s->offset);
+               if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
+                       return 0;
+               s->dma_length = s->length;
+       }
+       return nents;
+}
 
-int dma_get_cache_alignment(void)
+/* Unmap a set of streaming mode DMA translations.
+ * Again, the CPU read rules concerning calls here are the same as for
+ * nommu_unmap_single() above.
+ */
+void nommu_unmap_sg(struct device *dev, struct scatterlist *sg,
+                 int nents, int dir)
 {
-       return boot_cpu_data.x86_clflush_size;
 }
-EXPORT_SYMBOL(dma_get_cache_alignment);
 
-static int __init check_ram(void) 
-{ 
-       if (end_pfn >= 0xffffffff>>PAGE_SHIFT) { 
-               printk(
-               KERN_ERR "WARNING more than 4GB of memory but IOMMU not compiled in.\n"
-               KERN_ERR "WARNING 32bit PCI may malfunction.\n");
-       } 
-       return 0;
-} 
-__initcall(check_ram);
+struct dma_mapping_ops nommu_dma_ops = {
+       .map_single = nommu_map_single,
+       .unmap_single = nommu_unmap_single,
+       .map_sg = nommu_map_sg,
+       .unmap_sg = nommu_unmap_sg,
+       .is_phys = 1,
+};
 
+void __init no_iommu_init(void)
+{
+       if (dma_ops)
+               return;
+       dma_ops = &nommu_dma_ops;
+}
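
Taken together, the patch replaces the open-coded nommu DMA functions with a dma_mapping_ops table that no_iommu_init() installs as the global dma_ops when no IOMMU driver has registered one; the generic DMA API wrappers then dispatch through that pointer. Below is a minimal, self-contained sketch of that pattern in plain userspace C, not the kernel code itself: the device struct, the virt_to_bus() stand-in, and main() are simplified stand-ins, and this check_addr() warns unconditionally where the kernel version only warns for masks of 32 bits or wider.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;

/* Simplified stand-in for struct device: only the DMA mask matters here. */
struct device {
        uint64_t dma_mask;      /* highest bus address the device can drive */
};

struct dma_mapping_ops {
        dma_addr_t (*map_single)(struct device *dev, void *ptr, size_t size);
};

#define BAD_DMA_ADDRESS ((dma_addr_t)0)

/* Identity mapping stands in for the kernel's virt_to_bus(). */
static dma_addr_t virt_to_bus(void *ptr)
{
        return (dma_addr_t)(uintptr_t)ptr;
}

/* Same shape as check_addr() in the patch: reject a buffer whose bus
 * address range would overflow the device's DMA mask. */
static int check_addr(const char *name, struct device *dev,
                      dma_addr_t bus, size_t size)
{
        if (dev && bus + size > dev->dma_mask) {
                fprintf(stderr, "nommu_%s: overflow %llx+%zu of device mask %llx\n",
                        name, (unsigned long long)bus, size,
                        (unsigned long long)dev->dma_mask);
                return 0;
        }
        return 1;
}

/* No translation happens: the bus address is just the physical address. */
static dma_addr_t nommu_map_single(struct device *dev, void *ptr, size_t size)
{
        dma_addr_t bus = virt_to_bus(ptr);

        if (!check_addr("map_single", dev, bus, size))
                return BAD_DMA_ADDRESS;
        return bus;
}

static struct dma_mapping_ops nommu_dma_ops = {
        .map_single = nommu_map_single,
};

static struct dma_mapping_ops *dma_ops;

/* Mirrors no_iommu_init(): install the nommu ops only if no other
 * implementation (e.g. a real IOMMU driver) claimed the pointer first. */
static void no_iommu_init(void)
{
        if (dma_ops)
                return;
        dma_ops = &nommu_dma_ops;
}

int main(void)
{
        static char buffer[256];
        struct device dev = { .dma_mask = ~0ULL };      /* 64-bit capable */
        dma_addr_t handle;

        no_iommu_init();
        handle = dma_ops->map_single(&dev, buffer, sizeof(buffer));
        printf("mapped at bus address %llx\n", (unsigned long long)handle);
        return 0;
}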
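
The comment above nommu_map_sg() also pins down the driver-visible contract: the mapping routine may coalesce entries, so callers must honor the returned count (0 on failure here, otherwise at most nents) and read each entry back through the sg accessors. A kernel-style sketch of a consumer follows; hypothetical_submit() and program_hw_descriptor() are invented names for illustration, while dma_map_sg(), sg_dma_address(), and sg_dma_length() are the real kernel interfaces.

/* Hypothetical driver-side consumer of the map_sg contract described in
 * the comment above nommu_map_sg().  Not from the patch; illustrative only. */
static int hypothetical_submit(struct device *dev, struct scatterlist *sglist,
                               int nents)
{
        int n, i;

        n = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
        if (n == 0)
                return -EIO;    /* nommu_map_sg() signals a mask overflow with 0 */

        /* Use the returned count, not nents: an IOMMU-backed implementation
         * may have merged entries.  The nommu version always returns nents. */
        for (i = 0; i < n; i++)
                program_hw_descriptor(i, sg_dma_address(&sglist[i]),
                                      sg_dma_length(&sglist[i]));
        return 0;
}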