X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-ia64%2Fdma-mapping.h;h=8499cd6af87a1b71c64b77567a0154dd72639e53;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=df67d40801de9ac9dc2041145927c251e7e5c7c5;hpb=76828883507a47dae78837ab5dec5a5b4513c667;p=linux-2.6.git

diff --git a/include/asm-ia64/dma-mapping.h b/include/asm-ia64/dma-mapping.h
index df67d4080..8499cd6af 100644
--- a/include/asm-ia64/dma-mapping.h
+++ b/include/asm-ia64/dma-mapping.h
@@ -5,22 +5,68 @@
  * Copyright (C) 2003-2004 Hewlett-Packard Co
  *	David Mosberger-Tang <davidm@hpl.hp.com>
  */
-#include <linux/config.h>
 #include <asm/machvec.h>
+#ifndef CONFIG_XEN
 
-#define dma_alloc_coherent platform_dma_alloc_coherent
-#define dma_alloc_noncoherent platform_dma_alloc_coherent /* coherent mem. is cheap */
-#define dma_free_coherent platform_dma_free_coherent
-#define dma_free_noncoherent platform_dma_free_coherent
-#define dma_map_single platform_dma_map_single
-#define dma_map_sg platform_dma_map_sg
-#define dma_unmap_single platform_dma_unmap_single
-#define dma_unmap_sg platform_dma_unmap_sg
-#define dma_sync_single_for_cpu platform_dma_sync_single_for_cpu
-#define dma_sync_sg_for_cpu platform_dma_sync_sg_for_cpu
+#define dma_alloc_coherent	platform_dma_alloc_coherent
+#define dma_alloc_noncoherent	platform_dma_alloc_coherent	/* coherent mem. is cheap */
+#define dma_free_coherent	platform_dma_free_coherent
+#define dma_free_noncoherent	platform_dma_free_coherent
+#define dma_map_single		platform_dma_map_single
+#define dma_map_sg		platform_dma_map_sg
+#define dma_unmap_single	platform_dma_unmap_single
+#define dma_unmap_sg		platform_dma_unmap_sg
+#define dma_sync_single_for_cpu	platform_dma_sync_single_for_cpu
+#define dma_sync_sg_for_cpu	platform_dma_sync_sg_for_cpu
 #define dma_sync_single_for_device platform_dma_sync_single_for_device
-#define dma_sync_sg_for_device platform_dma_sync_sg_for_device
-#define dma_mapping_error platform_dma_mapping_error
+#define dma_sync_sg_for_device	platform_dma_sync_sg_for_device
+#define dma_mapping_error	platform_dma_mapping_error
+
+#else /* CONFIG_XEN */
+/* Needed for arch/i386/kernel/swiotlb.c and arch/i386/kernel/pci-dma-xen.c */
+#include <asm/hypervisor.h>
+/* Needed for arch/i386/kernel/swiotlb.c */
+#include <asm/swiotlb.h>
+
+int dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
+	       enum dma_data_direction direction);
+void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
+		  enum dma_data_direction direction);
+int dma_supported(struct device *dev, u64 mask);
+void *dma_alloc_coherent(struct device *dev, size_t size,
+			 dma_addr_t *dma_handle, gfp_t gfp);
+void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+		       dma_addr_t dma_handle);
+dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
+			  enum dma_data_direction direction);
+void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+		      enum dma_data_direction direction);
+void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+			     size_t size, enum dma_data_direction direction);
+void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+				size_t size,
+				enum dma_data_direction direction);
+int dma_mapping_error(dma_addr_t dma_addr);
+
+#define flush_write_buffers()	do { } while (0)
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+		    enum dma_data_direction direction)
+{
+	if (swiotlb)
+		swiotlb_sync_sg_for_cpu(dev,sg,nelems,direction);
+	flush_write_buffers();
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+		       enum dma_data_direction direction)
+{
+	if (swiotlb)
+		swiotlb_sync_sg_for_device(dev,sg,nelems,direction);
+	flush_write_buffers();
+}
+#endif /* CONFIG_XEN */
 
 #define dma_map_page(dev, pg, off, size, dir)				\
 	dma_map_single(dev, page_address(pg) + (off), (size), (dir))
@@ -37,7 +83,9 @@
 #define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)	\
 	dma_sync_single_for_device(dev, dma_handle, size, dir)
 
+#ifndef CONFIG_XEN
 #define dma_supported		platform_dma_supported
+#endif
 
 static inline int
 dma_set_mask (struct device *dev, u64 mask)
@@ -51,7 +99,8 @@ dma_set_mask (struct device *dev, u64 mask)
 extern int dma_get_cache_alignment(void);
 
 static inline void
-dma_cache_sync (void *vaddr, size_t size, enum dma_data_direction dir)
+dma_cache_sync (struct device *dev, void *vaddr, size_t size,
+		enum dma_data_direction dir)
 {
 	/*
 	 * IA-64 is cache-coherent, so this is mostly a no-op.  However, we do need to
@@ -60,6 +109,31 @@ dma_cache_sync (void *vaddr, size_t size, enum dma_data_direction dir)
 	mb();
 }
 
-#define dma_is_consistent(dma_handle)	(1)	/* all we do is coherent memory... */
+#define dma_is_consistent(d, h)	(1)	/* all we do is coherent memory... */
+
+#ifdef CONFIG_XEN
+/* arch/i386/kernel/swiotlb.o requires */
+void contiguous_bitmap_init(unsigned long end_pfn);
+
+static inline int
+address_needs_mapping(struct device *hwdev, dma_addr_t addr)
+{
+	dma_addr_t mask = DMA_64BIT_MASK;
+	/* If the device has a mask, use it, otherwise default to 64 bits */
+	if (hwdev && hwdev->dma_mask)
+		mask = *hwdev->dma_mask;
+	return (addr & ~mask) != 0;
+}
+#else
+#define contiguous_bitmap_init(end_pfn)	((void)end_pfn)
+#endif
+
+static inline int
+range_straddles_page_boundary(void *p, size_t size)
+{
+	extern unsigned long *contiguous_bitmap;
+	return (((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
+		!test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap));
+}
 
 #endif /* _ASM_IA64_DMA_MAPPING_H */
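
The patch above only swaps the implementation behind the generic DMA API: without CONFIG_XEN the calls expand to the ia64 machine-vector routines, with CONFIG_XEN they become real functions backed by swiotlb. Callers are unaffected either way. Below is a minimal usage sketch of the streaming-DMA calling pattern against this header; it is hypothetical driver code, not part of the patch, and it assumes the 2.6.18-era API declared above (note that dma_mapping_error() here takes only the dma_addr_t, with no device argument).

/*
 * Hypothetical driver-style sketch (names "example_tx", "dev", "buf"
 * are illustrative).  The same calls work whether they expand to the
 * platform_dma_* machine-vector macros or to the Xen/swiotlb-backed
 * functions declared in this header.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* Hand the buffer to the device for reading (CPU -> device). */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(handle))	/* 2.6.18-era signature: no dev arg */
		return -ENOMEM;

	/* ... program the device with "handle", wait for completion ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}

The Xen-only helpers at the bottom of the header exist because a buffer that is contiguous in a guest's pseudo-physical address space need not be contiguous in machine memory; range_straddles_page_boundary() consults contiguous_bitmap so the swiotlb code can detect buffers that cross into a non-contiguous machine page and bounce them.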