/*
 * include/asm-i386/dma-mapping.h  (from linux-2.6.6)
 */
#ifndef _ASM_I386_DMA_MAPPING_H
#define _ASM_I386_DMA_MAPPING_H

#include <linux/mm.h>           /* struct page, page_to_pfn() */
#include <asm/cache.h>
#include <asm/io.h>             /* virt_to_phys(), page_to_phys(), flush_write_buffers() */
#include <asm/scatterlist.h>    /* struct scatterlist */

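/*
 * On i386 all memory reachable by a device is cache-coherent, so the
 * "noncoherent" allocation calls can simply use the coherent ones.
 */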
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

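/*
 * Coherent allocations are implemented out of line in the arch DMA code
 * (arch/i386/kernel/pci-dma.c).
 */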
void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, int flag);

void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle);

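/*
 * There is no IOMMU on plain i386, so a streaming mapping is simply the
 * buffer's physical address.  flush_write_buffers() makes sure any CPU
 * writes still sitting in the write buffers reach memory before the
 * device may access the buffer.
 */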
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        flush_write_buffers();
        return virt_to_phys(ptr);
}

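/* Nothing was set up by dma_map_single(), so there is nothing to tear down. */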
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

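/*
 * Map a scatterlist by filling in each entry's bus address.  Without an
 * IOMMU no entries can be merged, so the entry count is returned
 * unchanged.
 */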
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
           enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nents; i++) {
                BUG_ON(!sg[i].page);

                sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
        }

        flush_write_buffers();
        return nents;
}

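/* Same as dma_map_single(), but starting from a struct page plus offset. */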
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
             size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        return (dma_addr_t)(page_to_pfn(page)) * PAGE_SIZE + offset;
}

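/* As with dma_unmap_single(), page and scatterlist mappings need no teardown. */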
static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
             enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

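/*
 * DMA on i386 is cache-coherent: handing a buffer back to the CPU needs
 * no work at all, and handing it to the device only requires flushing
 * the CPU write buffers.
 */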
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
                           enum dma_data_direction direction)
{
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size,
                              enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction direction)
{
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
                    enum dma_data_direction direction)
{
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
                       enum dma_data_direction direction)
{
        flush_write_buffers();
}

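/* Mappings never fail here, so there is no error value to check for. */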
static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
        return 0;
}

static inline int
dma_supported(struct device *dev, u64 mask)
{
        /*
         * We fall back to GFP_DMA when the mask isn't all 1s, so we
         * can't guarantee allocations that must sit in a tighter range
         * than GFP_DMA can provide.
         */
        if (mask < 0x00ffffff)
                return 0;

        return 1;
}

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}

static inline int
dma_get_cache_alignment(void)
{
        /* no easy way to get cache size on all x86, so return the
         * maximum possible, to be safe */
        return (1 << L1_CACHE_SHIFT_MAX);
}

#define dma_is_consistent(d)    (1)

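/*
 * Memory from dma_alloc_coherent() is always consistent, so a cache sync
 * only has to flush the CPU write buffers.
 */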
static inline void
dma_cache_sync(void *vaddr, size_t size,
               enum dma_data_direction direction)
{
        flush_write_buffers();
}

#endif /* _ASM_I386_DMA_MAPPING_H */