/* [linux-2.6.git] include/asm-i386/dma-mapping.h (patch-2_6_7-vs1_9_1_12) */

#ifndef _ASM_I386_DMA_MAPPING_H
#define _ASM_I386_DMA_MAPPING_H

#include <asm/cache.h>
#include <asm/io.h>
#include <asm/scatterlist.h>

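/*
 * i386 DMA is cache-coherent, so "noncoherent" allocations are plain
 * aliases for the coherent ones.
 */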
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, int flag);

void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle);

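/*
 * Streaming mappings: on i386 the DMA address of a buffer is just its
 * physical address, so all that mapping has to do is flush the CPU
 * write buffers to make pending stores visible to the device.
 */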
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        flush_write_buffers();
        return virt_to_phys(ptr);
}

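/* Nothing to undo on this path (no IOMMU, no bounce buffers); just
 * sanity-check the direction. */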
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

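/*
 * Minimal usage sketch for the single-buffer API (illustrative only;
 * "dev", "buf" and "len" are hypothetical driver-local names):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... program the device with "handle", wait for completion ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */

/*
 * Map a scatter/gather list: each entry's DMA address is simply the
 * physical address of its page plus the offset within that page.
 */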
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
           enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nents; i++) {
                BUG_ON(!sg[i].page);

                sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
        }

        flush_write_buffers();
        return nents;
}

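/* Like dma_map_single(), but takes a struct page plus an offset. */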
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
             size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        return page_to_phys(page) + offset;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

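/* As with dma_unmap_single(), unmapping an sg list requires no work. */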
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
             enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

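/*
 * Synchronisation hooks: the CPU always sees DMA'd data because the
 * hardware keeps the caches coherent, so the *_for_cpu() variants are
 * no-ops; the *_for_device() variants only need to flush the CPU
 * write buffers.
 */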
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
                           size_t size, enum dma_data_direction direction)
{
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size,
                              enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction direction)
{
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
                    enum dma_data_direction direction)
{
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
                       enum dma_data_direction direction)
{
        flush_write_buffers();
}

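/* Mappings can never fail here, so there is no error value to report. */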
static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
        return 0;
}

static inline int
dma_supported(struct device *dev, u64 mask)
{
        /*
         * We fall back to GFP_DMA when the mask isn't all 1s, so we
         * can't guarantee allocations that must be within a tighter
         * range than GFP_DMA (the low 16MB on i386).
         */
        if (mask < 0x00ffffff)
                return 0;

        return 1;
}

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}

static inline int
dma_get_cache_alignment(void)
{
        /* There is no easy way to get the cache size on all x86 CPUs,
         * so return the maximum possible to be safe. */
        return (1 << L1_CACHE_SHIFT_MAX);
}

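/* All memory is consistent (cache-coherent) for DMA on i386. */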
#define dma_is_consistent(d)    (1)

static inline void
dma_cache_sync(void *vaddr, size_t size,
               enum dma_data_direction direction)
{
        flush_write_buffers();
}

#endif /* _ASM_I386_DMA_MAPPING_H */