VServer 1.9.2 (patch-2.6.8.1-vs1.9.2.diff)
[linux-2.6.git] include/asm-i386/dma-mapping.h
#ifndef _ASM_I386_DMA_MAPPING_H
#define _ASM_I386_DMA_MAPPING_H

#include <linux/device.h>
#include <linux/mm.h>

#include <asm/cache.h>
#include <asm/io.h>
#include <asm/scatterlist.h>

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

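/*
 * Coherent (consistent) allocations: return a kernel virtual address and
 * fill *dma_handle with the matching bus address; "flag" takes GFP_* flags.
 * These are implemented out of line in the i386 PCI DMA code.
 */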
void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, int flag);

void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle);

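/*
 * Streaming mappings: on i386 the bus address of a buffer is simply its
 * physical address, so mapping only needs virt_to_phys() plus a write-buffer
 * flush, and unmapping is a no-op.
 */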
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        flush_write_buffers();
        return virt_to_phys(ptr);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

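/*
 * Scatter-gather mapping: each entry's bus address is derived directly from
 * its page and offset; the full nents is returned because entries are never
 * merged on i386 (there is no IOMMU).
 */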
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
           enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nents; i++) {
                BUG_ON(!sg[i].page);

                sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
        }

        flush_write_buffers();
        return nents;
}

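/*
 * Page-based mapping, useful for highmem pages that have no permanent kernel
 * virtual address; unmapping is again a no-op.
 */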
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
             size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        return page_to_phys(page) + offset;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
             enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

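/*
 * Ownership transfers between CPU and device for streaming mappings.
 * i386 is cache-coherent with respect to DMA, so syncing back to the CPU
 * needs nothing at all, and syncing toward the device only has to flush
 * the CPU write buffers.
 */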
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
                           enum dma_data_direction direction)
{
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size,
                              enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction direction)
{
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
                    enum dma_data_direction direction)
{
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
                       enum dma_data_direction direction)
{
        flush_write_buffers();
}

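/* Mappings cannot fail on i386, so there is never an error to report. */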
static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
        return 0;
}

static inline int
dma_supported(struct device *dev, u64 mask)
{
        /*
         * We fall back to GFP_DMA when the mask isn't all 1s, so we can't
         * guarantee allocations that must be within a tighter range than
         * GFP_DMA can provide.
         */
        if (mask < 0x00ffffff)
                return 0;

        return 1;
}

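/*
 * Record the device's addressing capability after checking that this
 * architecture can honour it.
 */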
static inline int
dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}

static inline int
dma_get_cache_alignment(void)
{
        /* no easy way to get cache size on all x86, so return the
         * maximum possible, to be safe */
        return (1 << L1_CACHE_SHIFT_MAX);
}

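/*
 * All memory is consistent on i386, so dma_cache_sync() reduces to a
 * write-buffer flush.
 */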
#define dma_is_consistent(d)    (1)

static inline void
dma_cache_sync(void *vaddr, size_t size,
               enum dma_data_direction direction)
{
        flush_write_buffers();
}

#endif
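
For reference, a minimal sketch of how a driver might use the streaming API declared above; the function name, device pointer, buffer and length are hypothetical placeholders and error handling is kept to the bare minimum.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Illustrative only: map a buffer for a device-bound transfer, then unmap. */
static int example_tx(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle;

        handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(handle))
                return -EIO;

        /* ... hand "handle" to the hardware and wait for completion ... */

        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
        return 0;
}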