#ifndef __x8664_PCI_H
#define __x8664_PCI_H

#include <linux/config.h>
#ifdef __KERNEL__

#include <linux/mm.h>	/* for struct page */
/* Can be used to override the logic in pci_scan_bus for skipping
   already-configured bus numbers - to be used for buggy BIOSes
   or architectures with incomplete PCI setup by the loader */
#ifdef CONFIG_PCI
extern unsigned int pcibios_assign_all_busses(void);
#else
#define pcibios_assign_all_busses()	0
#endif
#define pcibios_scan_all_fns(a, b)	0
extern int no_iommu, force_iommu;

extern unsigned long pci_mem_start;
#define PCIBIOS_MIN_IO		0x1000
#define PCIBIOS_MIN_MEM		(pci_mem_start)

#define PCIBIOS_MIN_CARDBUS_IO	0x4000

void pcibios_config_init(void);
struct pci_bus *pcibios_scan_root(int bus);
extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value);

void pcibios_set_master(struct pci_dev *dev);
void pcibios_penalize_isa_irq(int irq);
struct irq_routing_table *pcibios_get_irq_routing_table(void);
int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/scatterlist.h>
#include <linux/string.h>
#include <asm/io.h>
#include <asm/page.h>

extern int iommu_setup(char *opt);

extern dma_addr_t bad_dma_address;
#define pci_dma_mapping_error(x)	((x) == bad_dma_address)
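/*
 * Illustrative sketch (not part of the declarations above): drivers are
 * expected to check streaming mappings against bad_dma_address via
 * pci_dma_mapping_error() before handing the address to hardware.
 * "pdev", "buf" and "len" are hypothetical driver-local names:
 *
 *	dma_addr_t handle = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
 *	if (pci_dma_mapping_error(handle))
 *		return -ENOMEM;
 */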
/* Allocate and map kernel buffer using consistent mode DMA for a device.
 * hwdev should be a valid struct pci_dev pointer for PCI devices,
 * NULL for PCI-like buses (ISA, EISA).
 * Returns non-NULL cpu-view pointer to the buffer if successful and
 * sets *dma_handle to the pci side dma address as well, else *dma_handle
 * is undefined.
 */
extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
				  dma_addr_t *dma_handle);
/* Free and unmap a consistent DMA buffer.
 * cpu_addr is what was returned from pci_alloc_consistent,
 * size must be the same as what was passed into pci_alloc_consistent,
 * and likewise dma_handle must be the same as what *dma_handle was set to.
 *
 * References to the memory and mappings associated with cpu_addr/dma_handle
 * past this call are illegal.
 */
extern void pci_free_consistent(struct pci_dev *hwdev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
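/*
 * Illustrative sketch of the alloc/free pairing above; "pdev" and
 * "ring_bytes" are hypothetical driver-local names:
 *
 *	dma_addr_t ring_dma;
 *	void *ring = pci_alloc_consistent(pdev, ring_bytes, &ring_dma);
 *	if (!ring)
 *		return -ENOMEM;
 *	... hand ring_dma to the device, access ring from the CPU ...
 *	pci_free_consistent(pdev, ring_bytes, ring, ring_dma);
 */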
#ifdef CONFIG_SWIOTLB
extern int swiotlb;
extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, size_t size,
				     int dir);
extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
				 size_t size, int dir);
extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
					dma_addr_t dev_addr,
					size_t size, int dir);
extern void swiotlb_sync_single_for_device(struct device *hwdev,
					   dma_addr_t dev_addr,
					   size_t size, int dir);
extern void swiotlb_sync_sg_for_cpu(struct device *hwdev,
				    struct scatterlist *sg, int nelems,
				    int dir);
extern void swiotlb_sync_sg_for_device(struct device *hwdev,
				       struct scatterlist *sg, int nelems,
				       int dir);
extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
			  int nents, int direction);
extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
			     int nents, int direction);
#endif
#ifdef CONFIG_GART_IOMMU
/* Map a single buffer of the indicated size for DMA in streaming mode.
 * The 32-bit bus address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory
 * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed.
 */
extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
				 int direction);

void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t addr,
		      size_t size, int direction);
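/*
 * Illustrative sketch of the streaming map/unmap pairing described above;
 * "pdev", "data" and "len" are hypothetical driver-local names:
 *
 *	dma_addr_t dma = pci_map_single(pdev, data, len, PCI_DMA_TODEVICE);
 *	if (pci_dma_mapping_error(dma))
 *		return -ENOMEM;
 *	... point the device at dma; the device owns the buffer ...
 *	pci_unmap_single(pdev, dma, len, PCI_DMA_TODEVICE);
 */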
/*
 * pci_{map,unmap}_page maps a kernel page to a dma_addr_t. Identical
 * to pci_map_single, but takes a struct page instead of a virtual address.
 */

#define pci_map_page(dev,page,offset,size,dir) \
	pci_map_single((dev), page_address(page)+(offset), (size), (dir))
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
	dma_addr_t ADDR_NAME;
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
	__u32 LEN_NAME;
#define pci_unmap_addr(PTR, ADDR_NAME)		\
	((PTR)->ADDR_NAME)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	\
	(((PTR)->ADDR_NAME) = (VAL))
#define pci_unmap_len(PTR, LEN_NAME)		\
	((PTR)->LEN_NAME)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	\
	(((PTR)->LEN_NAME) = (VAL))
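/*
 * Illustrative sketch: the DECLARE_PCI_UNMAP_* macros let a driver embed the
 * bookkeeping needed for a later unmap without paying for it when unmapping
 * is a nop (see the non-IOMMU definitions further down).  "struct my_desc",
 * "desc", "pdev", "dma" and "size" are hypothetical driver-local names:
 *
 *	struct my_desc {
 *		DECLARE_PCI_UNMAP_ADDR(mapping)
 *		DECLARE_PCI_UNMAP_LEN(len)
 *	};
 *
 *	pci_unmap_addr_set(desc, mapping, dma);
 *	pci_unmap_len_set(desc, len, size);
 *	...
 *	pci_unmap_single(pdev, pci_unmap_addr(desc, mapping),
 *			 pci_unmap_len(desc, len), PCI_DMA_FROMDEVICE);
 */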
static inline void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev,
					       dma_addr_t dma_handle,
					       size_t size, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);

#ifdef CONFIG_SWIOTLB
	if (swiotlb)
		return swiotlb_sync_single_for_cpu(&hwdev->dev, dma_handle, size, direction);
#endif

	flush_write_buffers();
}
static inline void pci_dma_sync_single_for_device(struct pci_dev *hwdev,
						  dma_addr_t dma_handle,
						  size_t size, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);

#ifdef CONFIG_SWIOTLB
	if (swiotlb)
		return swiotlb_sync_single_for_device(&hwdev->dev, dma_handle, size, direction);
#endif

	flush_write_buffers();
}
static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev,
					   struct scatterlist *sg,
					   int nelems, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);

#ifdef CONFIG_SWIOTLB
	if (swiotlb)
		return swiotlb_sync_sg_for_cpu(&hwdev->dev, sg, nelems, direction);
#endif

	flush_write_buffers();
}
static inline void pci_dma_sync_sg_for_device(struct pci_dev *hwdev,
					      struct scatterlist *sg,
					      int nelems, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);

#ifdef CONFIG_SWIOTLB
	if (swiotlb)
		return swiotlb_sync_sg_for_device(&hwdev->dev, sg, nelems, direction);
#endif

	flush_write_buffers();
}
/* The PCI address space does equal the physical memory
 * address space.  The networking and block device layers use
 * this boolean for bounce buffer decisions.
 *
 * On AMD64 it mostly equals, but we set it to zero to tell some subsystems
 * that an IOMMU is available.
 */
#define PCI_DMA_BUS_IS_PHYS	(no_iommu ? 1 : 0)
/* We lie slightly when the IOMMU is forced to get the device to
   use SAC instead of DAC. */
#define pci_dac_dma_supported(pci_dev, mask)	(force_iommu ? 0 : 1)
#else
static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
					size_t size, int direction)
{
	dma_addr_t addr;

	if (direction == PCI_DMA_NONE)
		out_of_line_bug();
	addr = virt_to_bus(ptr);

	/*
	 * This is gross, but what else can we do?
	 * Unfortunately drivers do not test the return value of this.
	 */
	if ((addr+size) & ~hwdev->dma_mask)
		out_of_line_bug();
	return addr;
}
static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
				    size_t size, int direction)
{
	if (direction == PCI_DMA_NONE)
		out_of_line_bug();
	/* Nothing else to do: unmapping is a nop without an IOMMU. */
}
static inline dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
				      unsigned long offset, size_t size, int direction)
{
	dma_addr_t addr;

	if (direction == PCI_DMA_NONE)
		out_of_line_bug();
	addr = page_to_pfn(page) * PAGE_SIZE + offset;
	if ((addr+size) & ~hwdev->dma_mask)
		out_of_line_bug();
	return addr;
}
/* pci_unmap_{page,single} is a nop so... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
#define pci_unmap_addr(PTR, ADDR_NAME)		(0)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define pci_unmap_len(PTR, LEN_NAME)		(0)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
/* Make physical memory consistent for a single
 * streaming mode DMA translation after a transfer.
 *
 * If you perform a pci_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to tear down the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, you
 * must first perform pci_dma_sync_single_for_device(), and then the
 * device again owns the buffer.
 */
static inline void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev,
					       dma_addr_t dma_handle,
					       size_t size, int direction)
{
	if (direction == PCI_DMA_NONE)
		out_of_line_bug();
}
static inline void pci_dma_sync_single_for_device(struct pci_dev *hwdev,
						  dma_addr_t dma_handle,
						  size_t size, int direction)
{
	if (direction == PCI_DMA_NONE)
		out_of_line_bug();

	flush_write_buffers();
}
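/*
 * Illustrative sketch of the ownership handoff described above; "pdev",
 * "dma" and "len" are hypothetical, and the buffer is assumed to have been
 * mapped with PCI_DMA_FROMDEVICE:
 *
 *	pci_dma_sync_single_for_cpu(pdev, dma, len, PCI_DMA_FROMDEVICE);
 *	... the CPU may now look at the received data ...
 *	pci_dma_sync_single_for_device(pdev, dma, len, PCI_DMA_FROMDEVICE);
 *	... the device owns the buffer again and may be restarted ...
 */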
/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as pci_dma_sync_single_* but for a scatter-gather list,
 * same rules and usage.
 */
static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev,
					   struct scatterlist *sg,
					   int nelems, int direction)
{
	if (direction == PCI_DMA_NONE)
		out_of_line_bug();
}
static inline void pci_dma_sync_sg_for_device(struct pci_dev *hwdev,
					      struct scatterlist *sg,
					      int nelems, int direction)
{
	if (direction == PCI_DMA_NONE)
		out_of_line_bug();

	flush_write_buffers();
}
#define PCI_DMA_BUS_IS_PHYS	1

#define pci_dac_dma_supported(pci_dev, mask)	1

#endif /* CONFIG_GART_IOMMU */
extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
		      int nents, int direction);
extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
			 int nents, int direction);
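/*
 * Illustrative scatter-gather sketch; "pdev", "sglist" and "nents" are
 * hypothetical.  pci_map_sg() may coalesce entries, so the returned count,
 * not nents, is what gets programmed into the device, while pci_unmap_sg()
 * still takes the original nents:
 *
 *	int count = pci_map_sg(pdev, sglist, nents, PCI_DMA_TODEVICE);
 *	if (count == 0)
 *		return -ENOMEM;
 *	... program count entries from sglist into the device ...
 *	pci_unmap_sg(pdev, sglist, nents, PCI_DMA_TODEVICE);
 */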
#define pci_unmap_page pci_unmap_single
/* Return whether the given PCI device DMA address mask can
 * be supported properly.  For example, if your device can
 * only drive the low 24-bits during PCI bus mastering, then
 * you would pass 0x00ffffff as the mask to this function.
 */
extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
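/*
 * Illustrative sketch: rather than calling pci_dma_supported() directly,
 * a driver typically negotiates its mask with pci_set_dma_mask() and falls
 * back to 32-bit addressing when a 64-bit mask is rejected ("pdev" is
 * hypothetical):
 *
 *	if (pci_set_dma_mask(pdev, 0xffffffffffffffffULL) &&
 *	    pci_set_dma_mask(pdev, 0xffffffffULL))
 *		return -EIO;
 */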
static inline dma64_addr_t
pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
{
	return ((dma64_addr_t) page_to_phys(page) +
		(dma64_addr_t) offset);
}
static inline struct page *
pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
{
	return virt_to_page(__va(dma_addr));
}
static inline unsigned long
pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
{
	return (dma_addr & ~PAGE_MASK);
}
static inline void
pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
{
	/* Nothing to do. */
}
static inline void
pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
{
	flush_write_buffers();
}
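/*
 * Illustrative DAC sketch: when pci_dac_dma_supported() says yes, a driver
 * can hand 64-bit dual-address-cycle addresses straight to the device
 * without a streaming mapping.  "pdev", "page", "offset" and "len" are
 * hypothetical driver-local names:
 *
 *	dma64_addr_t dma = pci_dac_page_to_dma(pdev, page, offset,
 *					       PCI_DMA_FROMDEVICE);
 *	... the device DMAs into the page ...
 *	pci_dac_dma_sync_single_for_cpu(pdev, dma, len, PCI_DMA_FROMDEVICE);
 *	struct page *p = pci_dac_dma_to_page(pdev, dma);
 */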
#define HAVE_PCI_MMAP
extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			       enum pci_mmap_state mmap_state, int write_combine);
static inline void pcibios_add_platform_entries(struct pci_dev *dev)
{
}
#endif /* __KERNEL__ */
/* generic pci stuff */
#include <asm-generic/pci.h>
#include <linux/dma-mapping.h>

#endif /* __x8664_PCI_H */