X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-ppc%2Fpci.h;h=9d162028dab92809d4d6b49bc9fce254ccc3932b;hb=refs%2Fheads%2Fvserver;hp=2511b4327e4e6eab4e25344dd6a6d0fefcb4fa80;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git

diff --git a/include/asm-ppc/pci.h b/include/asm-ppc/pci.h
index 2511b4327..9d162028d 100644
--- a/include/asm-ppc/pci.h
+++ b/include/asm-ppc/pci.h
@@ -9,6 +9,7 @@
 #include 
 #include 
 #include 
+#include 
 
 struct pci_dev;
 
@@ -23,9 +24,9 @@ struct pci_dev;
  * Set this to 1 if you want the kernel to re-assign all PCI
  * bus numbers
  */
-extern int pci_assign_all_busses;
+extern int pci_assign_all_buses;
 
-#define pcibios_assign_all_busses()	(pci_assign_all_busses)
+#define pcibios_assign_all_busses()	(pci_assign_all_buses)
 #define pcibios_scan_all_fns(a, b)	0
 
 #define PCIBIOS_MIN_IO	0x1000
@@ -36,7 +37,7 @@ extern inline void pcibios_set_master(struct pci_dev *dev)
 	/* No special bus mastering setup handling */
 }
 
-extern inline void pcibios_penalize_isa_irq(int irq)
+extern inline void pcibios_penalize_isa_irq(int irq, int active)
 {
 	/* We don't do dynamic PCI IRQ allocation */
 }
@@ -54,61 +55,32 @@ extern unsigned long phys_to_bus(unsigned long pa);
 extern unsigned long pci_phys_to_bus(unsigned long pa, int busnr);
 extern unsigned long pci_bus_to_phys(unsigned int ba, int busnr);
 
-/*
- * Dynamic DMA Mapping stuff
- * Originally stolen from i386 by ajoshi and updated by paulus
- * Non-consistent cache support by Dan Malek
- */
-
 /* The PCI address space does equal the physical memory
  * address space. The networking and block device layers use
  * this boolean for bounce buffer decisions.
  */
-#define PCI_DMA_BUS_IS_PHYS	(1)
-
-/* Allocate and map kernel buffer using consistent mode DMA for a device.
- * hwdev should be valid struct pci_dev pointer for PCI devices,
- * NULL for PCI-like buses (ISA, EISA).
- * Returns non-NULL cpu-view pointer to the buffer if successful and
- * sets *dma_addrp to the pci side dma address as well, else *dma_addrp
- * is undefined.
- */
-extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
-				  dma_addr_t *dma_handle);
+#define PCI_DMA_BUS_IS_PHYS	(1)
 
-/* Free and unmap a consistent DMA buffer.
- * cpu_addr is what was returned from pci_alloc_consistent,
- * size must be the same as what as passed into pci_alloc_consistent,
- * and likewise dma_addr must be the same as what *dma_addrp was set to.
- *
- * References to the memory and mappings associated with cpu_addr/dma_addr
- * past this call are illegal.
- */
-extern void pci_free_consistent(struct pci_dev *hwdev, size_t size,
-				void *vaddr, dma_addr_t dma_handle);
-
-/* Map a single buffer of the indicated size for DMA in streaming mode.
- * The 32-bit bus address to use is returned.
- *
- * Once the device is given the dma address, the device owns this memory
- * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed.
+#ifdef CONFIG_NOT_COHERENT_CACHE
+/*
+ * pci_unmap_{page,single} are NOPs but pci_dma_sync_single_for_cpu()
+ * and so on are not, so...
  */
-static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
-					size_t size, int direction)
-{
-	BUG_ON(direction == PCI_DMA_NONE);
-
-	consistent_sync(ptr, size, direction);
-	return virt_to_bus(ptr);
-}
-
-static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
-				    size_t size, int direction)
-{
-	BUG_ON(direction == PCI_DMA_NONE);
-	/* nothing to do */
-}
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
+	dma_addr_t ADDR_NAME;
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)	\
+	__u32 LEN_NAME;
+#define pci_unmap_addr(PTR, ADDR_NAME)	\
+	((PTR)->ADDR_NAME)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	\
+	(((PTR)->ADDR_NAME) = (VAL))
+#define pci_unmap_len(PTR, LEN_NAME)	\
+	((PTR)->LEN_NAME)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	\
+	(((PTR)->LEN_NAME) = (VAL))
+
+#else /* coherent */
 
 /* pci_unmap_{page,single} is a nop so... */
 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
@@ -118,141 +90,17 @@ static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
 #define pci_unmap_len(PTR, LEN_NAME)	(0)
 #define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
 
-/*
- * pci_{map,unmap}_single_page maps a kernel page to a dma_addr_t. identical
- * to pci_map_single, but takes a struct page instead of a virtual address
- */
-static inline dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
-				      unsigned long offset, size_t size,
-				      int direction)
-{
-	BUG_ON(direction == PCI_DMA_NONE);
-	consistent_sync_page(page, offset, size, direction);
-	return (page - mem_map) * PAGE_SIZE + PCI_DRAM_OFFSET + offset;
-}
-
-static inline void pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
-				  size_t size, int direction)
-{
-	BUG_ON(direction == PCI_DMA_NONE);
-	/* Nothing to do */
-}
-
-/* Map a set of buffers described by scatterlist in streaming
- * mode for DMA. This is the scather-gather version of the
- * above pci_map_single interface. Here the scatter gather list
- * elements are each tagged with the appropriate dma address
- * and length. They are obtained via sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- *       DMA address/length pairs than there are SG table elements.
- *       (for example via virtual mapping capabilities)
- *       The routine returns the number of addr/length pairs actually
- *       used, at most nents.
- *
- * Device ownership issues as mentioned above for pci_map_single are
- * the same here.
- */
-static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
-			     int nents, int direction)
-{
-	int i;
-
-	BUG_ON(direction == PCI_DMA_NONE);
-
-	/*
-	 * temporary 2.4 hack
-	 */
-	for (i = 0; i < nents; i++, sg++) {
-		BUG_ON(!sg->page);
-		consistent_sync_page(sg->page, sg->offset,
-				     sg->length, direction);
-		sg->dma_address = page_to_bus(sg->page) + sg->offset;
-	}
-
-	return nents;
-}
-
-/* Unmap a set of streaming mode DMA translations.
- * Again, cpu read rules concerning calls here are the same as for
- * pci_unmap_single() above.
- */
-static inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
-				int nents, int direction)
-{
-	BUG_ON(direction == PCI_DMA_NONE);
-	/* nothing to do */
-}
-
-/* Make physical memory consistent for a single
- * streaming mode DMA translation after a transfer.
- *
- * If you perform a pci_map_single() but wish to interrogate the
- * buffer using the cpu, yet do not wish to teardown the PCI dma
- * mapping, you must call this function before doing so.  At the
- * next point you give the PCI dma address back to the card, you
- * must first perform a pci_dma_sync_for_device, and then the device
- * again owns the buffer.
- */
-static inline void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev,
-					       dma_addr_t dma_handle,
-					       size_t size, int direction)
-{
-	BUG_ON(direction == PCI_DMA_NONE);
+#endif /* CONFIG_NOT_COHERENT_CACHE */
 
-	consistent_sync(bus_to_virt(dma_handle), size, direction);
-}
-
-static inline void pci_dma_sync_single_for_device(struct pci_dev *hwdev,
-						  dma_addr_t dma_handle,
-						  size_t size, int direction)
+#ifdef CONFIG_PCI
+static inline void pci_dma_burst_advice(struct pci_dev *pdev,
+					enum pci_dma_burst_strategy *strat,
+					unsigned long *strategy_parameter)
 {
-	BUG_ON(direction == PCI_DMA_NONE);
-
-	consistent_sync(bus_to_virt(dma_handle), size, direction);
-}
-
-/* Make physical memory consistent for a set of streaming
- * mode DMA translations after a transfer.
- *
- * The same as pci_dma_sync_single_for_* but for a scatter-gather list,
- * same rules and usage.
- */
-static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev,
-					   struct scatterlist *sg,
-					   int nelems, int direction)
-{
-	int i;
-
-	BUG_ON(direction == PCI_DMA_NONE);
-
-	for (i = 0; i < nelems; i++, sg++)
-		consistent_sync_page(sg->page, sg->offset,
-				     sg->length, direction);
-}
-
-static inline void pci_dma_sync_sg_for_device(struct pci_dev *hwdev,
-					      struct scatterlist *sg,
-					      int nelems, int direction)
-{
-	int i;
-
-	BUG_ON(direction == PCI_DMA_NONE);
-
-	for (i = 0; i < nelems; i++, sg++)
-		consistent_sync_page(sg->page, sg->offset,
-				     sg->length, direction);
-}
-
-/* Return whether the given PCI device DMA address mask can
- * be supported properly. For example, if your device can
- * only drive the low 24-bits during PCI bus mastering, then
- * you would pass 0x00ffffff as the mask to this function.
- */
-static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
-{
-	return 1;
+	*strat = PCI_DMA_BURST_INFINITY;
+	*strategy_parameter = ~0UL;
 }
+#endif
 
 /*
  * At present there are very few 32-bit PPC machines that can have
@@ -260,48 +108,12 @@ static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
  */
 #define pci_dac_dma_supported(pci_dev, mask)	(0)
 
-static inline dma64_addr_t
-pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
-{
-	return (dma64_addr_t) page_to_bus(page) + offset;
-}
-
-static inline struct page *
-pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
-{
-	return mem_map + (unsigned long)(dma_addr >> PAGE_SHIFT);
-}
-
-static inline unsigned long
-pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
-{
-	return (dma_addr & ~PAGE_MASK);
-}
-
-static inline void
-pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
-{
-	/* Nothing to do. */
-}
-
-static inline void
-pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
-{
-	/* Nothing to do. */
-}
-
-static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
-{
-	return 0;
-}
-
 /* Return the index of the PCI controller for device PDEV. */
 #define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index
 
 /* Set the name of the bus as it appears in /proc/bus/pci */
-static inline int pci_name_bus(char *name, struct pci_bus *bus)
+static inline int pci_proc_domain(struct pci_bus *bus)
 {
-	sprintf(name, "%02x", bus->number);
 	return 0;
 }
 
@@ -316,8 +128,37 @@ extern void pcibios_resource_to_bus(struct pci_dev *dev,
 			struct pci_bus_region *region,
 			struct resource *res);
 
+extern void
+pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
+			struct pci_bus_region *region);
+
+static inline struct resource *
+pcibios_select_root(struct pci_dev *pdev, struct resource *res)
+{
+	struct resource *root = NULL;
+
+	if (res->flags & IORESOURCE_IO)
+		root = &ioport_resource;
+	if (res->flags & IORESOURCE_MEM)
+		root = &iomem_resource;
+
+	return root;
+}
+
 extern void pcibios_add_platform_entries(struct pci_dev *dev);
 
+struct file;
+extern pgprot_t pci_phys_mem_access_prot(struct file *file,
+					 unsigned long pfn,
+					 unsigned long size,
+					 pgprot_t prot);
+
+#define HAVE_ARCH_PCI_RESOURCE_TO_USER
+extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
+				 const struct resource *rsrc,
+				 resource_size_t *start, resource_size_t *end);
+
+
 #endif /* __KERNEL__ */
 
 #endif /* __PPC_PCI_H */
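
Usage note (not part of the diff above): with the CONFIG_NOT_COHERENT_CACHE branch this patch adds, a driver records the DMA address and length of a streaming mapping next to its own per-buffer state via DECLARE_PCI_UNMAP_ADDR()/DECLARE_PCI_UNMAP_LEN() so the unmap path can find them later; on cache-coherent PPC the same macros compile away to nothing. A minimal sketch of the usual pattern (as described in Documentation/DMA-mapping.txt of the same kernel generation) follows; the structure, field, and function names are illustrative only and do not come from this patch.

#include <linux/pci.h>
#include <linux/skbuff.h>

/* Hypothetical per-buffer state; the names are illustrative. */
struct rx_slot {
	struct sk_buff *skb;
	DECLARE_PCI_UNMAP_ADDR(mapping)
	DECLARE_PCI_UNMAP_LEN(len)
};

static void rx_slot_fill(struct pci_dev *pdev, struct rx_slot *slot,
			 struct sk_buff *skb, size_t buflen)
{
	dma_addr_t dma = pci_map_single(pdev, skb->data, buflen,
					PCI_DMA_FROMDEVICE);

	slot->skb = skb;
	/* Store only what the unmap path will need; these expand to
	 * nothing on cache-coherent configurations. */
	pci_unmap_addr_set(slot, mapping, dma);
	pci_unmap_len_set(slot, len, buflen);
}

static void rx_slot_drain(struct pci_dev *pdev, struct rx_slot *slot)
{
	pci_unmap_single(pdev, pci_unmap_addr(slot, mapping),
			 pci_unmap_len(slot, len), PCI_DMA_FROMDEVICE);
}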
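
Similarly, the pci_dma_burst_advice() hook added above always reports PCI_DMA_BURST_INFINITY on ppc32, i.e. no boundary restriction on DMA bursts. A caller would use it roughly as sketched below; the other two enum values are recalled from the generic <linux/pci.h> of the same era and are an assumption here, not part of this patch.

#include <linux/pci.h>

/* Hypothetical caller: ask the architecture for burst advice before
 * programming a device-specific burst-size register. */
static void example_tune_dma_burst(struct pci_dev *pdev)
{
	enum pci_dma_burst_strategy strat;
	unsigned long param;

	pci_dma_burst_advice(pdev, &strat, &param);

	switch (strat) {
	case PCI_DMA_BURST_INFINITY:
		/* The ppc32 case above: burst as much as the device allows. */
		break;
	case PCI_DMA_BURST_BOUNDARY:
		/* Do not let bursts cross a 'param'-byte boundary. */
		break;
	case PCI_DMA_BURST_MULTIPLE:
		/* Burst in multiples of 'param' bytes. */
		break;
	}
}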