pgprot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE | flags);
do {
- pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
+ pte_t * pte = pte_alloc_kernel(pmd, address);
if (!pte)
return -ENOMEM;
remap_area_pte(pte, address, end - address, address + phys_addr, pgprot);
address = (address + PMD_SIZE) & PMD_MASK;
pmd++;
} while (address && (address < end));
return 0;
}
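/*
 * For reference only, not part of the patch: a minimal sketch of the
 * per-page helper called in the loop above, assuming the classic
 * 2.6-era ARM implementation.  remap_area_pte() installs one
 * present/young/dirty entry per PAGE_SIZE chunk of the I/O region;
 * error handling and the pte_none() sanity check are omitted here.
 */
static inline void
remap_area_pte_sketch(pte_t *pte, unsigned long address, unsigned long size,
                      unsigned long phys_addr, pgprot_t pgprot)
{
        unsigned long end = address + size;

        do {
                /* Map one page of the device region. */
                set_pte(pte, pfn_pte(phys_addr >> PAGE_SHIFT, pgprot));
                address += PAGE_SIZE;
                phys_addr += PAGE_SIZE;
                pte++;
        } while (address && (address < end));
}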
static int
-remap_area_pages(unsigned long start, unsigned long phys_addr,
+remap_area_pages(unsigned long start, unsigned long pfn,
unsigned long size, unsigned long flags)
{
unsigned long address = start;
unsigned long end = start + size;
+ unsigned long phys_addr = __pfn_to_phys(pfn);
int err = 0;
pgd_t * dir;
phys_addr -= address;
dir = pgd_offset(&init_mm, address);
BUG_ON(address >= end);
- spin_lock(&init_mm.page_table_lock);
do {
pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
if (!pmd) {
err = -ENOMEM;
break;
}
if (remap_area_pmd(pmd, address, end - address,
phys_addr + address, flags)) {
err = -ENOMEM;
break;
}
address = (address + PGDIR_SIZE) & PGDIR_MASK;
dir++;
} while (address && (address < end));
- spin_unlock(&init_mm.page_table_lock);
flush_cache_vmap(start, end);
return err;
}
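/*
 * Note, not part of the patch: remap_area_pages() now takes a page frame
 * number instead of a raw physical address.  The conversion helpers used
 * here and in __ioremap() below are assumed to be the usual
 * shift-by-PAGE_SHIFT macros from asm/memory.h, roughly:
 */
#define __phys_to_pfn(paddr)    ((paddr) >> PAGE_SHIFT)  /* phys addr -> page frame number */
#define __pfn_to_phys(pfn)      ((pfn) << PAGE_SHIFT)    /* page frame number -> phys addr */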
/*
 * 'flags' are the extra L_PTE_ flags that you want to specify for this
 * mapping. See include/asm-arm/proc-armv/pgtable.h for more information.
 */
void __iomem *
-__ioremap(unsigned long phys_addr, size_t size, unsigned long flags,
- unsigned long align)
+__ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
+ unsigned long flags)
{
- void * addr;
- struct vm_struct * area;
- unsigned long offset, last_addr;
+ unsigned long addr;
+ struct vm_struct * area;
+
+ area = get_vm_area(size, VM_IOREMAP);
+ if (!area)
+ return NULL;
+ addr = (unsigned long)area->addr;
+ if (remap_area_pages(addr, pfn, size, flags)) {
+ vunmap((void *)addr);
+ return NULL;
+ }
+ return (void __iomem *) (offset + (char *)addr);
+}
+EXPORT_SYMBOL(__ioremap_pfn);
+
+void __iomem *
+__ioremap(unsigned long phys_addr, size_t size, unsigned long flags)
+{
+ unsigned long last_addr;
+ unsigned long offset = phys_addr & ~PAGE_MASK;
+ unsigned long pfn = __phys_to_pfn(phys_addr);
- /* Don't allow wraparound or zero size */
+ /*
+ * Don't allow wraparound or zero size
+ */
last_addr = phys_addr + size - 1;
if (!size || last_addr < phys_addr)
return NULL;
/*
- * Mappings have to be page-aligned
+ * Page align the mapping size
*/
- offset = phys_addr & ~PAGE_MASK;
- phys_addr &= PAGE_MASK;
- size = PAGE_ALIGN(last_addr) - phys_addr;
+ size = PAGE_ALIGN(last_addr + 1) - phys_addr;
- /*
- * Ok, go for it..
- */
- area = get_vm_area(size, VM_IOREMAP);
- if (!area)
- return NULL;
- addr = area->addr;
- if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
- vfree(addr);
- return NULL;
- }
- return (void __iomem *) (offset + (char *)addr);
+ return __ioremap_pfn(pfn, offset, size, flags);
}
EXPORT_SYMBOL(__ioremap);
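/*
 * Hypothetical usage sketch, not part of the patch: a driver that already
 * knows the page frame number of its register window can call
 * __ioremap_pfn() directly, while __ioremap() remains the wrapper for
 * callers holding a plain physical address.  EXAMPLE_DEV_PHYS and
 * EXAMPLE_DEV_SIZE are made-up values for illustration.
 */
#define EXAMPLE_DEV_PHYS        0x80001000UL
#define EXAMPLE_DEV_SIZE        0x100UL

static void __iomem *example_map_device(void)
{
        unsigned long pfn    = __phys_to_pfn(EXAMPLE_DEV_PHYS);
        unsigned long offset = EXAMPLE_DEV_PHYS & ~PAGE_MASK;
        /* Round the length up the same way __ioremap() does. */
        size_t size = PAGE_ALIGN(EXAMPLE_DEV_PHYS + EXAMPLE_DEV_SIZE) -
                      EXAMPLE_DEV_PHYS;

        /* 0: no extra L_PTE_ flags, as plain ioremap() is assumed to pass. */
        return __ioremap_pfn(pfn, offset, size, 0);
}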
void __iounmap(void __iomem *addr)
{
- vfree((void *) (PAGE_MASK & (unsigned long) addr));
+ vunmap((void *)(PAGE_MASK & (unsigned long)addr));
}
EXPORT_SYMBOL(__iounmap);
+
+#ifdef __io
+void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+ return __io(port);
+}
+EXPORT_SYMBOL(ioport_map);
+
+void ioport_unmap(void __iomem *addr)
+{
+}
+EXPORT_SYMBOL(ioport_unmap);
+#endif
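/*
 * Hypothetical illustration, not part of the patch: with __io() defined
 * by the platform, ioport_map() simply turns a legacy port number into an
 * __iomem cookie, so port accesses can go through ioread8()/iowrite8().
 * example_read_status() is a made-up function name.
 */
static unsigned int example_read_status(unsigned long port)
{
        void __iomem *p = ioport_map(port, 1);
        unsigned int val;

        if (!p)
                return ~0;
        val = ioread8(p);
        ioport_unmap(p);        /* a no-op in this implementation */
        return val;
}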
+
+#ifdef CONFIG_PCI
+#include <linux/pci.h>
+#include <linux/ioport.h>
+
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+ unsigned long start = pci_resource_start(dev, bar);
+ unsigned long len = pci_resource_len(dev, bar);
+ unsigned long flags = pci_resource_flags(dev, bar);
+
+ if (!len || !start)
+ return NULL;
+ if (maxlen && len > maxlen)
+ len = maxlen;
+ if (flags & IORESOURCE_IO)
+ return ioport_map(start, len);
+ if (flags & IORESOURCE_MEM) {
+ if (flags & IORESOURCE_CACHEABLE)
+ return ioremap(start, len);
+ return ioremap_nocache(start, len);
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(pci_iomap);
+
+void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
+{
+ if ((unsigned long)addr >= VMALLOC_START &&
+ (unsigned long)addr < VMALLOC_END)
+ iounmap(addr);
+}
+EXPORT_SYMBOL(pci_iounmap);
+#endif
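/*
 * Hypothetical PCI driver fragment, for illustration only: map BAR 0
 * with the new pci_iomap() helper and release it with pci_iounmap().
 * example_pci_probe() is a made-up function name.
 */
static int example_pci_probe(struct pci_dev *dev)
{
        void __iomem *base;

        if (pci_enable_device(dev))
                return -ENODEV;

        base = pci_iomap(dev, 0, 0);    /* maxlen == 0: map the whole BAR */
        if (!base)
                return -ENOMEM;

        /* ... device register accesses via ioread32(base + ...) ... */

        pci_iounmap(dev, base);
        return 0;
}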