#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/module.h>
+#include <linux/io.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
-#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+#include <asm/proto.h>
+#define ISA_START_ADDRESS 0xa0000
+#define ISA_END_ADDRESS 0x100000
-static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
- unsigned long phys_addr, unsigned long flags)
-{
- unsigned long end;
- unsigned long pfn;
-
- address &= ~PMD_MASK;
- end = address + size;
- if (end > PMD_SIZE)
- end = PMD_SIZE;
- if (address >= end)
- BUG();
- pfn = phys_addr >> PAGE_SHIFT;
- do {
- if (!pte_none(*pte)) {
- printk("remap_area_pte: page already exists\n");
- BUG();
- }
- set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW |
- _PAGE_GLOBAL | _PAGE_DIRTY | _PAGE_ACCESSED | flags)));
- address += PAGE_SIZE;
- pfn++;
- pte++;
- } while (address && (address < end));
-}
-
-static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
- unsigned long phys_addr, unsigned long flags)
-{
- unsigned long end;
-
- address &= ~PGDIR_MASK;
- end = address + size;
- if (end > PGDIR_SIZE)
- end = PGDIR_SIZE;
- phys_addr -= address;
- if (address >= end)
- BUG();
- do {
- pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
- if (!pte)
- return -ENOMEM;
- remap_area_pte(pte, address, end - address, address + phys_addr, flags);
- address = (address + PMD_SIZE) & PMD_MASK;
- pmd++;
- } while (address && (address < end));
- return 0;
-}
-
-static int remap_area_pages(unsigned long address, unsigned long phys_addr,
- unsigned long size, unsigned long flags)
+/*
+ * Fix up the linear direct mapping of the kernel to avoid cache attribute
+ * conflicts.
+ */
+static int
+ioremap_change_attr(unsigned long phys_addr, unsigned long size,
+ unsigned long flags)
{
- int error;
- pgd_t * dir;
- unsigned long end = address + size;
-
- phys_addr -= address;
- dir = pgd_offset_k(address);
- flush_cache_all();
- if (address >= end)
- BUG();
- spin_lock(&init_mm.page_table_lock);
- do {
- pmd_t *pmd;
- pmd = pmd_alloc(&init_mm, dir, address);
- error = -ENOMEM;
- if (!pmd)
- break;
- if (remap_area_pmd(pmd, address, end - address,
- phys_addr + address, flags))
- break;
- error = 0;
- address = (address + PGDIR_SIZE) & PGDIR_MASK;
- dir++;
- } while (address && (address < end));
- spin_unlock(&init_mm.page_table_lock);
- flush_tlb_all();
- return error;
+ int err = 0;
+ if (phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
+ unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ unsigned long vaddr = (unsigned long) __va(phys_addr);
+
+ /*
+ * Must use an address here, not a struct page, because the phys addr
+ * can be in a hole between nodes and not have a memmap entry.
+ */
+ err = change_page_attr_addr(vaddr, npages, __pgprot(__PAGE_KERNEL|flags));
+ if (!err)
+ global_flush_tlb();
+ }
+ return err;
}
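
For context, a minimal sketch (not part of the patch) of the aliasing hazard ioremap_change_attr() guards against; FB_PHYS and FB_LEN are invented:

	/* Hypothetical: 4 MB of reserved RAM that a device scans out of. */
	#define FB_PHYS	0x40000000UL
	#define FB_LEN	(4UL << 20)

	static void __iomem *map_fb(void)
	{
		/*
		 * ioremap_nocache() passes _PAGE_PCD as flags, so __ioremap()
		 * also calls ioremap_change_attr(FB_PHYS, FB_LEN, _PAGE_PCD).
		 * Otherwise __va(FB_PHYS) in the kernel's direct mapping would
		 * stay cached while this alias is uncached: two mappings of
		 * the same physical pages with conflicting cache attributes,
		 * which is undefined behaviour on some x86 parts.
		 */
		return ioremap_nocache(FB_PHYS, FB_LEN);
	}
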
/*
- * Generic mapping function (not visible outside):
+ * Generic mapping function
 */
/*
* Remap an arbitrary physical address space into the kernel virtual
* address space. Needed when the kernel wants to access high addresses
* directly.
*
* NOTE! We need to allow non-page-aligned mappings too: we will obviously
* have to convert them into an offset in a page-aligned mapping, but the
* caller shouldn't need to know that small detail.
*/
-void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
void * addr;
struct vm_struct * area;
unsigned long offset, last_addr;
+ pgprot_t pgprot;
/* Don't allow wraparound or zero size */
last_addr = phys_addr + size - 1;
if (!size || last_addr < phys_addr)
return NULL;
/*
* Don't remap the low PCI/ISA area, it's always mapped..
*/
- if (phys_addr >= 0xA0000 && last_addr < 0x100000)
- return phys_to_virt(phys_addr);
+ if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
+ return (__force void __iomem *)phys_to_virt(phys_addr);
+#ifdef CONFIG_FLATMEM
/*
* Don't allow anybody to remap normal RAM that we're using..
*/
- if (phys_addr < virt_to_phys(high_memory)) {
-#ifndef CONFIG_DISCONTIGMEM
+ if (last_addr < virt_to_phys(high_memory)) {
char *t_addr, *t_end;
struct page *page;
t_addr = __va(phys_addr);
t_end = t_addr + (size - 1);
for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
if (!PageReserved(page))
return NULL;
-#endif
}
+#endif
+ pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_GLOBAL
+ | _PAGE_DIRTY | _PAGE_ACCESSED | flags);
/*
* Mappings have to be page-aligned
*/
offset = phys_addr & ~PAGE_MASK;
phys_addr &= PAGE_MASK;
size = PAGE_ALIGN(last_addr+1) - phys_addr;
/*
* Ok, go for it..
*/
- area = get_vm_area(size, VM_IOREMAP);
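+ /*
+ * Stash the caller's flags in the upper bits of the vm_struct flags;
+ * iounmap() checks p->flags >> 20 to decide whether the direct
+ * mapping must be reset, and the error path below clears them with
+ * area->flags &= 0xffffff before vunmap().
+ */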
+ area = get_vm_area(size, VM_IOREMAP | (flags << 20));
if (!area)
return NULL;
+ area->phys_addr = phys_addr;
addr = area->addr;
- if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
+ if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
+ phys_addr, pgprot)) {
+ remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
+ return NULL;
+ }
+ if (flags && ioremap_change_attr(phys_addr, size, flags) < 0) {
+ area->flags &= 0xffffff;
vunmap(addr);
return NULL;
}
- return (void *) (offset + (char *)addr);
+ return (__force void __iomem *) (offset + (char *)addr);
}
+EXPORT_SYMBOL(__ioremap);
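
A short sketch of the alignment contract restored above: callers may pass an unaligned physical address, and __ioremap() maps whole pages but returns the page base plus the original sub-page offset. TABLE_PHYS is invented and assumed reserved (e.g. a firmware table), so the FLATMEM RAM check passes:

	#define TABLE_PHYS 0x7f8d1234UL	/* hypothetical, not page aligned */

	static u32 read_table_word(void)
	{
		/* flags == 0: a cacheable mapping, fine for reading a table */
		void __iomem *t = __ioremap(TABLE_PHYS, 4, 0);
		u32 v;

		if (!t)
			return 0;
		v = readl(t);	/* t already carries the sub-page offset */
		iounmap(t);
		return v;
	}
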
/**
* ioremap_nocache - map bus memory into CPU space
* @phys_addr: bus address of the memory
* @size: size of the resource to map
*
* This version of ioremap ensures that the memory is marked uncachable
* on the CPU as well as honouring existing caching rules from things like
* the PCI bus. Must be freed with iounmap.
*/
-void *ioremap_nocache (unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
- void *p = __ioremap(phys_addr, size, _PAGE_PCD);
- if (!p)
- return p;
-
- if (phys_addr + size < virt_to_phys(high_memory)) {
- struct page *ppage = virt_to_page(__va(phys_addr));
- unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-
- BUG_ON(phys_addr+size > (unsigned long)high_memory);
- BUG_ON(phys_addr + size < phys_addr);
-
- if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
- iounmap(p);
- p = NULL;
- }
- global_flush_tlb();
- }
-
- return p;
+ return __ioremap(phys_addr, size, _PAGE_PCD);
}
+EXPORT_SYMBOL(ioremap_nocache);
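
Typical driver-side usage, sketched with an invented PCI device (assumes <linux/pci.h> and <linux/io.h>); the BAR number and register offset are hypothetical:

	static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		void __iomem *mmio;

		mmio = ioremap_nocache(pci_resource_start(pdev, 0),
				       pci_resource_len(pdev, 0));
		if (!mmio)
			return -ENOMEM;

		writel(0x1, mmio + 0x10);	/* hypothetical enable register */
		return 0;
	}
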
-void iounmap(void *addr)
+/**
+ * iounmap - Free an IO remapping
+ * @addr: virtual address from ioremap_*
+ *
+ * Caller must ensure there is only one unmapping of the same pointer.
+ */
+void iounmap(volatile void __iomem *addr)
{
- struct vm_struct *p;
+ struct vm_struct *p, *o;
+
if (addr <= high_memory)
return;
- p = remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
- if (!p) {
- printk("__iounmap: bad address %p\n", addr);
+ if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
+ addr < phys_to_virt(ISA_END_ADDRESS))
return;
- }
-
- if (p->flags && p->phys_addr < virt_to_phys(high_memory)) {
- change_page_attr(virt_to_page(__va(p->phys_addr)),
- p->size >> PAGE_SHIFT,
- PAGE_KERNEL);
- global_flush_tlb();
- }
+
+ addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
+ /* Use the vm area unlocked, assuming the caller
+ ensures there isn't another iounmap for the same address
+ in parallel. Reuse of the virtual address is prevented by
+ leaving it in the global lists until we're done with it.
+ change_page_attr (cpa) takes care of the direct mappings. */
+ read_lock(&vmlist_lock);
+ for (p = vmlist; p; p = p->next) {
+ if (p->addr == addr)
+ break;
+ }
+ read_unlock(&vmlist_lock);
+
+ if (!p) {
+ printk("iounmap: bad address %p\n", addr);
+ dump_stack();
+ return;
+ }
+
+ /* Reset the direct mapping. Can block */
+ if (p->flags >> 20)
+ ioremap_change_attr(p->phys_addr, p->size, 0);
+
+ /* Finally remove it */
+ o = remove_vm_area((void *)addr);
+ BUG_ON(p != o || o == NULL);
kfree(p);
}
+EXPORT_SYMBOL(iounmap);
+
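
Finally, a sketch of the unmap contract the comment above spells out: exactly one iounmap() per successful ioremap_*, and never two in parallel for the same pointer, since the vmlist walk takes only the read lock. Addresses below are invented:

	static void __iomem *regs;	/* hypothetical device registers */

	static int dev_init(void)
	{
		regs = ioremap_nocache(0xfebf0000UL, 0x1000);	/* made-up BAR */
		return regs ? 0 : -ENOMEM;
	}

	static void dev_exit(void)
	{
		iounmap(regs);	/* resets direct-map attrs, then frees the area */
		regs = NULL;	/* guard against a second iounmap */
	}
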