#include <linux/config.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#define HAVE_BATS 1
#endif
+#if defined(CONFIG_FSL_BOOKE)
+#define HAVE_TLBCAM 1
+#endif
+
extern char etext[], _stext[];
#ifdef CONFIG_SMP
#define p_mapped_by_bats(x) (0UL)
#endif /* HAVE_BATS */
-#ifdef CONFIG_44x
+#ifdef HAVE_TLBCAM
+extern unsigned int tlbcam_index;
+extern unsigned long v_mapped_by_tlbcam(unsigned long va);
+extern unsigned long p_mapped_by_tlbcam(unsigned long pa);
+#else /* !HAVE_TLBCAM */
+#define v_mapped_by_tlbcam(x) (0UL)
+#define p_mapped_by_tlbcam(x) (0UL)
+#endif /* HAVE_TLBCAM */
+
+#ifdef CONFIG_PTE_64BIT
/* 44x uses an 8kB pgdir because it has 8-byte Linux PTEs. */
#define PGDIR_ORDER 1
#else
{
pgd_t *ret;
- if ((ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGDIR_ORDER)) != NULL)
- clear_pages(ret, PGDIR_ORDER);
+ ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, PGDIR_ORDER);
return ret;
}
extern void *early_get_page(void);
if (mem_init_done) {
- pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
- if (pte) {
- struct page *ptepage = virt_to_page(pte);
- ptepage->mapping = (void *) mm;
- ptepage->index = address & PMD_MASK;
- }
- } else
+ pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ } else {
pte = (pte_t *)early_get_page();
- if (pte)
- clear_page(pte);
+ if (pte)
+ clear_page(pte);
+ }
return pte;
}
struct page *ptepage;
#ifdef CONFIG_HIGHPTE
- int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
+ gfp_t flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
#else
- int flags = GFP_KERNEL | __GFP_REPEAT;
+ gfp_t flags = GFP_KERNEL | __GFP_REPEAT;
#endif
ptepage = alloc_pages(flags, 0);
- if (ptepage) {
- ptepage->mapping = (void *) mm;
- ptepage->index = address & PMD_MASK;
+ if (ptepage)
clear_highpage(ptepage);
- }
return ptepage;
}
#ifdef CONFIG_SMP
hash_page_sync();
#endif
- virt_to_page(pte)->mapping = NULL;
free_page((unsigned long)pte);
}
#ifdef CONFIG_SMP
hash_page_sync();
#endif
- ptepage->mapping = NULL;
__free_page(ptepage);
}
-#ifndef CONFIG_44x
-void *
+#ifndef CONFIG_PHYS_64BIT
+void __iomem *
ioremap(phys_addr_t addr, unsigned long size)
{
return __ioremap(addr, size, _PAGE_NO_CACHE);
}
-#else /* CONFIG_44x */
-void *
+#else /* CONFIG_PHYS_64BIT */
+void __iomem *
ioremap64(unsigned long long addr, unsigned long size)
{
return __ioremap(addr, size, _PAGE_NO_CACHE);
}
-void *
+void __iomem *
ioremap(phys_addr_t addr, unsigned long size)
{
phys_addr_t addr64 = fixup_bigphys_addr(addr, size);
return ioremap64(addr64, size);
}
-#endif /* CONFIG_44x */
+#endif /* CONFIG_PHYS_64BIT */
-void *
+void __iomem *
__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
{
unsigned long v, i;
*/
if ( mem_init_done && (p < virt_to_phys(high_memory)) )
{
- printk("__ioremap(): phys addr "PTE_FMT" is RAM lr %p\n", p,
+ printk("__ioremap(): phys addr "PHYS_FMT" is RAM lr %p\n", p,
__builtin_return_address(0));
return NULL;
}
if ((v = p_mapped_by_bats(p)) /*&& p_mapped_by_bats(p+size-1)*/ )
goto out;
+ if ((v = p_mapped_by_tlbcam(p)))
+ goto out;
+
if (mem_init_done) {
struct vm_struct *area;
area = get_vm_area(size, VM_IOREMAP);
}
out:
- return (void *) (v + ((unsigned long)addr & ~PAGE_MASK));
+ return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}
-void iounmap(void *addr)
+void iounmap(volatile void __iomem *addr)
{
/*
* If mapped by BATs then there is nothing to do.
vunmap((void *) (PAGE_MASK & (unsigned long)addr));
}
+void __iomem *ioport_map(unsigned long port, unsigned int len)
+{
+ return (void __iomem *) (port + _IO_BASE);
+}
+
+void ioport_unmap(void __iomem *addr)
+{
+ /* Nothing to do */
+}
+EXPORT_SYMBOL(ioport_map);
+EXPORT_SYMBOL(ioport_unmap);
+
int
map_page(unsigned long va, phys_addr_t pa, int flags)
{
pte_t *pg;
int err = -ENOMEM;
- spin_lock(&init_mm.page_table_lock);
/* Use upper 10 bits of VA to index the first level map */
pd = pmd_offset(pgd_offset_k(va), va);
/* Use middle 10 bits of VA to index the second-level map */
- pg = pte_alloc_kernel(&init_mm, pd, va);
+ pg = pte_alloc_kernel(pd, va);
if (pg != 0) {
err = 0;
- set_pte(pg, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags)));
+ set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags)));
if (mem_init_done)
flush_HPTE(0, va, pmd_val(*pd));
}
- spin_unlock(&init_mm.page_table_lock);
return err;
}
/* is x a power of 2?  (non-zero with no bit shared with x-1) */
#define is_power_of_2(x)	(((x) != 0) && ((((x) - 1) & (x)) == 0))
/*
 * is x a power of 4?  A power of 4 is a power of 2 whose set bit is at
 * an even position, i.e. ffs() returns an odd value (ffs is 1-based).
 * Note: every macro argument use is parenthesized — the original
 * `(x-1)` mis-associated for arguments like `a | b`.
 */
#define is_power_of_4(x)	((x) != 0 && (((x) & ((x) - 1)) == 0) && (ffs(x) & 1))
+
/*
* Set up a mapping for a block of I/O.
* virt, phys, size must all be page-aligned.
}
#endif /* HAVE_BATS */
+#ifdef HAVE_TLBCAM
+ /*
+ * Use a CAM for this if possible...
+ */
+ if (tlbcam_index < num_tlbcam_entries && is_power_of_4(size)
+ && (virt & (size - 1)) == 0 && (phys & (size - 1)) == 0) {
+ settlbcam(tlbcam_index, virt, phys, size, flags, 0);
+ ++tlbcam_index;
+ return;
+ }
+#endif /* HAVE_TLBCAM */
+
/* No BATs available, put it in the page tables. */
for (i = 0; i < size; i += PAGE_SIZE)
map_page(virt + i, phys + i, flags);