*
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
unsigned long ioremap_bot;
int io_bat_index;
-#if defined(CONFIG_6xx) || defined(CONFIG_POWER3)
+#if defined(CONFIG_6xx)
#define HAVE_BATS 1
#endif
#ifdef HAVE_TLBCAM
extern unsigned int tlbcam_index;
-extern unsigned int num_tlbcam_entries;
extern unsigned long v_mapped_by_tlbcam(unsigned long va);
extern unsigned long p_mapped_by_tlbcam(unsigned long pa);
#else /* !HAVE_TLBCAM */
#define v_mapped_by_tlbcam(x)	(0UL)
#define p_mapped_by_tlbcam(x)	(0UL)
#endif /* HAVE_TLBCAM */
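/*
 * Illustrative use (a sketch, not part of this patch): __ioremap()
 * typically consults these helpers before carving out new virtual
 * space, so a physical range already pinned by a TLBCAM entry (or a
 * BAT, on HAVE_BATS parts) is simply reused:
 *
 *	v = p_mapped_by_tlbcam(p);
 *	if (v)
 *		goto out;	(reuse the existing mapping)
 *
 * On !HAVE_TLBCAM configurations the macro fallbacks above reduce the
 * check to "if (0)", so it compiles away.
 */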
-#ifdef CONFIG_44x
+#ifdef CONFIG_PTE_64BIT
/* With 64-bit PTEs (e.g. on 44x), Linux PTEs are 8 bytes, so an 8kB pgdir is needed. */
#define PGDIR_ORDER 1
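/*
 * Why order 1 (illustrative reasoning, not from this patch): with 4kB
 * pages and 8-byte PTEs a PTE page holds 512 entries covering 2MB, so
 * spanning 4GB takes 2048 first-level entries, i.e. an 8kB pgdir.
 * pgd_alloc() then presumably allocates it with something like:
 *
 *	pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, PGDIR_ORDER);
 */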
#else
if (mem_init_done) {
pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
- if (pte) {
- struct page *ptepage = virt_to_page(pte);
- ptepage->mapping = (void *) mm;
- ptepage->index = address & PMD_MASK;
- }
} else {
pte = (pte_t *)early_get_page();
if (pte)
struct page *ptepage;
#ifdef CONFIG_HIGHPTE
- int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
+ gfp_t flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
#else
- int flags = GFP_KERNEL | __GFP_REPEAT;
+ gfp_t flags = GFP_KERNEL | __GFP_REPEAT;
#endif
ptepage = alloc_pages(flags, 0);
- if (ptepage) {
- ptepage->mapping = (void *) mm;
- ptepage->index = address & PMD_MASK;
+ if (ptepage)
clear_highpage(ptepage);
- }
return ptepage;
}
#ifdef CONFIG_SMP
hash_page_sync();
#endif
- virt_to_page(pte)->mapping = NULL;
free_page((unsigned long)pte);
}
#ifdef CONFIG_SMP
hash_page_sync();
#endif
- ptepage->mapping = NULL;
__free_page(ptepage);
}
-#ifndef CONFIG_44x
+#ifndef CONFIG_PHYS_64BIT
void __iomem *
ioremap(phys_addr_t addr, unsigned long size)
{
return __ioremap(addr, size, _PAGE_NO_CACHE);
}
-#else /* CONFIG_44x */
+#else /* CONFIG_PHYS_64BIT */
void __iomem *
ioremap64(unsigned long long addr, unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE);
}
-#endif /* CONFIG_44x */
+#endif /* CONFIG_PHYS_64BIT */
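/*
 * Caller-side sketch (the register address and size below are made-up
 * example values, not taken from this patch):
 *
 *	void __iomem *regs = ioremap(0xfe000000, 0x1000);
 *	if (regs) {
 *		u32 val = in_be32(regs);
 *		iounmap(regs);
 *	}
 *
 * On CONFIG_PHYS_64BIT parts an ioremap() wrapper (outside this hunk)
 * presumably forwards the fixed-up 64-bit address to ioremap64(), so
 * callers keep the same interface either way.
 */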
void __iomem *
__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
*/
if ( mem_init_done && (p < virt_to_phys(high_memory)) )
{
- printk("__ioremap(): phys addr "PTE_FMT" is RAM lr %p\n", p,
+ printk("__ioremap(): phys addr "PHYS_FMT" is RAM lr %p\n", p,
__builtin_return_address(0));
return NULL;
}
pte_t *pg;
int err = -ENOMEM;
- spin_lock(&init_mm.page_table_lock);
/* Use upper 10 bits of VA to index the first level map */
pd = pmd_offset(pgd_offset_k(va), va);
/* Use middle 10 bits of VA to index the second-level map */
- pg = pte_alloc_kernel(&init_mm, pd, va);
+ pg = pte_alloc_kernel(pd, va);
if (pg != 0) {
err = 0;
- set_pte(pg, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags)));
+ set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags)));
if (mem_init_done)
flush_HPTE(0, va, pmd_val(*pd));
}
- spin_unlock(&init_mm.page_table_lock);
return err;
}
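/*
 * For reference, a sketch of the usual caller (assumed, not part of
 * this hunk): __ioremap() maps the area one page at a time and bails
 * out if any page fails:
 *
 *	for (i = 0; i < size; i += PAGE_SIZE)
 *		if (map_page(v + i, p + i, flags))
 *			goto fail;	(hypothetical error label)
 */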
* the PTE pointer is unmodified if PTE is not found.
*/
int
-get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
+get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
{
pgd_t *pgd;
pmd_t *pmd;
if (pte) {
retval = 1;
*ptep = pte;
+ if (pmdp)
+ *pmdp = pmd;
/* XXX caller needs to do pte_unmap, yuck */
}
}
mm = &init_mm;
pa = 0;
- if (get_pteptr(mm, addr, &pte)) {
+ if (get_pteptr(mm, addr, &pte, NULL)) {
pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
pte_unmap(pte);
}