ctxd_t *srmmu_context_table;
int viking_mxcc_present;
-static spinlock_t srmmu_context_spinlock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(srmmu_context_spinlock);
int is_hypersparc;
static unsigned long srmmu_pte_pfn(pte_t pte)
{
if (srmmu_device_memory(pte_val(pte))) {
- /* XXX Anton obviously had something in mind when he did this.
- * But what?
+ /* Just return something that will cause
+ * pfn_valid() to return false. This makes
+ * copy_one_pte() just copy the PTE over
+ * directly.
*/
- /* return (struct page *)~0; */
- BUG();
+ return ~0UL;
}
return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
}
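/*
 * Illustrative sketch (not part of the patch) of why ~0UL is a safe
 * sentinel: any pfn_valid()-style range check fails for ~0UL, so
 * generic code never dereferences mem_map for device-memory PTEs.
 * The maximum PFN below is assumed, machine-specific in reality.
 */
#define EX_MAX_PFN	0x7ffffUL	/* assumed */

static int example_pfn_valid(unsigned long pfn)
{
	return pfn < EX_MAX_PFN;	/* ~0UL always fails */
}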
static inline int srmmu_pte_present(pte_t pte)
{ return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); }
+static inline int srmmu_pte_read(pte_t pte)
+{ return !(pte_val(pte) & SRMMU_NOREAD); }
+
static inline void srmmu_pte_clear(pte_t *ptep)
{ srmmu_set_pte(ptep, __pte(0)); }
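/*
 * Illustrative decode of the entry-type field that srmmu_pte_present()
 * tests: bits 1:0 of every SRMMU table word name what the word is.
 */
#define EX_ET_MASK	0x3UL
#define EX_ET_PTD	0x1UL	/* pointer to a next-level table */
#define EX_ET_PTE	0x2UL	/* leaf translation */

static const char *example_et_name(unsigned long word)
{
	switch (word & EX_ET_MASK) {
	case EX_ET_PTE:	return "pte";
	case EX_ET_PTD:	return "ptd";
	default:	return "invalid";
	}
}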
static inline void srmmu_pmd_clear(pmd_t *pmdp) {
int i;
- for (i = 0; i < SRMMU_PTRS_PER_PTE_SOFT/SRMMU_PTRS_PER_PTE; i++)
+ for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
srmmu_set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
}
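/*
 * A compile-time sketch of the loop bound above, assuming the sparc32
 * values of the day: PTRS_PER_PTE == 1024 (one 4MB soft PMD of 4K
 * pages) and SRMMU_REAL_PTRS_PER_PTE == 64 (one 256KB hardware
 * table). The quotient, 16, is exactly the size of the pmdv[] word
 * cluster cleared entry by entry.
 */
#define EX_PTRS_PER_PTE		1024	/* assumed soft value */
#define EX_REAL_PTRS_PER_PTE	64	/* hardware table entries */

typedef char ex_cluster_is_16[
	(EX_PTRS_PER_PTE / EX_REAL_PTRS_PER_PTE == 16) ? 1 : -1];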
int i;
ptp = __nocache_pa((unsigned long) ptep) >> 4;
- for (i = 0; i < SRMMU_PTRS_PER_PTE_SOFT/SRMMU_PTRS_PER_PTE; i++) {
+ for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
- ptp += (SRMMU_PTRS_PER_PTE*sizeof(pte_t) >> 4);
+ ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
}
}
int i;
ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4); /* watch for overflow */
- for (i = 0; i < SRMMU_PTRS_PER_PTE_SOFT/SRMMU_PTRS_PER_PTE; i++) {
+ for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
- ptp += (SRMMU_PTRS_PER_PTE*sizeof(pte_t) >> 4);
+ ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
}
}
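/*
 * Hedged sketch of the encoding both setters above emit: a hardware
 * PTD is the table's physical address shifted right by 4 with the PTD
 * entry type OR'd in, and consecutive 64-entry tables sit
 * 64 * sizeof(pte_t) bytes apart, hence the ">> 4" increment of ptp.
 */
static unsigned long example_mk_ptd(unsigned long table_pa)
{
	return (table_pa >> 4) | 0x1UL;	/* SRMMU_ET_PTD */
}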
{ return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); }
/* to find an entry in a top-level page table... */
-extern inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
+static inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
{ return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); }
/* Find an entry in the second-level page table.. */
static inline pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address)
{
return (pmd_t *) srmmu_pgd_page(*dir) +
- ((address >> SRMMU_PMD_SHIFT_SOFT) & (SRMMU_PTRS_PER_PMD_SOFT - 1));
+ ((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
}
/* Find an entry in the third-level page table.. */
pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4);
return (pte_t *) pte +
- ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE_SOFT - 1));
+ ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}
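/*
 * Illustrative index split mirroring the three offset helpers above,
 * under the assumed sparc32 layout: PGDIR_SHIFT 24 (256 pgds),
 * PMD_SHIFT 22 (4 soft pmds per pgd), PAGE_SHIFT 12 (1024 ptes per
 * soft pmd).
 */
static void example_va_split(unsigned long va, unsigned int *pgd_i,
			     unsigned int *pmd_i, unsigned int *pte_i)
{
	*pgd_i = va >> 24;
	*pmd_i = (va >> 22) & (4 - 1);
	*pte_i = (va >> 12) & (1024 - 1);
}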
static unsigned long srmmu_swp_type(swp_entry_t entry)
static pte_t *
srmmu_pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
- return (pte_t *)srmmu_get_nocache(SRMMU_PTE_SZ_SOFT, SRMMU_PTE_SZ_SOFT);
+ return (pte_t *)srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
}
static struct page *
static void srmmu_free_pte_fast(pte_t *pte)
{
- srmmu_free_nocache((unsigned long)pte, SRMMU_PTE_SZ_SOFT);
+ srmmu_free_nocache((unsigned long)pte, PTE_SIZE);
}
static void srmmu_pte_free(struct page *pte)
BUG();
p = page_to_pfn(pte) << PAGE_SHIFT; /* Physical address */
p = (unsigned long) __nocache_va(p); /* Nocached virtual */
- srmmu_free_nocache(p, SRMMU_PTE_SZ_SOFT);
+ srmmu_free_nocache(p, PTE_SIZE);
}
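/*
 * Hypothetical wrappers, only to underline the invariant the functions
 * above keep: srmmu page tables live in the nocache region, so an
 * allocation of PTE_SIZE must be freed back to the nocache pool with
 * the same size, never via the page allocator.
 */
static pte_t *example_pte_get(void)
{
	return (pte_t *)srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
}

static void example_pte_put(pte_t *pte)
{
	srmmu_free_nocache((unsigned long)pte, PTE_SIZE);
}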
/*
a = 0x20; b = 0x40; c = 0x60;
d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
- start &= SRMMU_PMD_MASK;
+ start &= SRMMU_REAL_PMD_MASK;
while(start < end) {
faddr = (start + (0x10000 - 0x100));
goto inside;
"r" (a), "r" (b), "r" (c), "r" (d),
"r" (e), "r" (f), "r" (g));
} while (faddr != start);
- start += SRMMU_PMD_SIZE;
+ start += SRMMU_REAL_PMD_SIZE;
}
srmmu_set_context(octx);
local_irq_restore(flags);
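/*
 * A minimal sketch of the traversal the flush above performs: align
 * down to a hardware-PMD boundary (256KB, assuming
 * SRMMU_REAL_PMD_SHIFT == 18), then flush one region per iteration
 * until end is reached.
 */
static void example_flush_walk(unsigned long start, unsigned long end,
			       void (*flush_one)(unsigned long))
{
	const unsigned long size = 1UL << 18;

	start &= ~(size - 1);		/* SRMMU_REAL_PMD_MASK */
	while (start < end) {
		flush_one(start);
		start += size;		/* SRMMU_REAL_PMD_SIZE */
	}
}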
extern void viking_flush_cache_mm(struct mm_struct *mm);
extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
-extern void viking_flush_cache_page(struct vm_area_struct *vma,
- unsigned long page);
+extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void viking_flush_page_to_ram(unsigned long page);
extern void viking_flush_page_for_dma(unsigned long page);
extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
}
pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
- ptep = (pte_t *)__srmmu_get_nocache(SRMMU_PTE_SZ_SOFT,
- SRMMU_PTE_SZ_SOFT);
+ ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
if (ptep == NULL)
early_pgtable_allocfail("pte");
- memset(__nocache_fix(ptep), 0, SRMMU_PTE_SZ_SOFT);
+ memset(__nocache_fix(ptep), 0, PTE_SIZE);
srmmu_pmd_set(__nocache_fix(pmdp), ptep);
}
- if (start > (0xffffffffUL - SRMMU_PMD_SIZE_SOFT))
+ if (start > (0xffffffffUL - PMD_SIZE))
break;
- start = (start + SRMMU_PMD_SIZE_SOFT) & SRMMU_PMD_MASK_SOFT;
+ start = (start + PMD_SIZE) & PMD_MASK;
}
}
}
pmdp = srmmu_pmd_offset(pgdp, start);
if(srmmu_pmd_none(*pmdp)) {
- ptep = (pte_t *) __srmmu_get_nocache(SRMMU_PTE_SZ_SOFT,
- SRMMU_PTE_SZ_SOFT);
+ ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
if (ptep == NULL)
early_pgtable_allocfail("pte");
- memset(ptep, 0, SRMMU_PTE_SZ_SOFT);
+ memset(ptep, 0, PTE_SIZE);
srmmu_pmd_set(pmdp, ptep);
}
- if (start > (0xffffffffUL - SRMMU_PMD_SIZE_SOFT))
+ if (start > (0xffffffffUL - PMD_SIZE))
break;
- start = (start + SRMMU_PMD_SIZE_SOFT) & SRMMU_PMD_MASK_SOFT;
+ start = (start + PMD_SIZE) & PMD_MASK;
}
}
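/*
 * A minimal sketch (not part of the patch) of the loop idiom used in
 * both early-mapping walks above: the explicit check against
 * 0xffffffffUL - PMD_SIZE matters because "start + PMD_SIZE" would
 * wrap past the top of the 32-bit address space and loop forever.
 * A 4MB PMD_SIZE is assumed.
 */
static unsigned long example_next_pmd(unsigned long start)
{
	const unsigned long size = 1UL << 22;	/* assumed PMD_SIZE */
	const unsigned long mask = ~(size - 1);	/* assumed PMD_MASK */

	if (start > 0xffffffffUL - size)
		return 0;	/* caller breaks out: next step would wrap */
	return (start + size) & mask;
}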
/* A red snapper, see what it really is. */
what = 0;
- if(!(start & ~(SRMMU_PMD_MASK))) {
- if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PMD_SIZE) == prompte)
+ if(!(start & ~(SRMMU_REAL_PMD_MASK))) {
+ if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte)
what = 1;
}
}
pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
- ptep = (pte_t *) __srmmu_get_nocache(SRMMU_PTE_SZ_SOFT,
- SRMMU_PTE_SZ_SOFT);
+ ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
if (ptep == NULL)
early_pgtable_allocfail("pte");
- memset(__nocache_fix(ptep), 0, SRMMU_PTE_SZ_SOFT);
+ memset(__nocache_fix(ptep), 0, PTE_SIZE);
srmmu_pmd_set(__nocache_fix(pmdp), ptep);
}
if(what == 1) {
* good hardware PTE piece. Alternatives seem worse.
*/
unsigned int x; /* Index of HW PMD in soft cluster */
- x = (start >> SRMMU_PMD_SHIFT) & 15;
+ x = (start >> SRMMU_REAL_PMD_SHIFT) & 15;
*(unsigned long *)__nocache_fix(&pmdp->pmdv[x]) = prompte;
- start += SRMMU_PMD_SIZE;
+ start += SRMMU_REAL_PMD_SIZE;
continue;
}
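/*
 * Illustrative (assumes SRMMU_REAL_PMD_SHIFT == 18): the "soft" 4MB
 * PMD is a cluster of 16 hardware 256KB tables, so the slot a probed
 * PROM mapping lands in is bits 21:18 of the address. For example,
 * 0x00040000 -> slot 1 and 0x003c0000 -> slot 15, while 0x00400000
 * wraps to slot 0 of the next soft PMD.
 */
static unsigned int example_hw_pmd_index(unsigned long start)
{
	return (start >> 18) & 15;
}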
ptep = srmmu_pte_offset(__nocache_fix(pmdp), start);
flush_cache_all();
srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
+#ifdef CONFIG_SMP
+ /* Flush only the local TLB; a cross-call
+ * flush would hang here, since the other
+ * cpus are not running yet.
+ */
+ local_flush_tlb_all();
+#else
flush_tlb_all();
+#endif
poke_srmmu();
#ifdef CONFIG_SUN_IO
zones_size[ZONE_HIGHMEM] = npages;
zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();
- free_area_init_node(0, &contig_page_data, NULL, zones_size,
+ free_area_init_node(0, &contig_page_data, zones_size,
pfn_base, zholes_size);
- mem_map = contig_page_data.node_mem_map;
}
}
max_size = vac_cache_size;
if(vac_line_size < min_line_size)
min_line_size = vac_line_size;
+ /* FIXME: cpus not contiguous!! */
cpu++;
if (cpu >= NR_CPUS || !cpu_online(cpu))
break;
static void __init init_hypersparc(void)
{
srmmu_name = "ROSS HyperSparc";
+ srmmu_modtype = HyperSparc;
init_vac_layout();
static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
- struct mm_struct *mm = vma->vm_mm;
-
- FLUSH_BEGIN(mm)
+ FLUSH_BEGIN(vma->vm_mm)
flush_user_windows();
turbosparc_idflash_clear();
FLUSH_END
static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
- struct mm_struct *mm = vma->vm_mm;
-
- FLUSH_BEGIN(mm)
+ FLUSH_BEGIN(vma->vm_mm)
srmmu_flush_whole_tlb();
FLUSH_END
}
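/*
 * For context, hedged from this file's conventions: FLUSH_BEGIN and
 * FLUSH_END bracket the body in a single context check, along the
 * lines of the sketch below. The argument is evaluated once, which is
 * why the temporary mm variable could be dropped and vma->vm_mm
 * passed straight in.
 */
#define EX_FLUSH_BEGIN(mm)	if ((mm)->context != NO_CONTEXT) {
#define EX_FLUSH_END		}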
return pte_val(pte) >> SRMMU_PTE_FILE_SHIFT;
}
+static pgprot_t srmmu_pgprot_noncached(pgprot_t prot)
+{
+ prot &= ~__pgprot(SRMMU_CACHE);
+
+ return prot;
+}
+
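/*
 * Hypothetical caller-side sketch: a driver mapping device registers
 * strips the cacheable bit from the protection value before the PTE
 * is installed. The SRMMU C bit is assumed to be 0x80 (PTE bit 7).
 */
static unsigned long example_make_noncached(unsigned long protval)
{
	return protval & ~0x80UL;	/* SRMMU_CACHE, assumed */
}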
/* Load up routines and constants for sun4m and sun4d mmu */
void __init ld_mmu_srmmu(void)
{
extern void ld_mmu_iounit(void);
extern void ___xchg32_sun4md(void);
- BTFIXUPSET_SIMM13(pmd_shift, SRMMU_PMD_SHIFT_SOFT);
- BTFIXUPSET_SETHI(pmd_size, SRMMU_PMD_SIZE_SOFT);
- BTFIXUPSET_SETHI(pmd_mask, SRMMU_PMD_MASK_SOFT);
-
BTFIXUPSET_SIMM13(pgdir_shift, SRMMU_PGDIR_SHIFT);
BTFIXUPSET_SETHI(pgdir_size, SRMMU_PGDIR_SIZE);
BTFIXUPSET_SETHI(pgdir_mask, SRMMU_PGDIR_MASK);
- BTFIXUPSET_SIMM13(ptrs_per_pte, SRMMU_PTRS_PER_PTE_SOFT);
- BTFIXUPSET_SIMM13(ptrs_per_pmd, SRMMU_PTRS_PER_PMD_SOFT);
+ BTFIXUPSET_SIMM13(ptrs_per_pmd, SRMMU_PTRS_PER_PMD);
BTFIXUPSET_SIMM13(ptrs_per_pgd, SRMMU_PTRS_PER_PGD);
BTFIXUPSET_INT(page_none, pgprot_val(SRMMU_PAGE_NONE));
BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
- pg_iobits = SRMMU_VALID | SRMMU_WRITE | SRMMU_REF;
/* Functions */
+ BTFIXUPSET_CALL(pgprot_noncached, srmmu_pgprot_noncached, BTFIXUPCALL_NORM);
#ifndef CONFIG_SMP
BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2);
#endif
BTFIXUPSET_CALL(pte_present, srmmu_pte_present, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_SWAPO0G0);
+ BTFIXUPSET_CALL(pte_read, srmmu_pte_read, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pmd_bad, srmmu_pmd_bad, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pmd_present, srmmu_pmd_present, BTFIXUPCALL_NORM);
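/*
 * A rough function-pointer analogue (all names hypothetical) of what
 * the BTFIXUPSET_CALL table above achieves: one slot per MMU
 * operation, bound at boot to the chosen SRMMU routine. Real BTFIXUP
 * goes further and patches the call instructions in place, so the
 * bound routine costs no indirection at run time.
 */
struct example_mmu_ops {
	int (*pte_present)(unsigned long pteval);
	int (*pte_read)(unsigned long pteval);
};

static struct example_mmu_ops example_ops;
/* boot code would do: example_ops.pte_present = ...; etc. */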