#include <asm/spitfire.h>
#include <asm/sections.h>
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
extern void device_scan(void);
struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
#endif
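+/* If the L1 D-cache is larger than a page, virtually-indexed
+ * aliases of the page can exist, so its D-cache lines must be
+ * flushed; the second argument requests an additional I-cache
+ * flush on spitfire, whose I-cache does not snoop stores.
+ */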
#if (L1DCACHE_SIZE > PAGE_SIZE)
- __flush_dcache_page(page->virtual,
+ __flush_dcache_page(page_address(page),
((tlb_type == spitfire) &&
page_mapping(page) != NULL));
#else
if (page_mapping(page) != NULL &&
tlb_type == spitfire)
- __flush_icache_page(__pa(page->virtual));
+ __flush_icache_page(__pa(page_address(page)));
#endif
}
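+/* The cpu which last dirtied a page's D-cache lines is stashed in
+ * page->flags above bit 24, next to the PG_dcache_dirty bit;
+ * dcache_dirty_cpu() pulls it back out so the flush can be run on
+ * (or cross-called to) the right cpu.
+ */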
#define dcache_dirty_cpu(page) \
(((page)->flags >> 24) & (NR_CPUS - 1UL))
-static __inline__ void set_dcache_dirty(struct page *page)
+static __inline__ void set_dcache_dirty(struct page *page, int this_cpu)
{
- unsigned long mask = smp_processor_id();
+ unsigned long mask = this_cpu;
unsigned long non_cpu_bits = ~((NR_CPUS - 1UL) << 24UL);
mask = (mask << 24) | (1UL << PG_dcache_dirty);
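+ /* Atomically fold the new cpu number and dirty bit into
+ * page->flags with a compare-and-swap loop, preserving the
+ * unrelated flag bits and retrying if another cpu races us.
+ */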
__asm__ __volatile__("1:\n\t"
(page = pfn_to_page(pfn), page_mapping(page)) &&
((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
int cpu = ((pg_flags >> 24) & (NR_CPUS - 1UL));
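+ /* get_cpu() disables preemption, so this_cpu stays valid
+ * (no migration) until the matching put_cpu() below; a bare
+ * smp_processor_id() here would not be preempt-safe.
+ */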
+ int this_cpu = get_cpu();
/* This is just to optimize away some function calls
* in the SMP case.
*/
- if (cpu == smp_processor_id())
+ if (cpu == this_cpu)
flush_dcache_page_impl(page);
else
smp_flush_dcache_page_impl(page, cpu);
clear_dcache_dirty_cpu(page, cpu);
+
+ put_cpu();
}
if (get_thread_fault_code())
__update_mmu_cache(vma->vm_mm->context & TAG_CONTEXT_BITS,
struct address_space *mapping = page_mapping(page);
int dirty = test_bit(PG_dcache_dirty, &page->flags);
int dirty_cpu = dcache_dirty_cpu(page);
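+ /* Again pin the cpu across the flush; released at "out". */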
+ int this_cpu = get_cpu();
if (mapping && !mapping_mapped(mapping)) {
if (dirty) {
- if (dirty_cpu == smp_processor_id())
- return;
+ if (dirty_cpu == this_cpu)
+ goto out;
smp_flush_dcache_page_impl(page, dirty_cpu);
}
- set_dcache_dirty(page);
+ set_dcache_dirty(page, this_cpu);
} else {
/* We could delay the flush for the !page_mapping
* case too. But that case is for exec env/arg
* pages and those are %99 certainly going to get
* faulted into the tlb (and thus flushed) anyways.
*/
flush_dcache_page_impl(page);
}
-}
-
-/* When shared+writable mmaps of files go away, we lose all dirty
- * page state, so we have to deal with D-cache aliasing here.
- *
- * This code relies on the fact that flush_cache_range() is always
- * called for an area composed by a single VMA. It also assumes that
- * the MM's page_table_lock is held.
- */
-static inline void flush_cache_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long address, unsigned long size)
-{
- unsigned long offset;
- pte_t *ptep;
-
- if (pmd_none(*pmd))
- return;
- ptep = pte_offset_map(pmd, address);
- offset = address & ~PMD_MASK;
- if (offset + size > PMD_SIZE)
- size = PMD_SIZE - offset;
- size &= PAGE_MASK;
- for (offset = 0; offset < size; ptep++, offset += PAGE_SIZE) {
- pte_t pte = *ptep;
-
- if (pte_none(pte))
- continue;
-
- if (pte_present(pte) && pte_dirty(pte)) {
- struct page *page;
- unsigned long pgaddr, uaddr;
- unsigned long pfn = pte_pfn(pte);
-
- if (!pfn_valid(pfn))
- continue;
- page = pfn_to_page(pfn);
- if (PageReserved(page) || !page_mapping(page))
- continue;
- pgaddr = (unsigned long) page_address(page);
- uaddr = address + offset;
- if ((pgaddr ^ uaddr) & (1 << 13))
- flush_dcache_page_all(mm, page);
- }
- }
- pte_unmap(ptep - 1);
-}
-static inline void flush_cache_pmd_range(struct mm_struct *mm, pgd_t *dir, unsigned long address, unsigned long size)
-{
- pmd_t *pmd;
- unsigned long end;
-
- if (pgd_none(*dir))
- return;
- pmd = pmd_offset(dir, address);
- end = address + size;
- if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
- end = ((address + PGDIR_SIZE) & PGDIR_MASK);
- do {
- flush_cache_pte_range(mm, pmd, address, end - address);
- address = (address + PMD_SIZE) & PMD_MASK;
- pmd++;
- } while (address < end);
-}
-
-void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
-{
- struct mm_struct *mm = vma->vm_mm;
- pgd_t *dir = pgd_offset(mm, start);
-
- if (mm == current->mm)
- flushw_user();
-
- if (vma->vm_file == NULL ||
- ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE)))
- return;
-
- do {
- flush_cache_pmd_range(mm, dir, start, end - start);
- start = (start + PGDIR_SIZE) & PGDIR_MASK;
- dir++;
- } while (start && (start < end));
+out:
+ put_cpu();
}
void flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah has coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE)
			__flush_icache_page(__get_phys(kaddr));
	}
}
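+/* Out-of-line page <-> pfn converters: mem_map does not begin at
+ * pfn 0 on this platform, so pfn_base has to be applied in both
+ * directions (cf. the pfn_to_page(pfn) call above).
+ */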
+unsigned long page_to_pfn(struct page *page)
+{
+ return (unsigned long) ((page - mem_map) + pfn_base);
+}
+
+struct page *pfn_to_page(unsigned long pfn)
+{
+ return (mem_map + (pfn - pfn_base));
+}
+
void show_mem(void)
{
printk("Mem-info:\n");
show_free_areas();
- printk("Free swap: %6dkB\n",
+ printk("Free swap: %6ldkB\n",
nr_swap_pages << (PAGE_SHIFT-10));
printk("%ld pages of RAM\n", num_physpages);
printk("%d free pages\n", nr_free_pages());
#else
#define DC_ALIAS_SHIFT 0
#endif
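+/* With D-cache aliasing, pte pages are allocated in groups of
+ * (1 << DC_ALIAS_SHIFT) so a page of the virtual color matching
+ * the faulting address can be handed back; `color' below selects
+ * it.
+ */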
-pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+pte_t *__pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
struct page *page;
unsigned long color;
* prom_set_traptable() call, and OBP is allocating a scratchpad
* for saving client program register state etc.
*/
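+/* Order the firmware-provided memory list; this runs only during
+ * early boot and has no other callers, hence static __init.
+ */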
-void __init sort_memlist(struct linux_mlist_p1275 *thislist)
+static void __init sort_memlist(struct linux_mlist_p1275 *thislist)
{
int swapi = 0;
int i, mitr;