#include <linux/config.h>
#include <linux/mm.h>
+#include <asm/cache.h> /* for flush_user_dcache_range_asm() proto */
/* The usual comment is "Caches aren't brain-dead on the <architecture>".
* Unfortunately, that doesn't apply to PA-RISC. */
#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()
-/* The following value needs to be tuned and probably scaled with the
- * cache size.
- */
-
-#define FLUSH_THRESHOLD 0x80000
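+/* Range size (in bytes) at or above which flushing the whole cache is
+ * used instead of a ranged flush; tuned at boot by parisc_setup_cache_timing(). */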
+extern int parisc_cache_flush_threshold;
+void parisc_setup_cache_timing(void);
static inline void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
-#ifdef CONFIG_SMP
- flush_user_dcache_range_asm(start,end);
-#else
- if ((end - start) < FLUSH_THRESHOLD)
+ if ((end - start) < parisc_cache_flush_threshold)
flush_user_dcache_range_asm(start,end);
else
flush_data_cache();
-#endif
}
static inline void
flush_user_icache_range(unsigned long start, unsigned long end)
{
-#ifdef CONFIG_SMP
- flush_user_icache_range_asm(start,end);
-#else
- if ((end - start) < FLUSH_THRESHOLD)
+ if ((end - start) < parisc_cache_flush_threshold)
flush_user_icache_range_asm(start,end);
else
flush_instruction_cache();
-#endif
}
-extern void __flush_dcache_page(struct page *page);
-
-static inline void flush_dcache_page(struct page *page)
-{
- struct address_space *mapping = page_mapping(page);
-
- if (mapping && !mapping_mapped(mapping)) {
- set_bit(PG_dcache_dirty, &page->flags);
- } else {
- __flush_dcache_page(page);
- }
-}
+extern void flush_dcache_page(struct page *page);
#define flush_dcache_mmap_lock(mapping) \
- spin_lock_irq(&(mapping)->tree_lock)
+ write_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
- spin_unlock_irq(&(mapping)->tree_lock)
+ write_unlock_irq(&(mapping)->tree_lock)
#define flush_icache_page(vma,page) do { flush_kernel_dcache_page(page_address(page)); flush_kernel_icache_page(page_address(page)); } while (0)
#define flush_icache_range(s,e) do { flush_kernel_dcache_range_asm(s,e); flush_kernel_icache_range_asm(s,e); } while (0)
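+/* These copy a user page through its kernel mapping; PA-RISC caches are
+ * virtually indexed, so the user-space alias of the page is flushed first
+ * to keep the two mappings from holding conflicting data. */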
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-do { memcpy(dst, src, len); \
- flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \
+do { \
+ flush_cache_page(vma, vaddr, page_to_pfn(page)); \
+ memcpy(dst, src, len); \
+ flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \
} while (0)
+
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
+do { \
+ flush_cache_page(vma, vaddr, page_to_pfn(page)); \
+ memcpy(dst, src, len); \
+} while (0)
static inline void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
/* Simple function to work out if we have an existing address translation
* for a user space vma. */
static inline int translation_exists(struct vm_area_struct *vma,
- unsigned long addr)
+ unsigned long addr, unsigned long pfn)
{
pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
pmd_t *pmd;
- pte_t *pte;
+ pte_t pte;
if(pgd_none(*pgd))
return 0;
	pmd = pmd_offset(pgd, addr);
	if(pmd_none(*pmd) || pmd_bad(*pmd))
return 0;
- pte = pte_offset_map(pmd, addr);
+ /* We cannot take the pte lock here: flush_cache_page is usually
+ * called with pte lock already held. Whereas flush_dcache_page
+ * takes flush_dcache_mmap_lock, which is lower in the hierarchy:
+ * the vma itself is secure, but the pte might come or go racily.
+ */
+ pte = *pte_offset_map(pmd, addr);
+ /* But pte_unmap() does nothing on this architecture */
- /* The PA flush mappings show up as pte_none, but they're
- * valid none the less */
- if(pte_none(*pte) && ((pte_val(*pte) & _PAGE_FLUSH) == 0))
+ /* Filter out coincidental file entries and swap entries */
+ if (!(pte_val(pte) & (_PAGE_FLUSH|_PAGE_PRESENT)))
return 0;
- return 1;
-}
+ return pte_pfn(pte) == pfn;
+}
/* Private function to flush a page from the cache of a non-current
* process. cr25 contains the Page Directory of the current user
}
static inline void
-flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
+flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
BUG_ON(!vma->vm_mm->context);
- if(likely(translation_exists(vma, vmaddr)))
+ if (likely(translation_exists(vma, vmaddr, pfn)))
__flush_cache_page(vma, vmaddr);
}
+
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void);
#endif
+
+#endif /* _PARISC_CACHEFLUSH_H */