vserver 1.9.3
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 1168f86..c74a47e 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -580,12 +580,16 @@ static inline void ptep_mkdirty(pte_t *ptep)
 
 static inline void
 ptep_establish(struct vm_area_struct *vma, 
-              unsigned long address, pte_t *ptep, pte_t entry)
+              unsigned long address, pte_t *ptep,
+              pte_t entry)
 {
        ptep_clear_flush(vma, address, ptep);
        set_pte(ptep, entry);
 }
 
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
+       ptep_establish(__vma, __address, __ptep, __entry)
+
 /*
  * Test and clear dirty bit in storage key.
  * We can't clear the changed bit atomically. This is a potential
@@ -593,17 +597,13 @@ ptep_establish(struct vm_area_struct *vma,
  * should therefore only be called if it is not mapped in any
  * address space.
  */
-#define page_test_and_clear_dirty(page)                                          \
+#define page_test_and_clear_dirty(_page)                                 \
 ({                                                                       \
-       struct page *__page = (page);                                     \
+       struct page *__page = (_page);                                    \
        unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT);  \
-       int __skey;                                                       \
-       asm volatile ("iske %0,%1" : "=d" (__skey) : "a" (__physpage));   \
-       if (__skey & _PAGE_CHANGED) {                                     \
-               asm volatile ("sske %0,%1"                                \
-                             : : "d" (__skey & ~_PAGE_CHANGED),          \
-                                 "a" (__physpage));                      \
-       }                                                                 \
+       int __skey = page_get_storage_key(__physpage);                    \
+       if (__skey & _PAGE_CHANGED)                                       \
+               page_set_storage_key(__physpage, __skey & ~_PAGE_CHANGED);\
        (__skey & _PAGE_CHANGED);                                         \
 })
 
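The open-coded "iske"/"sske" sequences dropped from page_test_and_clear_dirty() above are replaced by the storage-key accessors from asm-s390/page.h. For reference, a minimal sketch of those helpers, reconstructed from the removed inline assembly (illustrative only, not taken verbatim from that header):

static inline unsigned long page_get_storage_key(unsigned long addr)
{
	unsigned long skey;

	/* ISKE: read the storage key of the 4K frame at physical addr. */
	asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr));
	return skey;
}

static inline void page_set_storage_key(unsigned long addr, unsigned long skey)
{
	/* SSKE: write the storage key, e.g. with _PAGE_CHANGED cleared. */
	asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
}

Factoring the assembly into these helpers keeps pgtable.h free of raw storage-key instructions and lets the SetPageUptodate() override above reuse page_test_and_clear_dirty() instead of open-coding another "sske".
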
@@ -650,10 +650,11 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
        __pte;                                                            \
 })
 
-#define arch_set_page_uptodate(__page)                                   \
-       do {                                                              \
-               asm volatile ("sske %0,%1" : : "d" (0),                   \
-                             "a" (__pa((__page-mem_map) << PAGE_SHIFT)));\
+#define SetPageUptodate(_page) \
+       do {                                                                  \
+               struct page *__page = (_page);                                \
+               if (!test_and_set_bit(PG_uptodate, &__page->flags))           \
+                       page_test_and_clear_dirty(_page);                     \
        } while (0)
 
 #ifdef __s390x__
@@ -760,8 +761,6 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 #define __pte_to_swp_entry(pte)        ((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)  ((pte_t) { (x).val })
 
-typedef pte_t *pte_addr_t;
-
 #ifndef __s390x__
 # define PTE_FILE_MAX_BITS     26
 #else /* __s390x__ */
@@ -784,11 +783,8 @@ typedef pte_t *pte_addr_t;
  */
 #define pgtable_cache_init()   do { } while (0)
 
-#ifdef __s390x__
-# define HAVE_ARCH_UNMAPPED_AREA
-#endif /* __s390x__ */
-
 #define __HAVE_ARCH_PTEP_ESTABLISH
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
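
The new __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS define tells the generic code that ptep_set_access_flags() is provided by the architecture; on s390 it simply expands to ptep_establish(), i.e. flush the old translation and install the updated PTE, with the __dirty argument ignored. A hedged sketch of the kind of caller this hook serves (the helper below is hypothetical, not kernel code):

/* Hypothetical illustration: update the access/dirty bits of a
 * present PTE after a minor fault.  On s390 the final call expands
 * to ptep_establish(vma, address, ptep, entry). */
static inline void touch_pte(struct vm_area_struct *vma,
			     unsigned long address, pte_t *ptep,
			     int write_access)
{
	pte_t entry = *ptep;

	entry = pte_mkyoung(entry);
	if (write_access)
		entry = pte_mkdirty(entry);
	ptep_set_access_flags(vma, address, ptep, entry, write_access);
}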