#ifndef _X86_64_PGTABLE_H
#define _X86_64_PGTABLE_H

/*
 * This file contains the functions and defines necessary to modify and use
 * the x86-64 page table tree.
 */
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <asm/bitops.h>
#include <linux/threads.h>
#include <linux/sched.h>
#include <asm/hypervisor.h>

extern pud_t level3_user_pgt[512];
extern pud_t init_level4_user_pgt[];

extern void xen_init_pt(void);

#define virt_to_ptep(__va)                                              \
({                                                                      \
        pgd_t *__pgd = pgd_offset_k((unsigned long)(__va));             \
        pud_t *__pud = pud_offset(__pgd, (unsigned long)(__va));        \
        pmd_t *__pmd = pmd_offset(__pud, (unsigned long)(__va));        \
        pte_offset_kernel(__pmd, (unsigned long)(__va));                \
})

#define arbitrary_virt_to_machine(__va)                                 \
({                                                                      \
        maddr_t m = (maddr_t)pte_mfn(*virt_to_ptep(__va)) << PAGE_SHIFT;\
        m | ((unsigned long)(__va) & (PAGE_SIZE-1));                    \
})
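
/*
 * Usage sketch: both helpers matter when handing a kernel virtual address
 * to the hypervisor, which speaks machine addresses.  Assuming some
 * kernel-mapped object `obj`:
 *
 *      pte_t *ptep = virt_to_ptep(&obj);
 *      maddr_t ma  = arbitrary_virt_to_machine(&obj);
 *
 * virt_to_ptep() walks pgd -> pud -> pmd and yields the kernel PTE slot;
 * arbitrary_virt_to_machine() shifts that PTE's MFN up by PAGE_SHIFT and
 * ORs back the offset of the address within its page.
 */
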
extern pud_t level3_kernel_pgt[512];
extern pud_t level3_physmem_pgt[512];
extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pgd_t init_level4_pgt[];
extern pgd_t boot_level4_pgt[];
extern unsigned long __supported_pte_mask;

#define swapper_pg_dir init_level4_pgt

extern void paging_init(void);
extern void clear_kernel_mapping(unsigned long addr, unsigned long size);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

/*
 * PGDIR_SHIFT determines what a top-level page table entry can map
 */
#define PGDIR_SHIFT     39
#define PTRS_PER_PGD    512

/*
 * PUD_SHIFT determines the size of the area a third-level page table can map
 */
#define PUD_SHIFT       30
#define PTRS_PER_PUD    512

/*
 * PMD_SHIFT determines the size of the area a middle-level
 * page table can map
 */
#define PMD_SHIFT       21
#define PTRS_PER_PMD    512

/*
 * entries per page directory level
 */
#define PTRS_PER_PTE    512
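
/*
 * Resulting 48-bit virtual address split with 4 KB pages:
 *
 *   47..39 PGD index | 38..30 PUD index | 29..21 PMD index |
 *   20..12 PTE index | 11..0 page offset
 *
 * Each level resolves 9 bits, hence the 512 (2^9) eight-byte entries that
 * exactly fill one 4 KB page at every level.
 */
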
#define pte_ERROR(e) \
        printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
#define pud_ERROR(e) \
        printk("%s:%d: bad pud %p(%016lx).\n", __FILE__, __LINE__, &(e), pud_val(e))
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))

#define pgd_none(x)     (!pgd_val(x))
#define pud_none(x)     (!pud_val(x))

#define set_pte_batched(pteptr, pteval) \
        queue_l1_entry_update(pteptr, (pteval))

static inline void set_pte(pte_t *dst, pte_t val)
{
        *dst = val;
}

#define set_pte_at(_mm,addr,ptep,pteval) do {                           \
        if (((_mm) != current->mm && (_mm) != &init_mm) ||              \
            HYPERVISOR_update_va_mapping((addr), (pteval), 0))          \
                set_pte((ptep), (pteval));                              \
} while (0)
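
/*
 * Note on set_pte_at(): for the current mm (or init_mm) we first try
 * HYPERVISOR_update_va_mapping(), which validates and installs the PTE in
 * one hypercall; a nonzero return (e.g. the VA is not mapped by this
 * pagetable) drops us into the plain set_pte() path.  Updates to a foreign
 * mm skip the hypercall outright, since it only acts on the current
 * address space.
 */
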
#define set_pmd(pmdptr, pmdval) xen_l2_entry_update(pmdptr, (pmdval))

#define set_pud(pudptr, pudval) xen_l3_entry_update(pudptr, (pudval))

static inline void pud_clear(pud_t *pud)
{
        set_pud(pud, __pud(0));
}

#define set_pgd(pgdptr, pgdval) xen_l4_entry_update(pgdptr, (pgdval))

#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)

static inline void pgd_clear(pgd_t *pgd)
{
        set_pgd(pgd, __pgd(0));
        set_pgd(__user_pgd(pgd), __pgd(0));
}
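
/*
 * Xen/x86-64 keeps a second, user-mode L4 pagetable immediately after the
 * kernel one, which is why __user_pgd() is simply pgd + PTRS_PER_PGD and
 * why pgd_clear() must wipe the entry in both copies.
 */
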
/*
 * A note on implementation of this atomic 'get-and-clear' operation.
 * This is actually very simple because Xen Linux can only run on a single
 * processor. Therefore, we cannot race other processors setting the 'accessed'
 * or 'dirty' bits on a page-table entry.
 * Even if pages are shared between domains, that is not a problem because
 * each domain will have separate page tables, with their own versions of
 * accessed & dirty state.
 */
#define ptep_get_and_clear(mm,addr,xp)  __pte_ma(xchg(&(xp)->pte, 0))

#if 0   /* disabled: would collide with the function-like macro above */
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *xp)
{
        pte_t pte = *xp;
        if (pte.pte)
                set_pte(xp, __pte_ma(0));
        return pte;
}
#endif

static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
{
        pte_t pte;
        if (full) {
                pte = *ptep;
                *ptep = __pte(0);
        } else
                pte = ptep_get_and_clear(mm, addr, ptep);
        return pte;
}
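
/*
 * The `full` flag signals that the caller is tearing down the entire mm
 * (exit/munmap of the whole range), so nothing can race on this pagetable
 * and a plain load-and-store suffices; otherwise the atomic xchg-based
 * ptep_get_and_clear() above is used.
 */
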
#define pte_same(a, b)  ((a).pte == (b).pte)

#define pte_pgprot(a)   (__pgprot((a).pte & ~PHYSICAL_PAGE_MASK))

#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))
#define PUD_SIZE        (1UL << PUD_SHIFT)
#define PUD_MASK        (~(PUD_SIZE-1))
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD       ((TASK_SIZE-1)/PGDIR_SIZE+1)
#define FIRST_USER_ADDRESS      0

#define MAXMEM           0x3fffffffffffUL
#define VMALLOC_START    0xffffc20000000000UL
#define VMALLOC_END      0xffffe1ffffffffffUL
#define MODULES_VADDR    0xffffffff88000000UL
#define MODULES_END      0xfffffffffff00000UL
#define MODULES_LEN      (MODULES_END - MODULES_VADDR)

#define _PAGE_BIT_PRESENT       0
#define _PAGE_BIT_RW            1
#define _PAGE_BIT_USER          2
#define _PAGE_BIT_PWT           3
#define _PAGE_BIT_PCD           4
#define _PAGE_BIT_ACCESSED      5
#define _PAGE_BIT_DIRTY         6
#define _PAGE_BIT_PSE           7       /* 2MB page */
#define _PAGE_BIT_GLOBAL        8       /* Global TLB entry PPro+ */
#define _PAGE_BIT_NX            63      /* No execute: only valid after cpuid check */

#define _PAGE_PRESENT   0x001
#define _PAGE_RW        0x002
#define _PAGE_USER      0x004
#define _PAGE_PWT       0x008
#define _PAGE_PCD       0x010
#define _PAGE_ACCESSED  0x020
#define _PAGE_DIRTY     0x040
#define _PAGE_PSE       0x080   /* 2MB page */
#define _PAGE_FILE      0x040   /* nonlinear file mapping, saved PTE; unset:swap */
#define _PAGE_GLOBAL    0x100   /* Global TLB entry */

#define _PAGE_PROTNONE  0x080   /* If not present */
#define _PAGE_NX        (1UL<<_PAGE_BIT_NX)

#define _PAGE_TABLE     (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE   (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)

#define _PAGE_CHG_MASK  (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE       __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED     __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY PAGE_COPY_NOEXEC
#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define __PAGE_KERNEL \
        (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
#define __PAGE_KERNEL_EXEC \
        (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define __PAGE_KERNEL_NOCACHE \
        (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED | _PAGE_NX)
#define __PAGE_KERNEL_RO \
        (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
#define __PAGE_KERNEL_VSYSCALL \
        (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define __PAGE_KERNEL_VSYSCALL_NOCACHE \
        (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_PCD)
#define __PAGE_KERNEL_LARGE \
        (__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC \
        (__PAGE_KERNEL_EXEC | _PAGE_PSE)
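
/*
 * Worked example: __PAGE_KERNEL is _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
 * _PAGE_ACCESSED | _PAGE_NX, i.e. 0x63 in the low bits plus bit 63, giving
 * 0x8000000000000063 when NX is supported (__supported_pte_mask strips the
 * NX bit otherwise).  DIRTY and ACCESSED are pre-set so the CPU never has
 * to do a locked read-modify-write on kernel PTEs just to record them.
 */
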
/*
 * We don't support GLOBAL pages in xenolinux64.
 */
#define MAKE_GLOBAL(x) __pgprot((x))

#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
#define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_VSYSCALL32 __pgprot(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VSYSCALL MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_LARGE MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_VSYSCALL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)

#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY_EXEC
#define __P101  PAGE_READONLY_EXEC
#define __P110  PAGE_COPY_EXEC
#define __P111  PAGE_COPY_EXEC

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY_EXEC
#define __S101  PAGE_READONLY_EXEC
#define __S110  PAGE_SHARED_EXEC
#define __S111  PAGE_SHARED_EXEC
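
/*
 * The __P/__S tables are indexed by the mmap protection bits in xwr order:
 * __Pxwr is the MAP_PRIVATE flavour, __Sxwr the MAP_SHARED one.  For
 * instance PROT_READ|PROT_WRITE private mappings use __P011 == PAGE_COPY
 * (write-protected so the fault handler can do copy-on-write), while the
 * shared case gets __S011 == PAGE_SHARED with _PAGE_RW set.
 */
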
static inline unsigned long pgd_bad(pgd_t pgd)
{
        return pgd_val(pgd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
}

static inline unsigned long pud_bad(pud_t pud)
{
        return pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
}

static inline unsigned long pmd_bad(pmd_t pmd)
{
        return pmd_val(pmd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
}

#define pte_none(x)     (!(x).pte)
#define pte_present(x)  ((x).pte & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(mm,addr,xp)   do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)

#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* FIXME: is this right? */

#define pte_page(x)     pfn_to_page(pte_pfn(x))
#define pte_pfn(x)      mfn_to_local_pfn(pte_mfn(x))
#define pte_mfn(_pte)   (((_pte).pte & PTE_MASK) >> PAGE_SHIFT)
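
/*
 * Under Xen a PTE holds a *machine* frame number (MFN) while the rest of
 * the kernel thinks in (pseudo-)physical frame numbers (PFNs).  pte_mfn()
 * extracts the raw machine frame; pte_pfn() translates it back through
 * mfn_to_local_pfn(), so pte_page() ends up at the struct page for the
 * domain-local frame backing the mapping.
 */
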
static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
        pte_t pte;
        pte.pte = (pfn_to_mfn(page_nr) << PAGE_SHIFT);  /* PFN -> MFN */
        pte.pte |= pgprot_val(pgprot);
        pte.pte &= __supported_pte_mask;
        return pte;
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#define __pte_val(x)    ((x).pte)

#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT)
static inline int pte_user(pte_t pte)           { return __pte_val(pte) & _PAGE_USER; }
static inline int pte_read(pte_t pte)           { return __pte_val(pte) & _PAGE_USER; }
static inline int pte_exec(pte_t pte)           { return !(__pte_val(pte) & _PAGE_NX); }
static inline int pte_dirty(pte_t pte)          { return __pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)          { return __pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)          { return __pte_val(pte) & _PAGE_RW; }
static inline int pte_file(pte_t pte)           { return __pte_val(pte) & _PAGE_FILE; }
static inline int pte_huge(pte_t pte)           { return __pte_val(pte) & _PAGE_PSE; }

static inline pte_t pte_rdprotect(pte_t pte)    { __pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_exprotect(pte_t pte)    { __pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_mkclean(pte_t pte)      { __pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)        { __pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)    { __pte_val(pte) &= ~_PAGE_RW; return pte; }
static inline pte_t pte_mkread(pte_t pte)       { __pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte)       { __pte_val(pte) &= ~_PAGE_NX; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)      { __pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)      { __pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)      { __pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkhuge(pte_t pte)       { __pte_val(pte) |= _PAGE_PSE; return pte; }
static inline pte_t pte_clrhuge(pte_t pte)      { __pte_val(pte) &= ~_PAGE_PSE; return pte; }
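
/*
 * Usage sketch for the helpers above when downgrading a mapping, assuming
 * ptep points at a present PTE:
 *
 *      pte_t pte = *ptep;
 *      if (pte_write(pte))
 *              set_pte(ptep, pte_wrprotect(pte));
 *
 * Each pte_mkX()/pte_Xprotect() helper only edits its by-value copy;
 * nothing reaches the page table until set_pte()/set_pte_at() writes it
 * back.
 */
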
struct vm_area_struct;

static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
        pte_t pte = *ptep;
        int ret = pte_dirty(pte);
        if (ret)
                set_pte(ptep, pte_mkclean(pte));
        return ret;
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
        pte_t pte = *ptep;
        int ret = pte_young(pte);
        if (ret)
                set_pte(ptep, pte_mkold(pte));
        return ret;
}

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        pte_t pte = *ptep;
        if (pte_write(pte))
                set_pte(ptep, pte_wrprotect(pte));
}

/*
 * Macro to mark a page protection value as "uncacheable".
 */
#define pgprot_noncached(prot)  (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))

static inline int pmd_large(pmd_t pte)
{
        return (pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

/*
 * Level 4 access.
 * Never use these in the common code.
 */
#define pgd_page_vaddr(pgd) ((unsigned long) __va((unsigned long)pgd_val(pgd) & PTE_MASK))
#define pgd_page(pgd)           (pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT))
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
#define pgd_offset_k(address) ((pgd_t *)(init_level4_pgt + pgd_index(address)))
#define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT)
#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)

/* PUD - Level 3 access */
/* To find an entry in a page-table-directory. */
#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK))
#define pud_page(pud)           (pfn_to_page(pud_val(pud) >> PAGE_SHIFT))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pud_offset(pgd, address) ((pud_t *) pgd_page_vaddr(*(pgd)) + pud_index(address))
static inline int pud_present(pud_t pud)        { return !pud_none(pud); }

/* Find the correct pud via the hidden fourth page-table level: */

/* This accesses the reference page table of the boot CPU.
   Other CPUs get synced lazily via the page fault handler. */
static inline pud_t *pud_offset_k(pgd_t *pgd, unsigned long address)
{
        return pud_offset(pgd_offset_k(address), address);
}

/* PMD - Level 2 access */
#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
#define pmd_page(pmd)           (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))

#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pmd_offset(dir, address) ((pmd_t *) pud_page_vaddr(*(dir)) + \
                                  pmd_index(address))
#define pmd_none(x)     (!pmd_val(x))
/* pmd_present doesn't just test the _PAGE_PRESENT bit since the writable
   page tables feature (wr.p.t.) can temporarily clear it. */
#define pmd_present(x)  (pmd_val(x))
#define pmd_clear(xp)   do { set_pmd(xp, __pmd(0)); } while (0)
#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
#define pmd_pfn(x)  ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)

#define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
#define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE })
#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT
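
/*
 * Round trip sketch for a nonlinear-mapping PTE: pgoff_to_pte(off) parks
 * the file offset in the frame-number field and sets _PAGE_FILE while
 * leaving _PAGE_PRESENT clear; pte_to_pgoff() undoes the shift.  Up to
 * PTE_FILE_MAX_BITS of offset survive in such a not-present PTE.
 */
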
/* PTE - Level 1 access. */

/* page, protection -> pte */
#define mk_pte(page, pgprot)    pfn_pte(page_to_pfn(page), (pgprot))
#define mk_pte_huge(entry) (pte_val(entry) |= _PAGE_PRESENT | _PAGE_PSE)

/* physical address -> PTE */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
        pte_t pte;
        pte.pte = physpage | pgprot_val(pgprot);
        pte.pte &= __supported_pte_mask;
        return pte;
}

/* Change flags of a PTE */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte.pte &= _PAGE_CHG_MASK;
        pte.pte |= pgprot_val(newprot);
        pte.pte &= __supported_pte_mask;
        return pte;
}

#define pte_index(address) \
                (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \
                                         pte_index(address))

/* x86-64 always has all page tables mapped. */
#define pte_offset_map(dir,address) pte_offset_kernel(dir,address)
#define pte_offset_map_nested(dir,address) pte_offset_kernel(dir,address)
#define pte_unmap(pte) /* NOP */
#define pte_unmap_nested(pte) /* NOP */

#define update_mmu_cache(vma,address,pte) do { } while (0)

/* We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time. */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#if 0   /* generic variant, superseded by the Xen-aware one below */
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
        do {                                                              \
                if (__dirty) {                                            \
                        set_pte(__ptep, __entry);                         \
                        flush_tlb_page(__vma, __address);                 \
                }                                                         \
        } while (0)
#endif
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
        do {                                                              \
                if (__dirty) {                                            \
                        if (likely((__vma)->vm_mm == current->mm)) {      \
                                BUG_ON(HYPERVISOR_update_va_mapping((__address), (__entry), UVMF_INVLPG|UVMF_MULTI|(unsigned long)((__vma)->vm_mm->cpu_vm_mask.bits))); \
                        } else {                                          \
                                xen_l1_entry_update((__ptep), (__entry)); \
                                flush_tlb_page((__vma), (__address));     \
                        }                                                 \
                }                                                         \
        } while (0)
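
/*
 * As with set_pte_at(): when the faulting mm is current, one
 * HYPERVISOR_update_va_mapping() hypercall both installs the new entry and
 * invalidates the stale TLB entry on every CPU in cpu_vm_mask
 * (UVMF_INVLPG|UVMF_MULTI); for any other mm we issue a direct L1 update
 * and an explicit flush_tlb_page() instead.
 */
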
/* Encode and de-code a swap entry */
#define __swp_type(x)           (((x).val >> 1) & 0x3f)
#define __swp_offset(x)         ((x).val >> 8)
#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)   ((pte_t) { (x).val })
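
/*
 * Swap PTE layout sketch: bit 0 (_PAGE_PRESENT) stays clear, bits 1..6
 * carry the swap type (up to 64 swap areas) and bits 8 and up the swap
 * offset.  E.g. __swp_entry(2, 0x1000) yields (2 << 1) | (0x1000 << 8),
 * and __swp_type()/__swp_offset() simply undo those shifts.
 */
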
extern spinlock_t pgd_lock;
extern struct page *pgd_list;
void vmalloc_sync_all(void);

#endif /* !__ASSEMBLY__ */

extern int kern_addr_valid(unsigned long addr);

#define DOMID_LOCAL (0xFFFFU)

int direct_remap_pfn_range(struct vm_area_struct *vma, unsigned long address,
                           unsigned long mfn, unsigned long size,
                           pgprot_t prot, domid_t domid);

int direct_kernel_remap_pfn_range(unsigned long address, unsigned long mfn,
                                  unsigned long size, pgprot_t prot,
                                  domid_t domid);

int create_lookup_pte_addr(struct mm_struct *mm, unsigned long address,
                           uint64_t *ptep);

int touch_pte_range(struct mm_struct *mm, unsigned long address,
                    unsigned long size);

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)         \
        direct_remap_pfn_range(vma, vaddr, pfn, size, prot, DOMID_IO)

#define MK_IOSPACE_PFN(space, pfn)      (pfn)
#define GET_IOSPACE(pfn)                0
#define GET_PFN(pfn)                    (pfn)

#define HAVE_ARCH_UNMAPPED_AREA

#define pgtable_cache_init()    do { } while (0)
#define check_pgt_cache()       do { } while (0)

#define PAGE_AGP    PAGE_KERNEL_NOCACHE
#define HAVE_PAGE_AGP 1

/* fs/proc/kcore.c */
#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
#define kc_offset_to_vaddr(o) \
   (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))
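
/*
 * kc_offset_to_vaddr() inverts kc_vaddr_to_offset(): if the top implemented
 * bit of the offset is set, the address originally lived in the negative
 * (kernel) half of the canonical space, so the bits above __VIRTUAL_MASK
 * are sign-extended back in with ones.
 */
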
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>

#endif /* _X86_64_PGTABLE_H */