/*
 * highmem.h: virtual kernel memory mappings for high memory
 *
 * PowerPC version, stolen from the i386 version.
 *
 * Used in CONFIG_HIGHMEM systems for memory pages which
 * are not addressable by direct kernel virtual addresses.
 *
 * Copyright (C) 1999 Gerhard Wichert, Siemens AG
 *                    Gerhard.Wichert@pdb.siemens.de
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * up to 16 Terabyte physical memory. With current x86 CPUs
 * we now support up to 64 Gigabytes physical RAM.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */
#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#ifdef __KERNEL__
#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/kmap_types.h>
#include <asm/tlbflush.h>
/* undef for production */
#define HIGHMEM_DEBUG 1

extern pte_t *kmap_pte;
extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;

extern void kmap_init(void) __init;
/*
 * Right now we initialize only a single pte table. It can be extended
 * easily, subsequent pte tables have to be allocated in one physical
 * chunk of RAM.
 */
#define PKMAP_BASE      CONFIG_HIGHMEM_START
#define LAST_PKMAP      (1 << PTE_SHIFT)
#define LAST_PKMAP_MASK (LAST_PKMAP-1)
#define PKMAP_NR(virt)  (((virt)-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))
#define KMAP_FIX_BEGIN  (PKMAP_BASE + 0x00400000UL)
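/*
 * Worked example (illustrative; assumes 4K pages, i.e. PAGE_SHIFT == 12,
 * and a hypothetical CONFIG_HIGHMEM_START of 0xfe000000): the pkmap slot
 * for virtual address 0xfe003000 is PKMAP_NR(0xfe003000) == 3, and
 * PKMAP_ADDR(3) maps back to 0xfe003000.  The fixed kmap_atomic slots
 * start 4MB above the pkmap area, so KMAP_FIX_BEGIN == 0xfe400000.
 */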
extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page);
static inline void *kmap(struct page *page)
{
        BUG_ON(in_interrupt());
        if (page < highmem_start_page)
                return page_address(page);
        return kmap_high(page);
}
static inline void kunmap(struct page *page)
{
        BUG_ON(in_interrupt());
        if (page < highmem_start_page)
                return;
        kunmap_high(page);
}
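/*
 * Usage sketch (illustrative only, not part of this header): a caller
 * that may be handed a highmem page brackets the access with
 * kmap/kunmap.  kmap() may sleep, so this must not be used in
 * interrupt context.  example_zero_page() is a hypothetical helper:
 *
 *      static void example_zero_page(struct page *page)
 *      {
 *              char *addr = kmap(page);
 *              memset(addr, 0, PAGE_SIZE);
 *              kunmap(page);
 *      }
 */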
/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface. But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.  A usage sketch follows kunmap_atomic() below.
 */
static inline void *kmap_atomic(struct page *page, enum km_type type)
{
        unsigned int idx;
        unsigned long vaddr;

        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
        preempt_disable();
        if (page < highmem_start_page)
                return page_address(page);
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = KMAP_FIX_BEGIN + idx * PAGE_SIZE;
#ifdef HIGHMEM_DEBUG
        BUG_ON(!pte_none(*(kmap_pte+idx)));
#endif
        set_pte(kmap_pte+idx, mk_pte(page, kmap_prot));
        flush_tlb_page(0, vaddr);
        return (void*) vaddr;
}
static inline void kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef HIGHMEM_DEBUG
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        unsigned int idx = type + KM_TYPE_NR*smp_processor_id();

        if (vaddr < KMAP_FIX_BEGIN) { // FIXME
                preempt_enable();
                preempt_check_resched();
                return;
        }

        BUG_ON(vaddr != KMAP_FIX_BEGIN + idx * PAGE_SIZE);

        /*
         * force other mappings to Oops if they try to access
         * this pte without first remapping it
         */
        pte_clear(kmap_pte+idx);
        flush_tlb_page(0, vaddr);
#endif
        preempt_enable();
        preempt_check_resched();
}
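/*
 * Usage sketch (illustrative only, not part of this header):
 * kmap_atomic/kunmap_atomic may be used where sleeping is not
 * allowed, provided the mapping is dropped before the task can be
 * rescheduled.  example_zero_page_atomic() is a hypothetical helper:
 *
 *      static void example_zero_page_atomic(struct page *page)
 *      {
 *              char *addr = kmap_atomic(page, KM_USER0);
 *              memset(addr, 0, PAGE_SIZE);
 *              kunmap_atomic(addr, KM_USER0);
 *      }
 */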
static inline struct page *kmap_atomic_to_page(void *ptr)
{
        unsigned long idx, vaddr = (unsigned long) ptr;

        if (vaddr < KMAP_FIX_BEGIN)
                return virt_to_page(ptr);

        idx = (vaddr - KMAP_FIX_BEGIN) >> PAGE_SHIFT;
        return pte_page(kmap_pte[idx]);
}
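/*
 * Example (illustrative): given an address returned by kmap_atomic(),
 * kmap_atomic_to_page() recovers the backing struct page, e.g. to
 * inspect its flags.  example_mapped_page_dirty() is hypothetical:
 *
 *      static int example_mapped_page_dirty(void *kvaddr)
 *      {
 *              struct page *page = kmap_atomic_to_page(kvaddr);
 *              return PageDirty(page);
 *      }
 */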
#define flush_cache_kmaps()     flush_cache_all()

#endif /* __KERNEL__ */

#endif /* _ASM_HIGHMEM_H */