/*
 * atomic_kmap.h: temporary virtual kernel memory mappings
 *
 * Copyright (C) 2003 Ingo Molnar <mingo@redhat.com>
 */

#ifndef _ASM_ATOMIC_KMAP_H
#define _ASM_ATOMIC_KMAP_H

#ifdef __KERNEL__

#include <linux/config.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_DEBUG_HIGHMEM
#define HIGHMEM_DEBUG 1
#else
#define HIGHMEM_DEBUG 0
#endif

extern pte_t *kmap_pte;
#define kmap_prot PAGE_KERNEL

#define PKMAP_BASE (0xff000000UL)
#define NR_SHARED_PMDS ((0xffffffff-PKMAP_BASE+1)/PMD_SIZE)
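/*
 * For reference: PKMAP_BASE..0xffffffff spans 16 MB, so NR_SHARED_PMDS
 * works out to 4 with a 4 MB PMD_SIZE (non-PAE) or 8 with a 2 MB
 * PMD_SIZE (PAE) -- the pmd slots every pagetable shares for this
 * fixmap/kmap area.
 */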

static inline unsigned long __kmap_atomic_vaddr(enum km_type type)
{
	enum fixed_addresses idx;

	idx = type + KM_TYPE_NR*smp_processor_id();
	return __fix_to_virt(FIX_KMAP_BEGIN + idx);
}

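/*
 * Like __kmap_atomic() below, but does not flush the TLB for the new
 * mapping; the caller is presumably responsible for a later flush.
 */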
static inline void *__kmap_atomic_noflush(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	/*
	 * NOTE: entries that rely on some secondary TLB-flush
	 * effect must not be global:
	 */
	set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));

	return (void*) vaddr;
}

static inline void *__kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#if HIGHMEM_DEBUG
	BUG_ON(!pte_none(*(kmap_pte-idx)));
#else
	/*
	 * Performance optimization - do not flush if the new
	 * pte is the same as the old one:
	 */
	if (pte_val(*(kmap_pte-idx)) == pte_val(mk_pte(page, kmap_prot)))
		return (void *) vaddr;
#endif
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
	__flush_tlb_one(vaddr);

	return (void*) vaddr;
}
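/*
 * Minimal usage sketch (assumes the caller has already disabled
 * preemption, as the generic kmap_atomic() wrappers do, and owns the
 * KM_USER0 slot; "buf" and "len" are illustrative only):
 *
 *	char *vaddr = __kmap_atomic(page, KM_USER0);
 *	memcpy(vaddr, buf, len);
 *	__kunmap_atomic(vaddr, KM_USER0);
 */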

static inline void __kunmap_atomic(void *kvaddr, enum km_type type)
{
#if HIGHMEM_DEBUG
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));
	/*
	 * Force other mappings to Oops if they try to access
	 * this pte without first remapping it:
	 */
	pte_clear(kmap_pte-idx);
	__flush_tlb_one(vaddr);
#endif
}

#define __kunmap_atomic_type(type) \
		__kunmap_atomic((void *)__kmap_atomic_vaddr(type), (type))
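/*
 * Convenience form for callers that only track the km_type slot, not
 * the returned address, e.g.:
 *
 *	__kunmap_atomic_type(KM_USER0);
 */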

#endif /* __KERNEL__ */

#endif /* _ASM_ATOMIC_KMAP_H */