fedora core 6 1.2949 + vserver 2.2.0
diff --git a/arch/i386/mm/highmem.c b/arch/i386/mm/highmem.c
index 5817532..e0fa6cb 100644
--- a/arch/i386/mm/highmem.c
+++ b/arch/i386/mm/highmem.c
@@ -1,9 +1,10 @@
 #include <linux/highmem.h>
+#include <linux/module.h>
 
 void *kmap(struct page *page)
 {
 	might_sleep();
-	if (page < highmem_start_page)
+	if (!PageHighMem(page))
 		return page_address(page);
 	return kmap_high(page);
 }
@@ -12,7 +13,7 @@ void kunmap(struct page *page)
 {
 	if (in_interrupt())
 		BUG();
-	if (page < highmem_start_page)
+	if (!PageHighMem(page))
 		return;
 	kunmap_high(page);
 }
@@ -31,47 +32,57 @@ void *kmap_atomic(struct page *page, enum km_type type)
 	unsigned long vaddr;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-	inc_preempt_count();
-	if (page < highmem_start_page)
+	pagefault_disable();
+	if (!PageHighMem(page))
 		return page_address(page);
 
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
 	if (!pte_none(*(kmap_pte-idx)))
 		BUG();
-#endif
 	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
-	__flush_tlb_one(vaddr);
 
 	return (void*) vaddr;
 }
 
 void kunmap_atomic(void *kvaddr, enum km_type type)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
 	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
-	if (vaddr < FIXADDR_START) { // FIXME
-		dec_preempt_count();
-		preempt_check_resched();
-		return;
-	}
-
-	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
-		BUG();
-
 	/*
-	 * force other mappings to Oops if they'll try to access
-	 * this pte without first remap it
+	 * Force other mappings to Oops if they'll try to access this pte
+	 * without first remap it. Keeping stale mappings around is a bad idea
+	 * also, in case the page changes cacheability attributes or becomes
+	 * a protected page in a hypervisor.
 	 */
-	pte_clear(kmap_pte-idx);
-	__flush_tlb_one(vaddr);
+	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+		kpte_clear_flush(kmap_pte-idx, vaddr);
+	else {
+#ifdef CONFIG_DEBUG_HIGHMEM
+		BUG_ON(vaddr < PAGE_OFFSET);
+		BUG_ON(vaddr >= (unsigned long)high_memory);
 #endif
+	}
+
+	pagefault_enable();
+}
+
+/* This is the same as kmap_atomic() but can map memory that doesn't
+ * have a struct page associated with it.
+ */
+void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+{
+	enum fixed_addresses idx;
+	unsigned long vaddr;
 
-	dec_preempt_count();
-	preempt_check_resched();
+	pagefault_disable();
+
+	idx = type + KM_TYPE_NR*smp_processor_id();
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
+
+	return (void*) vaddr;
 }
 
 struct page *kmap_atomic_to_page(void *ptr)
@@ -87,3 +98,8 @@ struct page *kmap_atomic_to_page(void *ptr)
 	return pte_page(*pte);
 }
 
+EXPORT_SYMBOL(kmap);
+EXPORT_SYMBOL(kunmap);
+EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic);
+EXPORT_SYMBOL(kmap_atomic_to_page);
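
For reference, this is how callers consume the reworked API after this patch: kmap_atomic()/kunmap_atomic() now bracket the mapping with pagefault_disable()/pagefault_enable() instead of raw preempt-count manipulation, and the new kmap_atomic_pfn() maps a bare page frame number that has no struct page behind it. A minimal sketch against a kernel tree of this vintage (km_type-slot API, KM_USER0 slot); both helper names below are hypothetical, not part of the patch:

#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical helper: zero a (possibly highmem) page from atomic context. */
static void zero_page_atomic(struct page *page)
{
	/* kmap_atomic() now calls pagefault_disable() internally. */
	void *vaddr = kmap_atomic(page, KM_USER0);

	memset(vaddr, 0, PAGE_SIZE);
	/* kunmap_atomic() tears down the fixmap pte (when one was used)
	 * and calls pagefault_enable(). */
	kunmap_atomic(vaddr, KM_USER0);
}

/* Hypothetical helper: read the first word of a frame with no struct page
 * (e.g. a reserved or device frame), via the new kmap_atomic_pfn(). */
static u32 read_first_word_of_pfn(unsigned long pfn)
{
	void *vaddr = kmap_atomic_pfn(pfn, KM_USER0);
	u32 val = *(u32 *)vaddr;

	kunmap_atomic(vaddr, KM_USER0);
	return val;
}

The EXPORT_SYMBOL lines at the end (together with the added linux/module.h include) are what make these functions linkable from loadable modules; e.g. a hypothetical demo module can now do:

#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/mm.h>

/* Hypothetical demo: touch a highmem page via the exported kmap()/kunmap(). */
static int __init kmap_demo_init(void)
{
	struct page *page = alloc_page(GFP_HIGHUSER);
	char *p;

	if (!page)
		return -ENOMEM;
	p = kmap(page);		/* resolvable here only because of EXPORT_SYMBOL(kmap) */
	p[0] = 0;
	kunmap(page);
	__free_page(page);
	return 0;
}

static void __exit kmap_demo_exit(void)
{
}

module_init(kmap_demo_init);
module_exit(kmap_demo_exit);
MODULE_LICENSE("GPL");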