* Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
*/
-#include <linux/config.h>
+#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
+#include <linux/pci.h>
+#include <linux/poison.h>
+#include <linux/dma-mapping.h>
+#include <linux/memory_hotplug.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
+#include <asm/sections.h>
#ifndef Dprintk
#define Dprintk(x...)
#endif
-extern char _stext[];
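+/* Active DMA mapping implementation, set up during PCI/IOMMU init. */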
+struct dma_mapping_ops *dma_ops;
+EXPORT_SYMBOL(dma_ops);
+
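+/* Bootmem reserved below MAX_DMA_PFN; reported via set_dma_reserve(). */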
+static unsigned long dma_reserve __initdata;
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
void show_mem(void)
{
- int i, total = 0, reserved = 0;
- int shared = 0, cached = 0;
+ long i, total = 0, reserved = 0;
+ long shared = 0, cached = 0;
pg_data_t *pgdat;
struct page *page;
- printk("Mem-info:\n");
+ printk(KERN_INFO "Mem-info:\n");
show_free_areas();
- printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
+ printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
- for_each_pgdat(pgdat) {
+ for_each_online_pgdat(pgdat) {
for (i = 0; i < pgdat->node_spanned_pages; ++i) {
- page = pgdat->node_mem_map + i;
- total++;
- if (PageReserved(page))
- reserved++;
- else if (PageSwapCache(page))
- cached++;
- else if (page_count(page))
- shared += page_count(page) - 1;
+ page = pfn_to_page(pgdat->node_start_pfn + i);
+ total++;
+ if (PageReserved(page))
+ reserved++;
+ else if (PageSwapCache(page))
+ cached++;
+ else if (page_count(page))
+ shared += page_count(page) - 1;
}
}
- printk("%d pages of RAM\n", total);
- printk("%d reserved pages\n",reserved);
- printk("%d pages shared\n",shared);
- printk("%d pages swap cached\n",cached);
+	printk(KERN_INFO "%ld pages of RAM\n", total);
+	printk(KERN_INFO "%ld reserved pages\n", reserved);
+	printk(KERN_INFO "%ld pages shared\n", shared);
+	printk(KERN_INFO "%ld pages swap cached\n", cached);
}
-/* References to section boundaries */
-
-extern char _text, _etext, _edata, __bss_start, _end[];
-extern char __init_begin, __init_end;
-
int after_bootmem;
-static void *spp_getpage(void)
+static __init void *spp_getpage(void)
{
void *ptr;
if (after_bootmem)
return ptr;
}
-static void set_pte_phys(unsigned long vaddr,
+static __init void set_pte_phys(unsigned long vaddr,
unsigned long phys, pgprot_t prot)
{
- pml4_t *level4;
pgd_t *pgd;
+ pud_t *pud;
pmd_t *pmd;
pte_t *pte, new_pte;
Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);
- level4 = pml4_offset_k(vaddr);
- if (pml4_none(*level4)) {
- printk("PML4 FIXMAP MISSING, it should be setup in head.S!\n");
+ pgd = pgd_offset_k(vaddr);
+ if (pgd_none(*pgd)) {
+		printk("PGD FIXMAP MISSING, it should be set up in head.S!\n");
return;
}
- pgd = level3_offset_k(level4, vaddr);
- if (pgd_none(*pgd)) {
+ pud = pud_offset(pgd, vaddr);
+ if (pud_none(*pud)) {
pmd = (pmd_t *) spp_getpage();
- set_pgd(pgd, __pgd(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
- if (pmd != pmd_offset(pgd, 0)) {
- printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pgd,0));
+ set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
+ if (pmd != pmd_offset(pud, 0)) {
+ printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
return;
}
}
- pmd = pmd_offset(pgd, vaddr);
+ pmd = pmd_offset(pud, vaddr);
if (pmd_none(*pmd)) {
pte = (pte_t *) spp_getpage();
set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
}
/* NOTE: this is meant to be run only at boot */
-void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
+void __init
+__set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
unsigned long address = __fix_to_virt(idx);
{}
};
-static __init void *alloc_low_page(int *index, unsigned long *phys)
+static __meminit void *alloc_low_page(int *index, unsigned long *phys)
{
struct temp_map *ti;
int i;
unsigned long pfn = table_end++, paddr;
void *adr;
+ if (after_bootmem) {
+ adr = (void *)get_zeroed_page(GFP_ATOMIC);
+ *phys = __pa(adr);
+ return adr;
+ }
+
if (pfn >= end_pfn)
panic("alloc_low_page: ran out of memory");
for (i = 0; temp_mappings[i].allocated; i++) {
ti->allocated = 1;
__flush_tlb();
adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
+ memset(adr, 0, PAGE_SIZE);
*index = i;
*phys = pfn * PAGE_SIZE;
return adr;
}
-static __init void unmap_low_page(int i)
+static __meminit void unmap_low_page(int i)
{
- struct temp_map *ti = &temp_mappings[i];
+ struct temp_map *ti;
+
+ if (after_bootmem)
+ return;
+
+ ti = &temp_mappings[i];
set_pmd(ti->pmd, __pmd(0));
ti->allocated = 0;
}
-static void __init phys_pgd_init(pgd_t *pgd, unsigned long address, unsigned long end)
+/* Must run before zap_low_mappings */
+__init void *early_ioremap(unsigned long addr, unsigned long size)
+{
+ unsigned long map = round_down(addr, LARGE_PAGE_SIZE);
+
+ /* actually usually some more */
+ if (size >= LARGE_PAGE_SIZE) {
+ return NULL;
+ }
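+	/* Map two consecutive large pages so a region that crosses a 2MB
+	   boundary is still fully covered. */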
+ set_pmd(temp_mappings[0].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
+ map += LARGE_PAGE_SIZE;
+ set_pmd(temp_mappings[1].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
+ __flush_tlb();
+ return temp_mappings[0].address + (addr & (LARGE_PAGE_SIZE-1));
+}
+
+/* To avoid virtual aliases later */
+__init void early_iounmap(void *addr, unsigned long size)
+{
+ if ((void *)round_down((unsigned long)addr, LARGE_PAGE_SIZE) != temp_mappings[0].address)
+ printk("early_iounmap: bad address %p\n", addr);
+ set_pmd(temp_mappings[0].pmd, __pmd(0));
+ set_pmd(temp_mappings[1].pmd, __pmd(0));
+ __flush_tlb();
+}
+
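+/* Fill one pmd page with 2MB (PSE) kernel mappings for [address, end);
+   entries that are already present are left untouched. */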
+static void __meminit
+phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
+{
+ int i = pmd_index(address);
+
+ for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
+ unsigned long entry;
+ pmd_t *pmd = pmd_page + pmd_index(address);
+
+ if (address >= end) {
+ if (!after_bootmem)
+ for (; i < PTRS_PER_PMD; i++, pmd++)
+ set_pmd(pmd, __pmd(0));
+ break;
+ }
+
+ if (pmd_val(*pmd))
+ continue;
+
+ entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
+ entry &= __supported_pte_mask;
+ set_pmd(pmd, __pmd(entry));
+ }
+}
+
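+/* Hotplug path: extend an existing pmd under init_mm's page_table_lock. */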
+static void __meminit
+phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
+{
+ pmd_t *pmd = pmd_offset(pud,0);
+ spin_lock(&init_mm.page_table_lock);
+ phys_pmd_init(pmd, address, end);
+ spin_unlock(&init_mm.page_table_lock);
+ __flush_tlb_all();
+}
+
+static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
{
- long i, j;
+ int i = pud_index(addr);
- i = pgd_index(address);
- pgd = pgd + i;
- for (; i < PTRS_PER_PGD; pgd++, i++) {
+
+ for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE ) {
int map;
- unsigned long paddr, pmd_phys;
+ unsigned long pmd_phys;
+ pud_t *pud = pud_page + pud_index(addr);
pmd_t *pmd;
- paddr = (address & PML4_MASK) + i*PGDIR_SIZE;
- if (paddr >= end) {
- for (; i < PTRS_PER_PGD; i++, pgd++)
- set_pgd(pgd, __pgd(0));
+ if (addr >= end)
break;
- }
- if (!e820_mapped(paddr, paddr+PGDIR_SIZE, 0)) {
- set_pgd(pgd, __pgd(0));
+ if (!after_bootmem && !e820_any_mapped(addr,addr+PUD_SIZE,0)) {
+ set_pud(pud, __pud(0));
continue;
}
- pmd = alloc_low_page(&map, &pmd_phys);
- set_pgd(pgd, __pgd(pmd_phys | _KERNPG_TABLE));
- for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
- unsigned long pe;
-
- if (paddr >= end) {
- for (; j < PTRS_PER_PMD; j++, pmd++)
- set_pmd(pmd, __pmd(0));
- break;
- }
- pe = _PAGE_NX|_PAGE_PSE | _KERNPG_TABLE | _PAGE_GLOBAL | paddr;
- pe &= __supported_pte_mask;
- set_pmd(pmd, __pmd(pe));
+ if (pud_val(*pud)) {
+ phys_pmd_update(pud, addr, end);
+ continue;
}
+
+ pmd = alloc_low_page(&map, &pmd_phys);
+ spin_lock(&init_mm.page_table_lock);
+ set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
+ phys_pmd_init(pmd, addr, end);
+ spin_unlock(&init_mm.page_table_lock);
unmap_low_page(map);
}
__flush_tlb();
}
+static void __init find_early_table_space(unsigned long end)
+{
+ unsigned long puds, pmds, tables, start;
+
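+	/* One pud entry per 1GB and one pmd entry per 2MB of mapped memory;
+	   round each table up to whole pages. */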
+ puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
+ pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
+ tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
+ round_up(pmds * sizeof(pmd_t), PAGE_SIZE);
+
+ /* RED-PEN putting page tables only on node 0 could
+ cause a hotspot and fill up ZONE_DMA. The page tables
+ need roughly 0.5KB per GB. */
+ start = 0x8000;
+ table_start = find_e820_area(start, end, tables);
+ if (table_start == -1UL)
+ panic("Cannot find space for the kernel page tables");
+
+ table_start >>= PAGE_SHIFT;
+ table_end = table_start;
+}
+
/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
This runs before bootmem is initialized and gets pages directly from the
physical memory. To access them they are temporarily mapped. */
-void __init init_memory_mapping(void)
+void __meminit init_memory_mapping(unsigned long start, unsigned long end)
{
- unsigned long adr;
- unsigned long end;
unsigned long next;
- unsigned long pgds, pmds, tables;
Dprintk("init_memory_mapping\n");
- end = end_pfn_map << PAGE_SHIFT;
-
/*
* Find space for the kernel direct mapping tables.
* Later we should allocate these tables in the local node of the memory
* mapped. Unfortunately this is done currently before the nodes are
* discovered.
*/
+ if (!after_bootmem)
+ find_early_table_space(end);
- pgds = (end + PGDIR_SIZE - 1) >> PGDIR_SHIFT;
- pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
- tables = round_up(pgds*8, PAGE_SIZE) + round_up(pmds * 8, PAGE_SIZE);
+ start = (unsigned long)__va(start);
+ end = (unsigned long)__va(end);
- table_start = find_e820_area(0x8000, __pa_symbol(&_text), tables);
- if (table_start == -1UL)
- panic("Cannot find space for the kernel page tables");
+ for (; start < end; start = next) {
+ int map;
+ unsigned long pud_phys;
+ pgd_t *pgd = pgd_offset_k(start);
+ pud_t *pud;
- table_start >>= PAGE_SHIFT;
- table_end = table_start;
-
- end += __PAGE_OFFSET; /* turn virtual */
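+	/* For memory hotplug the pgd entry already points at a pud page,
+	   so reuse it; at boot allocate one from the early table area. */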
+ if (after_bootmem)
+ pud = pud_offset(pgd, start & PGDIR_MASK);
+ else
+ pud = alloc_low_page(&map, &pud_phys);
- for (adr = PAGE_OFFSET; adr < end; adr = next) {
- int map;
- unsigned long pgd_phys;
- pgd_t *pgd = alloc_low_page(&map, &pgd_phys);
- next = adr + PML4_SIZE;
+ next = start + PGDIR_SIZE;
if (next > end)
next = end;
- phys_pgd_init(pgd, adr-PAGE_OFFSET, next-PAGE_OFFSET);
- set_pml4(init_level4_pgt + pml4_index(adr), mk_kernel_pml4(pgd_phys));
+ phys_pud_init(pud, __pa(start), __pa(next));
+ if (!after_bootmem)
+ set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
unmap_low_page(map);
}
- asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
+
+ if (!after_bootmem)
+ asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
__flush_tlb_all();
- early_printk("kernel direct mapping tables upto %lx @ %lx-%lx\n", end,
- table_start<<PAGE_SHIFT,
- table_end<<PAGE_SHIFT);
}
-extern struct x8664_pda cpu_pda[NR_CPUS];
-
-static unsigned long low_pml4[NR_CPUS];
-
-void swap_low_mappings(void)
+void __cpuinit zap_low_mappings(int cpu)
{
- int i;
- for (i = 0; i < NR_CPUS; i++) {
- unsigned long t;
- if (!cpu_pda[i].level4_pgt)
- continue;
- t = cpu_pda[i].level4_pgt[0];
- cpu_pda[i].level4_pgt[0] = low_pml4[i];
- low_pml4[i] = t;
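+	/*
+	 * On the boot CPU the low identity mapping can be cleared directly
+	 * in the kernel page table.
+	 */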
+ if (cpu == 0) {
+ pgd_t *pgd = pgd_offset_k(0UL);
+ pgd_clear(pgd);
+ } else {
+ /*
+		 * For APs, zap the low identity mappings by changing the cr3
+		 * to init_level4_pgt and doing a local TLB flush.
+ */
+ asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
}
- flush_tlb_all();
-}
-
-void zap_low_mappings(void)
-{
- swap_low_mappings();
+ __flush_tlb_all();
}
-#ifndef CONFIG_DISCONTIGMEM
+#ifndef CONFIG_NUMA
void __init paging_init(void)
{
- {
- unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
- unsigned int max_dma;
-
- max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
-
- if (end_pfn < max_dma)
- zones_size[ZONE_DMA] = end_pfn;
- else {
- zones_size[ZONE_DMA] = max_dma;
- zones_size[ZONE_NORMAL] = end_pfn - max_dma;
- }
- free_area_init(zones_size);
- }
- return;
+ unsigned long max_zone_pfns[MAX_NR_ZONES];
+ memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+ max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
+ max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
+ max_zone_pfns[ZONE_NORMAL] = end_pfn;
+
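+	/* Register all of node 0's memory with sparsemem, then let the
+	   core build the zones from max_zone_pfns. */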
+ memory_present(0, 0, end_pfn);
+ sparse_init();
+ free_area_init_nodes(max_zone_pfns);
}
#endif
for (; address < end; address += LARGE_PAGE_SIZE) {
pgd_t *pgd = pgd_offset_k(address);
- pmd_t *pmd;
- if (!pgd || pgd_none(*pgd))
+ pud_t *pud;
+ pmd_t *pmd;
+ if (pgd_none(*pgd))
+ continue;
+ pud = pud_offset(pgd, address);
+ if (pud_none(*pud))
continue;
- pmd = pmd_offset(pgd, address);
+ pmd = pmd_offset(pud, address);
if (!pmd || pmd_none(*pmd))
continue;
if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
if (e820.map[i].type != E820_RAM) /* not usable memory */
continue;
/*
- * !!!FIXME!!! Some BIOSen report areas as RAM that
- * are not. Notably the 640->1Mb area. We need a sanity
- * check here.
+ * !!!FIXME!!! Some BIOSen report areas as RAM that
+ * are not. Notably the 640->1Mb area. We need a sanity
+ * check here.
*/
addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
return 0;
}
+/*
+ * Memory hotplug specific functions
+ */
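+/* Hand one hot-added page to the page allocator and update the counters. */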
+void online_page(struct page *page)
+{
+ ClearPageReserved(page);
+ init_page_count(page);
+ __free_page(page);
+ totalram_pages++;
+ num_physpages++;
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+/*
+ * Memory is always added to the NORMAL zone. This means you will never get
+ * additional DMA/DMA32 memory.
+ */
+int arch_add_memory(int nid, u64 start, u64 size)
+{
+ struct pglist_data *pgdat = NODE_DATA(nid);
+ struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+ int ret;
+
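+	/* Extend the kernel direct mapping over the new range before the
+	   pages are handed to the hotplug core. */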
+	init_memory_mapping(start, (start + size - 1));
+
+ ret = __add_pages(zone, start_pfn, nr_pages);
+ if (ret)
+ goto error;
+
+ return ret;
+error:
+	printk(KERN_ERR "%s: Problem encountered in __add_pages!\n", __func__);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(arch_add_memory);
+
+int remove_memory(u64 start, u64 size)
+{
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(remove_memory);
+
+#if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
+int memory_add_physaddr_to_nid(u64 start)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
+#endif
+
+#endif /* CONFIG_MEMORY_HOTPLUG */
+
+#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
+/*
+ * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
+ * just online the pages.
+ */
+int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
+{
+ int err = -EIO;
+ unsigned long pfn;
+ unsigned long total = 0, mem = 0;
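+	/* 'total' counts the whole pfn span; 'mem' only the pfns that have
+	   a valid struct page and were actually onlined. */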
+ for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
+ if (pfn_valid(pfn)) {
+ online_page(pfn_to_page(pfn));
+ err = 0;
+ mem++;
+ }
+ total++;
+ }
+ if (!err) {
+ z->spanned_pages += total;
+ z->present_pages += mem;
+ z->zone_pgdat->node_spanned_pages += total;
+ z->zone_pgdat->node_present_pages += mem;
+ }
+ return err;
+}
+#endif
+
+/*
+ * devmem_is_allowed() checks to see if /dev/mem access to a certain address
+ * is valid. The argument is a physical page number.
+ *
+ * On x86-64, access has to be given to the first megabyte of RAM because that
+ * area contains BIOS code and data regions used by X and dosemu and similar
+ * apps. Access has to be given to non-kernel-RAM areas as well; these contain
+ * the PCI MMIO resources and potential BIOS/ACPI data regions.
+ */
+int devmem_is_allowed(unsigned long pagenr)
+{
+ if (pagenr <= 256)
+ return 1;
+ if (!page_is_ram(pagenr))
+ return 1;
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(page_is_ram);
+
static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
kcore_vsyscall;
void __init mem_init(void)
{
- int codesize, reservedpages, datasize, initsize;
- int tmp;
-
-#ifdef CONFIG_SWIOTLB
- if (!iommu_aperture && end_pfn >= 0xffffffff>>PAGE_SHIFT)
- swiotlb = 1;
- if (swiotlb)
- swiotlb_init();
-#endif
+ long codesize, reservedpages, datasize, initsize;
- /* How many end-of-memory variables you have, grandma! */
- max_low_pfn = end_pfn;
- max_pfn = end_pfn;
- num_physpages = end_pfn;
- high_memory = (void *) __va(end_pfn * PAGE_SIZE);
+ pci_iommu_alloc();
/* clear the zero-page */
memset(empty_zero_page, 0, PAGE_SIZE);
reservedpages = 0;
/* this will put all low memory onto the freelists */
-#ifdef CONFIG_DISCONTIGMEM
- totalram_pages += numa_free_all_bootmem();
- tmp = 0;
- /* should count reserved pages here for all nodes */
+#ifdef CONFIG_NUMA
+ totalram_pages = numa_free_all_bootmem();
#else
- max_mapnr = end_pfn;
- if (!mem_map) BUG();
-
- totalram_pages += free_all_bootmem();
-
- for (tmp = 0; tmp < end_pfn; tmp++)
- /*
- * Only count reserved RAM pages
- */
- if (page_is_ram(tmp) && PageReserved(mem_map+tmp))
- reservedpages++;
+ totalram_pages = free_all_bootmem();
#endif
+ reservedpages = end_pfn - totalram_pages -
+ absent_pages_in_range(0, end_pfn);
after_bootmem = 1;
kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
VSYSCALL_END - VSYSCALL_START);
- printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
+ printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
end_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
initsize >> 10);
+#ifdef CONFIG_SMP
/*
- * Subtle. SMP is doing its boot stuff late (because it has to
- * fork idle threads) - but it also needs low mappings for the
- * protected-mode entry to work. We zap these entries only after
- * the WP-bit has been tested.
+ * Sync boot_level4_pgt mappings with the init_level4_pgt
+ * except for the low identity mappings which are already zapped
+	 * in init_level4_pgt. This sync-up is essential for AP bringup.
*/
-#ifndef CONFIG_SMP
- zap_low_mappings();
+ memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
#endif
}
-void free_initmem(void)
+void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
unsigned long addr;
- addr = (unsigned long)(&__init_begin);
- for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
+ if (begin >= end)
+ return;
+
+	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
+ for (addr = begin; addr < end; addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
- set_page_count(virt_to_page(addr), 1);
-#ifdef CONFIG_INIT_DEBUG
- memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE);
-#endif
+ init_page_count(virt_to_page(addr));
+ memset((void *)(addr & ~(PAGE_SIZE-1)),
+ POISON_FREE_INITMEM, PAGE_SIZE);
free_page(addr);
totalram_pages++;
}
- printk ("Freeing unused kernel memory: %luk freed\n", (&__init_end - &__init_begin) >> 10);
}
+void free_initmem(void)
+{
+ memset(__initdata_begin, POISON_FREE_INITDATA,
+ __initdata_end - __initdata_begin);
+ free_init_pages("unused kernel memory",
+ (unsigned long)(&__init_begin),
+ (unsigned long)(&__init_end));
+}
+
+#ifdef CONFIG_DEBUG_RODATA
+
+void mark_rodata_ro(void)
+{
+ unsigned long addr = (unsigned long)__start_rodata;
+
+ for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
+ change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);
+
+	printk("Write protecting the kernel read-only data: %luk\n",
+		(__end_rodata - __start_rodata) >> 10);
+
+ /*
+ * change_page_attr_addr() requires a global_flush_tlb() call after it.
+ * We do this after the printk so that if something went wrong in the
+ * change, the printk gets out at least to give a better debug hint
+ * of who is the culprit.
+ */
+ global_flush_tlb();
+}
+#endif
+
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
- if (start < (unsigned long)&_end)
- return;
- printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- set_page_count(virt_to_page(start), 1);
- free_page(start);
- totalram_pages++;
- }
+ free_init_pages("initrd memory", start, end);
}
#endif
void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
- /* Should check here against the e820 map to avoid double free */
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NUMA
int nid = phys_to_nid(phys);
- if (phys < HIGH_MEMORY && nid)
- panic("reserve of %lx at node %d", phys, nid);
- reserve_bootmem_node(NODE_DATA(nid), phys, len);
+#endif
+ unsigned long pfn = phys >> PAGE_SHIFT;
+ if (pfn >= end_pfn) {
+ /* This can happen with kdump kernels when accessing firmware
+ tables. */
+ if (pfn < end_pfn_map)
+ return;
+ printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
+ phys, len);
+ return;
+ }
+
+ /* Should check here against the e820 map to avoid double free */
+#ifdef CONFIG_NUMA
+ reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
reserve_bootmem(phys, len);
#endif
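+	/* Track reservations that fall below MAX_DMA_PFN so the zone
+	   setup code can account for them via set_dma_reserve(). */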
+ if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
+ dma_reserve += len / PAGE_SIZE;
+ set_dma_reserve(dma_reserve);
+ }
}
int kern_addr_valid(unsigned long addr)
{
unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
- pml4_t *pml4;
pgd_t *pgd;
+ pud_t *pud;
pmd_t *pmd;
pte_t *pte;
if (above != 0 && above != -1UL)
return 0;
- pml4 = pml4_offset_k(addr);
- if (pml4_none(*pml4))
+ pgd = pgd_offset_k(addr);
+ if (pgd_none(*pgd))
return 0;
- pgd = pgd_offset_k(addr);
- if (pgd_none(*pgd))
+ pud = pud_offset(pgd, addr);
+ if (pud_none(*pud))
return 0;
- pmd = pmd_offset(pgd, addr);
+ pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd))
return 0;
if (pmd_large(*pmd))
return pfn_valid(pmd_pfn(*pmd));
- pte = pte_offset_kernel(pmd, addr);
+ pte = pte_offset_kernel(pmd, addr);
if (pte_none(*pte))
return 0;
return pfn_valid(pte_pfn(*pte));
static ctl_table debug_table2[] = {
{ 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
proc_dointvec },
-#ifdef CONFIG_CHECKING
- { 100, "page-fault-trace", &page_fault_trace, sizeof(int), 0644, NULL,
- proc_dointvec },
-#endif
{ 0, }
};
__initcall(x8664_sysctl_init);
#endif
-/* Pseudo VMAs to allow ptrace access for the vsyscall pages. x86-64 has two
- different ones: one for 32bit and one for 64bit. Use the appropiate
- for the target task. */
+/* A pseudo VMA to allow ptrace access for the vsyscall page. This only
+ covers the 64bit vsyscall page now. 32bit has a real VMA now and does
+ not need special handling anymore. */
static struct vm_area_struct gate_vma = {
.vm_start = VSYSCALL_START,
- .vm_end = VSYSCALL_END,
- .vm_page_prot = PAGE_READONLY
-};
-
-static struct vm_area_struct gate32_vma = {
- .vm_start = VSYSCALL32_BASE,
- .vm_end = VSYSCALL32_END,
- .vm_page_prot = PAGE_READONLY
+ .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES << PAGE_SHIFT),
+ .vm_page_prot = PAGE_READONLY_EXEC,
+ .vm_flags = VM_READ | VM_EXEC
};
struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
- return test_tsk_thread_flag(tsk, TIF_IA32) ? &gate32_vma : &gate_vma;
+#ifdef CONFIG_IA32_EMULATION
+ if (test_tsk_thread_flag(tsk, TIF_IA32))
+ return NULL;
+#endif
+ return &gate_vma;
}
int in_gate_area(struct task_struct *task, unsigned long addr)
{
struct vm_area_struct *vma = get_gate_vma(task);
+ if (!vma)
+ return 0;
return (addr >= vma->vm_start) && (addr < vma->vm_end);
}
+
+/* Use this when you have no reliable task/vma, typically from interrupt
+ * context. It is less reliable than using the task's vma and may give
+ * false positives.
+ */
+int in_gate_area_no_task(unsigned long addr)
+{
+ return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
+}