* Modified for Xen.
*/
-#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
+#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
-#include <asm/dma-mapping.h>
-#include <asm/swiotlb.h>
#include <xen/features.h>
printk(KERN_INFO "%lu pages swap cached\n", cached);
}
-/* References to section boundaries */
-
int after_bootmem;
static __init void *spp_getpage(void)
if (after_bootmem) {
void *adr = (void *)get_zeroed_page(GFP_ATOMIC);
-
*phys = __pa(adr);
return adr;
}
/* actually usually some more */
if (size >= LARGE_PAGE_SIZE) {
- printk("SMBIOS area too long %lu\n", size);
return NULL;
}
set_pmd(temp_mappings[0].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
#endif /* !CONFIG_XEN */
static void __meminit
-phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
+phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
{
- int i, k;
+ int i = pmd_index(address);
+ int k;
- for (i = 0; i < PTRS_PER_PMD; pmd++, i++) {
+ for (; i < PTRS_PER_PMD; i++) {
unsigned long pte_phys;
pte_t *pte, *pte_save;
+ pmd_t *pmd = pmd_page + pmd_index(address);
if (address >= end) {
- for (; i < PTRS_PER_PMD; i++, pmd++)
- set_pmd(pmd, __pmd(0));
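+ /* Zero the rest of the PMD page only during early boot; afterwards the entries may already be in use. */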
+ if (!after_bootmem)
+ for (; i < PTRS_PER_PMD; i++, pmd++)
+ set_pmd(pmd, __pmd(0));
break;
}
+
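+ /* Entry already populated (e.g. by the initial boot mapping): skip it, but keep the address in step. */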
+ if (pmd_val(*pmd)) {
+ address += PMD_SIZE;
+ continue;
+ }
+
pte = alloc_static_page(&pte_phys);
pte_save = pte;
for (k = 0; k < PTRS_PER_PTE; pte++, k++, address += PTE_SIZE) {
static void __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
- pmd_t *pmd = pmd_offset(pud, (unsigned long)__va(address));
-
- if (pmd_none(*pmd)) {
- spin_lock(&init_mm.page_table_lock);
- phys_pmd_init(pmd, address, end);
- spin_unlock(&init_mm.page_table_lock);
- __flush_tlb_all();
- }
+ pmd_t *pmd = pmd_offset(pud, 0);
+ spin_lock(&init_mm.page_table_lock);
+ phys_pmd_init(pmd, address, end);
+ spin_unlock(&init_mm.page_table_lock);
+ __flush_tlb_all();
}
-static void __meminit phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
+static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
{
- long i = pud_index(address);
-
- pud = pud + i;
+ int i = pud_index(addr);
- if (after_bootmem && pud_val(*pud)) {
- phys_pmd_update(pud, address, end);
- return;
- }
- for (; i < PTRS_PER_PUD; pud++, i++) {
- unsigned long paddr, pmd_phys;
+ for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
+ unsigned long pmd_phys;
+ pud_t *pud = pud_page + pud_index(addr);
pmd_t *pmd;
- paddr = (address & PGDIR_MASK) + i*PUD_SIZE;
- if (paddr >= end)
+ if (addr >= end)
break;
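+ /* This PUD is already populated: only fill in missing PMD entries beneath it. */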
+ if (pud_val(*pud)) {
+ phys_pmd_update(pud, addr, end);
+ continue;
+ }
+
pmd = alloc_static_page(&pmd_phys);
early_make_page_readonly(pmd, XENFEAT_writable_page_tables);
spin_lock(&init_mm.page_table_lock);
set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
- phys_pmd_init(pmd, paddr, end);
+ phys_pmd_init(pmd, addr, end);
spin_unlock(&init_mm.page_table_lock);
}
__flush_tlb();
mk_kernel_pgd(__pa_symbol(level3_kernel_pgt));
level3_kernel_pgt[pud_index(__START_KERNEL_map)] =
__pud(__pa_symbol(level2_kernel_pgt) |
- _KERNPG_TABLE | _PAGE_USER);
+ _KERNPG_TABLE);
memcpy((void *)level2_kernel_pgt, page, PAGE_SIZE);
early_make_page_readonly(init_level4_pgt,
pte_page = alloc_static_page(&phys);
early_make_page_readonly(
pte_page, XENFEAT_writable_page_tables);
- set_pmd(pmd, __pmd(phys | _KERNPG_TABLE | _PAGE_USER));
+ set_pmd(pmd, __pmd(phys | _KERNPG_TABLE));
} else {
addr = page[pmd_index(va)];
addr_to_page(addr, pte_page);
if (pte_none(*pte)) {
new_pte = pfn_pte(
(va - __START_KERNEL_map) >> PAGE_SHIFT,
- __pgprot(_KERNPG_TABLE | _PAGE_USER));
+ __pgprot(_KERNPG_TABLE));
xen_l1_entry_update(pte, new_pte);
}
va += PAGE_SIZE;
table_end = table_start + (tables>>PAGE_SHIFT);
early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
- end, table_start << PAGE_SHIFT, table_end << PAGE_SHIFT);
+ end, table_start << PAGE_SHIFT,
+ (table_start << PAGE_SHIFT) + tables);
}
/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
pud_t *pud;
if (after_bootmem) {
- pud = pud_offset_k(pgd, start & PGDIR_MASK);
+ pud = pud_offset(pgd, start & PGDIR_MASK);
make_page_readonly(pud, XENFEAT_writable_page_tables);
pud_phys = __pa(pud);
} else {
#endif
}
-/* Compute zone sizes for the DMA and DMA32 zones in a node. */
-__init void
-size_zones(unsigned long *z, unsigned long *h,
- unsigned long start_pfn, unsigned long end_pfn)
-{
- int i;
-#ifndef CONFIG_XEN
- unsigned long w;
-#endif
-
- for (i = 0; i < MAX_NR_ZONES; i++)
- z[i] = 0;
-
-#ifndef CONFIG_XEN
- if (start_pfn < MAX_DMA_PFN)
- z[ZONE_DMA] = MAX_DMA_PFN - start_pfn;
- if (start_pfn < MAX_DMA32_PFN) {
- unsigned long dma32_pfn = MAX_DMA32_PFN;
- if (dma32_pfn > end_pfn)
- dma32_pfn = end_pfn;
- z[ZONE_DMA32] = dma32_pfn - start_pfn;
- }
- z[ZONE_NORMAL] = end_pfn - start_pfn;
-
- /* Remove lower zones from higher ones. */
- w = 0;
- for (i = 0; i < MAX_NR_ZONES; i++) {
- if (z[i])
- z[i] -= w;
- w += z[i];
- }
-
- /* Compute holes */
- w = start_pfn;
- for (i = 0; i < MAX_NR_ZONES; i++) {
- unsigned long s = w;
- w += z[i];
- h[i] = e820_hole_size(s, w);
- }
-
- /* Add the space pace needed for mem_map to the holes too. */
- for (i = 0; i < MAX_NR_ZONES; i++)
- h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE;
-
- /* The 16MB DMA zone has the kernel and other misc mappings.
- Account them too */
- if (h[ZONE_DMA]) {
- h[ZONE_DMA] += dma_reserve;
- if (h[ZONE_DMA] >= z[ZONE_DMA]) {
- printk(KERN_WARNING
- "Kernel too large and filling up ZONE_DMA?\n");
- h[ZONE_DMA] = z[ZONE_DMA];
- }
- }
-#else
- z[ZONE_DMA] = end_pfn;
- for (i = 0; i < MAX_NR_ZONES; i++)
- h[i] = 0;
-#endif
-}
-
#ifndef CONFIG_NUMA
void __init paging_init(void)
{
- unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];
int i;
+ unsigned long max_zone_pfns[MAX_NR_ZONES];
+ memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
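+ /* As in the old size_zones(): under Xen all memory is kept in ZONE_DMA. */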
+ max_zone_pfns[ZONE_DMA] = end_pfn;
+ max_zone_pfns[ZONE_DMA32] = end_pfn;
+ max_zone_pfns[ZONE_NORMAL] = end_pfn;
memory_present(0, 0, end_pfn);
sparse_init();
- size_zones(zones, holes, 0, end_pfn);
- free_area_init_node(0, NODE_DATA(0), zones,
- __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
+ free_area_init_nodes(max_zone_pfns);
/* Switch to the real shared_info page, and clear the
* dummy page. */
/* Setup mapping of lower 1st MB */
for (i = 0; i < NR_FIX_ISAMAPS; i++)
- if (xen_start_info->flags & SIF_PRIVILEGED)
+ if (is_initial_xendomain())
set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
else
__set_fixmap(FIX_ISAMAP_BEGIN - i,
__flush_tlb_all();
}
-int page_is_ram (unsigned long pagenr)
-{
- return 1;
-}
-
/*
* Memory hotplug specific functions
*/
-#if defined(CONFIG_ACPI_HOTPLUG_MEMORY) || defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE)
-
void online_page(struct page *page)
{
ClearPageReserved(page);
num_physpages++;
}
-#ifndef CONFIG_MEMORY_HOTPLUG
+#ifdef CONFIG_MEMORY_HOTPLUG
+/*
+ * Memory is added always to NORMAL zone. This means you will never get
+ * additional DMA/DMA32 memory.
+ */
+int arch_add_memory(int nid, u64 start, u64 size)
+{
+ struct pglist_data *pgdat = NODE_DATA(nid);
+ struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+ int ret;
+
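+ /* Establish the kernel direct mapping for the new range before the pages reach the allocator. */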
+ init_memory_mapping(start, (start + size - 1));
+
+ ret = __add_pages(zone, start_pfn, nr_pages);
+ if (ret)
+ goto error;
+
+ return ret;
+error:
+ printk(KERN_ERR "%s: Problem encountered in __add_pages!\n", __func__);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(arch_add_memory);
+
+int remove_memory(u64 start, u64 size)
+{
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(remove_memory);
+
+#if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
+int memory_add_physaddr_to_nid(u64 start)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
+#endif
+
+#endif /* CONFIG_MEMORY_HOTPLUG */
+
+#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
/*
* Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
* just online the pages.
}
#endif
-/*
- * Memory is added always to NORMAL zone. This means you will never get
- * additional DMA/DMA32 memory.
- */
-int add_memory(u64 start, u64 size)
-{
- struct pglist_data *pgdat = NODE_DATA(0);
- struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2;
- unsigned long start_pfn = start >> PAGE_SHIFT;
- unsigned long nr_pages = size >> PAGE_SHIFT;
- int ret;
-
- ret = __add_pages(zone, start_pfn, nr_pages);
- if (ret)
- goto error;
-
- init_memory_mapping(start, (start + size -1));
-
- return ret;
-error:
- printk("%s: Problem encountered in __add_pages!\n", __func__);
- return ret;
-}
-EXPORT_SYMBOL_GPL(add_memory);
-
-int remove_memory(u64 start, u64 size)
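+/* Under Xen every page of the pseudo-physical map is RAM. */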
+int page_is_ram(unsigned long pagenr)
{
- return -EINVAL;
+ return 1;
}
-EXPORT_SYMBOL_GPL(remove_memory);
-
-#endif
+EXPORT_SYMBOL_GPL(page_is_ram);
/*
* devmem_is_allowed() checks to see if /dev/mem access to a certain address is
}
-EXPORT_SYMBOL_GPL(page_is_ram);
-
static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
kcore_vsyscall;
BUG_ON(!contiguous_bitmap);
memset(contiguous_bitmap, 0, (end_pfn + 2*BITS_PER_LONG) >> 3);
-#if defined(CONFIG_SWIOTLB)
- pci_swiotlb_init();
-#endif
- no_iommu_init();
-
- /* How many end-of-memory variables you have, grandma! */
- max_low_pfn = end_pfn;
- max_pfn = end_pfn;
- num_physpages = end_pfn;
- high_memory = (void *) __va(end_pfn * PAGE_SIZE);
+ pci_iommu_alloc();
/* clear the zero-page */
memset(empty_zero_page, 0, PAGE_SIZE);
init_page_count(&mem_map[pfn]);
totalram_pages++;
}
- reservedpages = end_pfn - totalram_pages - e820_hole_size(0, end_pfn);
+ reservedpages = end_pfn - totalram_pages -
+ absent_pages_in_range(0, end_pfn);
+
after_bootmem = 1;
#endif
}
-void free_initmem(void)
+void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
-#ifdef __DO_LATER__
- /*
- * Some pages can be pinned, but some are not. Unpinning such pages
- * triggers BUG().
- */
unsigned long addr;
- addr = (unsigned long)(&__init_begin);
- for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
+ if (begin >= end)
+ return;
+
+ printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
+ for (addr = begin; addr < end; addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
init_page_count(virt_to_page(addr));
- memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE);
- make_page_writable(
- __va(__pa(addr)), XENFEAT_writable_page_tables);
- /*
- * Make pages from __PAGE_OFFSET address as well
- */
- make_page_writable(
- (void *)addr, XENFEAT_writable_page_tables);
+ memset((void *)(addr & ~(PAGE_SIZE-1)),
+ POISON_FREE_INITMEM, PAGE_SIZE);
free_page(addr);
totalram_pages++;
}
- memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin);
- printk ("Freeing unused kernel memory: %luk freed\n", (__init_end - __init_begin) >> 10);
+}
+
+void free_initmem(void)
+{
+ memset(__initdata_begin, POISON_FREE_INITDATA,
+ __initdata_end - __initdata_begin);
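+ /*
+ * Deferred (__DO_LATER__): some of these pages may still be pinned
+ * by Xen, and unpinning such pages triggers a BUG().
+ */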
+#ifdef __DO_LATER__
+ free_init_pages("unused kernel memory",
+ (unsigned long)(&__init_begin),
+ (unsigned long)(&__init_end));
#endif
}
#ifdef CONFIG_DEBUG_RODATA
-extern char __start_rodata, __end_rodata;
void mark_rodata_ro(void)
{
- unsigned long addr = (unsigned long)&__start_rodata;
+ unsigned long addr = (unsigned long)__start_rodata;
- for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
+ for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);
printk ("Write protecting the kernel read-only data: %luk\n",
- (&__end_rodata - &__start_rodata) >> 10);
+ (__end_rodata - __start_rodata) >> 10);
/*
* change_page_attr_addr() requires a global_flush_tlb() call after it.
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
- if (start >= end)
- return;
- printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- init_page_count(virt_to_page(start));
- free_page(start);
- totalram_pages++;
- }
+ free_init_pages("initrd memory", start, end);
}
#endif
void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
- /* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
int nid = phys_to_nid(phys);
+#endif
+ unsigned long pfn = phys >> PAGE_SHIFT;
+ if (pfn >= end_pfn) {
+ /* This can happen with kdump kernels when accessing firmware
+ tables. */
+ if (pfn < end_pfn_map)
+ return;
+ printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
+ phys, len);
+ return;
+ }
+
+ /* Should check here against the e820 map to avoid double free */
+#ifdef CONFIG_NUMA
reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
reserve_bootmem(phys, len);
#endif
- if (phys+len <= MAX_DMA_PFN*PAGE_SIZE)
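+ /* Record DMA-zone reservations so zone sizing can subtract them. */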
+ if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
dma_reserve += len / PAGE_SIZE;
+ set_dma_reserve(dma_reserve);
+ }
}
int kern_addr_valid(unsigned long addr)
__initcall(x8664_sysctl_init);
#endif
-/* A pseudo VMAs to allow ptrace access for the vsyscall page. This only
+/* A pseudo VMA to allow ptrace access for the vsyscall page. This only
covers the 64bit vsyscall page now. 32bit has a real VMA now and does
not need special handling anymore. */
static struct vm_area_struct gate_vma = {
.vm_start = VSYSCALL_START,
- .vm_end = VSYSCALL_END,
- .vm_page_prot = PAGE_READONLY
+ .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES << PAGE_SHIFT),
+ .vm_page_prot = PAGE_READONLY_EXEC,
+ .vm_flags = VM_READ | VM_EXEC
};
struct vm_area_struct *get_gate_vma(struct task_struct *tsk)