/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 *
 * Description:
 *	This is included late in kernel/setup.c so that it can make
 *	use of all of the static functions.
 **/

#include <xen/interface/callback.h>
#include <xen/interface/memory.h>

static char * __init machine_specific_memory_setup(void)
{
	int rc;
	struct xen_memory_map memmap;
	/*
	 * This is rather large for a stack variable, but this early in
	 * the boot process we know we have plenty of slack space.
	 */
	struct e820entry map[E820MAX];

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
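	/*
	 * Hypervisors that lack XENMEM_memory_map return -ENOSYS; in that
	 * case synthesize a single RAM region covering the pages handed to
	 * this domain at start of day.
	 */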
	if (rc == -ENOSYS) {
		memmap.nr_entries = 1;
		map[0].addr = 0ULL;
		map[0].size = PFN_PHYS(xen_start_info->nr_pages);
		/* 8MB slack (to balance backend allocations). */
		map[0].size += 8ULL << 20;
		map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);

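	/*
	 * Trim and merge overlapping entries, then hand the finished map
	 * over to the generic e820 code.
	 */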
	sanitize_e820_map(map, (char *)&memmap.nr_entries);

	BUG_ON(copy_e820_map(map, (char)memmap.nr_entries) < 0);

	return "Xen";
}

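/* Assembly entry stubs for the hypervisor callbacks registered below. */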
extern void hypervisor_callback(void);
extern void failsafe_callback(void);
extern void nmi(void);

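/*
 * Machine-to-physical translation table.  Xen maps it into the guest's
 * address space; its location and size are discovered in
 * machine_specific_arch_setup() below.
 */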
unsigned long *machine_to_phys_mapping;
EXPORT_SYMBOL(machine_to_phys_mapping);
unsigned int machine_to_phys_order;
EXPORT_SYMBOL(machine_to_phys_order);

static void __init machine_specific_arch_setup(void)
{
	int ret;
	struct xen_machphys_mapping mapping;
	unsigned long machine_to_phys_nr_ents;
	struct xen_platform_parameters pp;
	struct callback_register event = {
		.type = CALLBACKTYPE_event,
		.address = { __KERNEL_CS, (unsigned long)hypervisor_callback },
	};
	struct callback_register failsafe = {
		.type = CALLBACKTYPE_failsafe,
		.address = { __KERNEL_CS, (unsigned long)failsafe_callback },
	};
	struct callback_register nmi_cb = {
		.type = CALLBACKTYPE_nmi,
		.address = { __KERNEL_CS, (unsigned long)nmi },
	};

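	/*
	 * Register the event and failsafe callbacks, falling back to the
	 * older HYPERVISOR_set_callbacks() interface on hypervisors that do
	 * not implement CALLBACKOP_register.
	 */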
	ret = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
	if (ret == 0)
		ret = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
	if (ret == -ENOSYS)
		ret = HYPERVISOR_set_callbacks(
			event.address.cs, event.address.eip,
			failsafe.address.cs, failsafe.address.eip);
	BUG_ON(ret);

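	/*
	 * If the hypervisor does not recognise CALLBACKTYPE_nmi, fall back
	 * to the dedicated NMI registration hypercall.
	 */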
	ret = HYPERVISOR_callback_op(CALLBACKOP_register, &nmi_cb);
	if (ret == -ENOSYS) {
		struct xennmi_callback cb;

		cb.handler_address = nmi_cb.address.eip;
		HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
	}

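	/*
	 * If the hypervisor reports where its virtual address range begins,
	 * pull the top of the fixmap area down below that boundary.
	 */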
	if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
		set_fixaddr_top(pp.virt_start - PAGE_SIZE);

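	/*
	 * Start with the compile-time defaults for the M2P table, let the
	 * hypervisor override them, then round the entry count up to a
	 * power of two to obtain machine_to_phys_order.
	 */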
	machine_to_phys_mapping = (unsigned long *)MACH2PHYS_VIRT_START;
	machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
	if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
		machine_to_phys_mapping = (unsigned long *)mapping.v_start;
		machine_to_phys_nr_ents = mapping.max_mfn + 1;
	}
	while ((1UL << machine_to_phys_order) < machine_to_phys_nr_ents)
		machine_to_phys_order++;
}