#include "user_util.h"
#include "kern_util.h"
#include "kern.h"
-#include "mprot.h"
#include "mem_user.h"
#include "mem.h"
#include "umid.h"
#include "mode_kern.h"
#include "mode.h"
-#define DEFAULT_COMMAND_LINE "root=6200"
+#define DEFAULT_COMMAND_LINE "root=98:0"
struct cpuinfo_um boot_cpu_data = {
.loops_per_jiffy = 0,
.ipi_pipe = { -1, -1 }
};
+/* Placeholder to make UML link until the vsyscall stuff is actually
+ * implemented
+ */
+void *__kernel_vsyscall;
+
unsigned long thread_saved_pc(struct task_struct *task)
{
return(os_process_pc(CHOOSE_MODE_PROC(thread_pid_tt, thread_pid_skas,
static int show_cpuinfo(struct seq_file *m, void *v)
{
- int index;
+ int index = 0;
- index = (struct cpuinfo_um *)v - cpu_data;
#ifdef CONFIG_SMP
+ index = (struct cpuinfo_um *) v - cpu_data;
if (!cpu_online(index))
return 0;
#endif
- seq_printf(m, "bogomips\t: %lu.%02lu\n",
+ seq_printf(m, "processor\t: %d\n", index);
+ seq_printf(m, "vendor_id\t: User Mode Linux\n");
+ seq_printf(m, "model name\t: UML\n");
+ seq_printf(m, "mode\t\t: %s\n", CHOOSE_MODE("tt", "skas"));
+ seq_printf(m, "host\t\t: %s\n", host_info);
+ seq_printf(m, "bogomips\t: %lu.%02lu\n\n",
loops_per_jiffy/(500000/HZ),
(loops_per_jiffy/(5000/HZ)) % 100);
- seq_printf(m, "host\t\t: %s\n", host_info);
return(0);
}
if(umid != NULL){
snprintf(argv1_begin,
(argv1_end - argv1_begin) * sizeof(*ptr),
- "(%s)", umid);
+ "(%s) ", umid);
ptr = &argv1_begin[strlen(argv1_begin)];
}
else ptr = argv1_begin;
- snprintf(ptr, (argv1_end - ptr) * sizeof(*ptr), " [%s]", cmd);
+ snprintf(ptr, (argv1_end - ptr) * sizeof(*ptr), "[%s]", cmd);
memset(argv1_begin + strlen(argv1_begin), '\0',
argv1_end - argv1_begin - strlen(argv1_begin));
#endif
static int __init uml_ncpus_setup(char *line, int *add)
{
if (!sscanf(line, "%d", &ncpus)) {
- printk("Couldn't parse [%s]\n", line);
+ printf("Couldn't parse [%s]\n", line);
return -1;
}
static int __init mode_tt_setup(char *line, int *add)
{
- printk("CONFIG_MODE_TT disabled - 'mode=tt' ignored\n");
+ printf("CONFIG_MODE_TT disabled - 'mode=tt' ignored\n");
return(0);
}
static int __init mode_tt_setup(char *line, int *add)
{
- printk("CONFIG_MODE_SKAS disabled - 'mode=tt' redundant\n");
+ printf("CONFIG_MODE_SKAS disabled - 'mode=tt' redundant\n");
return(0);
}
/* Set during early boot */
unsigned long brk_start;
-static struct vm_reserved kernel_vm_reserved;
+unsigned long end_iomem;
#define MIN_VMALLOC (32 * 1024 * 1024)
int linux_main(int argc, char **argv)
{
- unsigned long avail;
+ unsigned long avail, diff;
unsigned long virtmem_size, max_physmem;
- unsigned int i, add, err;
+ unsigned int i, add;
for (i = 1; i < argc; i++){
if((i == 1) && (argv[i][0] == ' ')) continue;
brk_start = (unsigned long) sbrk(0);
CHOOSE_MODE_PROC(before_mem_tt, before_mem_skas, brk_start);
+ /* Increase physical memory size for exec-shield users
+ so they actually get what they asked for. This should
+ add zero for non-exec shield users */
+
+ diff = UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
+ if(diff > 1024 * 1024){
+ printf("Adding %ld bytes to physical memory to account for "
+ "exec-shield gap\n", diff);
+ physmem_size += UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
+ }
uml_physmem = uml_start;
argv1_end = &argv[1][strlen(argv[1])];
#endif
- set_usable_vm(uml_physmem, get_kmem_end());
-
highmem = 0;
- max_physmem = get_kmem_end() - uml_physmem - MIN_VMALLOC;
- if(physmem_size > max_physmem){
- highmem = physmem_size - max_physmem;
+ iomem_size = (iomem_size + PAGE_SIZE - 1) & PAGE_MASK;
+ max_physmem = get_kmem_end() - uml_physmem - iomem_size - MIN_VMALLOC;
+
+ /* Zones have to begin on a 1 << MAX_ORDER page boundary,
+ * so this makes sure that's true for highmem
+ */
+ max_physmem &= ~((1 << (PAGE_SHIFT + MAX_ORDER)) - 1);
+ if(physmem_size + iomem_size > max_physmem){
+ highmem = physmem_size + iomem_size - max_physmem;
physmem_size -= highmem;
#ifndef CONFIG_HIGHMEM
highmem = 0;
}
high_physmem = uml_physmem + physmem_size;
- high_memory = (void *) high_physmem;
+ end_iomem = high_physmem + iomem_size;
+ high_memory = (void *) end_iomem;
start_vm = VMALLOC_START;
- setup_physmem(uml_physmem, uml_reserved, physmem_size);
+ setup_physmem(uml_physmem, uml_reserved, physmem_size, highmem);
+ if(init_maps(physmem_size, iomem_size, highmem)){
+ printf("Failed to allocate mem_map for %ld bytes of physical "
+ "memory and %ld bytes of highmem\n", physmem_size,
+ highmem);
+ exit(1);
+ }
+
virtmem_size = physmem_size;
avail = get_kmem_end() - start_vm;
if(physmem_size > avail) virtmem_size = avail;
printf("Kernel virtual memory size shrunk to %ld bytes\n",
virtmem_size);
- err = reserve_vm(high_physmem, end_vm, &kernel_vm_reserved);
- if(err){
- printf("Failed to reserve VM area for kernel VM\n");
- exit(1);
- }
-
uml_postsetup();
- init_task.thread.kernel_stack = (unsigned long) &init_thread_info +
- 2 * PAGE_SIZE;
-
task_protections((unsigned long) &init_thread_info);
+ os_flush_stdout();
return(CHOOSE_MODE(start_uml_tt(), start_uml_skas()));
}
+extern int uml_exitcode;
+
static int panic_exit(struct notifier_block *self, unsigned long unused1,
void *unused2)
{
#ifdef CONFIG_MAGIC_SYSRQ
-	handle_sysrq('p', &current->thread.regs, NULL, NULL);
+	handle_sysrq('p', &current->thread.regs, NULL);
#endif
+ uml_exitcode = 1;
machine_halt();
return(0);
}
arch_check_bugs();
check_ptrace();
check_sigio();
+ check_devanon();
+}
+
+void apply_alternatives(void *start, void *end)
+{
}
/*