#include "linux/module.h"
#include "linux/init.h"
#include "linux/capability.h"
+#include "linux/vmalloc.h"
+#include "linux/spinlock.h"
+#include "linux/vs_cvirt.h"
+
#include "asm/unistd.h"
#include "asm/mman.h"
#include "asm/segment.h"
#include "asm/pgtable.h"
#include "asm/processor.h"
#include "asm/tlbflush.h"
-#include "asm/spinlock.h"
#include "asm/uaccess.h"
#include "asm/user.h"
#include "user_util.h"
/*
 * Look up the task with the given pid under the tasklist read lock.
 *
 * pid     - pid to search for
 * require - if nonzero, a lookup failure is fatal (panics) instead of
 *           returning NULL
 *
 * Returns the matching task_struct, or NULL if no such task exists and
 * require is zero.
 *
 * NOTE(review): the returned pointer is not reference-counted; the task
 * could exit after the lock is dropped — callers presumably rely on
 * context that keeps it alive.  TODO confirm.
 */
struct task_struct *get_task(int pid, int require)
{
	struct task_struct *ret;

	/* find_task_by_pid is a hashed lookup; the lock keeps the
	 * tasklist stable while we search. */
	read_lock(&tasklist_lock);
	ret = find_task_by_pid(pid);
	read_unlock(&tasklist_lock);

	if(require && (ret == NULL)) panic("get_task couldn't find a task\n");
	return(ret);
}
int flags = GFP_KERNEL;
if(atomic) flags |= GFP_ATOMIC;
- if((page = __get_free_pages(flags, order)) == 0)
+ page = __get_free_pages(flags, order);
+ if(page == 0)
return(0);
stack_protections(page);
return(page);
/*
 * Create a kernel thread running fn(arg).
 *
 * fn    - thread entry point
 * arg   - argument passed to fn
 * flags - extra clone flags; CLONE_VM and CLONE_UNTRACED are always
 *         added (kernel threads share the kernel address space and must
 *         not be ptraced)
 *
 * Returns the new thread's pid.  A do_fork failure is fatal — kernel
 * thread creation failing this early is unrecoverable.
 *
 * The entry point and argument are stashed in the current thread's
 * request structure; the arch-specific fork path picks them up when the
 * child starts.
 */
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	int pid;

	current->thread.request.u.thread.proc = fn;
	current->thread.request.u.thread.arg = arg;
	pid = do_fork(CLONE_VM | CLONE_UNTRACED | flags, 0, NULL, 0, NULL,
		      NULL);
	if(pid < 0)
		panic("do_fork failed in kernel_thread, errno = %d", pid);
	return(pid);
}
/*
 * Switch the active mm on this CPU from prev to next.
 *
 * Only the cpu_vm_mask bookkeeping is done here (using the cpumask API
 * rather than raw bit operations): this CPU leaves prev's mask when the
 * mm actually changes, and always joins next's mask.  tsk is unused.
 */
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk)
{
	int cpu = smp_processor_id();

	if (prev != next)
		cpu_clear(cpu, prev->cpu_vm_mask);
	cpu_set(cpu, next->cpu_vm_mask);
}
void set_current(void *t)
{ external_pid(task), task });
}
/*
 * Mode-dispatched context switch: hand off from prev to next using
 * whichever of the tt/skas implementations this kernel was built for.
 * last is unused.  Returns whatever the mode-specific switch returns.
 */
void *_switch_to(void *prev, void *next, void *last)
{
	return(CHOOSE_MODE(switch_to_tt(prev, next),
			   switch_to_skas(prev, next)));
}
/*
 * Per-thread teardown: run the mode-specific exit hook, then drop the
 * protection on the current kernel stack (via current_thread) so the
 * pages can be freed normally.
 */
void exit_thread(void)
{
	CHOOSE_MODE(exit_thread_tt(), exit_thread_skas());
	unprotect_stack((unsigned long) current_thread);
}
void *get_current(void)
return(current);
}
/*
 * Arch hook called before copy_thread; UML needs no per-task
 * preparation, so this is deliberately a no-op.
 */
void prepare_to_copy(struct task_struct *tsk)
{
}
+
/*
 * Set up the new task's thread state during fork/clone.
 *
 * The thread struct is reset to INIT_THREAD, then the mode-specific
 * (tt or skas) copy_thread implementation does the real work.  Returns
 * that implementation's result (0 on success, negative errno style on
 * failure).
 */
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
		unsigned long stack_top, struct task_struct * p,
		struct pt_regs *regs)
{
	p->thread = (struct thread_struct) INIT_THREAD;
	return(CHOOSE_MODE_PROC(copy_thread_tt, copy_thread_skas, nr,
				clone_flags, sp, stack_top, p, regs));
}
void default_idle(void)
{
- idle_timer();
+ uml_idle_timer();
atomic_inc(&init_mm.mm_count);
current->mm = &init_mm;
return(kmalloc(size, GFP_ATOMIC));
}
/*
 * vmalloc wrapper exported for code that cannot include the kernel's
 * vmalloc header directly.  Returns NULL on allocation failure, like
 * vmalloc itself.
 */
void *um_vmalloc(int size)
{
	return(vmalloc(size));
}
+
/*
 * Return the faulting address recorded in the current thread's state
 * by the fault handler.
 */
unsigned long get_fault_addr(void)
{
	return((unsigned long) current->thread.fault_addr);
}
unsigned long stack;
stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
- stack += 2 * PAGE_SIZE;
- return(stack != current->thread.kernel_stack);
+ return(stack != (unsigned long) current_thread);
}
extern void remove_umid_dir(void);
return(clear_user(buf, size));
}
/*
 * strlen_user wrapper exported for code that cannot include the
 * uaccess headers directly.  Returns strlen_user's result (length of
 * the user string including the terminator, or 0 on fault).
 */
int strlen_user_proc(char *str)
{
	return(strlen_user(str));
}
+
/*
 * SMP hook for the SIGIO handler: on SMP kernels, dispatch pending IPIs
 * for this CPU.  Returns 1 when a non-boot CPU handled the signal (the
 * caller should stop processing), 0 otherwise.  On non-SMP kernels this
 * always returns 0.
 *
 * NOTE(review): trailing #endif/return reconstructed from trimmed
 * context — confirm against the original file.
 */
int smp_sigio_handler(void)
{
#ifdef CONFIG_SMP
	int cpu = current_thread->cpu;
	IPI_handler(cpu);
	if(cpu != 0)
		return(1);
#endif
	return(0);
}
int cpu(void)
{
- return(current->thread_info->cpu);
+ return(current_thread->cpu);
}
/*