-diff -Nurb linux-2.6.22-580/arch/i386/Kconfig linux-2.6.22-590/arch/i386/Kconfig
---- linux-2.6.22-580/arch/i386/Kconfig 2008-04-30 09:29:26.000000000 -0400
-+++ linux-2.6.22-590/arch/i386/Kconfig 2008-04-30 09:29:41.000000000 -0400
+diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/arch/i386/Kconfig linux-2.6.22-590/arch/i386/Kconfig
+--- linux-2.6.22-580/arch/i386/Kconfig 2009-02-18 09:56:02.000000000 -0500
++++ linux-2.6.22-590/arch/i386/Kconfig 2009-02-18 09:57:23.000000000 -0500
@@ -1217,6 +1217,14 @@
source "arch/i386/oprofile/Kconfig"
config KPROBES
bool "Kprobes (EXPERIMENTAL)"
depends on KALLSYMS && EXPERIMENTAL && MODULES
-diff -Nurb linux-2.6.22-580/block/ll_rw_blk.c linux-2.6.22-590/block/ll_rw_blk.c
---- linux-2.6.22-580/block/ll_rw_blk.c 2008-04-30 09:29:21.000000000 -0400
-+++ linux-2.6.22-590/block/ll_rw_blk.c 2008-04-30 09:29:41.000000000 -0400
+diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/arch/i386/kernel/asm-offsets.c linux-2.6.22-590/arch/i386/kernel/asm-offsets.c
+--- linux-2.6.22-580/arch/i386/kernel/asm-offsets.c 2007-07-08 19:32:17.000000000 -0400
++++ linux-2.6.22-590/arch/i386/kernel/asm-offsets.c 2009-02-18 09:57:23.000000000 -0500
+@@ -9,6 +9,7 @@
+ #include <linux/signal.h>
+ #include <linux/personality.h>
+ #include <linux/suspend.h>
++#include <linux/arrays.h>
+ #include <asm/ucontext.h>
+ #include "sigframe.h"
+ #include <asm/pgtable.h>
+@@ -25,9 +26,19 @@
+ #define OFFSET(sym, str, mem) \
+ DEFINE(sym, offsetof(struct str, mem));
+
++#define STACKOFFSET(sym, str, mem) \
++ DEFINE(sym, offsetof(struct str, mem)-sizeof(struct str));
++
+ /* workaround for a warning with -Wmissing-prototypes */
+ void foo(void);
+
++struct event_spec {
++ unsigned long pc;
++ unsigned long dcookie;
++ unsigned count;
++ unsigned int number;
++};
++
+ void foo(void)
+ {
+ OFFSET(SIGCONTEXT_eax, sigcontext, eax);
+@@ -51,7 +62,16 @@
+ OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
+ BLANK();
+
+- OFFSET(TI_task, thread_info, task);
++ STACKOFFSET(TASK_thread, task_struct, thread);
++ STACKOFFSET(THREAD_esp, thread_struct, esp);
++ STACKOFFSET(EVENT_event_data, event, event_data);
++ STACKOFFSET(EVENT_task, event, task);
++	STACKOFFSET(EVENT_event_type, event, event_type);
++ STACKOFFSET(SPEC_number, event_spec, number);
++ DEFINE(EVENT_SIZE, sizeof(struct event));
++ DEFINE(SPEC_SIZE, sizeof(struct event_spec));
++ DEFINE(SPEC_EVENT_SIZE, sizeof(struct event_spec)+sizeof(struct event));
++
+ OFFSET(TI_exec_domain, thread_info, exec_domain);
+ OFFSET(TI_flags, thread_info, flags);
+ OFFSET(TI_status, thread_info, status);
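A note on the hunk above: STACKOFFSET(sym, str, mem) emits offsetof(struct str, mem) - sizeof(struct str), a negative displacement, so a register pointing just past a struct carved out of the stack can address its members directly. A minimal C sketch of that arithmetic, assuming a struct event layout inferred from the EVENT_* symbols (linux/arrays.h itself never appears in this excerpt, so the field order is an assumption):

	#include <stddef.h>
	#include <stdio.h>

	struct task_struct;                        /* opaque here */

	/* Assumed layout, reverse-engineered from the EVENT_* offsets above;
	 * the authoritative definition would live in linux/arrays.h. */
	struct event {
	        void               *event_data;    /* EVENT_event_data */
	        unsigned int        event_type;    /* EVENT_event_type */
	        struct task_struct *task;          /* EVENT_task */
	};

	/* offsetof(mem) - sizeof(str): negative, addressed from one-past-the-end */
	#define STACKOFFSET_VAL(str, mem) \
	        ((long)offsetof(struct str, mem) - (long)sizeof(struct str))

	int main(void)
	{
	        char stack[sizeof(struct event)];
	        char *ebp = stack + sizeof(stack);  /* points one past the struct */
	        struct event *ev = (struct event *)(ebp - sizeof(struct event));

	        /* ebp + STACKOFFSET_VAL(event, event_type) == (char *)&ev->event_type */
	        ev->event_type = 7;                 /* value the syscall probe stores */
	        printf("EVENT_event_type = %ld\n", STACKOFFSET_VAL(event, event_type));
	        return 0;
	}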
+diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/arch/i386/kernel/entry. linux-2.6.22-590/arch/i386/kernel/entry.
+--- linux-2.6.22-580/arch/i386/kernel/entry. 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.22-590/arch/i386/kernel/entry. 2009-02-18 09:57:23.000000000 -0500
+@@ -0,0 +1,1027 @@
++/*
++ * linux/arch/i386/entry.S
++ *
++ * Copyright (C) 1991, 1992 Linus Torvalds
++ */
++
++/*
++ * entry.S contains the system-call and fault low-level handling routines.
++ * This also contains the timer-interrupt handler, as well as all interrupts
++ * and faults that can result in a task-switch.
++ *
++ * NOTE: This code handles signal-recognition, which happens every time
++ * after a timer-interrupt and after each system call.
++ *
++ * I changed all the .align's to 4 (16 byte alignment), as that's faster
++ * on a 486.
++ *
++ * Stack layout in 'syscall_exit':
++ * ptrace needs to have all regs on the stack.
++ * if the order here is changed, it needs to be
++ * updated in fork.c:copy_process, signal.c:do_signal,
++ * ptrace.c and ptrace.h
++ *
++ * 0(%esp) - %ebx
++ * 4(%esp) - %ecx
++ * 8(%esp) - %edx
++ * C(%esp) - %esi
++ * 10(%esp) - %edi
++ * 14(%esp) - %ebp
++ * 18(%esp) - %eax
++ * 1C(%esp) - %ds
++ * 20(%esp) - %es
++ * 24(%esp) - %fs
++ * 28(%esp) - orig_eax
++ * 2C(%esp) - %eip
++ * 30(%esp) - %cs
++ * 34(%esp) - %eflags
++ * 38(%esp) - %oldesp
++ * 3C(%esp) - %oldss
++ *
++ * "current" is in register %ebx during any slow entries.
++ */
++
++#include <linux/linkage.h>
++#include <asm/thread_info.h>
++#include <asm/irqflags.h>
++#include <asm/errno.h>
++#include <asm/segment.h>
++#include <asm/smp.h>
++#include <asm/page.h>
++#include <asm/desc.h>
++#include <asm/percpu.h>
++#include <asm/dwarf2.h>
++#include "irq_vectors.h"
++
++/*
++ * We use macros for low-level operations which need to be overridden
++ * for paravirtualization. The following will never clobber any registers:
++ * INTERRUPT_RETURN (aka. "iret")
++ * GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
++ * ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
++ *
++ * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
++ * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
++ * Allowing a register to be clobbered can shrink the paravirt replacement
++ * enough to patch inline, increasing performance.
++ */
++
++#define nr_syscalls ((syscall_table_size)/4)
++
++CF_MASK = 0x00000001
++TF_MASK = 0x00000100
++IF_MASK = 0x00000200
++DF_MASK = 0x00000400
++NT_MASK = 0x00004000
++VM_MASK = 0x00020000
++
++#ifdef CONFIG_PREEMPT
++#define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
++#else
++#define preempt_stop(clobbers)
++#define resume_kernel restore_nocheck
++#endif
++
++.macro TRACE_IRQS_IRET
++#ifdef CONFIG_TRACE_IRQFLAGS
++ testl $IF_MASK,PT_EFLAGS(%esp) # interrupts off?
++ jz 1f
++ TRACE_IRQS_ON
++1:
++#endif
++.endm
++
++#ifdef CONFIG_VM86
++#define resume_userspace_sig check_userspace
++#else
++#define resume_userspace_sig resume_userspace
++#endif
++
++#define SAVE_ALL \
++ cld; \
++ pushl %fs; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ /*CFI_REL_OFFSET fs, 0;*/\
++ pushl %es; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ /*CFI_REL_OFFSET es, 0;*/\
++ pushl %ds; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ /*CFI_REL_OFFSET ds, 0;*/\
++ pushl %eax; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET eax, 0;\
++ pushl %ebp; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET ebp, 0;\
++ pushl %edi; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET edi, 0;\
++ pushl %esi; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET esi, 0;\
++ pushl %edx; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET edx, 0;\
++ pushl %ecx; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET ecx, 0;\
++ pushl %ebx; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET ebx, 0;\
++ movl $(__USER_DS), %edx; \
++ movl %edx, %ds; \
++ movl %edx, %es; \
++ movl $(__KERNEL_PERCPU), %edx; \
++ movl %edx, %fs
++
++#define RESTORE_INT_REGS \
++ popl %ebx; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE ebx;\
++ popl %ecx; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE ecx;\
++ popl %edx; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE edx;\
++ popl %esi; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE esi;\
++ popl %edi; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE edi;\
++ popl %ebp; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE ebp;\
++ popl %eax; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE eax
++
++#define RESTORE_REGS \
++ RESTORE_INT_REGS; \
++1: popl %ds; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ /*CFI_RESTORE ds;*/\
++2: popl %es; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ /*CFI_RESTORE es;*/\
++3: popl %fs; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ /*CFI_RESTORE fs;*/\
++.pushsection .fixup,"ax"; \
++4: movl $0,(%esp); \
++ jmp 1b; \
++5: movl $0,(%esp); \
++ jmp 2b; \
++6: movl $0,(%esp); \
++ jmp 3b; \
++.section __ex_table,"a";\
++ .align 4; \
++ .long 1b,4b; \
++ .long 2b,5b; \
++ .long 3b,6b; \
++.popsection
++
++#define RING0_INT_FRAME \
++ CFI_STARTPROC simple;\
++ CFI_SIGNAL_FRAME;\
++ CFI_DEF_CFA esp, 3*4;\
++ /*CFI_OFFSET cs, -2*4;*/\
++ CFI_OFFSET eip, -3*4
++
++#define RING0_EC_FRAME \
++ CFI_STARTPROC simple;\
++ CFI_SIGNAL_FRAME;\
++ CFI_DEF_CFA esp, 4*4;\
++ /*CFI_OFFSET cs, -2*4;*/\
++ CFI_OFFSET eip, -3*4
++
++#define RING0_PTREGS_FRAME \
++ CFI_STARTPROC simple;\
++ CFI_SIGNAL_FRAME;\
++ CFI_DEF_CFA esp, PT_OLDESP-PT_EBX;\
++ /*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/\
++ CFI_OFFSET eip, PT_EIP-PT_OLDESP;\
++ /*CFI_OFFSET es, PT_ES-PT_OLDESP;*/\
++ /*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/\
++ CFI_OFFSET eax, PT_EAX-PT_OLDESP;\
++ CFI_OFFSET ebp, PT_EBP-PT_OLDESP;\
++ CFI_OFFSET edi, PT_EDI-PT_OLDESP;\
++ CFI_OFFSET esi, PT_ESI-PT_OLDESP;\
++ CFI_OFFSET edx, PT_EDX-PT_OLDESP;\
++ CFI_OFFSET ecx, PT_ECX-PT_OLDESP;\
++ CFI_OFFSET ebx, PT_EBX-PT_OLDESP
++
++ENTRY(ret_from_fork)
++ CFI_STARTPROC
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ call schedule_tail
++ GET_THREAD_INFO(%ebp)
++ popl %eax
++ CFI_ADJUST_CFA_OFFSET -4
++ pushl $0x0202 # Reset kernel eflags
++ CFI_ADJUST_CFA_OFFSET 4
++ popfl
++ CFI_ADJUST_CFA_OFFSET -4
++ jmp syscall_exit
++ CFI_ENDPROC
++END(ret_from_fork)
++
++/*
++ * Return to user mode is not as complex as all this looks,
++ * but we want the default path for a system call return to
++ * go as quickly as possible which is why some of this is
++ * less clear than it otherwise should be.
++ */
++
++ # userspace resumption stub bypassing syscall exit tracing
++ ALIGN
++ RING0_PTREGS_FRAME
++ret_from_exception:
++ preempt_stop(CLBR_ANY)
++ret_from_intr:
++ GET_THREAD_INFO(%ebp)
++check_userspace:
++ movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
++ movb PT_CS(%esp), %al
++ andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
++ cmpl $USER_RPL, %eax
++ jb resume_kernel # not returning to v8086 or userspace
++
++ENTRY(resume_userspace)
++ DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
++ # setting need_resched or sigpending
++ # between sampling and the iret
++ movl TI_flags(%ebp), %ecx
++ andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
++ # int/exception return?
++ jne work_pending
++ jmp restore_all
++END(ret_from_exception)
++
++#ifdef CONFIG_PREEMPT
++ENTRY(resume_kernel)
++ DISABLE_INTERRUPTS(CLBR_ANY)
++ cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
++ jnz restore_nocheck
++need_resched:
++ movl TI_flags(%ebp), %ecx # need_resched set ?
++ testb $_TIF_NEED_RESCHED, %cl
++ jz restore_all
++ testl $IF_MASK,PT_EFLAGS(%esp) # interrupts off (exception path) ?
++ jz restore_all
++ call preempt_schedule_irq
++ jmp need_resched
++END(resume_kernel)
++#endif
++ CFI_ENDPROC
++
++/* SYSENTER_RETURN points to after the "sysenter" instruction in
++   the vsyscall page. See vsyscall-sysenter.S, which defines the symbol. */
++
++ # sysenter call handler stub
++ENTRY(sysenter_entry)
++ CFI_STARTPROC simple
++ CFI_SIGNAL_FRAME
++ CFI_DEF_CFA esp, 0
++ CFI_REGISTER esp, ebp
++ movl TSS_sysenter_esp0(%esp),%esp
++sysenter_past_esp:
++ /*
++ * No need to follow this irqs on/off section: the syscall
++ * disabled irqs and here we enable it straight after entry:
++ */
++ ENABLE_INTERRUPTS(CLBR_NONE)
++ pushl $(__USER_DS)
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET ss, 0*/
++ pushl %ebp
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET esp, 0
++ pushfl
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $(__USER_CS)
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET cs, 0*/
++ /*
++ * Push current_thread_info()->sysenter_return to the stack.
++ * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
++ * pushed above; +8 corresponds to copy_thread's esp0 setting.
++ */
++ pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET eip, 0
++
++/*
++ * Load the potential sixth argument from user stack.
++ * Careful about security.
++ */
++ cmpl $__PAGE_OFFSET-3,%ebp
++ jae syscall_fault
++1: movl (%ebp),%ebp
++.section __ex_table,"a"
++ .align 4
++ .long 1b,syscall_fault
++.previous
++
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ GET_THREAD_INFO(%ebp)
++
++ /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
++ testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
++ jnz syscall_trace_entry
++ cmpl $(nr_syscalls), %eax
++ jae syscall_badsys
++ call *sys_call_table(,%eax,4)
++ movl %eax,PT_EAX(%esp)
++ DISABLE_INTERRUPTS(CLBR_ANY)
++ TRACE_IRQS_OFF
++ movl TI_flags(%ebp), %ecx
++ testw $_TIF_ALLWORK_MASK, %cx
++ jne syscall_exit_work
++/* if something modifies registers it must also disable sysexit */
++ movl PT_EIP(%esp), %edx
++ movl PT_OLDESP(%esp), %ecx
++ xorl %ebp,%ebp
++ TRACE_IRQS_ON
++1: mov PT_FS(%esp), %fs
++ ENABLE_INTERRUPTS_SYSEXIT
++ CFI_ENDPROC
++.pushsection .fixup,"ax"
++2: movl $0,PT_FS(%esp)
++ jmp 1b
++.section __ex_table,"a"
++ .align 4
++ .long 1b,2b
++.popsection
++ENDPROC(sysenter_entry)
++
++ # system call handler stub
++ENTRY(system_call)
++ RING0_INT_FRAME # can't unwind into user space anyway
++ pushl %eax # save orig_eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ GET_THREAD_INFO(%ebp)
++ # system call tracing in operation / emulation
++ /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
++ testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
++ jnz syscall_trace_entry
++ cmpl $(nr_syscalls), %eax
++ jae syscall_badsys
++syscall_call:
++ call *sys_call_table(,%eax,4)
++ movl %eax,PT_EAX(%esp) # store the return value
++syscall_exit:
++ DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
++ # setting need_resched or sigpending
++ # between sampling and the iret
++ TRACE_IRQS_OFF
++ testl $TF_MASK,PT_EFLAGS(%esp) # If tracing set singlestep flag on exit
++ jz no_singlestep
++ orl $_TIF_SINGLESTEP,TI_flags(%ebp)
++no_singlestep:
++ movl TI_flags(%ebp), %ecx
++ testw $_TIF_ALLWORK_MASK, %cx # current->work
++ jne syscall_exit_work
++
++restore_all:
++ movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
++ # Warning: PT_OLDSS(%esp) contains the wrong/random values if we
++ # are returning to the kernel.
++ # See comments in process.c:copy_thread() for details.
++ movb PT_OLDSS(%esp), %ah
++ movb PT_CS(%esp), %al
++ andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
++ cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
++ CFI_REMEMBER_STATE
++ je ldt_ss # returning to user-space with LDT SS
++restore_nocheck:
++ TRACE_IRQS_IRET
++restore_nocheck_notrace:
++ RESTORE_REGS
++ addl $4, %esp # skip orig_eax/error_code
++ CFI_ADJUST_CFA_OFFSET -4
++1: INTERRUPT_RETURN
++.section .fixup,"ax"
++iret_exc:
++ pushl $0 # no error code
++ pushl $do_iret_error
++ jmp error_code
++.previous
++.section __ex_table,"a"
++ .align 4
++ .long 1b,iret_exc
++.previous
++
++ CFI_RESTORE_STATE
++ldt_ss:
++ larl PT_OLDSS(%esp), %eax
++ jnz restore_nocheck
++ testl $0x00400000, %eax # returning to 32bit stack?
++	jnz restore_nocheck	# all right, normal return
++
++#ifdef CONFIG_PARAVIRT
++ /*
++ * The kernel can't run on a non-flat stack if paravirt mode
++ * is active. Rather than try to fixup the high bits of
++ * ESP, bypass this code entirely. This may break DOSemu
++ * and/or Wine support in a paravirt VM, although the option
++ * is still available to implement the setting of the high
++ * 16-bits in the INTERRUPT_RETURN paravirt-op.
++ */
++ cmpl $0, paravirt_ops+PARAVIRT_enabled
++ jne restore_nocheck
++#endif
++
++ /* If returning to userspace with 16bit stack,
++ * try to fix the higher word of ESP, as the CPU
++ * won't restore it.
++ * This is an "official" bug of all the x86-compatible
++ * CPUs, which we can try to work around to make
++ * dosemu and wine happy. */
++ movl PT_OLDESP(%esp), %eax
++ movl %esp, %edx
++ call patch_espfix_desc
++ pushl $__ESPFIX_SS
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ DISABLE_INTERRUPTS(CLBR_EAX)
++ TRACE_IRQS_OFF
++ lss (%esp), %esp
++ CFI_ADJUST_CFA_OFFSET -8
++ jmp restore_nocheck
++ CFI_ENDPROC
++ENDPROC(system_call)
++
++ # perform work that needs to be done immediately before resumption
++ ALIGN
++ RING0_PTREGS_FRAME # can't unwind into user space anyway
++work_pending:
++ testb $_TIF_NEED_RESCHED, %cl
++ jz work_notifysig
++work_resched:
++ call schedule
++ DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
++ # setting need_resched or sigpending
++ # between sampling and the iret
++ TRACE_IRQS_OFF
++ movl TI_flags(%ebp), %ecx
++ andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
++ # than syscall tracing?
++ jz restore_all
++ testb $_TIF_NEED_RESCHED, %cl
++ jnz work_resched
++
++work_notifysig: # deal with pending signals and
++ # notify-resume requests
++#ifdef CONFIG_VM86
++ testl $VM_MASK, PT_EFLAGS(%esp)
++ movl %esp, %eax
++ jne work_notifysig_v86 # returning to kernel-space or
++ # vm86-space
++ xorl %edx, %edx
++ call do_notify_resume
++ jmp resume_userspace_sig
++
++ ALIGN
++work_notifysig_v86:
++ pushl %ecx # save ti_flags for do_notify_resume
++ CFI_ADJUST_CFA_OFFSET 4
++ call save_v86_state # %eax contains pt_regs pointer
++ popl %ecx
++ CFI_ADJUST_CFA_OFFSET -4
++ movl %eax, %esp
++#else
++ movl %esp, %eax
++#endif
++ xorl %edx, %edx
++ call do_notify_resume
++ jmp resume_userspace_sig
++END(work_pending)
++
++ # perform syscall exit tracing
++ ALIGN
++syscall_trace_entry:
++ movl $-ENOSYS,PT_EAX(%esp)
++ movl %esp, %eax
++ xorl %edx,%edx
++ call do_syscall_trace
++ cmpl $0, %eax
++ jne resume_userspace # ret != 0 -> running under PTRACE_SYSEMU,
++ # so must skip actual syscall
++ movl PT_ORIG_EAX(%esp), %eax
++ cmpl $(nr_syscalls), %eax
++ jnae syscall_call
++ jmp syscall_exit
++END(syscall_trace_entry)
++
++ # perform syscall exit tracing
++ ALIGN
++syscall_exit_work:
++ testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
++ jz work_pending
++ TRACE_IRQS_ON
++ ENABLE_INTERRUPTS(CLBR_ANY) # could let do_syscall_trace() call
++ # schedule() instead
++ movl %esp, %eax
++ movl $1, %edx
++ call do_syscall_trace
++ jmp resume_userspace
++END(syscall_exit_work)
++ CFI_ENDPROC
++
++ RING0_INT_FRAME # can't unwind into user space anyway
++syscall_fault:
++ pushl %eax # save orig_eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ GET_THREAD_INFO(%ebp)
++ movl $-EFAULT,PT_EAX(%esp)
++ jmp resume_userspace
++END(syscall_fault)
++
++syscall_badsys:
++ movl $-ENOSYS,PT_EAX(%esp)
++ jmp resume_userspace
++END(syscall_badsys)
++ CFI_ENDPROC
++
++#define FIXUP_ESPFIX_STACK \
++	/* since we are on the wrong stack, we can't do this in C :( */ \
++ PER_CPU(gdt_page, %ebx); \
++ GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \
++ addl %esp, %eax; \
++ pushl $__KERNEL_DS; \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ pushl %eax; \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ lss (%esp), %esp; \
++ CFI_ADJUST_CFA_OFFSET -8;
++#define UNWIND_ESPFIX_STACK \
++ movl %ss, %eax; \
++ /* see if on espfix stack */ \
++ cmpw $__ESPFIX_SS, %ax; \
++ jne 27f; \
++ movl $__KERNEL_DS, %eax; \
++ movl %eax, %ds; \
++ movl %eax, %es; \
++ /* switch to normal stack */ \
++ FIXUP_ESPFIX_STACK; \
++27:;
++
++/*
++ * Build the entry stubs and pointer table with
++ * some assembler magic.
++ */
++.data
++ENTRY(interrupt)
++.text
++
++ENTRY(irq_entries_start)
++ RING0_INT_FRAME
++vector=0
++.rept NR_IRQS
++ ALIGN
++ .if vector
++ CFI_ADJUST_CFA_OFFSET -4
++ .endif
++1: pushl $~(vector)
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp common_interrupt
++ .previous
++ .long 1b
++ .text
++vector=vector+1
++.endr
++END(irq_entries_start)
++
++.previous
++END(interrupt)
++.previous
++
++/*
++ * the CPU automatically disables interrupts when executing an IRQ vector,
++ * so IRQ-flags tracing has to follow that:
++ */
++ ALIGN
++common_interrupt:
++ SAVE_ALL
++ TRACE_IRQS_OFF
++ movl %esp,%eax
++ call do_IRQ
++ jmp ret_from_intr
++ENDPROC(common_interrupt)
++ CFI_ENDPROC
++
++#define BUILD_INTERRUPT(name, nr) \
++ENTRY(name) \
++ RING0_INT_FRAME; \
++ pushl $~(nr); \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ SAVE_ALL; \
++ TRACE_IRQS_OFF \
++ movl %esp,%eax; \
++ call smp_##name; \
++ jmp ret_from_intr; \
++ CFI_ENDPROC; \
++ENDPROC(name)
++
++/* The include is where all of the SMP etc. interrupts come from */
++#include "entry_arch.h"
++
++KPROBE_ENTRY(page_fault)
++ RING0_EC_FRAME
++ pushl $do_page_fault
++ CFI_ADJUST_CFA_OFFSET 4
++ ALIGN
++error_code:
++ /* the function address is in %fs's slot on the stack */
++ pushl %es
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET es, 0*/
++ pushl %ds
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET ds, 0*/
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET eax, 0
++ pushl %ebp
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET ebp, 0
++ pushl %edi
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET edi, 0
++ pushl %esi
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET esi, 0
++ pushl %edx
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET edx, 0
++ pushl %ecx
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET ecx, 0
++ pushl %ebx
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET ebx, 0
++ cld
++ pushl %fs
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET fs, 0*/
++ movl $(__KERNEL_PERCPU), %ecx
++ movl %ecx, %fs
++ UNWIND_ESPFIX_STACK
++ popl %ecx
++ CFI_ADJUST_CFA_OFFSET -4
++ /*CFI_REGISTER es, ecx*/
++ movl PT_FS(%esp), %edi # get the function address
++ movl PT_ORIG_EAX(%esp), %edx # get the error code
++ movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
++ mov %ecx, PT_FS(%esp)
++ /*CFI_REL_OFFSET fs, ES*/
++ movl $(__USER_DS), %ecx
++ movl %ecx, %ds
++ movl %ecx, %es
++ movl %esp,%eax # pt_regs pointer
++ call *%edi
++ jmp ret_from_exception
++ CFI_ENDPROC
++KPROBE_END(page_fault)
++
++ENTRY(coprocessor_error)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_coprocessor_error
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(coprocessor_error)
++
++ENTRY(simd_coprocessor_error)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_simd_coprocessor_error
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(simd_coprocessor_error)
++
++ENTRY(device_not_available)
++ RING0_INT_FRAME
++ pushl $-1 # mark this as an int
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ GET_CR0_INTO_EAX
++ testl $0x4, %eax # EM (math emulation bit)
++ jne device_not_available_emulate
++ preempt_stop(CLBR_ANY)
++ call math_state_restore
++ jmp ret_from_exception
++device_not_available_emulate:
++ pushl $0 # temporary storage for ORIG_EIP
++ CFI_ADJUST_CFA_OFFSET 4
++ call math_emulate
++ addl $4, %esp
++ CFI_ADJUST_CFA_OFFSET -4
++ jmp ret_from_exception
++ CFI_ENDPROC
++END(device_not_available)
++
++/*
++ * Debug traps and NMI can happen at the one SYSENTER instruction
++ * that sets up the real kernel stack. Check here, since we can't
++ * allow the wrong stack to be used.
++ *
++ * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
++ * already pushed 3 words if it hits on the sysenter instruction:
++ * eflags, cs and eip.
++ *
++ * We just load the right stack, and push the three (known) values
++ * by hand onto the new stack - while updating the return eip past
++ * the instruction that would have done it for sysenter.
++ */
++#define FIX_STACK(offset, ok, label) \
++ cmpw $__KERNEL_CS,4(%esp); \
++ jne ok; \
++label: \
++ movl TSS_sysenter_esp0+offset(%esp),%esp; \
++ CFI_DEF_CFA esp, 0; \
++ CFI_UNDEFINED eip; \
++ pushfl; \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ pushl $__KERNEL_CS; \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ pushl $sysenter_past_esp; \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ CFI_REL_OFFSET eip, 0
++
++KPROBE_ENTRY(debug)
++ RING0_INT_FRAME
++ cmpl $sysenter_entry,(%esp)
++ jne debug_stack_correct
++ FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
++debug_stack_correct:
++ pushl $-1 # mark this as an int
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ xorl %edx,%edx # error code 0
++ movl %esp,%eax # pt_regs pointer
++ call do_debug
++ jmp ret_from_exception
++ CFI_ENDPROC
++KPROBE_END(debug)
++
++/*
++ * NMI is doubly nasty. It can happen _while_ we're handling
++ * a debug fault, and the debug fault hasn't yet been able to
++ * clear up the stack. So we first check whether we got an
++ * NMI on the sysenter entry path, but after that we need to
++ * check whether we got an NMI on the debug path where the debug
++ * fault happened on the sysenter path.
++ */
++KPROBE_ENTRY(nmi)
++ RING0_INT_FRAME
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ movl %ss, %eax
++ cmpw $__ESPFIX_SS, %ax
++ popl %eax
++ CFI_ADJUST_CFA_OFFSET -4
++ je nmi_espfix_stack
++ cmpl $sysenter_entry,(%esp)
++ je nmi_stack_fixup
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ movl %esp,%eax
++ /* Do not access memory above the end of our stack page,
++ * it might not exist.
++ */
++ andl $(THREAD_SIZE-1),%eax
++ cmpl $(THREAD_SIZE-20),%eax
++ popl %eax
++ CFI_ADJUST_CFA_OFFSET -4
++ jae nmi_stack_correct
++ cmpl $sysenter_entry,12(%esp)
++ je nmi_debug_stack_check
++nmi_stack_correct:
++ /* We have a RING0_INT_FRAME here */
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ xorl %edx,%edx # zero error code
++ movl %esp,%eax # pt_regs pointer
++ call do_nmi
++ jmp restore_nocheck_notrace
++ CFI_ENDPROC
++
++nmi_stack_fixup:
++ RING0_INT_FRAME
++ FIX_STACK(12,nmi_stack_correct, 1)
++ jmp nmi_stack_correct
++
++nmi_debug_stack_check:
++ /* We have a RING0_INT_FRAME here */
++ cmpw $__KERNEL_CS,16(%esp)
++ jne nmi_stack_correct
++ cmpl $debug,(%esp)
++ jb nmi_stack_correct
++ cmpl $debug_esp_fix_insn,(%esp)
++ ja nmi_stack_correct
++ FIX_STACK(24,nmi_stack_correct, 1)
++ jmp nmi_stack_correct
++
++nmi_espfix_stack:
++ /* We have a RING0_INT_FRAME here.
++ *
++ * create the pointer to lss back
++ */
++ pushl %ss
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl %esp
++ CFI_ADJUST_CFA_OFFSET 4
++ addw $4, (%esp)
++ /* copy the iret frame of 12 bytes */
++ .rept 3
++ pushl 16(%esp)
++ CFI_ADJUST_CFA_OFFSET 4
++ .endr
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ FIXUP_ESPFIX_STACK # %eax == %esp
++ xorl %edx,%edx # zero error code
++ call do_nmi
++ RESTORE_REGS
++ lss 12+4(%esp), %esp # back to espfix stack
++ CFI_ADJUST_CFA_OFFSET -24
++1: INTERRUPT_RETURN
++ CFI_ENDPROC
++.section __ex_table,"a"
++ .align 4
++ .long 1b,iret_exc
++.previous
++KPROBE_END(nmi)
++
++#ifdef CONFIG_PARAVIRT
++ENTRY(native_iret)
++1: iret
++.section __ex_table,"a"
++ .align 4
++ .long 1b,iret_exc
++.previous
++END(native_iret)
++
++ENTRY(native_irq_enable_sysexit)
++ sti
++ sysexit
++END(native_irq_enable_sysexit)
++#endif
++
++KPROBE_ENTRY(int3)
++ RING0_INT_FRAME
++ pushl $-1 # mark this as an int
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ xorl %edx,%edx # zero error code
++ movl %esp,%eax # pt_regs pointer
++ call do_int3
++ jmp ret_from_exception
++ CFI_ENDPROC
++KPROBE_END(int3)
++
++ENTRY(overflow)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_overflow
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(overflow)
++
++ENTRY(bounds)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_bounds
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(bounds)
++
++ENTRY(invalid_op)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_invalid_op
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(invalid_op)
++
++ENTRY(coprocessor_segment_overrun)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_coprocessor_segment_overrun
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(coprocessor_segment_overrun)
++
++ENTRY(invalid_TSS)
++ RING0_EC_FRAME
++ pushl $do_invalid_TSS
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(invalid_TSS)
++
++ENTRY(segment_not_present)
++ RING0_EC_FRAME
++ pushl $do_segment_not_present
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(segment_not_present)
++
++ENTRY(stack_segment)
++ RING0_EC_FRAME
++ pushl $do_stack_segment
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(stack_segment)
++
++KPROBE_ENTRY(general_protection)
++ RING0_EC_FRAME
++ pushl $do_general_protection
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++KPROBE_END(general_protection)
++
++ENTRY(alignment_check)
++ RING0_EC_FRAME
++ pushl $do_alignment_check
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(alignment_check)
++
++ENTRY(divide_error)
++ RING0_INT_FRAME
++ pushl $0 # no error code
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_divide_error
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(divide_error)
++
++#ifdef CONFIG_X86_MCE
++ENTRY(machine_check)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl machine_check_vector
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(machine_check)
++#endif
++
++ENTRY(spurious_interrupt_bug)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_spurious_interrupt_bug
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(spurious_interrupt_bug)
++
++ENTRY(kernel_thread_helper)
++ pushl $0 # fake return address for unwinder
++ CFI_STARTPROC
++ movl %edx,%eax
++ push %edx
++ CFI_ADJUST_CFA_OFFSET 4
++ call *%ebx
++ push %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ call do_exit
++ CFI_ENDPROC
++ENDPROC(kernel_thread_helper)
++
++.section .rodata,"a"
++#include "syscall_table.S"
++
++syscall_table_size=(.-sys_call_table)
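For orientation while reading the PT_* offsets used throughout the file above, this is the 2.6.22 i386 register frame they index, a sketch of struct pt_regs matching the 0(%esp)..3C(%esp) table in the header comment (the authoritative definition is include/asm-i386/ptrace.h):

	/* Matches the stack-layout table at the top of entry.S */
	struct pt_regs {
	        long ebx;       /* 0x00  PT_EBX      */
	        long ecx;       /* 0x04              */
	        long edx;       /* 0x08              */
	        long esi;       /* 0x0C              */
	        long edi;       /* 0x10              */
	        long ebp;       /* 0x14              */
	        long eax;       /* 0x18  PT_EAX      */
	        int  xds;       /* 0x1C              */
	        int  xes;       /* 0x20              */
	        int  xfs;       /* 0x24  PT_FS       */
	        long orig_eax;  /* 0x28  PT_ORIG_EAX */
	        long eip;       /* 0x2C  PT_EIP      */
	        int  xcs;       /* 0x30  PT_CS       */
	        long eflags;    /* 0x34  PT_EFLAGS   */
	        long esp;       /* 0x38  PT_OLDESP   */
	        int  xss;       /* 0x3C  PT_OLDSS    */
	};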
+diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/arch/i386/kernel/entry.S.syscallprobe linux-2.6.22-590/arch/i386/kernel/entry.S.syscallprobe
+--- linux-2.6.22-580/arch/i386/kernel/entry.S.syscallprobe 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.22-590/arch/i386/kernel/entry.S.syscallprobe 2009-02-18 09:57:23.000000000 -0500
+@@ -0,0 +1,1055 @@
++/*
++ * linux/arch/i386/entry.S
++ *
++ * Copyright (C) 1991, 1992 Linus Torvalds
++ */
++
++/*
++ * entry.S contains the system-call and fault low-level handling routines.
++ * This also contains the timer-interrupt handler, as well as all interrupts
++ * and faults that can result in a task-switch.
++ *
++ * NOTE: This code handles signal-recognition, which happens every time
++ * after a timer-interrupt and after each system call.
++ *
++ * I changed all the .align's to 4 (16 byte alignment), as that's faster
++ * on a 486.
++ *
++ * Stack layout in 'syscall_exit':
++ * ptrace needs to have all regs on the stack.
++ * if the order here is changed, it needs to be
++ * updated in fork.c:copy_process, signal.c:do_signal,
++ * ptrace.c and ptrace.h
++ *
++ * 0(%esp) - %ebx
++ * 4(%esp) - %ecx
++ * 8(%esp) - %edx
++ * C(%esp) - %esi
++ * 10(%esp) - %edi
++ * 14(%esp) - %ebp
++ * 18(%esp) - %eax
++ * 1C(%esp) - %ds
++ * 20(%esp) - %es
++ * 24(%esp) - %fs
++ * 28(%esp) - orig_eax
++ * 2C(%esp) - %eip
++ * 30(%esp) - %cs
++ * 34(%esp) - %eflags
++ * 38(%esp) - %oldesp
++ * 3C(%esp) - %oldss
++ *
++ * "current" is in register %ebx during any slow entries.
++ */
++
++#include <linux/linkage.h>
++#include <asm/thread_info.h>
++#include <asm/irqflags.h>
++#include <asm/errno.h>
++#include <asm/segment.h>
++#include <asm/smp.h>
++#include <asm/page.h>
++#include <asm/desc.h>
++#include <asm/percpu.h>
++#include <asm/dwarf2.h>
++#include "irq_vectors.h"
++
++/*
++ * We use macros for low-level operations which need to be overridden
++ * for paravirtualization. The following will never clobber any registers:
++ * INTERRUPT_RETURN (aka. "iret")
++ * GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
++ * ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
++ *
++ * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
++ * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
++ * Allowing a register to be clobbered can shrink the paravirt replacement
++ * enough to patch inline, increasing performance.
++ */
++
++#define nr_syscalls ((syscall_table_size)/4)
++
++CF_MASK = 0x00000001
++TF_MASK = 0x00000100
++IF_MASK = 0x00000200
++DF_MASK = 0x00000400
++NT_MASK = 0x00004000
++VM_MASK = 0x00020000
++
++#ifdef CONFIG_PREEMPT
++#define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
++#else
++#define preempt_stop(clobbers)
++#define resume_kernel restore_nocheck
++#endif
++
++.macro TRACE_IRQS_IRET
++#ifdef CONFIG_TRACE_IRQFLAGS
++ testl $IF_MASK,PT_EFLAGS(%esp) # interrupts off?
++ jz 1f
++ TRACE_IRQS_ON
++1:
++#endif
++.endm
++
++#ifdef CONFIG_VM86
++#define resume_userspace_sig check_userspace
++#else
++#define resume_userspace_sig resume_userspace
++#endif
++
++#define SAVE_ALL \
++ cld; \
++ pushl %fs; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ /*CFI_REL_OFFSET fs, 0;*/\
++ pushl %es; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ /*CFI_REL_OFFSET es, 0;*/\
++ pushl %ds; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ /*CFI_REL_OFFSET ds, 0;*/\
++ pushl %eax; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET eax, 0;\
++ pushl %ebp; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET ebp, 0;\
++ pushl %edi; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET edi, 0;\
++ pushl %esi; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET esi, 0;\
++ pushl %edx; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET edx, 0;\
++ pushl %ecx; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET ecx, 0;\
++ pushl %ebx; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET ebx, 0;\
++ movl $(__USER_DS), %edx; \
++ movl %edx, %ds; \
++ movl %edx, %es; \
++ movl $(__KERNEL_PERCPU), %edx; \
++ movl %edx, %fs
++
++#define RESTORE_INT_REGS \
++ popl %ebx; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE ebx;\
++ popl %ecx; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE ecx;\
++ popl %edx; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE edx;\
++ popl %esi; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE esi;\
++ popl %edi; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE edi;\
++ popl %ebp; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE ebp;\
++ popl %eax; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE eax
++
++#define RESTORE_REGS \
++ RESTORE_INT_REGS; \
++1: popl %ds; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ /*CFI_RESTORE ds;*/\
++2: popl %es; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ /*CFI_RESTORE es;*/\
++3: popl %fs; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ /*CFI_RESTORE fs;*/\
++.pushsection .fixup,"ax"; \
++4: movl $0,(%esp); \
++ jmp 1b; \
++5: movl $0,(%esp); \
++ jmp 2b; \
++6: movl $0,(%esp); \
++ jmp 3b; \
++.section __ex_table,"a";\
++ .align 4; \
++ .long 1b,4b; \
++ .long 2b,5b; \
++ .long 3b,6b; \
++.popsection
++
++#define RING0_INT_FRAME \
++ CFI_STARTPROC simple;\
++ CFI_SIGNAL_FRAME;\
++ CFI_DEF_CFA esp, 3*4;\
++ /*CFI_OFFSET cs, -2*4;*/\
++ CFI_OFFSET eip, -3*4
++
++#define RING0_EC_FRAME \
++ CFI_STARTPROC simple;\
++ CFI_SIGNAL_FRAME;\
++ CFI_DEF_CFA esp, 4*4;\
++ /*CFI_OFFSET cs, -2*4;*/\
++ CFI_OFFSET eip, -3*4
++
++#define RING0_PTREGS_FRAME \
++ CFI_STARTPROC simple;\
++ CFI_SIGNAL_FRAME;\
++ CFI_DEF_CFA esp, PT_OLDESP-PT_EBX;\
++ /*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/\
++ CFI_OFFSET eip, PT_EIP-PT_OLDESP;\
++ /*CFI_OFFSET es, PT_ES-PT_OLDESP;*/\
++ /*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/\
++ CFI_OFFSET eax, PT_EAX-PT_OLDESP;\
++ CFI_OFFSET ebp, PT_EBP-PT_OLDESP;\
++ CFI_OFFSET edi, PT_EDI-PT_OLDESP;\
++ CFI_OFFSET esi, PT_ESI-PT_OLDESP;\
++ CFI_OFFSET edx, PT_EDX-PT_OLDESP;\
++ CFI_OFFSET ecx, PT_ECX-PT_OLDESP;\
++ CFI_OFFSET ebx, PT_EBX-PT_OLDESP
++
++ENTRY(ret_from_fork)
++ CFI_STARTPROC
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ call schedule_tail
++ GET_THREAD_INFO(%ebp)
++ popl %eax
++ CFI_ADJUST_CFA_OFFSET -4
++ pushl $0x0202 # Reset kernel eflags
++ CFI_ADJUST_CFA_OFFSET 4
++ popfl
++ CFI_ADJUST_CFA_OFFSET -4
++ jmp syscall_exit
++ CFI_ENDPROC
++END(ret_from_fork)
++
++/*
++ * Return to user mode is not as complex as all this looks,
++ * but we want the default path for a system call return to
++ * go as quickly as possible which is why some of this is
++ * less clear than it otherwise should be.
++ */
++
++ # userspace resumption stub bypassing syscall exit tracing
++ ALIGN
++ RING0_PTREGS_FRAME
++ret_from_exception:
++ preempt_stop(CLBR_ANY)
++ret_from_intr:
++ GET_THREAD_INFO(%ebp)
++check_userspace:
++ movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
++ movb PT_CS(%esp), %al
++ andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
++ cmpl $USER_RPL, %eax
++ jb resume_kernel # not returning to v8086 or userspace
++
++ENTRY(resume_userspace)
++ DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
++ # setting need_resched or sigpending
++ # between sampling and the iret
++ movl TI_flags(%ebp), %ecx
++ andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
++ # int/exception return?
++ jne work_pending
++ jmp restore_all
++END(ret_from_exception)
++
++#ifdef CONFIG_PREEMPT
++ENTRY(resume_kernel)
++ DISABLE_INTERRUPTS(CLBR_ANY)
++ cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
++ jnz restore_nocheck
++need_resched:
++ movl TI_flags(%ebp), %ecx # need_resched set ?
++ testb $_TIF_NEED_RESCHED, %cl
++ jz restore_all
++ testl $IF_MASK,PT_EFLAGS(%esp) # interrupts off (exception path) ?
++ jz restore_all
++ call preempt_schedule_irq
++ jmp need_resched
++END(resume_kernel)
++#endif
++ CFI_ENDPROC
++
++/* SYSENTER_RETURN points to after the "sysenter" instruction in
++   the vsyscall page. See vsyscall-sysenter.S, which defines the symbol. */
++
++ # sysenter call handler stub
++ENTRY(sysenter_entry)
++ CFI_STARTPROC simple
++ CFI_SIGNAL_FRAME
++ CFI_DEF_CFA esp, 0
++ CFI_REGISTER esp, ebp
++ movl TSS_sysenter_esp0(%esp),%esp
++sysenter_past_esp:
++ /*
++ * No need to follow this irqs on/off section: the syscall
++ * disabled irqs and here we enable it straight after entry:
++ */
++ ENABLE_INTERRUPTS(CLBR_NONE)
++ pushl $(__USER_DS)
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET ss, 0*/
++ pushl %ebp
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET esp, 0
++ pushfl
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $(__USER_CS)
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET cs, 0*/
++ /*
++ * Push current_thread_info()->sysenter_return to the stack.
++ * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
++ * pushed above; +8 corresponds to copy_thread's esp0 setting.
++ */
++ pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET eip, 0
++
++/*
++ * Load the potential sixth argument from user stack.
++ * Careful about security.
++ */
++ cmpl $__PAGE_OFFSET-3,%ebp
++ jae syscall_fault
++1: movl (%ebp),%ebp
++.section __ex_table,"a"
++ .align 4
++ .long 1b,syscall_fault
++.previous
++
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ GET_THREAD_INFO(%ebp)
++
++ /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
++ testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
++ jnz syscall_trace_entry
++ cmpl $(nr_syscalls), %eax
++ jae syscall_badsys
++ call *sys_call_table(,%eax,4)
++ movl %eax,PT_EAX(%esp)
++ DISABLE_INTERRUPTS(CLBR_ANY)
++ TRACE_IRQS_OFF
++ movl TI_flags(%ebp), %ecx
++ testw $_TIF_ALLWORK_MASK, %cx
++ jne syscall_exit_work
++/* if something modifies registers it must also disable sysexit */
++ movl PT_EIP(%esp), %edx
++ movl PT_OLDESP(%esp), %ecx
++ xorl %ebp,%ebp
++ TRACE_IRQS_ON
++1: mov PT_FS(%esp), %fs
++ ENABLE_INTERRUPTS_SYSEXIT
++ CFI_ENDPROC
++.pushsection .fixup,"ax"
++2: movl $0,PT_FS(%esp)
++ jmp 1b
++.section __ex_table,"a"
++ .align 4
++ .long 1b,2b
++.popsection
++ENDPROC(sysenter_entry)
++
++ # system call handler stub
++ENTRY(system_call)
++ RING0_INT_FRAME # can't unwind into user space anyway
++ pushl %eax # save orig_eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ GET_THREAD_INFO(%ebp)
++ # system call tracing in operation / emulation
++ /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
++ testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
++ jnz syscall_trace_entry
++ cmpl $(nr_syscalls), %eax
++ jae syscall_badsys
++syscall_call:
++ /* Move Chopstix syscall probe here */
++ /* Save and clobber: eax, ecx, ebp */
++ pushl %ebp
++ movl %esp, %ebp
++ pushl %eax
++ pushl %ecx
++ subl $SPEC_EVENT_SIZE, %esp
++ movl rec_event, %ecx
++ testl %ecx, %ecx
++ jz carry_on
++ movl %eax, (SPEC_number-EVENT_SIZE)(%ebp)
++ leal SPEC_EVENT_SIZE(%ebp), %eax
++ movl %eax, EVENT_event_data(%ebp)
++ GET_THREAD_INFO(%eax)
++ movl %eax, EVENT_task(%ebp)
++ movl $7, EVENT_event_type(%ebp)
++ movl rec_event, %edx
++ movl $1, 4(%esp)
++ leal -EVENT_SIZE(%ebp), %eax
++ movl %eax, (%esp)
++ /*call rec_event_asm */
++carry_on:
++ addl $SPEC_EVENT_SIZE, %esp
++ popl %ecx
++ popl %eax
++ popl %ebp
++ /* End chopstix */
++
++ call *sys_call_table(,%eax,4)
++ movl %eax,PT_EAX(%esp) # store the return value
++syscall_exit:
++ DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
++ # setting need_resched or sigpending
++ # between sampling and the iret
++ TRACE_IRQS_OFF
++ testl $TF_MASK,PT_EFLAGS(%esp) # If tracing set singlestep flag on exit
++ jz no_singlestep
++ orl $_TIF_SINGLESTEP,TI_flags(%ebp)
++no_singlestep:
++ movl TI_flags(%ebp), %ecx
++ testw $_TIF_ALLWORK_MASK, %cx # current->work
++ jne syscall_exit_work
++
++restore_all:
++ movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
++ # Warning: PT_OLDSS(%esp) contains the wrong/random values if we
++ # are returning to the kernel.
++ # See comments in process.c:copy_thread() for details.
++ movb PT_OLDSS(%esp), %ah
++ movb PT_CS(%esp), %al
++ andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
++ cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
++ CFI_REMEMBER_STATE
++ je ldt_ss # returning to user-space with LDT SS
++restore_nocheck:
++ TRACE_IRQS_IRET
++restore_nocheck_notrace:
++ RESTORE_REGS
++ addl $4, %esp # skip orig_eax/error_code
++ CFI_ADJUST_CFA_OFFSET -4
++1: INTERRUPT_RETURN
++.section .fixup,"ax"
++iret_exc:
++ pushl $0 # no error code
++ pushl $do_iret_error
++ jmp error_code
++.previous
++.section __ex_table,"a"
++ .align 4
++ .long 1b,iret_exc
++.previous
++
++ CFI_RESTORE_STATE
++ldt_ss:
++ larl PT_OLDSS(%esp), %eax
++ jnz restore_nocheck
++ testl $0x00400000, %eax # returning to 32bit stack?
++	jnz restore_nocheck	# all right, normal return
++
++#ifdef CONFIG_PARAVIRT
++ /*
++ * The kernel can't run on a non-flat stack if paravirt mode
++ * is active. Rather than try to fixup the high bits of
++ * ESP, bypass this code entirely. This may break DOSemu
++ * and/or Wine support in a paravirt VM, although the option
++ * is still available to implement the setting of the high
++ * 16-bits in the INTERRUPT_RETURN paravirt-op.
++ */
++ cmpl $0, paravirt_ops+PARAVIRT_enabled
++ jne restore_nocheck
++#endif
++
++ /* If returning to userspace with 16bit stack,
++ * try to fix the higher word of ESP, as the CPU
++ * won't restore it.
++ * This is an "official" bug of all the x86-compatible
++ * CPUs, which we can try to work around to make
++ * dosemu and wine happy. */
++ movl PT_OLDESP(%esp), %eax
++ movl %esp, %edx
++ call patch_espfix_desc
++ pushl $__ESPFIX_SS
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ DISABLE_INTERRUPTS(CLBR_EAX)
++ TRACE_IRQS_OFF
++ lss (%esp), %esp
++ CFI_ADJUST_CFA_OFFSET -8
++ jmp restore_nocheck
++ CFI_ENDPROC
++ENDPROC(system_call)
++
++ # perform work that needs to be done immediately before resumption
++ ALIGN
++ RING0_PTREGS_FRAME # can't unwind into user space anyway
++work_pending:
++ testb $_TIF_NEED_RESCHED, %cl
++ jz work_notifysig
++work_resched:
++ call schedule
++ DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
++ # setting need_resched or sigpending
++ # between sampling and the iret
++ TRACE_IRQS_OFF
++ movl TI_flags(%ebp), %ecx
++ andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
++ # than syscall tracing?
++ jz restore_all
++ testb $_TIF_NEED_RESCHED, %cl
++ jnz work_resched
++
++work_notifysig: # deal with pending signals and
++ # notify-resume requests
++#ifdef CONFIG_VM86
++ testl $VM_MASK, PT_EFLAGS(%esp)
++ movl %esp, %eax
++ jne work_notifysig_v86 # returning to kernel-space or
++ # vm86-space
++ xorl %edx, %edx
++ call do_notify_resume
++ jmp resume_userspace_sig
++
++ ALIGN
++work_notifysig_v86:
++ pushl %ecx # save ti_flags for do_notify_resume
++ CFI_ADJUST_CFA_OFFSET 4
++ call save_v86_state # %eax contains pt_regs pointer
++ popl %ecx
++ CFI_ADJUST_CFA_OFFSET -4
++ movl %eax, %esp
++#else
++ movl %esp, %eax
++#endif
++ xorl %edx, %edx
++ call do_notify_resume
++ jmp resume_userspace_sig
++END(work_pending)
++
++ # perform syscall exit tracing
++ ALIGN
++syscall_trace_entry:
++ movl $-ENOSYS,PT_EAX(%esp)
++ movl %esp, %eax
++ xorl %edx,%edx
++ call do_syscall_trace
++ cmpl $0, %eax
++ jne resume_userspace # ret != 0 -> running under PTRACE_SYSEMU,
++ # so must skip actual syscall
++ movl PT_ORIG_EAX(%esp), %eax
++ cmpl $(nr_syscalls), %eax
++ jnae syscall_call
++ jmp syscall_exit
++END(syscall_trace_entry)
++
++ # perform syscall exit tracing
++ ALIGN
++syscall_exit_work:
++ testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
++ jz work_pending
++ TRACE_IRQS_ON
++ ENABLE_INTERRUPTS(CLBR_ANY) # could let do_syscall_trace() call
++ # schedule() instead
++ movl %esp, %eax
++ movl $1, %edx
++ call do_syscall_trace
++ jmp resume_userspace
++END(syscall_exit_work)
++ CFI_ENDPROC
++
++ RING0_INT_FRAME # can't unwind into user space anyway
++syscall_fault:
++ pushl %eax # save orig_eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ GET_THREAD_INFO(%ebp)
++ movl $-EFAULT,PT_EAX(%esp)
++ jmp resume_userspace
++END(syscall_fault)
++
++syscall_badsys:
++ movl $-ENOSYS,PT_EAX(%esp)
++ jmp resume_userspace
++END(syscall_badsys)
++ CFI_ENDPROC
++
++#define FIXUP_ESPFIX_STACK \
++	/* since we are on the wrong stack, we can't do this in C :( */ \
++ PER_CPU(gdt_page, %ebx); \
++ GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \
++ addl %esp, %eax; \
++ pushl $__KERNEL_DS; \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ pushl %eax; \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ lss (%esp), %esp; \
++ CFI_ADJUST_CFA_OFFSET -8;
++#define UNWIND_ESPFIX_STACK \
++ movl %ss, %eax; \
++ /* see if on espfix stack */ \
++ cmpw $__ESPFIX_SS, %ax; \
++ jne 27f; \
++ movl $__KERNEL_DS, %eax; \
++ movl %eax, %ds; \
++ movl %eax, %es; \
++ /* switch to normal stack */ \
++ FIXUP_ESPFIX_STACK; \
++27:;
++
++/*
++ * Build the entry stubs and pointer table with
++ * some assembler magic.
++ */
++.data
++ENTRY(interrupt)
++.text
++
++ENTRY(irq_entries_start)
++ RING0_INT_FRAME
++vector=0
++.rept NR_IRQS
++ ALIGN
++ .if vector
++ CFI_ADJUST_CFA_OFFSET -4
++ .endif
++1: pushl $~(vector)
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp common_interrupt
++ .previous
++ .long 1b
++ .text
++vector=vector+1
++.endr
++END(irq_entries_start)
++
++.previous
++END(interrupt)
++.previous
++
++/*
++ * the CPU automatically disables interrupts when executing an IRQ vector,
++ * so IRQ-flags tracing has to follow that:
++ */
++ ALIGN
++common_interrupt:
++ SAVE_ALL
++ TRACE_IRQS_OFF
++ movl %esp,%eax
++ call do_IRQ
++ jmp ret_from_intr
++ENDPROC(common_interrupt)
++ CFI_ENDPROC
++
++#define BUILD_INTERRUPT(name, nr) \
++ENTRY(name) \
++ RING0_INT_FRAME; \
++ pushl $~(nr); \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ SAVE_ALL; \
++ TRACE_IRQS_OFF \
++ movl %esp,%eax; \
++ call smp_##name; \
++ jmp ret_from_intr; \
++ CFI_ENDPROC; \
++ENDPROC(name)
++
++/* The include is where all of the SMP etc. interrupts come from */
++#include "entry_arch.h"
++
++KPROBE_ENTRY(page_fault)
++ RING0_EC_FRAME
++ pushl $do_page_fault
++ CFI_ADJUST_CFA_OFFSET 4
++ ALIGN
++error_code:
++ /* the function address is in %fs's slot on the stack */
++ pushl %es
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET es, 0*/
++ pushl %ds
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET ds, 0*/
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET eax, 0
++ pushl %ebp
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET ebp, 0
++ pushl %edi
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET edi, 0
++ pushl %esi
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET esi, 0
++ pushl %edx
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET edx, 0
++ pushl %ecx
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET ecx, 0
++ pushl %ebx
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET ebx, 0
++ cld
++ pushl %fs
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET fs, 0*/
++ movl $(__KERNEL_PERCPU), %ecx
++ movl %ecx, %fs
++ UNWIND_ESPFIX_STACK
++ popl %ecx
++ CFI_ADJUST_CFA_OFFSET -4
++ /*CFI_REGISTER es, ecx*/
++ movl PT_FS(%esp), %edi # get the function address
++ movl PT_ORIG_EAX(%esp), %edx # get the error code
++ movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
++ mov %ecx, PT_FS(%esp)
++ /*CFI_REL_OFFSET fs, ES*/
++ movl $(__USER_DS), %ecx
++ movl %ecx, %ds
++ movl %ecx, %es
++ movl %esp,%eax # pt_regs pointer
++ call *%edi
++ jmp ret_from_exception
++ CFI_ENDPROC
++KPROBE_END(page_fault)
++
++ENTRY(coprocessor_error)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_coprocessor_error
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(coprocessor_error)
++
++ENTRY(simd_coprocessor_error)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_simd_coprocessor_error
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(simd_coprocessor_error)
++
++ENTRY(device_not_available)
++ RING0_INT_FRAME
++ pushl $-1 # mark this as an int
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ GET_CR0_INTO_EAX
++ testl $0x4, %eax # EM (math emulation bit)
++ jne device_not_available_emulate
++ preempt_stop(CLBR_ANY)
++ call math_state_restore
++ jmp ret_from_exception
++device_not_available_emulate:
++ pushl $0 # temporary storage for ORIG_EIP
++ CFI_ADJUST_CFA_OFFSET 4
++ call math_emulate
++ addl $4, %esp
++ CFI_ADJUST_CFA_OFFSET -4
++ jmp ret_from_exception
++ CFI_ENDPROC
++END(device_not_available)
++
++/*
++ * Debug traps and NMI can happen at the one SYSENTER instruction
++ * that sets up the real kernel stack. Check here, since we can't
++ * allow the wrong stack to be used.
++ *
++ * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
++ * already pushed 3 words if it hits on the sysenter instruction:
++ * eflags, cs and eip.
++ *
++ * We just load the right stack, and push the three (known) values
++ * by hand onto the new stack - while updating the return eip past
++ * the instruction that would have done it for sysenter.
++ */
++#define FIX_STACK(offset, ok, label) \
++ cmpw $__KERNEL_CS,4(%esp); \
++ jne ok; \
++label: \
++ movl TSS_sysenter_esp0+offset(%esp),%esp; \
++ CFI_DEF_CFA esp, 0; \
++ CFI_UNDEFINED eip; \
++ pushfl; \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ pushl $__KERNEL_CS; \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ pushl $sysenter_past_esp; \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ CFI_REL_OFFSET eip, 0
++
++KPROBE_ENTRY(debug)
++ RING0_INT_FRAME
++ cmpl $sysenter_entry,(%esp)
++ jne debug_stack_correct
++ FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
++debug_stack_correct:
++ pushl $-1 # mark this as an int
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ xorl %edx,%edx # error code 0
++ movl %esp,%eax # pt_regs pointer
++ call do_debug
++ jmp ret_from_exception
++ CFI_ENDPROC
++KPROBE_END(debug)
++
++/*
++ * NMI is doubly nasty. It can happen _while_ we're handling
++ * a debug fault, and the debug fault hasn't yet been able to
++ * clear up the stack. So we first check whether we got an
++ * NMI on the sysenter entry path, but after that we need to
++ * check whether we got an NMI on the debug path where the debug
++ * fault happened on the sysenter path.
++ */
++KPROBE_ENTRY(nmi)
++ RING0_INT_FRAME
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ movl %ss, %eax
++ cmpw $__ESPFIX_SS, %ax
++ popl %eax
++ CFI_ADJUST_CFA_OFFSET -4
++ je nmi_espfix_stack
++ cmpl $sysenter_entry,(%esp)
++ je nmi_stack_fixup
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ movl %esp,%eax
++ /* Do not access memory above the end of our stack page,
++ * it might not exist.
++ */
++ andl $(THREAD_SIZE-1),%eax
++ cmpl $(THREAD_SIZE-20),%eax
++ popl %eax
++ CFI_ADJUST_CFA_OFFSET -4
++ jae nmi_stack_correct
++ cmpl $sysenter_entry,12(%esp)
++ je nmi_debug_stack_check
++nmi_stack_correct:
++ /* We have a RING0_INT_FRAME here */
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ xorl %edx,%edx # zero error code
++ movl %esp,%eax # pt_regs pointer
++ call do_nmi
++ jmp restore_nocheck_notrace
++ CFI_ENDPROC
++
++nmi_stack_fixup:
++ RING0_INT_FRAME
++ FIX_STACK(12,nmi_stack_correct, 1)
++ jmp nmi_stack_correct
++
++nmi_debug_stack_check:
++ /* We have a RING0_INT_FRAME here */
++ cmpw $__KERNEL_CS,16(%esp)
++ jne nmi_stack_correct
++ cmpl $debug,(%esp)
++ jb nmi_stack_correct
++ cmpl $debug_esp_fix_insn,(%esp)
++ ja nmi_stack_correct
++ FIX_STACK(24,nmi_stack_correct, 1)
++ jmp nmi_stack_correct
++
++nmi_espfix_stack:
++ /* We have a RING0_INT_FRAME here.
++ *
++ * create the pointer to lss back
++ */
++ pushl %ss
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl %esp
++ CFI_ADJUST_CFA_OFFSET 4
++ addw $4, (%esp)
++ /* copy the iret frame of 12 bytes */
++ .rept 3
++ pushl 16(%esp)
++ CFI_ADJUST_CFA_OFFSET 4
++ .endr
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ FIXUP_ESPFIX_STACK # %eax == %esp
++ xorl %edx,%edx # zero error code
++ call do_nmi
++ RESTORE_REGS
++ lss 12+4(%esp), %esp # back to espfix stack
++ CFI_ADJUST_CFA_OFFSET -24
++1: INTERRUPT_RETURN
++ CFI_ENDPROC
++.section __ex_table,"a"
++ .align 4
++ .long 1b,iret_exc
++.previous
++KPROBE_END(nmi)
++
++#ifdef CONFIG_PARAVIRT
++ENTRY(native_iret)
++1: iret
++.section __ex_table,"a"
++ .align 4
++ .long 1b,iret_exc
++.previous
++END(native_iret)
++
++ENTRY(native_irq_enable_sysexit)
++ sti
++ sysexit
++END(native_irq_enable_sysexit)
++#endif
++
++KPROBE_ENTRY(int3)
++ RING0_INT_FRAME
++ pushl $-1 # mark this as an int
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ xorl %edx,%edx # zero error code
++ movl %esp,%eax # pt_regs pointer
++ call do_int3
++ jmp ret_from_exception
++ CFI_ENDPROC
++KPROBE_END(int3)
++
++ENTRY(overflow)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_overflow
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(overflow)
++
++ENTRY(bounds)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_bounds
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(bounds)
++
++ENTRY(invalid_op)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_invalid_op
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(invalid_op)
++
++ENTRY(coprocessor_segment_overrun)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_coprocessor_segment_overrun
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(coprocessor_segment_overrun)
++
++ENTRY(invalid_TSS)
++ RING0_EC_FRAME
++ pushl $do_invalid_TSS
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(invalid_TSS)
++
++ENTRY(segment_not_present)
++ RING0_EC_FRAME
++ pushl $do_segment_not_present
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(segment_not_present)
++
++ENTRY(stack_segment)
++ RING0_EC_FRAME
++ pushl $do_stack_segment
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(stack_segment)
++
++KPROBE_ENTRY(general_protection)
++ RING0_EC_FRAME
++ pushl $do_general_protection
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++KPROBE_END(general_protection)
++
++ENTRY(alignment_check)
++ RING0_EC_FRAME
++ pushl $do_alignment_check
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(alignment_check)
++
++ENTRY(divide_error)
++ RING0_INT_FRAME
++ pushl $0 # no error code
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_divide_error
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(divide_error)
++
++#ifdef CONFIG_X86_MCE
++ENTRY(machine_check)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl machine_check_vector
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(machine_check)
++#endif
++
++ENTRY(spurious_interrupt_bug)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_spurious_interrupt_bug
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(spurious_interrupt_bug)
++
++ENTRY(kernel_thread_helper)
++ pushl $0 # fake return address for unwinder
++ CFI_STARTPROC
++ movl %edx,%eax
++ push %edx
++ CFI_ADJUST_CFA_OFFSET 4
++ call *%ebx
++ push %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ call do_exit
++ CFI_ENDPROC
++ENDPROC(kernel_thread_helper)
++
++.section .rodata,"a"
++#include "syscall_table.S"
++
++syscall_table_size=(.-sys_call_table)
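The difference between this file and the first copy of entry above is the probe block at syscall_call. Restated in C for auditability; this is a sketch, with the register discipline and slot arithmetic read off the assembly, and the hook call itself (rec_event_asm) left commented out exactly as the patch leaves it:

	/* C-level sketch of the Chopstix probe the assembly inserts before
	 * "call *sys_call_table(,%eax,4)".  Types mirror declarations made
	 * elsewhere in this patch (see the fault.c hunk below). */
	struct task_struct;
	struct thread_info;

	extern void (*rec_event)(void *data, unsigned int count);

	struct event_spec {
	        unsigned long pc;
	        unsigned long dcookie;
	        unsigned count;
	        unsigned int number;
	};

	struct event {
	        void *event_data;
	        unsigned int event_type;
	        struct task_struct *task;
	};

	static void chopstix_syscall_probe(unsigned int nr_eax, struct thread_info *ti)
	{
	        struct event ev;
	        struct event_spec spec;

	        if (!rec_event)                 /* "testl %ecx,%ecx; jz carry_on" */
	                return;
	        spec.number   = nr_eax;         /* syscall number from %eax */
	        ev.event_data = &spec;
	        /* Note: the asm stores the thread_info pointer from
	         * GET_THREAD_INFO into the task slot, not ti->task. */
	        ev.task       = (struct task_struct *)ti;
	        ev.event_type = 7;
	        /* rec_event(&ev, 1);  -- "call rec_event_asm" is commented out */
	}

The explicit save/restore of %eax, %ecx and %ebp in the assembly exists because the probe runs between SAVE_ALL and the dispatch: %eax still carries the syscall number about to index sys_call_table, and %ebp the thread_info pointer, so both must survive the probe untouched.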
+diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/arch/i386/mm/fault.c linux-2.6.22-590/arch/i386/mm/fault.c
+--- linux-2.6.22-580/arch/i386/mm/fault.c 2009-02-18 09:56:02.000000000 -0500
++++ linux-2.6.22-590/arch/i386/mm/fault.c 2009-02-18 09:57:23.000000000 -0500
+@@ -60,6 +60,15 @@
+ DIE_PAGE_FAULT, &args);
+ }
+
++
++extern void (*rec_event)(void *,unsigned int);
++struct event_spec {
++ unsigned long pc;
++ unsigned long dcookie;
++ unsigned count;
++ unsigned char reason;
++};
++
+ /*
+ * Return EIP plus the CS segment base. The segment limit is also
+ * adjusted, clamped to the kernel/user address space (whichever is
+@@ -296,6 +305,8 @@
+ * bit 3 == 1 means use of reserved bit detected
+ * bit 4 == 1 means fault was an instruction fetch
+ */
++
++
+ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
+ unsigned long error_code)
+ {
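The fault.c hunk above only declares the rec_event hook and a page-fault flavour of event_spec; the call site inside do_page_fault is not part of the shown context. A minimal sketch of what such a probe would look like, assuming it follows the same conventions as the other CONFIG_CHOPSTIX sites in this patch (struct event from linux/arrays.h, a module-chosen event_type; the value 4 and the helper name below are assumptions, not taken from the patch):

#ifdef CONFIG_CHOPSTIX
/* Sketch only: a do_page_fault probe in the style of the other
 * rec_event call sites in this patch; event_type 4 is assumed. */
static inline void chopstix_fault_event(struct pt_regs *regs,
					unsigned long error_code)
{
	if (rec_event) {
		struct event event;
		struct event_spec espec;

		espec.pc = regs->eip;		/* faulting instruction */
		espec.reason = error_code & 1;	/* 0 not-present, 1 protection */
		espec.count = 1;
		event.event_data = &espec;
		event.task = current;
		event.event_type = 4;		/* assumed event-array slot */
		(*rec_event)(&event, 1);
	}
}
#endif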
+diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/block/ll_rw_blk.c linux-2.6.22-590/block/ll_rw_blk.c
+--- linux-2.6.22-580/block/ll_rw_blk.c 2009-02-18 09:55:48.000000000 -0500
++++ linux-2.6.22-590/block/ll_rw_blk.c 2009-02-18 09:57:23.000000000 -0500
@@ -30,6 +30,7 @@
#include <linux/cpu.h>
#include <linux/blktrace_api.h>
ret = q->make_request_fn(q, bio);
} while (ret);
}
-diff -Nurb linux-2.6.22-580/drivers/oprofile/cpu_buffer.c linux-2.6.22-590/drivers/oprofile/cpu_buffer.c
---- linux-2.6.22-580/drivers/oprofile/cpu_buffer.c 2007-07-08 19:32:17.000000000 -0400
-+++ linux-2.6.22-590/drivers/oprofile/cpu_buffer.c 2008-04-30 09:29:41.000000000 -0400
-@@ -21,6 +21,7 @@
- #include <linux/oprofile.h>
- #include <linux/vmalloc.h>
- #include <linux/errno.h>
-+#include <linux/arrays.h>
-
- #include "event_buffer.h"
- #include "cpu_buffer.h"
-@@ -143,6 +144,17 @@
- b->head_pos = 0;
- }
-
-+#ifdef CONFIG_CHOPSTIX
+diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/chopstix.S linux-2.6.22-590/chopstix.S
+--- linux-2.6.22-580/chopstix.S 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.22-590/chopstix.S 2009-02-18 09:57:23.000000000 -0500
+@@ -0,0 +1,1055 @@
++/*
++ * linux/arch/i386/entry.S
++ *
++ * Copyright (C) 1991, 1992 Linus Torvalds
++ */
+
-+struct event_spec {
-+ unsigned int pc;
-+ unsigned long dcookie;
-+ unsigned count;
-+};
++/*
++ * entry.S contains the system-call and fault low-level handling routines.
++ * This also contains the timer-interrupt handler, as well as all interrupts
++ * and faults that can result in a task-switch.
++ *
++ * NOTE: This code handles signal-recognition, which happens every time
++ * after a timer-interrupt and after each system call.
++ *
++ * I changed all the .align's to 4 (16 byte alignment), as that's faster
++ * on a 486.
++ *
++ * Stack layout in 'syscall_exit':
++ * ptrace needs to have all regs on the stack.
++ * if the order here is changed, it needs to be
++ * updated in fork.c:copy_process, signal.c:do_signal,
++ * ptrace.c and ptrace.h
++ *
++ * 0(%esp) - %ebx
++ * 4(%esp) - %ecx
++ * 8(%esp) - %edx
++ * C(%esp) - %esi
++ * 10(%esp) - %edi
++ * 14(%esp) - %ebp
++ * 18(%esp) - %eax
++ * 1C(%esp) - %ds
++ * 20(%esp) - %es
++ * 24(%esp) - %fs
++ * 28(%esp) - orig_eax
++ * 2C(%esp) - %eip
++ * 30(%esp) - %cs
++ * 34(%esp) - %eflags
++ * 38(%esp) - %oldesp
++ * 3C(%esp) - %oldss
++ *
++ * "current" is in register %ebx during any slow entries.
++ */
+
-+extern void (*rec_event)(void *,unsigned int);
++#include <linux/linkage.h>
++#include <asm/thread_info.h>
++#include <asm/irqflags.h>
++#include <asm/errno.h>
++#include <asm/segment.h>
++#include <asm/smp.h>
++#include <asm/page.h>
++#include <asm/desc.h>
++#include <asm/percpu.h>
++#include <asm/dwarf2.h>
++#include "irq_vectors.h"
++
++/*
++ * We use macros for low-level operations which need to be overridden
++ * for paravirtualization. The following will never clobber any registers:
++ * INTERRUPT_RETURN (aka. "iret")
++ * GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
++ * ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
++ *
++ * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
++ * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
++ * Allowing a register to be clobbered can shrink the paravirt replacement
++ * enough to patch inline, increasing performance.
++ */
++
++#define nr_syscalls ((syscall_table_size)/4)
++
++CF_MASK = 0x00000001
++TF_MASK = 0x00000100
++IF_MASK = 0x00000200
++DF_MASK = 0x00000400
++NT_MASK = 0x00004000
++VM_MASK = 0x00020000
++
++#ifdef CONFIG_PREEMPT
++#define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
++#else
++#define preempt_stop(clobbers)
++#define resume_kernel restore_nocheck
+#endif
+
- static inline void
- add_sample(struct oprofile_cpu_buffer * cpu_buf,
- unsigned long pc, unsigned long event)
-@@ -151,6 +163,7 @@
- entry->eip = pc;
- entry->event = event;
- increment_head(cpu_buf);
++.macro TRACE_IRQS_IRET
++#ifdef CONFIG_TRACE_IRQFLAGS
++ testl $IF_MASK,PT_EFLAGS(%esp) # interrupts off?
++ jz 1f
++ TRACE_IRQS_ON
++1:
++#endif
++.endm
+
- }
-
- static inline void
-@@ -241,8 +254,28 @@
- {
- int is_kernel = !user_mode(regs);
- unsigned long pc = profile_pc(regs);
-+ int res=0;
-
-+#ifdef CONFIG_CHOPSTIX
-+ if (rec_event) {
-+ struct event esig;
-+ struct event_spec espec;
-+ esig.task = current;
-+ espec.pc=pc;
-+ espec.count=1;
-+ esig.event_data=&espec;
-+ esig.event_type=event; /* index in the event array currently set up */
-+ /* make sure the counters are loaded in the order we want them to show up*/
-+ (*rec_event)(&esig, 1);
-+ }
-+ else {
- oprofile_add_ext_sample(pc, regs, event, is_kernel);
-+ }
++#ifdef CONFIG_VM86
++#define resume_userspace_sig check_userspace
+#else
-+ oprofile_add_ext_sample(pc, regs, event, is_kernel);
++#define resume_userspace_sig resume_userspace
+#endif
+
++#define SAVE_ALL \
++ cld; \
++ pushl %fs; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ /*CFI_REL_OFFSET fs, 0;*/\
++ pushl %es; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ /*CFI_REL_OFFSET es, 0;*/\
++ pushl %ds; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ /*CFI_REL_OFFSET ds, 0;*/\
++ pushl %eax; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET eax, 0;\
++ pushl %ebp; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET ebp, 0;\
++ pushl %edi; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET edi, 0;\
++ pushl %esi; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET esi, 0;\
++ pushl %edx; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET edx, 0;\
++ pushl %ecx; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET ecx, 0;\
++ pushl %ebx; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET ebx, 0;\
++ movl $(__USER_DS), %edx; \
++ movl %edx, %ds; \
++ movl %edx, %es; \
++ movl $(__KERNEL_PERCPU), %edx; \
++ movl %edx, %fs
+
- }
-
- void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
-diff -Nurb linux-2.6.22-580/fs/bio.c linux-2.6.22-590/fs/bio.c
---- linux-2.6.22-580/fs/bio.c 2007-07-08 19:32:17.000000000 -0400
-+++ linux-2.6.22-590/fs/bio.c 2008-04-30 09:29:41.000000000 -0400
++#define RESTORE_INT_REGS \
++ popl %ebx; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE ebx;\
++ popl %ecx; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE ecx;\
++ popl %edx; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE edx;\
++ popl %esi; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE esi;\
++ popl %edi; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE edi;\
++ popl %ebp; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE ebp;\
++ popl %eax; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE eax
++
++#define RESTORE_REGS \
++ RESTORE_INT_REGS; \
++1: popl %ds; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ /*CFI_RESTORE ds;*/\
++2: popl %es; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ /*CFI_RESTORE es;*/\
++3: popl %fs; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ /*CFI_RESTORE fs;*/\
++.pushsection .fixup,"ax"; \
++4: movl $0,(%esp); \
++ jmp 1b; \
++5: movl $0,(%esp); \
++ jmp 2b; \
++6: movl $0,(%esp); \
++ jmp 3b; \
++.section __ex_table,"a";\
++ .align 4; \
++ .long 1b,4b; \
++ .long 2b,5b; \
++ .long 3b,6b; \
++.popsection
++
++#define RING0_INT_FRAME \
++ CFI_STARTPROC simple;\
++ CFI_SIGNAL_FRAME;\
++ CFI_DEF_CFA esp, 3*4;\
++ /*CFI_OFFSET cs, -2*4;*/\
++ CFI_OFFSET eip, -3*4
++
++#define RING0_EC_FRAME \
++ CFI_STARTPROC simple;\
++ CFI_SIGNAL_FRAME;\
++ CFI_DEF_CFA esp, 4*4;\
++ /*CFI_OFFSET cs, -2*4;*/\
++ CFI_OFFSET eip, -3*4
++
++#define RING0_PTREGS_FRAME \
++ CFI_STARTPROC simple;\
++ CFI_SIGNAL_FRAME;\
++ CFI_DEF_CFA esp, PT_OLDESP-PT_EBX;\
++ /*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/\
++ CFI_OFFSET eip, PT_EIP-PT_OLDESP;\
++ /*CFI_OFFSET es, PT_ES-PT_OLDESP;*/\
++ /*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/\
++ CFI_OFFSET eax, PT_EAX-PT_OLDESP;\
++ CFI_OFFSET ebp, PT_EBP-PT_OLDESP;\
++ CFI_OFFSET edi, PT_EDI-PT_OLDESP;\
++ CFI_OFFSET esi, PT_ESI-PT_OLDESP;\
++ CFI_OFFSET edx, PT_EDX-PT_OLDESP;\
++ CFI_OFFSET ecx, PT_ECX-PT_OLDESP;\
++ CFI_OFFSET ebx, PT_EBX-PT_OLDESP
++
++ENTRY(ret_from_fork)
++ CFI_STARTPROC
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ call schedule_tail
++ GET_THREAD_INFO(%ebp)
++ popl %eax
++ CFI_ADJUST_CFA_OFFSET -4
++ pushl $0x0202 # Reset kernel eflags
++ CFI_ADJUST_CFA_OFFSET 4
++ popfl
++ CFI_ADJUST_CFA_OFFSET -4
++ jmp syscall_exit
++ CFI_ENDPROC
++END(ret_from_fork)
++
++/*
++ * Return to user mode is not as complex as all this looks,
++ * but we want the default path for a system call return to
++ * go as quickly as possible which is why some of this is
++ * less clear than it otherwise should be.
++ */
++
++ # userspace resumption stub bypassing syscall exit tracing
++ ALIGN
++ RING0_PTREGS_FRAME
++ret_from_exception:
++ preempt_stop(CLBR_ANY)
++ret_from_intr:
++ GET_THREAD_INFO(%ebp)
++check_userspace:
++ movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
++ movb PT_CS(%esp), %al
++ andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
++ cmpl $USER_RPL, %eax
++ jb resume_kernel # not returning to v8086 or userspace
++
++ENTRY(resume_userspace)
++ DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
++ # setting need_resched or sigpending
++ # between sampling and the iret
++ movl TI_flags(%ebp), %ecx
++ andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
++ # int/exception return?
++ jne work_pending
++ jmp restore_all
++END(ret_from_exception)
++
++#ifdef CONFIG_PREEMPT
++ENTRY(resume_kernel)
++ DISABLE_INTERRUPTS(CLBR_ANY)
++ cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
++ jnz restore_nocheck
++need_resched:
++ movl TI_flags(%ebp), %ecx # need_resched set ?
++ testb $_TIF_NEED_RESCHED, %cl
++ jz restore_all
++ testl $IF_MASK,PT_EFLAGS(%esp) # interrupts off (exception path) ?
++ jz restore_all
++ call preempt_schedule_irq
++ jmp need_resched
++END(resume_kernel)
++#endif
++ CFI_ENDPROC
++
++/* SYSENTER_RETURN points to after the "sysenter" instruction in
++ the vsyscall page. See vsyscall-sysenter.S, which defines the symbol. */
++
++ # sysenter call handler stub
++ENTRY(sysenter_entry)
++ CFI_STARTPROC simple
++ CFI_SIGNAL_FRAME
++ CFI_DEF_CFA esp, 0
++ CFI_REGISTER esp, ebp
++ movl TSS_sysenter_esp0(%esp),%esp
++sysenter_past_esp:
++ /*
++ * No need to follow this irqs on/off section: the syscall
++ * disabled irqs and here we enable it straight after entry:
++ */
++ ENABLE_INTERRUPTS(CLBR_NONE)
++ pushl $(__USER_DS)
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET ss, 0*/
++ pushl %ebp
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET esp, 0
++ pushfl
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $(__USER_CS)
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET cs, 0*/
++ /*
++ * Push current_thread_info()->sysenter_return to the stack.
++ * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
++ * pushed above; +8 corresponds to copy_thread's esp0 setting.
++ */
++ pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET eip, 0
++
++/*
++ * Load the potential sixth argument from user stack.
++ * Careful about security.
++ */
++ cmpl $__PAGE_OFFSET-3,%ebp
++ jae syscall_fault
++1: movl (%ebp),%ebp
++.section __ex_table,"a"
++ .align 4
++ .long 1b,syscall_fault
++.previous
++
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ GET_THREAD_INFO(%ebp)
++
++ /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
++ testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
++ jnz syscall_trace_entry
++ cmpl $(nr_syscalls), %eax
++ jae syscall_badsys
++ call *sys_call_table(,%eax,4)
++ movl %eax,PT_EAX(%esp)
++ DISABLE_INTERRUPTS(CLBR_ANY)
++ TRACE_IRQS_OFF
++ movl TI_flags(%ebp), %ecx
++ testw $_TIF_ALLWORK_MASK, %cx
++ jne syscall_exit_work
++/* if something modifies registers it must also disable sysexit */
++ movl PT_EIP(%esp), %edx
++ movl PT_OLDESP(%esp), %ecx
++ xorl %ebp,%ebp
++ TRACE_IRQS_ON
++1: mov PT_FS(%esp), %fs
++ ENABLE_INTERRUPTS_SYSEXIT
++ CFI_ENDPROC
++.pushsection .fixup,"ax"
++2: movl $0,PT_FS(%esp)
++ jmp 1b
++.section __ex_table,"a"
++ .align 4
++ .long 1b,2b
++.popsection
++ENDPROC(sysenter_entry)
++
++ # system call handler stub
++ENTRY(system_call)
++ RING0_INT_FRAME # can't unwind into user space anyway
++ pushl %eax # save orig_eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ GET_THREAD_INFO(%ebp)
++ # system call tracing in operation / emulation
++ /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
++ testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
++ jnz syscall_trace_entry
++ cmpl $(nr_syscalls), %eax
++ jae syscall_badsys
++syscall_call:
++	/* Chopstix syscall probe. The wrapper below saves eax, ecx and ebp
++	 * and reserves scratch stack; the probe body itself is currently
++	 * commented out and is kept for reference. */
++	pushl %ebp
++	movl %esp, %ebp
++	pushl %eax
++	pushl %ecx
++	subl $16, %esp
++	/*
++ movl rec_event, %ecx
++ testl %ecx, %ecx
++ jz carry_on
++ movl %eax, (SPEC_number-EVENT_SIZE)(%ebp)
++ leal SPEC_EVENT_SIZE(%ebp), %eax
++ movl %eax, EVENT_event_data(%ebp)
++ GET_THREAD_INFO(%eax)
++ movl %eax, EVENT_task(%ebp)
++ movl $7, EVENT_event_type(%ebp)
++ movl rec_event, %edx
++ movl $1, 4(%esp)
++ leal -EVENT_SIZE(%ebp), %eax
++ movl %eax, (%esp)
++ call rec_event_asm
++carry_on: */
++ addl $16, %esp
++ popl %ecx
++ popl %eax
++ popl %ebp
++ /* End chopstix */
++
++ call *sys_call_table(,%eax,4)
++ movl %eax,PT_EAX(%esp) # store the return value
++syscall_exit:
++ DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
++ # setting need_resched or sigpending
++ # between sampling and the iret
++ TRACE_IRQS_OFF
++ testl $TF_MASK,PT_EFLAGS(%esp) # If tracing set singlestep flag on exit
++ jz no_singlestep
++ orl $_TIF_SINGLESTEP,TI_flags(%ebp)
++no_singlestep:
++ movl TI_flags(%ebp), %ecx
++ testw $_TIF_ALLWORK_MASK, %cx # current->work
++ jne syscall_exit_work
++
++restore_all:
++ movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
++ # Warning: PT_OLDSS(%esp) contains the wrong/random values if we
++ # are returning to the kernel.
++ # See comments in process.c:copy_thread() for details.
++ movb PT_OLDSS(%esp), %ah
++ movb PT_CS(%esp), %al
++ andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
++ cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
++ CFI_REMEMBER_STATE
++ je ldt_ss # returning to user-space with LDT SS
++restore_nocheck:
++ TRACE_IRQS_IRET
++restore_nocheck_notrace:
++ RESTORE_REGS
++ addl $4, %esp # skip orig_eax/error_code
++ CFI_ADJUST_CFA_OFFSET -4
++1: INTERRUPT_RETURN
++.section .fixup,"ax"
++iret_exc:
++ pushl $0 # no error code
++ pushl $do_iret_error
++ jmp error_code
++.previous
++.section __ex_table,"a"
++ .align 4
++ .long 1b,iret_exc
++.previous
++
++ CFI_RESTORE_STATE
++ldt_ss:
++ larl PT_OLDSS(%esp), %eax
++ jnz restore_nocheck
++ testl $0x00400000, %eax # returning to 32bit stack?
++	jnz restore_nocheck	# all right, normal return
++
++#ifdef CONFIG_PARAVIRT
++ /*
++ * The kernel can't run on a non-flat stack if paravirt mode
++ * is active. Rather than try to fixup the high bits of
++ * ESP, bypass this code entirely. This may break DOSemu
++ * and/or Wine support in a paravirt VM, although the option
++ * is still available to implement the setting of the high
++ * 16-bits in the INTERRUPT_RETURN paravirt-op.
++ */
++ cmpl $0, paravirt_ops+PARAVIRT_enabled
++ jne restore_nocheck
++#endif
++
++ /* If returning to userspace with 16bit stack,
++ * try to fix the higher word of ESP, as the CPU
++ * won't restore it.
++ * This is an "official" bug of all the x86-compatible
++ * CPUs, which we can try to work around to make
++ * dosemu and wine happy. */
++ movl PT_OLDESP(%esp), %eax
++ movl %esp, %edx
++ call patch_espfix_desc
++ pushl $__ESPFIX_SS
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ DISABLE_INTERRUPTS(CLBR_EAX)
++ TRACE_IRQS_OFF
++ lss (%esp), %esp
++ CFI_ADJUST_CFA_OFFSET -8
++ jmp restore_nocheck
++ CFI_ENDPROC
++ENDPROC(system_call)
++
++ # perform work that needs to be done immediately before resumption
++ ALIGN
++ RING0_PTREGS_FRAME # can't unwind into user space anyway
++work_pending:
++ testb $_TIF_NEED_RESCHED, %cl
++ jz work_notifysig
++work_resched:
++ call schedule
++ DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
++ # setting need_resched or sigpending
++ # between sampling and the iret
++ TRACE_IRQS_OFF
++ movl TI_flags(%ebp), %ecx
++ andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
++ # than syscall tracing?
++ jz restore_all
++ testb $_TIF_NEED_RESCHED, %cl
++ jnz work_resched
++
++work_notifysig: # deal with pending signals and
++ # notify-resume requests
++#ifdef CONFIG_VM86
++ testl $VM_MASK, PT_EFLAGS(%esp)
++ movl %esp, %eax
++ jne work_notifysig_v86 # returning to kernel-space or
++ # vm86-space
++ xorl %edx, %edx
++ call do_notify_resume
++ jmp resume_userspace_sig
++
++ ALIGN
++work_notifysig_v86:
++ pushl %ecx # save ti_flags for do_notify_resume
++ CFI_ADJUST_CFA_OFFSET 4
++ call save_v86_state # %eax contains pt_regs pointer
++ popl %ecx
++ CFI_ADJUST_CFA_OFFSET -4
++ movl %eax, %esp
++#else
++ movl %esp, %eax
++#endif
++ xorl %edx, %edx
++ call do_notify_resume
++ jmp resume_userspace_sig
++END(work_pending)
++
++ # perform syscall exit tracing
++ ALIGN
++syscall_trace_entry:
++ movl $-ENOSYS,PT_EAX(%esp)
++ movl %esp, %eax
++ xorl %edx,%edx
++ call do_syscall_trace
++ cmpl $0, %eax
++ jne resume_userspace # ret != 0 -> running under PTRACE_SYSEMU,
++ # so must skip actual syscall
++ movl PT_ORIG_EAX(%esp), %eax
++ cmpl $(nr_syscalls), %eax
++ jnae syscall_call
++ jmp syscall_exit
++END(syscall_trace_entry)
++
++ # perform syscall exit tracing
++ ALIGN
++syscall_exit_work:
++ testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
++ jz work_pending
++ TRACE_IRQS_ON
++ ENABLE_INTERRUPTS(CLBR_ANY) # could let do_syscall_trace() call
++ # schedule() instead
++ movl %esp, %eax
++ movl $1, %edx
++ call do_syscall_trace
++ jmp resume_userspace
++END(syscall_exit_work)
++ CFI_ENDPROC
++
++ RING0_INT_FRAME # can't unwind into user space anyway
++syscall_fault:
++ pushl %eax # save orig_eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ GET_THREAD_INFO(%ebp)
++ movl $-EFAULT,PT_EAX(%esp)
++ jmp resume_userspace
++END(syscall_fault)
++
++syscall_badsys:
++ movl $-ENOSYS,PT_EAX(%esp)
++ jmp resume_userspace
++END(syscall_badsys)
++ CFI_ENDPROC
++
++#define FIXUP_ESPFIX_STACK \
++	/* since we are on the wrong stack, we can't make this C code :( */ \
++ PER_CPU(gdt_page, %ebx); \
++ GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \
++ addl %esp, %eax; \
++ pushl $__KERNEL_DS; \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ pushl %eax; \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ lss (%esp), %esp; \
++ CFI_ADJUST_CFA_OFFSET -8;
++#define UNWIND_ESPFIX_STACK \
++ movl %ss, %eax; \
++ /* see if on espfix stack */ \
++ cmpw $__ESPFIX_SS, %ax; \
++ jne 27f; \
++ movl $__KERNEL_DS, %eax; \
++ movl %eax, %ds; \
++ movl %eax, %es; \
++ /* switch to normal stack */ \
++ FIXUP_ESPFIX_STACK; \
++27:;
++
++/*
++ * Build the entry stubs and pointer table with
++ * some assembler magic.
++ */
++.data
++ENTRY(interrupt)
++.text
++
++ENTRY(irq_entries_start)
++ RING0_INT_FRAME
++vector=0
++.rept NR_IRQS
++ ALIGN
++ .if vector
++ CFI_ADJUST_CFA_OFFSET -4
++ .endif
++1: pushl $~(vector)
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp common_interrupt
++ .previous
++ .long 1b
++ .text
++vector=vector+1
++.endr
++END(irq_entries_start)
++
++.previous
++END(interrupt)
++.previous
++
++/*
++ * the CPU automatically disables interrupts when executing an IRQ vector,
++ * so IRQ-flags tracing has to follow that:
++ */
++ ALIGN
++common_interrupt:
++ SAVE_ALL
++ TRACE_IRQS_OFF
++ movl %esp,%eax
++ call do_IRQ
++ jmp ret_from_intr
++ENDPROC(common_interrupt)
++ CFI_ENDPROC
++
++#define BUILD_INTERRUPT(name, nr) \
++ENTRY(name) \
++ RING0_INT_FRAME; \
++ pushl $~(nr); \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ SAVE_ALL; \
++ TRACE_IRQS_OFF \
++ movl %esp,%eax; \
++ call smp_##name; \
++ jmp ret_from_intr; \
++ CFI_ENDPROC; \
++ENDPROC(name)
++
++/* The include is where all of the SMP etc. interrupts come from */
++#include "entry_arch.h"
++
++KPROBE_ENTRY(page_fault)
++ RING0_EC_FRAME
++ pushl $do_page_fault
++ CFI_ADJUST_CFA_OFFSET 4
++ ALIGN
++error_code:
++ /* the function address is in %fs's slot on the stack */
++ pushl %es
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET es, 0*/
++ pushl %ds
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET ds, 0*/
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET eax, 0
++ pushl %ebp
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET ebp, 0
++ pushl %edi
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET edi, 0
++ pushl %esi
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET esi, 0
++ pushl %edx
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET edx, 0
++ pushl %ecx
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET ecx, 0
++ pushl %ebx
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET ebx, 0
++ cld
++ pushl %fs
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET fs, 0*/
++ movl $(__KERNEL_PERCPU), %ecx
++ movl %ecx, %fs
++ UNWIND_ESPFIX_STACK
++ popl %ecx
++ CFI_ADJUST_CFA_OFFSET -4
++ /*CFI_REGISTER es, ecx*/
++ movl PT_FS(%esp), %edi # get the function address
++ movl PT_ORIG_EAX(%esp), %edx # get the error code
++ movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
++ mov %ecx, PT_FS(%esp)
++ /*CFI_REL_OFFSET fs, ES*/
++ movl $(__USER_DS), %ecx
++ movl %ecx, %ds
++ movl %ecx, %es
++ movl %esp,%eax # pt_regs pointer
++ call *%edi
++ jmp ret_from_exception
++ CFI_ENDPROC
++KPROBE_END(page_fault)
++
++ENTRY(coprocessor_error)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_coprocessor_error
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(coprocessor_error)
++
++ENTRY(simd_coprocessor_error)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_simd_coprocessor_error
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(simd_coprocessor_error)
++
++ENTRY(device_not_available)
++ RING0_INT_FRAME
++ pushl $-1 # mark this as an int
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ GET_CR0_INTO_EAX
++ testl $0x4, %eax # EM (math emulation bit)
++ jne device_not_available_emulate
++ preempt_stop(CLBR_ANY)
++ call math_state_restore
++ jmp ret_from_exception
++device_not_available_emulate:
++ pushl $0 # temporary storage for ORIG_EIP
++ CFI_ADJUST_CFA_OFFSET 4
++ call math_emulate
++ addl $4, %esp
++ CFI_ADJUST_CFA_OFFSET -4
++ jmp ret_from_exception
++ CFI_ENDPROC
++END(device_not_available)
++
++/*
++ * Debug traps and NMI can happen at the one SYSENTER instruction
++ * that sets up the real kernel stack. Check here, since we can't
++ * allow the wrong stack to be used.
++ *
++ * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
++ * already pushed 3 words if it hits on the sysenter instruction:
++ * eflags, cs and eip.
++ *
++ * We just load the right stack, and push the three (known) values
++ * by hand onto the new stack - while updating the return eip past
++ * the instruction that would have done it for sysenter.
++ */
++#define FIX_STACK(offset, ok, label) \
++ cmpw $__KERNEL_CS,4(%esp); \
++ jne ok; \
++label: \
++ movl TSS_sysenter_esp0+offset(%esp),%esp; \
++ CFI_DEF_CFA esp, 0; \
++ CFI_UNDEFINED eip; \
++ pushfl; \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ pushl $__KERNEL_CS; \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ pushl $sysenter_past_esp; \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ CFI_REL_OFFSET eip, 0
++
++KPROBE_ENTRY(debug)
++ RING0_INT_FRAME
++ cmpl $sysenter_entry,(%esp)
++ jne debug_stack_correct
++ FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
++debug_stack_correct:
++ pushl $-1 # mark this as an int
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ xorl %edx,%edx # error code 0
++ movl %esp,%eax # pt_regs pointer
++ call do_debug
++ jmp ret_from_exception
++ CFI_ENDPROC
++KPROBE_END(debug)
++
++/*
++ * NMI is doubly nasty. It can happen _while_ we're handling
++ * a debug fault, and the debug fault hasn't yet been able to
++ * clear up the stack. So we first check whether we got an
++ * NMI on the sysenter entry path, but after that we need to
++ * check whether we got an NMI on the debug path where the debug
++ * fault happened on the sysenter path.
++ */
++KPROBE_ENTRY(nmi)
++ RING0_INT_FRAME
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ movl %ss, %eax
++ cmpw $__ESPFIX_SS, %ax
++ popl %eax
++ CFI_ADJUST_CFA_OFFSET -4
++ je nmi_espfix_stack
++ cmpl $sysenter_entry,(%esp)
++ je nmi_stack_fixup
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ movl %esp,%eax
++ /* Do not access memory above the end of our stack page,
++ * it might not exist.
++ */
++ andl $(THREAD_SIZE-1),%eax
++ cmpl $(THREAD_SIZE-20),%eax
++ popl %eax
++ CFI_ADJUST_CFA_OFFSET -4
++ jae nmi_stack_correct
++ cmpl $sysenter_entry,12(%esp)
++ je nmi_debug_stack_check
++nmi_stack_correct:
++ /* We have a RING0_INT_FRAME here */
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ xorl %edx,%edx # zero error code
++ movl %esp,%eax # pt_regs pointer
++ call do_nmi
++ jmp restore_nocheck_notrace
++ CFI_ENDPROC
++
++nmi_stack_fixup:
++ RING0_INT_FRAME
++ FIX_STACK(12,nmi_stack_correct, 1)
++ jmp nmi_stack_correct
++
++nmi_debug_stack_check:
++ /* We have a RING0_INT_FRAME here */
++ cmpw $__KERNEL_CS,16(%esp)
++ jne nmi_stack_correct
++ cmpl $debug,(%esp)
++ jb nmi_stack_correct
++ cmpl $debug_esp_fix_insn,(%esp)
++ ja nmi_stack_correct
++ FIX_STACK(24,nmi_stack_correct, 1)
++ jmp nmi_stack_correct
++
++nmi_espfix_stack:
++ /* We have a RING0_INT_FRAME here.
++ *
++ * create the pointer to lss back
++ */
++ pushl %ss
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl %esp
++ CFI_ADJUST_CFA_OFFSET 4
++ addw $4, (%esp)
++ /* copy the iret frame of 12 bytes */
++ .rept 3
++ pushl 16(%esp)
++ CFI_ADJUST_CFA_OFFSET 4
++ .endr
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ FIXUP_ESPFIX_STACK # %eax == %esp
++ xorl %edx,%edx # zero error code
++ call do_nmi
++ RESTORE_REGS
++ lss 12+4(%esp), %esp # back to espfix stack
++ CFI_ADJUST_CFA_OFFSET -24
++1: INTERRUPT_RETURN
++ CFI_ENDPROC
++.section __ex_table,"a"
++ .align 4
++ .long 1b,iret_exc
++.previous
++KPROBE_END(nmi)
++
++#ifdef CONFIG_PARAVIRT
++ENTRY(native_iret)
++1: iret
++.section __ex_table,"a"
++ .align 4
++ .long 1b,iret_exc
++.previous
++END(native_iret)
++
++ENTRY(native_irq_enable_sysexit)
++ sti
++ sysexit
++END(native_irq_enable_sysexit)
++#endif
++
++KPROBE_ENTRY(int3)
++ RING0_INT_FRAME
++ pushl $-1 # mark this as an int
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ xorl %edx,%edx # zero error code
++ movl %esp,%eax # pt_regs pointer
++ call do_int3
++ jmp ret_from_exception
++ CFI_ENDPROC
++KPROBE_END(int3)
++
++ENTRY(overflow)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_overflow
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(overflow)
++
++ENTRY(bounds)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_bounds
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(bounds)
++
++ENTRY(invalid_op)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_invalid_op
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(invalid_op)
++
++ENTRY(coprocessor_segment_overrun)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_coprocessor_segment_overrun
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(coprocessor_segment_overrun)
++
++ENTRY(invalid_TSS)
++ RING0_EC_FRAME
++ pushl $do_invalid_TSS
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(invalid_TSS)
++
++ENTRY(segment_not_present)
++ RING0_EC_FRAME
++ pushl $do_segment_not_present
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(segment_not_present)
++
++ENTRY(stack_segment)
++ RING0_EC_FRAME
++ pushl $do_stack_segment
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(stack_segment)
++
++KPROBE_ENTRY(general_protection)
++ RING0_EC_FRAME
++ pushl $do_general_protection
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++KPROBE_END(general_protection)
++
++ENTRY(alignment_check)
++ RING0_EC_FRAME
++ pushl $do_alignment_check
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(alignment_check)
++
++ENTRY(divide_error)
++ RING0_INT_FRAME
++ pushl $0 # no error code
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_divide_error
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(divide_error)
++
++#ifdef CONFIG_X86_MCE
++ENTRY(machine_check)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl machine_check_vector
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(machine_check)
++#endif
++
++ENTRY(spurious_interrupt_bug)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_spurious_interrupt_bug
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++END(spurious_interrupt_bug)
++
++ENTRY(kernel_thread_helper)
++ pushl $0 # fake return address for unwinder
++ CFI_STARTPROC
++ movl %edx,%eax
++ push %edx
++ CFI_ADJUST_CFA_OFFSET 4
++ call *%ebx
++ push %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ call do_exit
++ CFI_ENDPROC
++ENDPROC(kernel_thread_helper)
++
++.section .rodata,"a"
++#include "syscall_table.S"
++
++syscall_table_size=(.-sys_call_table)
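The disabled probe inside syscall_call above builds a struct event and a struct event_spec directly on the stack, using the EVENT_*/SPEC_* offsets so no C helper sits on the syscall fast path: %eax (the syscall number) goes into the spec's number field, event_type is hard-coded to 7, and the dispatch goes through a rec_event_asm trampoline rather than the pointer itself. Note also that it stores the thread_info pointer (GET_THREAD_INFO) where the C call sites store current. Its C equivalent, for readability (a sketch; syscall_nr stands for the value of %eax at entry and is not a name used by the patch):

	if (rec_event) {
		struct event event;
		struct event_spec espec;

		espec.number = syscall_nr;	/* %eax at syscall entry */
		event.event_data = &espec;
		event.task = current;		/* asm stores thread_info here */
		event.event_type = 7;		/* same slot the asm stores */
		(*rec_event)(&event, 1);
	}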
+diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/drivers/oprofile/cpu_buffer.c linux-2.6.22-590/drivers/oprofile/cpu_buffer.c
+--- linux-2.6.22-580/drivers/oprofile/cpu_buffer.c 2007-07-08 19:32:17.000000000 -0400
++++ linux-2.6.22-590/drivers/oprofile/cpu_buffer.c 2009-02-18 09:57:23.000000000 -0500
+@@ -21,6 +21,7 @@
+ #include <linux/oprofile.h>
+ #include <linux/vmalloc.h>
+ #include <linux/errno.h>
++#include <linux/arrays.h>
+
+ #include "event_buffer.h"
+ #include "cpu_buffer.h"
+@@ -143,6 +144,17 @@
+ b->head_pos = 0;
+ }
+
++#ifdef CONFIG_CHOPSTIX
++
++struct event_spec {
++ unsigned int pc;
++ unsigned long dcookie;
++ unsigned count;
++};
++
++extern void (*rec_event)(void *,unsigned int);
++#endif
++
+ static inline void
+ add_sample(struct oprofile_cpu_buffer * cpu_buf,
+ unsigned long pc, unsigned long event)
+@@ -151,6 +163,7 @@
+ entry->eip = pc;
+ entry->event = event;
+ increment_head(cpu_buf);
++
+ }
+
+ static inline void
+@@ -241,8 +254,28 @@
+ {
+ int is_kernel = !user_mode(regs);
+ unsigned long pc = profile_pc(regs);
++ int res=0;
+
++#ifdef CONFIG_CHOPSTIX
++ if (rec_event) {
++ struct event esig;
++ struct event_spec espec;
++ esig.task = current;
++ espec.pc=pc;
++ espec.count=1;
++ esig.event_data=&espec;
++ esig.event_type=event; /* index in the event array currently set up */
++ /* make sure the counters are loaded in the order we want them to show up */
++ (*rec_event)(&esig, 1);
++ }
++ else {
+ oprofile_add_ext_sample(pc, regs, event, is_kernel);
++ }
++#else
++ oprofile_add_ext_sample(pc, regs, event, is_kernel);
++#endif
++
++
+ }
+
+ void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
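Throughout the patch, rec_event is a bare function pointer (defined and exported from kernel/sched.c in the scheduler hunks below), and every producer site simply tests it against NULL before calling through it, as the oprofile hunk above does. The consumer half is not part of this diff; a module would attach roughly as follows (a sketch; the handler name and body are placeholders):

#include <linux/module.h>
#include <linux/arrays.h>	/* struct event, added by this patch */

extern void (*rec_event)(void *, unsigned int);

/* Placeholder consumer; not part of the patch. */
static void my_rec_event(void *data, unsigned int count)
{
	struct event *ev = data;
	/* dispatch on ev->event_type; decode ev->event_data */
}

static int __init probe_init(void)
{
	rec_event = my_rec_event;	/* producers start firing at once */
	return 0;
}

static void __exit probe_exit(void)
{
	rec_event = NULL;
}

module_init(probe_init);
module_exit(probe_exit);

Since the producers read the pointer with no locking, RCU, or barriers, clearing it at unload can race with an in-flight sample; that is a property of the hook as patched rather than of this sketch.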
+diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/evsend.S linux-2.6.22-590/evsend.S
+--- linux-2.6.22-580/evsend.S 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.22-590/evsend.S 2009-02-18 09:57:23.000000000 -0500
+@@ -0,0 +1,51 @@
++ .file "evsend.c"
++.globl num
++ .data
++ .align 4
++ .type num, @object
++ .size num, 4
++num:
++ .long 5
++ .text
++.globl main
++ .type main, @function
++main:
++ leal 4(%esp), %ecx
++ andl $-16, %esp
++ pushl -4(%ecx)
++ pushl %ebp
++ movl %esp, %ebp
++ pushl %ecx
++ subl $68, %esp
++ movl rec_event, %eax
++ testl %eax, %eax
++ je .L5
++ movl num, %eax
++ movzwl %ax, %eax
++ movw %ax, -36(%ebp)
++ movl current, %eax
++ movl (%eax), %eax
++	andl	$4095, %eax
++ movl %eax, -8(%ebp)
++ leal -48(%ebp), %eax
++ movl %eax, -24(%ebp)
++ movl current, %eax
++ movl %eax, -12(%ebp)
++ movl -8(%ebp), %eax
++ movl %eax, -48(%ebp)
++ movl $7, -16(%ebp)
++ movl rec_event, %edx
++ movl $1, 4(%esp)
++ leal -32(%ebp), %eax
++ movl %eax, (%esp)
++ call *%edx
++.L5:
++ addl $68, %esp
++ popl %ecx
++ popl %ebp
++ leal -4(%ecx), %esp
++ ret
++ .size main, .-main
++ .comm current,4,4
++ .ident "GCC: (GNU) 4.1.1 (Gentoo 4.1.1-r3)"
++ .section .note.GNU-stack,"",@progbits
+diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/evsend.c linux-2.6.22-590/evsend.c
+--- linux-2.6.22-580/evsend.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.22-590/evsend.c 2009-02-18 09:57:23.000000000 -0500
+@@ -0,0 +1,43 @@
++#include <linux/list.h>
++
++extern void (*rec_event)(void *,unsigned int);
++struct event_spec {
++ unsigned long pc;
++ unsigned long dcookie;
++ unsigned count;
++ unsigned short number;
++};
++
++struct event {
++ struct list_head link;
++ void *event_data;
++ unsigned int count;
++ unsigned int event_type;
++ struct task_struct *task;
++};
++
++int num=5;
++
++struct task_struct {
++ struct thread_type {
++ unsigned esp;
++ } thread;
++} *current;
++
++int main() {
++ if (rec_event) {
++ struct event event;
++ struct event_spec espec;
++ unsigned long eip;
++
++ espec.number = num;
++ eip = current->thread.esp & 4095; /* offset within the 4 KB stack page */
++ event.event_data=&espec;
++ event.task=current;
++ espec.pc=eip;
++ event.event_type=7;
++ /* index in the event array currently set up */
++ /* make sure the counters are loaded in the order we want them to show up */
++ (*rec_event)(&event, 1);
++ }
++}
+diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/fs/bio.c linux-2.6.22-590/fs/bio.c
+--- linux-2.6.22-580/fs/bio.c 2007-07-08 19:32:17.000000000 -0400
++++ linux-2.6.22-590/fs/bio.c 2009-02-18 09:57:23.000000000 -0500
@@ -27,6 +27,7 @@
#include <linux/workqueue.h>
#include <linux/blktrace_api.h>
if (bio->bi_end_io)
bio->bi_end_io(bio, bytes_done, error);
}
-diff -Nurb linux-2.6.22-580/fs/exec.c linux-2.6.22-590/fs/exec.c
---- linux-2.6.22-580/fs/exec.c 2008-04-30 09:29:26.000000000 -0400
-+++ linux-2.6.22-590/fs/exec.c 2008-04-30 09:29:41.000000000 -0400
-@@ -52,6 +52,7 @@
- #include <linux/audit.h>
- #include <linux/signalfd.h>
- #include <linux/vs_memory.h>
+diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/fs/exec.c linux-2.6.22-590/fs/exec.c
+--- linux-2.6.22-580/fs/exec.c 2009-02-18 09:56:02.000000000 -0500
++++ linux-2.6.22-590/fs/exec.c 2009-02-18 09:57:23.000000000 -0500
+@@ -27,6 +27,7 @@
+ #include <linux/mman.h>
+ #include <linux/a.out.h>
+ #include <linux/stat.h>
+#include <linux/dcookies.h>
-
- #include <asm/uaccess.h>
- #include <asm/mmu_context.h>
+ #include <linux/fcntl.h>
+ #include <linux/smp_lock.h>
+ #include <linux/init.h>
+@@ -38,7 +39,7 @@
+ #include <linux/binfmts.h>
+ #include <linux/swap.h>
+ #include <linux/utsname.h>
+-#include <linux/pid_namespace.h>
++/*#include <linux/pid_namespace.h>*/
+ #include <linux/module.h>
+ #include <linux/namei.h>
+ #include <linux/proc_fs.h>
@@ -488,6 +489,12 @@
if (!err) {
file = ERR_PTR(-EACCES);
if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
S_ISREG(inode->i_mode)) {
-diff -Nurb linux-2.6.22-580/include/linux/arrays.h linux-2.6.22-590/include/linux/arrays.h
---- linux-2.6.22-580/include/linux/arrays.h 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.22-590/include/linux/arrays.h 2008-04-30 09:29:41.000000000 -0400
-@@ -0,0 +1,36 @@
-+#ifndef __ARRAYS_H__
-+#define __ARRAYS_H__
-+#include <linux/list.h>
-+
-+#define SAMPLING_METHOD_DEFAULT 0
-+#define SAMPLING_METHOD_LOG 1
+@@ -627,8 +634,10 @@
+ * Reparenting needs write_lock on tasklist_lock,
+ * so it is safe to do it under read_lock.
+ */
++ /*
+ if (unlikely(tsk->group_leader == child_reaper(tsk)))
+ tsk->nsproxy->pid_ns->child_reaper = tsk;
++ */
+
+ zap_other_threads(tsk);
+ read_unlock(&tasklist_lock);
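The first exec.c hunk adds linux/dcookies.h, but the six lines the @@ -488,6 +489,12 @@ hunk inserts into open_exec() are elided from this diff. The natural use of that header here would be to pin a dcookie for the binary being executed, so later samples can be resolved back to it; in 2.6.22 that is done with get_dcookie(). An illustrative sketch (these are not the elided lines; the function name is invented):

#include <linux/dcookies.h>

/* Illustrative only. */
static void record_exec_cookie(struct nameidata *nd)
{
	unsigned long cookie;

	/* get_dcookie() pins the dentry/vfsmount pair; userspace can
	 * resolve the cookie via sys_lookup_dcookie() */
	if (!get_dcookie(nd->dentry, nd->mnt, &cookie))
		/* hand the cookie to the event stream */ ;
}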
+diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/fs/exec.c.orig linux-2.6.22-590/fs/exec.c.orig
+--- linux-2.6.22-580/fs/exec.c.orig 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.22-590/fs/exec.c.orig 2009-02-18 09:56:02.000000000 -0500
+@@ -0,0 +1,1590 @@
++/*
++ * linux/fs/exec.c
++ *
++ * Copyright (C) 1991, 1992 Linus Torvalds
++ */
+
-+/* Every probe has an array handler */
++/*
++ * #!-checking implemented by tytso.
++ */
++/*
++ * Demand-loading implemented 01.12.91 - no need to read anything but
++ * the header into memory. The inode of the executable is put into
++ * "current->executable", and page faults do the actual loading. Clean.
++ *
++ * Once more I can proudly say that linux stood up to being changed: it
++ * was less than 2 hours work to get demand-loading completely implemented.
++ *
++ * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
++ * current->executable is only used by the procfs. This allows a dispatch
++ * table to check for several different types of binary formats. We keep
++ * trying until we recognize the file or we run out of supported binary
++ * formats.
++ */
+
-+/* XXX - Optimize this structure */
++#include <linux/slab.h>
++#include <linux/file.h>
++#include <linux/mman.h>
++#include <linux/a.out.h>
++#include <linux/stat.h>
++#include <linux/fcntl.h>
++#include <linux/smp_lock.h>
++#include <linux/init.h>
++#include <linux/pagemap.h>
++#include <linux/highmem.h>
++#include <linux/spinlock.h>
++#include <linux/key.h>
++#include <linux/personality.h>
++#include <linux/binfmts.h>
++#include <linux/swap.h>
++#include <linux/utsname.h>
++#include <linux/pid_namespace.h>
++#include <linux/module.h>
++#include <linux/namei.h>
++#include <linux/proc_fs.h>
++#include <linux/ptrace.h>
++#include <linux/mount.h>
++#include <linux/security.h>
++#include <linux/syscalls.h>
++#include <linux/rmap.h>
++#include <linux/tsacct_kern.h>
++#include <linux/cn_proc.h>
++#include <linux/audit.h>
++#include <linux/signalfd.h>
++#include <linux/vs_memory.h>
+
-+extern void (*rec_event)(void *,unsigned int);
-+struct array_handler {
-+ struct list_head link;
-+ unsigned int (*hash_func)(void *);
-+ unsigned int (*sampling_func)(void *,int,void *);
-+ unsigned short size;
-+ unsigned int threshold;
-+ unsigned char **expcount;
-+ unsigned int sampling_method;
-+ unsigned int **arrays;
-+ unsigned int arraysize;
-+ unsigned int num_samples[2];
-+ void **epoch_samples; /* size-sized lists of samples */
-+ unsigned int (*serialize)(void *, void *);
-+ unsigned char code[5];
-+};
++#include <asm/uaccess.h>
++#include <asm/mmu_context.h>
+
-+struct event {
-+ struct list_head link;
-+ void *event_data;
-+ unsigned int count;
-+ unsigned int event_type;
-+ struct task_struct *task;
-+};
-+#endif
-diff -Nurb linux-2.6.22-580/include/linux/mutex.h linux-2.6.22-590/include/linux/mutex.h
---- linux-2.6.22-580/include/linux/mutex.h 2007-07-08 19:32:17.000000000 -0400
-+++ linux-2.6.22-590/include/linux/mutex.h 2008-04-30 09:45:43.000000000 -0400
-@@ -53,6 +53,10 @@
- struct thread_info *owner;
- const char *name;
- void *magic;
-+#else
-+#ifdef CONFIG_CHOPSTIX
-+ struct thread_info *owner;
-+#endif
- #endif
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-diff -Nurb linux-2.6.22-580/include/linux/sched.h linux-2.6.22-590/include/linux/sched.h
---- linux-2.6.22-580/include/linux/sched.h 2008-04-30 09:29:26.000000000 -0400
-+++ linux-2.6.22-590/include/linux/sched.h 2008-04-30 09:29:41.000000000 -0400
-@@ -850,6 +850,10 @@
- #endif
- unsigned long sleep_avg;
- unsigned long long timestamp, last_ran;
-+#ifdef CONFIG_CHOPSTIX
-+ unsigned long last_interrupted, last_ran_j;
++#ifdef CONFIG_KMOD
++#include <linux/kmod.h>
+#endif
+
- unsigned long long sched_time; /* sched_clock time spent running */
- enum sleep_type sleep_type;
-
-diff -Nurb linux-2.6.22-580/kernel/mutex.c linux-2.6.22-590/kernel/mutex.c
---- linux-2.6.22-580/kernel/mutex.c 2007-07-08 19:32:17.000000000 -0400
-+++ linux-2.6.22-590/kernel/mutex.c 2008-04-30 09:29:41.000000000 -0400
-@@ -18,6 +18,16 @@
- #include <linux/spinlock.h>
- #include <linux/interrupt.h>
- #include <linux/debug_locks.h>
-+#include <linux/arrays.h>
-+#undef CONFIG_CHOPSTIX
-+#ifdef CONFIG_CHOPSTIX
-+struct event_spec {
-+ unsigned long pc;
-+ unsigned long dcookie;
-+ unsigned count;
-+ unsigned char reason;
-+};
-+#endif
-
- /*
- * In the DEBUG case we are using the "NULL fastpath" for mutexes,
-@@ -43,6 +53,9 @@
- __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
- {
- atomic_set(&lock->count, 1);
-+#ifdef CONFIG_CHOPSTIX
-+ lock->owner=NULL;
-+#endif
- spin_lock_init(&lock->wait_lock);
- INIT_LIST_HEAD(&lock->wait_list);
-
-@@ -88,6 +101,7 @@
- * The locking fastpath is the 1->0 transition from
- * 'unlocked' into 'locked' state.
- */
++int core_uses_pid;
++char core_pattern[CORENAME_MAX_SIZE] = "core";
++int suid_dumpable = 0;
+
- __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
- }
-
-@@ -168,6 +182,27 @@
- }
- __set_task_state(task, state);
-
-+#ifdef CONFIG_CHOPSTIX
-+ if (rec_event) {
-+ if (lock->owner) {
-+ struct event event;
-+ struct event_spec espec;
-+ struct task_struct *p = lock->owner->task;
-+ /*spin_lock(&p->alloc_lock);*/
-+ espec.reason = 0; /* lock */
-+ event.event_data=&espec;
-+ event.task = p;
-+ espec.pc=lock;
-+ event.event_type=5;
-+ (*rec_event)(&event, 1);
-+ /*spin_unlock(&p->alloc_lock);*/
++EXPORT_SYMBOL(suid_dumpable);
++/* The maximal length of core_pattern is also specified in sysctl.c */
+
-+ }
-+ else
-+ BUG();
++static struct linux_binfmt *formats;
++static DEFINE_RWLOCK(binfmt_lock);
++
++int register_binfmt(struct linux_binfmt * fmt)
++{
++ struct linux_binfmt ** tmp = &formats;
++
++ if (!fmt)
++ return -EINVAL;
++ if (fmt->next)
++ return -EBUSY;
++ write_lock(&binfmt_lock);
++ while (*tmp) {
++ if (fmt == *tmp) {
++ write_unlock(&binfmt_lock);
++ return -EBUSY;
+ }
-+#endif
++ tmp = &(*tmp)->next;
++ }
++ fmt->next = formats;
++ formats = fmt;
++ write_unlock(&binfmt_lock);
++ return 0;
++}
+
- /* didnt get the lock, go to sleep: */
- spin_unlock_mutex(&lock->wait_lock, flags);
- schedule();
-@@ -177,6 +212,9 @@
- /* got the lock - rejoice! */
- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
- debug_mutex_set_owner(lock, task_thread_info(task));
-+#ifdef CONFIG_CHOPSTIX
-+ lock->owner = task_thread_info(task);
-+#endif
-
- /* set it to 0 if there are no waiters left: */
- if (likely(list_empty(&lock->wait_list)))
-@@ -202,6 +240,7 @@
- mutex_lock_nested(struct mutex *lock, unsigned int subclass)
- {
- might_sleep();
++EXPORT_SYMBOL(register_binfmt);
+
- __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
- }
-
-@@ -211,6 +250,7 @@
- mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
- {
- might_sleep();
++int unregister_binfmt(struct linux_binfmt * fmt)
++{
++ struct linux_binfmt ** tmp = &formats;
+
- return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass);
- }
-
-@@ -246,6 +286,23 @@
-
- debug_mutex_wake_waiter(lock, waiter);
-
-+#ifdef CONFIG_CHOPSTIX
-+ if (rec_event) {
-+ if (lock->owner) {
-+ struct event event;
-+ struct event_spec espec;
++ write_lock(&binfmt_lock);
++ while (*tmp) {
++ if (fmt == *tmp) {
++ *tmp = fmt->next;
++ fmt->next = NULL;
++ write_unlock(&binfmt_lock);
++ return 0;
++ }
++ tmp = &(*tmp)->next;
++ }
++ write_unlock(&binfmt_lock);
++ return -EINVAL;
++}
+
-+ espec.reason = 1; /* unlock */
-+ event.event_data=&espec;
-+ event.task = lock->owner->task;
-+ espec.pc=lock;
-+ event.event_type=5;
-+ (*rec_event)(&event, 1);
-+ }
-+ else
-+ BUG();
++EXPORT_SYMBOL(unregister_binfmt);
++
++static inline void put_binfmt(struct linux_binfmt * fmt)
++{
++ module_put(fmt->module);
++}
++
++/*
++ * Note that a shared library must be both readable and executable due to
++ * security reasons.
++ *
++ * Also note that we take the address to load from from the file itself.
++ */
++asmlinkage long sys_uselib(const char __user * library)
++{
++ struct file * file;
++ struct nameidata nd;
++ int error;
++
++ error = __user_path_lookup_open(library, LOOKUP_FOLLOW, &nd, FMODE_READ|FMODE_EXEC);
++ if (error)
++ goto out;
++
++ error = -EACCES;
++ if (nd.mnt->mnt_flags & MNT_NOEXEC)
++ goto exit;
++ error = -EINVAL;
++ if (!S_ISREG(nd.dentry->d_inode->i_mode))
++ goto exit;
++
++ error = vfs_permission(&nd, MAY_READ | MAY_EXEC);
++ if (error)
++ goto exit;
++
++ file = nameidata_to_filp(&nd, O_RDONLY);
++ error = PTR_ERR(file);
++ if (IS_ERR(file))
++ goto out;
++
++ error = -ENOEXEC;
++ if(file->f_op) {
++ struct linux_binfmt * fmt;
++
++ read_lock(&binfmt_lock);
++ for (fmt = formats ; fmt ; fmt = fmt->next) {
++ if (!fmt->load_shlib)
++ continue;
++ if (!try_module_get(fmt->module))
++ continue;
++ read_unlock(&binfmt_lock);
++ error = fmt->load_shlib(file);
++ read_lock(&binfmt_lock);
++ put_binfmt(fmt);
++ if (error != -ENOEXEC)
++ break;
+ }
-+#endif
- wake_up_process(waiter->task);
- }
-
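Note that the old revision's mutex.c hunk above is inert as written: the #undef CONFIG_CHOPSTIX placed right after the new includes forces every subsequent #ifdef CONFIG_CHOPSTIX block in the file to compile out, so the owner tracking and the reason 0 (lock wait) and reason 1 (unlock wake) events it defines were already disabled when this revision was superseded. The owner field itself is added to struct mutex in the mutex.h hunk, which is unaffected by the file-local #undef.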
-diff -Nurb linux-2.6.22-580/kernel/sched.c linux-2.6.22-590/kernel/sched.c
---- linux-2.6.22-580/kernel/sched.c 2008-04-30 09:29:26.000000000 -0400
-+++ linux-2.6.22-590/kernel/sched.c 2008-04-30 09:29:41.000000000 -0400
-@@ -10,7 +10,7 @@
- * 1998-11-19 Implemented schedule_timeout() and related stuff
- * by Andrea Arcangeli
- * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
-- * hybrid priority-list and round-robin design with
-+ * hybrid priority-list and round-robin deventn with
- * an array-switch method of distributing timeslices
- * and per-CPU runqueues. Cleanups and useful suggestions
- * by Davide Libenzi, preemptible kernel bits by Robert Love.
-@@ -56,6 +56,7 @@
-
- #include <asm/tlb.h>
- #include <asm/unistd.h>
-+#include <linux/arrays.h>
- #include <linux/vs_sched.h>
- #include <linux/vs_cvirt.h>
-
-@@ -431,6 +432,7 @@
-
- repeat_lock_task:
- rq = task_rq(p);
++ read_unlock(&binfmt_lock);
++ }
++ fput(file);
++out:
++ return error;
++exit:
++ release_open_intent(&nd);
++ path_release(&nd);
++ goto out;
++}
+
- spin_lock(&rq->lock);
- if (unlikely(rq != task_rq(p))) {
- spin_unlock(&rq->lock);
-@@ -1741,6 +1743,10 @@
- * event cannot wake it up and insert it on the runqueue either.
- */
- p->state = TASK_RUNNING;
-+#ifdef CONFIG_CHOPSTIX
-+ p->last_interrupted=0;
-+ p->last_ran_j=jiffies;
-+#endif
-
- /*
- * Make sure we do not leak PI boosting priority to the child:
-@@ -3608,6 +3614,7 @@
-
- #endif
-
++/*
++ * count() counts the number of strings in array ARGV.
++ */
++static int count(char __user * __user * argv, int max)
++{
++ int i = 0;
+
- static inline int interactive_sleep(enum sleep_type sleep_type)
- {
- return (sleep_type == SLEEP_INTERACTIVE ||
-@@ -3617,16 +3624,28 @@
- /*
- * schedule() is the main scheduler function.
- */
++ if (argv != NULL) {
++ for (;;) {
++ char __user * p;
+
-+#ifdef CONFIG_CHOPSTIX
-+extern void (*rec_event)(void *,unsigned int);
-+struct event_spec {
-+ unsigned long pc;
-+ unsigned long dcookie;
-+ unsigned count;
-+ unsigned char reason;
-+};
-+#endif
++ if (get_user(p, argv))
++ return -EFAULT;
++ if (!p)
++ break;
++ argv++;
++ if(++i > max)
++ return -E2BIG;
++ cond_resched();
++ }
++ }
++ return i;
++}
+
- asmlinkage void __sched schedule(void)
- {
- struct task_struct *prev, *next;
- struct prio_array *array;
- struct list_head *queue;
- unsigned long long now;
-- unsigned long run_time;
-+ unsigned long run_time, diff;
- int cpu, idx, new_prio;
- long *switch_count;
- struct rq *rq;
-+ int sampling_reason;
-
- /*
- * Test if we are atomic. Since do_exit() needs to call into
-@@ -3680,6 +3699,7 @@
- switch_count = &prev->nivcsw;
- if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
- switch_count = &prev->nvcsw;
++/*
++ * 'copy_strings()' copies argument/environment strings from user
++ * memory to free pages in kernel mem. These are in a format ready
++ * to be put directly into the top of new user memory.
++ */
++static int copy_strings(int argc, char __user * __user * argv,
++ struct linux_binprm *bprm)
++{
++ struct page *kmapped_page = NULL;
++ char *kaddr = NULL;
++ int ret;
+
- if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
- unlikely(signal_pending(prev))))
- prev->state = TASK_RUNNING;
-@@ -3689,6 +3709,14 @@
- vx_uninterruptible_inc(prev);
- }
- deactivate_task(prev, rq);
-+#ifdef CONFIG_CHOPSTIX
-+ if (prev->state & TASK_UNINTERRUPTIBLE) {
-+ prev->last_interrupted=jiffies;
++ while (argc-- > 0) {
++ char __user *str;
++ int len;
++ unsigned long pos;
++
++ if (get_user(str, argv+argc) ||
++ !(len = strnlen_user(str, bprm->p))) {
++ ret = -EFAULT;
++ goto out;
++ }
++
++ if (bprm->p < len) {
++ ret = -E2BIG;
++ goto out;
++ }
++
++ bprm->p -= len;
++ /* XXX: add architecture specific overflow check here. */
++ pos = bprm->p;
++
++ while (len > 0) {
++ int i, new, err;
++ int offset, bytes_to_copy;
++ struct page *page;
++
++ offset = pos % PAGE_SIZE;
++ i = pos/PAGE_SIZE;
++ page = bprm->page[i];
++ new = 0;
++ if (!page) {
++ page = alloc_page(GFP_HIGHUSER);
++ bprm->page[i] = page;
++ if (!page) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ new = 1;
+ }
-+ else if (prev->state & TASK_INTERRUPTIBLE) {
-+ prev->last_interrupted=-1;
++
++ if (page != kmapped_page) {
++ if (kmapped_page)
++ kunmap(kmapped_page);
++ kmapped_page = page;
++ kaddr = kmap(kmapped_page);
+ }
-+#endif
- }
- }
-
-@@ -3765,6 +3793,39 @@
- prev->sleep_avg = 0;
- prev->timestamp = prev->last_ran = now;
-
-+#ifdef CONFIG_CHOPSTIX
-+ /* Run only if the Chopstix module so decrees it */
-+ if (rec_event) {
-+ prev->last_ran_j = jiffies;
-+ if (next->last_interrupted!=-1) {
-+ if (next->last_interrupted) {
-+ diff = (jiffies-next->last_interrupted);
-+ sampling_reason = 0;
++ if (new && offset)
++ memset(kaddr, 0, offset);
++ bytes_to_copy = PAGE_SIZE - offset;
++ if (bytes_to_copy > len) {
++ bytes_to_copy = len;
++ if (new)
++ memset(kaddr+offset+len, 0,
++ PAGE_SIZE-offset-len);
+ }
-+ else {
-+ diff = jiffies-next->last_ran_j;
-+ sampling_reason = 1;
++ err = copy_from_user(kaddr+offset, str, bytes_to_copy);
++ if (err) {
++ ret = -EFAULT;
++ goto out;
+ }
+
-+ if (diff > HZ/5) {
-+ struct event event;
-+ struct event_spec espec;
-+ unsigned long eip;
-+
-+ espec.reason = sampling_reason;
-+ eip = next->thread.esp & 4095;
-+ event.event_data=&espec;
-+ event.task=next;
-+ espec.pc=eip;
-+ event.event_type=2;
-+ /* index in the event array currently set up */
-+ /* make sure the counters are loaded in the order we want them to show up*/
-+ (*rec_event)(&event, diff);
-+ }
++ pos += bytes_to_copy;
++ str += bytes_to_copy;
++ len -= bytes_to_copy;
+ }
-+ next->last_interrupted=0;
+ }
-+#endif
- sched_info_switch(prev, next);
- if (likely(prev != next)) {
- next->timestamp = next->last_ran = now;
-@@ -4664,6 +4725,7 @@
- get_task_struct(p);
- read_unlock(&tasklist_lock);
-
++ ret = 0;
++out:
++ if (kmapped_page)
++ kunmap(kmapped_page);
++ return ret;
++}
+
- retval = -EPERM;
- if ((current->euid != p->euid) && (current->euid != p->uid) &&
- !capable(CAP_SYS_NICE))
-@@ -5032,6 +5094,7 @@
- jiffies_to_timespec(p->policy == SCHED_FIFO ?
- 0 : task_timeslice(p), &t);
- read_unlock(&tasklist_lock);
++/*
++ * Like copy_strings, but get argv and its values from kernel memory.
++ */
++int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
++{
++ int r;
++ mm_segment_t oldfs = get_fs();
++ set_fs(KERNEL_DS);
++ r = copy_strings(argc, (char __user * __user *)argv, bprm);
++ set_fs(oldfs);
++ return r;
++}
+
- retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
- out_nounlock:
- return retval;
-@@ -7275,3 +7338,9 @@
- }
-
- #endif
++EXPORT_SYMBOL(copy_strings_kernel);
+
-+#ifdef CONFIG_CHOPSTIX
-+void (*rec_event)(void *,unsigned int);
-+EXPORT_SYMBOL(rec_event);
-+EXPORT_SYMBOL(in_sched_functions);
-+#endif
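For scale, the scheduler hunk above (present only in the older revision of the patch) emits a sample when a task's off-CPU time exceeds HZ/5 jiffies. One jiffy is 1/HZ seconds, so the threshold is (HZ/5) * (1/HZ) = 0.2 s at any tick rate: 50 jiffies at HZ=250, 200 jiffies at HZ=1000. The reason field records where the time went: 0 means the task was woken from an uninterruptible sleep (measured from last_interrupted), 1 means it sat runnable on the queue (measured from last_ran_j).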
-diff -Nurb linux-2.6.22-580/mm/slab.c linux-2.6.22-590/mm/slab.c
---- linux-2.6.22-580/mm/slab.c 2008-04-30 09:29:26.000000000 -0400
-+++ linux-2.6.22-590/mm/slab.c 2008-04-30 09:29:41.000000000 -0400
-@@ -110,11 +110,13 @@
- #include <linux/fault-inject.h>
- #include <linux/rtmutex.h>
- #include <linux/reciprocal_div.h>
-+#include <linux/arrays.h>
-
- #include <asm/cacheflush.h>
- #include <asm/tlbflush.h>
- #include <asm/page.h>
-
++#ifdef CONFIG_MMU
++/*
++ * This routine is used to map in a page into an address space: needed by
++ * execve() for the initial stack and environment pages.
++ *
++ * vma->vm_mm->mmap_sem is held for writing.
++ */
++void install_arg_page(struct vm_area_struct *vma,
++ struct page *page, unsigned long address)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ pte_t * pte;
++ spinlock_t *ptl;
+
- /*
- * DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
- * 0 for faster, smaller code (especially in the critical paths).
-@@ -249,6 +251,14 @@
- void *addr;
- };
-
-+extern void (*rec_event)(void *,unsigned int);
-+struct event_spec {
-+ unsigned long pc;
-+ unsigned long dcookie;
-+ unsigned count;
-+ unsigned char reason;
-+};
++ if (unlikely(anon_vma_prepare(vma)))
++ goto out;
+
- /*
- * struct array_cache
- *
-@@ -3443,6 +3453,19 @@
- local_irq_restore(save_flags);
- objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
- prefetchw(objp);
-+#ifdef CONFIG_CHOPSTIX
++ flush_dcache_page(page);
++ pte = get_locked_pte(mm, address, &ptl);
++ if (!pte)
++ goto out;
++ if (!pte_none(*pte)) {
++ pte_unmap_unlock(pte, ptl);
++ goto out;
++ }
++ inc_mm_counter(mm, anon_rss);
++ lru_cache_add_active(page);
++ set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte(
++ page, vma->vm_page_prot))));
++ page_add_new_anon_rmap(page, vma, address);
++ pte_unmap_unlock(pte, ptl);
++
++ /* no need for flush_tlb */
++ return;
++out:
++ __free_page(page);
++ force_sig(SIGKILL, current);
++}
++
++#define EXTRA_STACK_VM_PAGES 20 /* random */
++
++int setup_arg_pages(struct linux_binprm *bprm,
++ unsigned long stack_top,
++ int executable_stack)
++{
++ unsigned long stack_base;
++ struct vm_area_struct *mpnt;
++ struct mm_struct *mm = current->mm;
++ int i, ret;
++ long arg_size;
++
++#ifdef CONFIG_STACK_GROWSUP
++ /* Move the argument and environment strings to the bottom of the
++ * stack space.
++ */
++ int offset, j;
++ char *to, *from;
++
++ /* Start by shifting all the pages down */
++ i = 0;
++ for (j = 0; j < MAX_ARG_PAGES; j++) {
++ struct page *page = bprm->page[j];
++ if (!page)
++ continue;
++ bprm->page[i++] = page;
++ }
++
++ /* Now move them within their pages */
++ offset = bprm->p % PAGE_SIZE;
++ to = kmap(bprm->page[0]);
++ for (j = 1; j < i; j++) {
++ memmove(to, to + offset, PAGE_SIZE - offset);
++ from = kmap(bprm->page[j]);
++ memcpy(to + PAGE_SIZE - offset, from, offset);
++ kunmap(bprm->page[j - 1]);
++ to = from;
++ }
++ memmove(to, to + offset, PAGE_SIZE - offset);
++ kunmap(bprm->page[j - 1]);
++
++ /* Limit stack size to 1GB */
++ stack_base = current->signal->rlim[RLIMIT_STACK].rlim_max;
++ if (stack_base > (1 << 30))
++ stack_base = 1 << 30;
++ stack_base = PAGE_ALIGN(stack_top - stack_base);
++
++ /* Adjust bprm->p to point to the end of the strings. */
++ bprm->p = stack_base + PAGE_SIZE * i - offset;
++
++ mm->arg_start = stack_base;
++ arg_size = i << PAGE_SHIFT;
++
++ /* zero pages that were copied above */
++ while (i < MAX_ARG_PAGES)
++ bprm->page[i++] = NULL;
++#else
++ stack_base = arch_align_stack(stack_top - MAX_ARG_PAGES*PAGE_SIZE);
++ stack_base = PAGE_ALIGN(stack_base);
++ bprm->p += stack_base;
++ mm->arg_start = bprm->p;
++ arg_size = stack_top - (PAGE_MASK & (unsigned long) mm->arg_start);
++#endif
++
++ arg_size += EXTRA_STACK_VM_PAGES * PAGE_SIZE;
++
++ if (bprm->loader)
++ bprm->loader += stack_base;
++ bprm->exec += stack_base;
++
++ mpnt = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++ if (!mpnt)
++ return -ENOMEM;
++
++ down_write(&mm->mmap_sem);
++ {
++ mpnt->vm_mm = mm;
++#ifdef CONFIG_STACK_GROWSUP
++ mpnt->vm_start = stack_base;
++ mpnt->vm_end = stack_base + arg_size;
++#else
++ mpnt->vm_end = stack_top;
++ mpnt->vm_start = mpnt->vm_end - arg_size;
++#endif
++ /* Adjust stack execute permissions; explicitly enable
++ * for EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X
++ * and leave alone (arch default) otherwise. */
++ if (unlikely(executable_stack == EXSTACK_ENABLE_X))
++ mpnt->vm_flags = VM_STACK_FLAGS | VM_EXEC;
++ else if (executable_stack == EXSTACK_DISABLE_X)
++ mpnt->vm_flags = VM_STACK_FLAGS & ~VM_EXEC;
++ else
++ mpnt->vm_flags = VM_STACK_FLAGS;
++ mpnt->vm_flags |= mm->def_flags;
++ mpnt->vm_page_prot = protection_map[mpnt->vm_flags & 0x7];
++ if ((ret = insert_vm_struct(mm, mpnt))) {
++ up_write(&mm->mmap_sem);
++ kmem_cache_free(vm_area_cachep, mpnt);
++ return ret;
++ }
++ vx_vmpages_sub(mm, mm->total_vm - vma_pages(mpnt));
++ mm->stack_vm = mm->total_vm;
++ }
++
++ for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
++ struct page *page = bprm->page[i];
++ if (page) {
++ bprm->page[i] = NULL;
++ install_arg_page(mpnt, page, stack_base);
++ }
++ stack_base += PAGE_SIZE;
++ }
++ up_write(&mm->mmap_sem);
++
++ return 0;
++}
++
++EXPORT_SYMBOL(setup_arg_pages);
++
++#define free_arg_pages(bprm) do { } while (0)
++
++#else
++
++static inline void free_arg_pages(struct linux_binprm *bprm)
++{
++ int i;
++
++ for (i = 0; i < MAX_ARG_PAGES; i++) {
++ if (bprm->page[i])
++ __free_page(bprm->page[i]);
++ bprm->page[i] = NULL;
++ }
++}
++
++#endif /* CONFIG_MMU */
++
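++/*
++ * Open a file for exec: resolve the path, require a regular file on a
++ * mount without MNT_NOEXEC, check MAY_EXEC, and deny write access for
++ * as long as the file is open for execution.
++ */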
++struct file *open_exec(const char *name)
++{
++ struct nameidata nd;
++ int err;
++ struct file *file;
++
++ err = path_lookup_open(AT_FDCWD, name, LOOKUP_FOLLOW, &nd, FMODE_READ|FMODE_EXEC);
++ file = ERR_PTR(err);
++
++ if (!err) {
++ struct inode *inode = nd.dentry->d_inode;
++ file = ERR_PTR(-EACCES);
++ if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
++ S_ISREG(inode->i_mode)) {
++ int err = vfs_permission(&nd, MAY_EXEC);
++ file = ERR_PTR(err);
++ if (!err) {
++ file = nameidata_to_filp(&nd, O_RDONLY);
++ if (!IS_ERR(file)) {
++ err = deny_write_access(file);
++ if (err) {
++ fput(file);
++ file = ERR_PTR(err);
++ }
++ }
++out:
++ return file;
++ }
++ }
++ release_open_intent(&nd);
++ path_release(&nd);
++ }
++ goto out;
++}
++
++EXPORT_SYMBOL(open_exec);
++
++int kernel_read(struct file *file, unsigned long offset,
++ char *addr, unsigned long count)
++{
++ mm_segment_t old_fs;
++ loff_t pos = offset;
++ int result;
++
++ old_fs = get_fs();
++ set_fs(get_ds());
++ /* The cast to a user pointer is valid due to the set_fs() */
++ result = vfs_read(file, (void __user *)addr, count, &pos);
++ set_fs(old_fs);
++ return result;
++}
++
++EXPORT_SYMBOL(kernel_read);
++
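++/*
++ * Switch the current task over to the new mm, dropping our reference
++ * to the old mm (or to the borrowed active_mm if there was none).
++ */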
++static int exec_mmap(struct mm_struct *mm)
++{
++ struct task_struct *tsk;
++ struct mm_struct * old_mm, *active_mm;
++
++ /* Notify parent that we're no longer interested in the old VM */
++ tsk = current;
++ old_mm = current->mm;
++ mm_release(tsk, old_mm);
++
++ if (old_mm) {
++ /*
++ * Make sure that if there is a core dump in progress
++ * for the old mm, we get out and die instead of going
++ * through with the exec. We must hold mmap_sem around
++ * checking core_waiters and changing tsk->mm. The
++ * core-inducing thread will increment core_waiters for
++ * each thread whose ->mm == old_mm.
++ */
++ down_read(&old_mm->mmap_sem);
++ if (unlikely(old_mm->core_waiters)) {
++ up_read(&old_mm->mmap_sem);
++ return -EINTR;
++ }
++ }
++ task_lock(tsk);
++ active_mm = tsk->active_mm;
++ tsk->mm = mm;
++ tsk->active_mm = mm;
++ activate_mm(active_mm, mm);
++ task_unlock(tsk);
++ arch_pick_mmap_layout(mm);
++ if (old_mm) {
++ up_read(&old_mm->mmap_sem);
++ BUG_ON(active_mm != old_mm);
++ mmput(old_mm);
++ return 0;
++ }
++ mmdrop(active_mm);
++ return 0;
++}
++
++/*
++ * This function makes sure the current process has its own signal table,
++ * so that flush_signal_handlers can later reset the handlers without
++ * disturbing other processes. (Other processes might share the signal
++ * table via the CLONE_SIGHAND option to clone().)
++ */
++static int de_thread(struct task_struct *tsk)
++{
++ struct signal_struct *sig = tsk->signal;
++ struct sighand_struct *newsighand, *oldsighand = tsk->sighand;
++ spinlock_t *lock = &oldsighand->siglock;
++ struct task_struct *leader = NULL;
++ int count;
++
++ /*
++ * If we don't share sighandlers, then we aren't sharing anything
++ * and we can just re-use it all.
++ */
++ if (atomic_read(&oldsighand->count) <= 1) {
++ BUG_ON(atomic_read(&sig->count) != 1);
++ signalfd_detach(tsk);
++ exit_itimers(sig);
++ return 0;
++ }
++
++ newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
++ if (!newsighand)
++ return -ENOMEM;
++
++ if (thread_group_empty(tsk))
++ goto no_thread_group;
++
++ /*
++ * Kill all other threads in the thread group.
++ * We must hold tasklist_lock to call zap_other_threads.
++ */
++ read_lock(&tasklist_lock);
++ spin_lock_irq(lock);
++ if (sig->flags & SIGNAL_GROUP_EXIT) {
++ /*
++ * Another group action in progress, just
++ * return so that the signal is processed.
++ */
++ spin_unlock_irq(lock);
++ read_unlock(&tasklist_lock);
++ kmem_cache_free(sighand_cachep, newsighand);
++ return -EAGAIN;
++ }
++
++ /*
++ * child_reaper ignores SIGKILL, change it now.
++ * Reparenting needs write_lock on tasklist_lock,
++ * so it is safe to do it under read_lock.
++ */
++ if (unlikely(tsk->group_leader == child_reaper(tsk)))
++ tsk->nsproxy->pid_ns->child_reaper = tsk;
++
++ zap_other_threads(tsk);
++ read_unlock(&tasklist_lock);
++
++ /*
++ * Account for the thread group leader hanging around:
++ */
++ count = 1;
++ if (!thread_group_leader(tsk)) {
++ count = 2;
++ /*
++ * The SIGALRM timer survives the exec, but needs to point
++ * at us as the new group leader now. We have a race with
++ * a timer firing now getting the old leader, so we need to
++ * synchronize with any firing (by calling del_timer_sync)
++ * before we can safely let the old group leader die.
++ */
++ sig->tsk = tsk;
++ spin_unlock_irq(lock);
++ if (hrtimer_cancel(&sig->real_timer))
++ hrtimer_restart(&sig->real_timer);
++ spin_lock_irq(lock);
++ }
++ while (atomic_read(&sig->count) > count) {
++ sig->group_exit_task = tsk;
++ sig->notify_count = count;
++ __set_current_state(TASK_UNINTERRUPTIBLE);
++ spin_unlock_irq(lock);
++ schedule();
++ spin_lock_irq(lock);
++ }
++ sig->group_exit_task = NULL;
++ sig->notify_count = 0;
++ spin_unlock_irq(lock);
++
++ /*
++ * At this point all other threads have exited, all we have to
++ * do is to wait for the thread group leader to become inactive,
++ * and to assume its PID:
++ */
++ if (!thread_group_leader(tsk)) {
++ /*
++ * Wait for the thread group leader to be a zombie.
++ * It should already be zombie at this point, most
++ * of the time.
++ */
++ leader = tsk->group_leader;
++ while (leader->exit_state != EXIT_ZOMBIE)
++ yield();
++
++ /*
++ * The only record we have of the real-time age of a
++ * process, regardless of execs it's done, is start_time.
++ * All the past CPU time is accumulated in signal_struct
++ * from sister threads now dead. But in this non-leader
++ * exec, nothing survives from the original leader thread,
++ * whose birth marks the true age of this process now.
++ * When we take on its identity by switching to its PID, we
++ * also take its birthdate (always earlier than our own).
++ */
++ tsk->start_time = leader->start_time;
++
++ write_lock_irq(&tasklist_lock);
++
++ BUG_ON(leader->tgid != tsk->tgid);
++ BUG_ON(tsk->pid == tsk->tgid);
++ /*
++ * An exec() starts a new thread group with the
++ * TGID of the previous thread group. Rehash the
++ * two threads with a switched PID, and release
++ * the former thread group leader:
++ */
++
++ /* Become a process group leader with the old leader's pid.
++	 * The old leader becomes a thread of this thread group.
++ * Note: The old leader also uses this pid until release_task
++ * is called. Odd but simple and correct.
++ */
++ detach_pid(tsk, PIDTYPE_PID);
++ tsk->pid = leader->pid;
++ attach_pid(tsk, PIDTYPE_PID, find_pid(tsk->pid));
++ transfer_pid(leader, tsk, PIDTYPE_PGID);
++ transfer_pid(leader, tsk, PIDTYPE_SID);
++ list_replace_rcu(&leader->tasks, &tsk->tasks);
++
++ tsk->group_leader = tsk;
++ leader->group_leader = tsk;
++
++ tsk->exit_signal = SIGCHLD;
++
++ BUG_ON(leader->exit_state != EXIT_ZOMBIE);
++ leader->exit_state = EXIT_DEAD;
++
++ write_unlock_irq(&tasklist_lock);
++ }
++
++ /*
++ * There may be one thread left which is just exiting,
++ * but it's safe to stop telling the group to kill themselves.
++ */
++ sig->flags = 0;
++
++no_thread_group:
++ signalfd_detach(tsk);
++ exit_itimers(sig);
++ if (leader)
++ release_task(leader);
++
++ BUG_ON(atomic_read(&sig->count) != 1);
++
++ if (atomic_read(&oldsighand->count) == 1) {
++ /*
++ * Now that we nuked the rest of the thread group,
++ * it turns out we are not sharing sighand any more either.
++ * So we can just keep it.
++ */
++ kmem_cache_free(sighand_cachep, newsighand);
++ } else {
++ /*
++ * Move our state over to newsighand and switch it in.
++ */
++ atomic_set(&newsighand->count, 1);
++ memcpy(newsighand->action, oldsighand->action,
++ sizeof(newsighand->action));
++
++ write_lock_irq(&tasklist_lock);
++ spin_lock(&oldsighand->siglock);
++ spin_lock_nested(&newsighand->siglock, SINGLE_DEPTH_NESTING);
++
++ rcu_assign_pointer(tsk->sighand, newsighand);
++ recalc_sigpending();
++
++ spin_unlock(&newsighand->siglock);
++ spin_unlock(&oldsighand->siglock);
++ write_unlock_irq(&tasklist_lock);
++
++ __cleanup_sighand(oldsighand);
++ }
++
++ BUG_ON(!thread_group_leader(tsk));
++ return 0;
++}
++
++/*
++ * These functions flush out all traces of the currently running executable
++ * so that a new one can be started.
++ */
++
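++/*
++ * Close every file descriptor marked close-on-exec. file_lock is
++ * dropped around sys_close(), so the fdtable is re-read on each pass.
++ */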
++static void flush_old_files(struct files_struct * files)
++{
++ long j = -1;
++ struct fdtable *fdt;
++
++ spin_lock(&files->file_lock);
++ for (;;) {
++ unsigned long set, i;
++
++ j++;
++ i = j * __NFDBITS;
++ fdt = files_fdtable(files);
++ if (i >= fdt->max_fds)
++ break;
++ set = fdt->close_on_exec->fds_bits[j];
++ if (!set)
++ continue;
++ fdt->close_on_exec->fds_bits[j] = 0;
++ spin_unlock(&files->file_lock);
++		for ( ; set ; i++, set >>= 1) {
++ if (set & 1) {
++ sys_close(i);
++ }
++ }
++ spin_lock(&files->file_lock);
++
++ }
++ spin_unlock(&files->file_lock);
++}
++
++void get_task_comm(char *buf, struct task_struct *tsk)
++{
++ /* buf must be at least sizeof(tsk->comm) in size */
++ task_lock(tsk);
++ strncpy(buf, tsk->comm, sizeof(tsk->comm));
++ task_unlock(tsk);
++}
++
++void set_task_comm(struct task_struct *tsk, char *buf)
++{
++ task_lock(tsk);
++ strlcpy(tsk->comm, buf, sizeof(tsk->comm));
++ task_unlock(tsk);
++}
++
++int flush_old_exec(struct linux_binprm * bprm)
++{
++ char * name;
++ int i, ch, retval;
++ struct files_struct *files;
++ char tcomm[sizeof(current->comm)];
++
++ /*
++ * Make sure we have a private signal table and that
++ * we are unassociated from the previous thread group.
++ */
++ retval = de_thread(current);
++ if (retval)
++ goto out;
++
++ /*
++ * Make sure we have private file handles. Ask the
++ * fork helper to do the work for us and the exit
++ * helper to do the cleanup of the old one.
++ */
++ files = current->files; /* refcounted so safe to hold */
++ retval = unshare_files();
++ if (retval)
++ goto out;
++ /*
++ * Release all of the old mmap stuff
++ */
++ retval = exec_mmap(bprm->mm);
++ if (retval)
++ goto mmap_failed;
++
++ bprm->mm = NULL; /* We're using it now */
++
++ /* This is the point of no return */
++ put_files_struct(files);
++
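++	/* The alternate signal stack does not survive an exec. */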
++ current->sas_ss_sp = current->sas_ss_size = 0;
++
++ if (current->euid == current->uid && current->egid == current->gid)
++ current->mm->dumpable = 1;
++ else
++ current->mm->dumpable = suid_dumpable;
++
++ name = bprm->filename;
++
++	/* Copy the binary name from after the last slash */
++ for (i=0; (ch = *(name++)) != '\0';) {
++ if (ch == '/')
++ i = 0; /* overwrite what we wrote */
++ else
++ if (i < (sizeof(tcomm) - 1))
++ tcomm[i++] = ch;
++ }
++ tcomm[i] = '\0';
++ set_task_comm(current, tcomm);
++
++ current->flags &= ~PF_RANDOMIZE;
++ flush_thread();
++
++ /* Set the new mm task size. We have to do that late because it may
++ * depend on TIF_32BIT which is only updated in flush_thread() on
++ * some architectures like powerpc
++ */
++ current->mm->task_size = TASK_SIZE;
++
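++	/* Set-id binaries, and binaries the user cannot read, fall back
++	 * to the restrictive suid_dumpable policy. */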
++ if (bprm->e_uid != current->euid || bprm->e_gid != current->egid) {
++ suid_keys(current);
++ current->mm->dumpable = suid_dumpable;
++ current->pdeath_signal = 0;
++ } else if (file_permission(bprm->file, MAY_READ) ||
++ (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
++ suid_keys(current);
++ current->mm->dumpable = suid_dumpable;
++ }
++
++ /* An exec changes our domain. We are no longer part of the thread
++ group */
++
++ current->self_exec_id++;
++
++ flush_signal_handlers(current, 0);
++ flush_old_files(current->files);
++
++ return 0;
++
++mmap_failed:
++ reset_files_struct(current, files);
++out:
++ return retval;
++}
++
++EXPORT_SYMBOL(flush_old_exec);
++
++/*
++ * Fill the binprm structure from the inode.
++ * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
++ */
++int prepare_binprm(struct linux_binprm *bprm)
++{
++ int mode;
++ struct inode * inode = bprm->file->f_path.dentry->d_inode;
++ int retval;
++
++ mode = inode->i_mode;
++ if (bprm->file->f_op == NULL)
++ return -EACCES;
++
++ bprm->e_uid = current->euid;
++ bprm->e_gid = current->egid;
++
++	if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)) {
++ /* Set-uid? */
++ if (mode & S_ISUID) {
++ current->personality &= ~PER_CLEAR_ON_SETID;
++ bprm->e_uid = inode->i_uid;
++ }
++
++ /* Set-gid? */
++ /*
++ * If setgid is set but no group execute bit then this
++ * is a candidate for mandatory locking, not a setgid
++ * executable.
++ */
++ if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
++ current->personality &= ~PER_CLEAR_ON_SETID;
++ bprm->e_gid = inode->i_gid;
++ }
++ }
++
++ /* fill in binprm security blob */
++ retval = security_bprm_set(bprm);
++ if (retval)
++ return retval;
++
++	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
++	return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
++}
++
++EXPORT_SYMBOL(prepare_binprm);
++
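++/*
++ * Build the LSM_UNSAFE_* mask: an exec is "unsafe" if the task is
++ * being ptraced or still shares fs, files or sighand with another task.
++ */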
++static int unsafe_exec(struct task_struct *p)
++{
++ int unsafe = 0;
++ if (p->ptrace & PT_PTRACED) {
++ if (p->ptrace & PT_PTRACE_CAP)
++ unsafe |= LSM_UNSAFE_PTRACE_CAP;
++ else
++ unsafe |= LSM_UNSAFE_PTRACE;
++ }
++ if (atomic_read(&p->fs->count) > 1 ||
++ atomic_read(&p->files->count) > 1 ||
++ atomic_read(&p->sighand->count) > 1)
++ unsafe |= LSM_UNSAFE_SHARE;
++
++ return unsafe;
++}
++
++void compute_creds(struct linux_binprm *bprm)
++{
++ int unsafe;
++
++ if (bprm->e_uid != current->uid) {
++ suid_keys(current);
++ current->pdeath_signal = 0;
++ }
++ exec_keys(current);
++
++ task_lock(current);
++ unsafe = unsafe_exec(current);
++ security_bprm_apply_creds(bprm, unsafe);
++ task_unlock(current);
++ security_bprm_post_apply_creds(bprm);
++}
++EXPORT_SYMBOL(compute_creds);
++
++/*
++ * Arguments are '\0' separated strings found at the location bprm->p
++ * points to; chop off the first by relocating bprm->p to right after
++ * the first '\0' encountered.
++ */
++void remove_arg_zero(struct linux_binprm *bprm)
++{
++ if (bprm->argc) {
++ char ch;
++
++ do {
++ unsigned long offset;
++ unsigned long index;
++ char *kaddr;
++ struct page *page;
++
++ offset = bprm->p & ~PAGE_MASK;
++ index = bprm->p >> PAGE_SHIFT;
++
++ page = bprm->page[index];
++ kaddr = kmap_atomic(page, KM_USER0);
++
++ /* run through page until we reach end or find NUL */
++ do {
++ ch = *(kaddr + offset);
++
++ /* discard that character... */
++ bprm->p++;
++ offset++;
++ } while (offset < PAGE_SIZE && ch != '\0');
++
++ kunmap_atomic(kaddr, KM_USER0);
++
++ /* free the old page */
++ if (offset == PAGE_SIZE) {
++ __free_page(page);
++ bprm->page[index] = NULL;
++ }
++ } while (ch != '\0');
++
++ bprm->argc--;
++ }
++}
++EXPORT_SYMBOL(remove_arg_zero);
++
++/*
++ * Cycle through the list of binary format handlers until one recognizes the image.
++ */
++int search_binary_handler(struct linux_binprm *bprm, struct pt_regs *regs)
++{
++	int try, retval;
++ struct linux_binfmt *fmt;
++#ifdef __alpha__
++ /* handle /sbin/loader.. */
++ {
++ struct exec * eh = (struct exec *) bprm->buf;
++
++ if (!bprm->loader && eh->fh.f_magic == 0x183 &&
++ (eh->fh.f_flags & 0x3000) == 0x3000)
++ {
++ struct file * file;
++ unsigned long loader;
++
++ allow_write_access(bprm->file);
++ fput(bprm->file);
++ bprm->file = NULL;
++
++ loader = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
++
++ file = open_exec("/sbin/loader");
++ retval = PTR_ERR(file);
++ if (IS_ERR(file))
++ return retval;
++
++ /* Remember if the application is TASO. */
++ bprm->sh_bang = eh->ah.entry < 0x100000000UL;
++
++ bprm->file = file;
++ bprm->loader = loader;
++ retval = prepare_binprm(bprm);
++			if (retval < 0)
++ return retval;
++ /* should call search_binary_handler recursively here,
++ but it does not matter */
++ }
++ }
++#endif
++ retval = security_bprm_check(bprm);
++ if (retval)
++ return retval;
++
++ /* kernel module loader fixup */
++	/* so we don't try to run modprobe in kernel space. */
++ set_fs(USER_DS);
++
++ retval = audit_bprm(bprm);
++ if (retval)
++ return retval;
++
++ retval = -ENOENT;
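++	/*
++	 * Two passes: if no registered handler recognizes the image,
++	 * CONFIG_KMOD may load a binfmt-XXXX module keyed on the header
++	 * bytes and the search is retried once.
++	 */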
++	for (try = 0; try < 2; try++) {
++ read_lock(&binfmt_lock);
++ for (fmt = formats ; fmt ; fmt = fmt->next) {
++ int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
++ if (!fn)
++ continue;
++ if (!try_module_get(fmt->module))
++ continue;
++ read_unlock(&binfmt_lock);
++ retval = fn(bprm, regs);
++ if (retval >= 0) {
++ put_binfmt(fmt);
++ allow_write_access(bprm->file);
++ if (bprm->file)
++ fput(bprm->file);
++ bprm->file = NULL;
++ current->did_exec = 1;
++ proc_exec_connector(current);
++ return retval;
++ }
++ read_lock(&binfmt_lock);
++ put_binfmt(fmt);
++ if (retval != -ENOEXEC || bprm->mm == NULL)
++ break;
++ if (!bprm->file) {
++ read_unlock(&binfmt_lock);
++ return retval;
++ }
++ }
++ read_unlock(&binfmt_lock);
++ if (retval != -ENOEXEC || bprm->mm == NULL) {
++ break;
++#ifdef CONFIG_KMOD
++		} else {
++#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
++ if (printable(bprm->buf[0]) &&
++ printable(bprm->buf[1]) &&
++ printable(bprm->buf[2]) &&
++ printable(bprm->buf[3]))
++ break; /* -ENOEXEC */
++ request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
++#endif
++ }
++ }
++ return retval;
++}
++
++EXPORT_SYMBOL(search_binary_handler);
++
++/*
++ * sys_execve() executes a new program.
++ */
++int do_execve(char * filename,
++ char __user *__user *argv,
++ char __user *__user *envp,
++ struct pt_regs * regs)
++{
++ struct linux_binprm *bprm;
++ struct file *file;
++ int retval;
++ int i;
++
++ retval = -ENOMEM;
++ bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
++ if (!bprm)
++ goto out_ret;
++
++ file = open_exec(filename);
++ retval = PTR_ERR(file);
++ if (IS_ERR(file))
++ goto out_kfree;
++
++ sched_exec();
++
++ bprm->p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
++
++ bprm->file = file;
++ bprm->filename = filename;
++ bprm->interp = filename;
++ bprm->mm = mm_alloc();
++ retval = -ENOMEM;
++ if (!bprm->mm)
++ goto out_file;
++
++ retval = init_new_context(current, bprm->mm);
++ if (retval < 0)
++ goto out_mm;
++
++ bprm->argc = count(argv, bprm->p / sizeof(void *));
++ if ((retval = bprm->argc) < 0)
++ goto out_mm;
++
++ bprm->envc = count(envp, bprm->p / sizeof(void *));
++ if ((retval = bprm->envc) < 0)
++ goto out_mm;
++
++ retval = security_bprm_alloc(bprm);
++ if (retval)
++ goto out;
++
++ retval = prepare_binprm(bprm);
++ if (retval < 0)
++ goto out;
++
++ retval = copy_strings_kernel(1, &bprm->filename, bprm);
++ if (retval < 0)
++ goto out;
++
++ bprm->exec = bprm->p;
++ retval = copy_strings(bprm->envc, envp, bprm);
++ if (retval < 0)
++ goto out;
++
++ retval = copy_strings(bprm->argc, argv, bprm);
++ if (retval < 0)
++ goto out;
++
++	retval = search_binary_handler(bprm, regs);
++ if (retval >= 0) {
++ free_arg_pages(bprm);
++
++ /* execve success */
++ security_bprm_free(bprm);
++ acct_update_integrals(current);
++ kfree(bprm);
++ return retval;
++ }
++
++out:
++	/* Something went wrong; free the argument pages and clean up */
++ for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
++ struct page * page = bprm->page[i];
++ if (page)
++ __free_page(page);
++ }
++
++ if (bprm->security)
++ security_bprm_free(bprm);
++
++out_mm:
++ if (bprm->mm)
++ mmdrop(bprm->mm);
++
++out_file:
++ if (bprm->file) {
++ allow_write_access(bprm->file);
++ fput(bprm->file);
++ }
++
++out_kfree:
++ kfree(bprm);
++
++out_ret:
++ return retval;
++}
++
++int set_binfmt(struct linux_binfmt *new)
++{
++ struct linux_binfmt *old = current->binfmt;
++
++ if (new) {
++ if (!try_module_get(new->module))
++ return -1;
++ }
++ current->binfmt = new;
++ if (old)
++ module_put(old->module);
++ return 0;
++}
++
++EXPORT_SYMBOL(set_binfmt);
++
++/* format_corename will inspect the pattern parameter, and output a
++ * name into corename, which must have space for at least
++ * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
++ */
++static int format_corename(char *corename, const char *pattern, long signr)
++{
++ const char *pat_ptr = pattern;
++ char *out_ptr = corename;
++ char *const out_end = corename + CORENAME_MAX_SIZE;
++ int rc;
++ int pid_in_pattern = 0;
++ int ispipe = 0;
++
++ if (*pattern == '|')
++ ispipe = 1;
++
++ /* Repeat as long as we have more pattern to process and more output
++ space */
++ while (*pat_ptr) {
++ if (*pat_ptr != '%') {
++ if (out_ptr == out_end)
++ goto out;
++ *out_ptr++ = *pat_ptr++;
++ } else {
++ switch (*++pat_ptr) {
++ case 0:
++ goto out;
++ /* Double percent, output one percent */
++ case '%':
++ if (out_ptr == out_end)
++ goto out;
++ *out_ptr++ = '%';
++ break;
++ /* pid */
++ case 'p':
++ pid_in_pattern = 1;
++ rc = snprintf(out_ptr, out_end - out_ptr,
++ "%d", current->tgid);
++ if (rc > out_end - out_ptr)
++ goto out;
++ out_ptr += rc;
++ break;
++ /* uid */
++ case 'u':
++ rc = snprintf(out_ptr, out_end - out_ptr,
++ "%d", current->uid);
++ if (rc > out_end - out_ptr)
++ goto out;
++ out_ptr += rc;
++ break;
++ /* gid */
++ case 'g':
++ rc = snprintf(out_ptr, out_end - out_ptr,
++ "%d", current->gid);
++ if (rc > out_end - out_ptr)
++ goto out;
++ out_ptr += rc;
++ break;
++ /* signal that caused the coredump */
++ case 's':
++ rc = snprintf(out_ptr, out_end - out_ptr,
++ "%ld", signr);
++ if (rc > out_end - out_ptr)
++ goto out;
++ out_ptr += rc;
++ break;
++ /* UNIX time of coredump */
++ case 't': {
++ struct timeval tv;
++ vx_gettimeofday(&tv);
++ rc = snprintf(out_ptr, out_end - out_ptr,
++ "%lu", tv.tv_sec);
++ if (rc > out_end - out_ptr)
++ goto out;
++ out_ptr += rc;
++ break;
++ }
++ /* hostname */
++ case 'h':
++ down_read(&uts_sem);
++ rc = snprintf(out_ptr, out_end - out_ptr,
++ "%s", utsname()->nodename);
++ up_read(&uts_sem);
++ if (rc > out_end - out_ptr)
++ goto out;
++ out_ptr += rc;
++ break;
++ /* executable */
++ case 'e':
++ rc = snprintf(out_ptr, out_end - out_ptr,
++ "%s", current->comm);
++ if (rc > out_end - out_ptr)
++ goto out;
++ out_ptr += rc;
++ break;
++ default:
++ break;
++ }
++ ++pat_ptr;
++ }
++ }
++ /* Backward compatibility with core_uses_pid:
++ *
++ * If core_pattern does not include a %p (as is the default)
++ * and core_uses_pid is set, then .%pid will be appended to
++ * the filename. Do not do this for piped commands. */
++ if (!ispipe && !pid_in_pattern
++	    && (core_uses_pid || atomic_read(&current->mm->mm_users) != 1)) {
++ rc = snprintf(out_ptr, out_end - out_ptr,
++ ".%d", current->tgid);
++ if (rc > out_end - out_ptr)
++ goto out;
++ out_ptr += rc;
++ }
++out:
++ *out_ptr = 0;
++ return ispipe;
++}
++
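++/*
++ * Mark the thread group as exiting and queue SIGKILL for every other
++ * thread that still owns an mm, counting each one as a core waiter.
++ */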
++static void zap_process(struct task_struct *start)
++{
++ struct task_struct *t;
++
++ start->signal->flags = SIGNAL_GROUP_EXIT;
++ start->signal->group_stop_count = 0;
++
++ t = start;
++ do {
++ if (t != current && t->mm) {
++ t->mm->core_waiters++;
++ sigaddset(&t->pending.signal, SIGKILL);
++ signal_wake_up(t, 1);
++ }
++ } while ((t = next_thread(t)) != start);
++}
++
++static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
++ int exit_code)
++{
++ struct task_struct *g, *p;
++ unsigned long flags;
++ int err = -EAGAIN;
++
++ spin_lock_irq(&tsk->sighand->siglock);
++ if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
++ tsk->signal->group_exit_code = exit_code;
++ zap_process(tsk);
++ err = 0;
++ }
++ spin_unlock_irq(&tsk->sighand->siglock);
++ if (err)
++ return err;
++
++ if (atomic_read(&mm->mm_users) == mm->core_waiters + 1)
++ goto done;
++
++ rcu_read_lock();
++ for_each_process(g) {
++ if (g == tsk->group_leader)
++ continue;
++
++ p = g;
++ do {
++ if (p->mm) {
++ if (p->mm == mm) {
++ /*
++ * p->sighand can't disappear, but
++ * may be changed by de_thread()
++ */
++ lock_task_sighand(p, &flags);
++ zap_process(p);
++ unlock_task_sighand(p, &flags);
++ }
++ break;
++ }
++ } while ((p = next_thread(p)) != g);
++ }
++ rcu_read_unlock();
++done:
++ return mm->core_waiters;
++}
++
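++/*
++ * Kill the other users of the mm and wait until they have checked in,
++ * so that the dumper sees a stable address space.
++ */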
++static int coredump_wait(int exit_code)
++{
++ struct task_struct *tsk = current;
++ struct mm_struct *mm = tsk->mm;
++ struct completion startup_done;
++ struct completion *vfork_done;
++ int core_waiters;
++
++ init_completion(&mm->core_done);
++ init_completion(&startup_done);
++ mm->core_startup_done = &startup_done;
++
++ core_waiters = zap_threads(tsk, mm, exit_code);
++ up_write(&mm->mmap_sem);
++
++ if (unlikely(core_waiters < 0))
++ goto fail;
++
++ /*
++ * Make sure nobody is waiting for us to release the VM,
++ * otherwise we can deadlock when we wait on each other
++ */
++ vfork_done = tsk->vfork_done;
++ if (vfork_done) {
++ tsk->vfork_done = NULL;
++ complete(vfork_done);
++ }
++
++ if (core_waiters)
++ wait_for_completion(&startup_done);
++fail:
++ BUG_ON(mm->core_waiters);
++ return core_waiters;
++}
++
++int do_coredump(long signr, int exit_code, struct pt_regs * regs)
++{
++ char corename[CORENAME_MAX_SIZE + 1];
++ struct mm_struct *mm = current->mm;
++ struct linux_binfmt * binfmt;
++ struct inode * inode;
++ struct file * file;
++ int retval = 0;
++ int fsuid = current->fsuid;
++ int flag = 0;
++ int ispipe = 0;
++
++ audit_core_dumps(signr);
++
++ binfmt = current->binfmt;
++ if (!binfmt || !binfmt->core_dump)
++ goto fail;
++ down_write(&mm->mmap_sem);
++ if (!mm->dumpable) {
++ up_write(&mm->mmap_sem);
++ goto fail;
++ }
++
++ /*
++ * We cannot trust fsuid as being the "true" uid of the
++ * process nor do we know its entire history. We only know it
++ * was tainted so we dump it as root in mode 2.
++ */
++ if (mm->dumpable == 2) { /* Setuid core dump mode */
++ flag = O_EXCL; /* Stop rewrite attacks */
++ current->fsuid = 0; /* Dump root private */
++ }
++ mm->dumpable = 0;
++
++ retval = coredump_wait(exit_code);
++ if (retval < 0)
++ goto fail;
++
++ /*
++ * Clear any false indication of pending signals that might
++ * be seen by the filesystem code called to write the core file.
++ */
++ clear_thread_flag(TIF_SIGPENDING);
++
++ if (current->signal->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
++ goto fail_unlock;
++
++ /*
++ * lock_kernel() because format_corename() is controlled by sysctl, which
++ * uses lock_kernel()
++ */
++ lock_kernel();
++ ispipe = format_corename(corename, core_pattern, signr);
++ unlock_kernel();
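++	/*
++	 * A core_pattern starting with '|' pipes the dump to a usermode
++	 * helper; otherwise the core file is opened directly (the bare 2
++	 * in the open flags below is the numeric value of O_RDWR).
++	 */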
++ if (ispipe) {
++ /* SIGPIPE can happen, but it's just never processed */
++		if (call_usermodehelper_pipe(corename+1, NULL, NULL, &file)) {
++ printk(KERN_INFO "Core dump to %s pipe failed\n",
++ corename);
++ goto fail_unlock;
++ }
++ } else
++ file = filp_open(corename,
++ O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
++ 0600);
++ if (IS_ERR(file))
++ goto fail_unlock;
++ inode = file->f_path.dentry->d_inode;
++ if (inode->i_nlink > 1)
++ goto close_fail; /* multiple links - don't dump */
++ if (!ispipe && d_unhashed(file->f_path.dentry))
++ goto close_fail;
++
++	/* AK: actually I see no reason not to allow this for named pipes etc.,
++ but keep the previous behaviour for now. */
++ if (!ispipe && !S_ISREG(inode->i_mode))
++ goto close_fail;
++ /*
++	 * Don't allow local users to get cute and trick others into
++	 * dumping core into their pre-created files:
++ */
++ if (inode->i_uid != current->fsuid)
++ goto close_fail;
++ if (!file->f_op)
++ goto close_fail;
++ if (!file->f_op->write)
++ goto close_fail;
++ if (!ispipe && do_truncate(file->f_path.dentry, 0, 0, file) != 0)
++ goto close_fail;
++
++ retval = binfmt->core_dump(signr, regs, file);
++
++ if (retval)
++ current->signal->group_exit_code |= 0x80;
++close_fail:
++ filp_close(file, NULL);
++fail_unlock:
++ current->fsuid = fsuid;
++ complete_all(&mm->core_done);
++fail:
++ return retval;
++}
+diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/include/linux/arrays.h linux-2.6.22-590/include/linux/arrays.h
+--- linux-2.6.22-580/include/linux/arrays.h 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.22-590/include/linux/arrays.h 2009-02-18 09:57:23.000000000 -0500
+@@ -0,0 +1,36 @@
++#ifndef __ARRAYS_H__
++#define __ARRAYS_H__
++#include <linux/list.h>
++
++#define SAMPLING_METHOD_DEFAULT 0
++#define SAMPLING_METHOD_LOG 1
++
++/* Every probe has an array handler */
++
++/* XXX - Optimize this structure */
++
++extern void (*rec_event)(void *, unsigned int);
++struct array_handler {
++ struct list_head link;
++ unsigned int (*hash_func)(void *);
++	unsigned int (*sampling_func)(void *, int, void *);
++ unsigned short size;
++ unsigned int threshold;
++ unsigned char **expcount;
++ unsigned int sampling_method;
++ unsigned int **arrays;
++ unsigned int arraysize;
++ unsigned int num_samples[2];
++ void **epoch_samples; /* size-sized lists of samples */
++ unsigned int (*serialize)(void *, void *);
++ unsigned char code[5];
++};
++
++struct event {
++ struct list_head link;
++ void *event_data;
++ unsigned int count;
++ unsigned int event_type;
++ struct task_struct *task;
++};
++#endif
+diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/include/linux/mutex.h linux-2.6.22-590/include/linux/mutex.h
+--- linux-2.6.22-580/include/linux/mutex.h 2007-07-08 19:32:17.000000000 -0400
++++ linux-2.6.22-590/include/linux/mutex.h 2009-02-18 09:57:23.000000000 -0500
+@@ -53,6 +53,10 @@
+ struct thread_info *owner;
+ const char *name;
+ void *magic;
++#else
++#ifdef CONFIG_CHOPSTIX
++ struct thread_info *owner;
++#endif
+ #endif
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/include/linux/sched.h linux-2.6.22-590/include/linux/sched.h
+--- linux-2.6.22-580/include/linux/sched.h 2009-02-18 09:56:02.000000000 -0500
++++ linux-2.6.22-590/include/linux/sched.h 2009-02-18 09:57:23.000000000 -0500
+@@ -850,6 +850,10 @@
+ #endif
+ unsigned long sleep_avg;
+ unsigned long long timestamp, last_ran;
++#ifdef CONFIG_CHOPSTIX
++ unsigned long last_interrupted, last_ran_j;
++#endif
++
+ unsigned long long sched_time; /* sched_clock time spent running */
+ enum sleep_type sleep_type;
+
+diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/include/linux/sched.h.orig linux-2.6.22-590/include/linux/sched.h.orig
+--- linux-2.6.22-580/include/linux/sched.h.orig 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.22-590/include/linux/sched.h.orig 2009-02-18 09:56:02.000000000 -0500
+@@ -0,0 +1,1737 @@
++#ifndef _LINUX_SCHED_H
++#define _LINUX_SCHED_H
++
++#include <linux/auxvec.h> /* For AT_VECTOR_SIZE */
++
++/*
++ * cloning flags:
++ */
++#define CSIGNAL 0x000000ff /* signal mask to be sent at exit */
++#define CLONE_VM 0x00000100 /* set if VM shared between processes */
++#define CLONE_FS 0x00000200 /* set if fs info shared between processes */
++#define CLONE_FILES 0x00000400 /* set if open files shared between processes */
++#define CLONE_SIGHAND 0x00000800 /* set if signal handlers and blocked signals shared */
++#define CLONE_PTRACE 0x00002000 /* set if we want to let tracing continue on the child too */
++#define CLONE_VFORK 0x00004000 /* set if the parent wants the child to wake it up on mm_release */
++#define CLONE_PARENT 0x00008000 /* set if we want to have the same parent as the cloner */
++#define CLONE_THREAD 0x00010000 /* Same thread group? */
++#define CLONE_NEWNS 0x00020000 /* New namespace group? */
++#define CLONE_SYSVSEM 0x00040000 /* share system V SEM_UNDO semantics */
++#define CLONE_SETTLS 0x00080000 /* create a new TLS for the child */
++#define CLONE_PARENT_SETTID 0x00100000 /* set the TID in the parent */
++#define CLONE_CHILD_CLEARTID 0x00200000 /* clear the TID in the child */
++#define CLONE_DETACHED 0x00400000 /* Unused, ignored */
++#define CLONE_UNTRACED 0x00800000 /* set if the tracing process can't force CLONE_PTRACE on this clone */
++#define CLONE_CHILD_SETTID 0x01000000 /* set the TID in the child */
++#define CLONE_STOPPED 0x02000000 /* Start in stopped state */
++#define CLONE_NEWUTS 0x04000000 /* New utsname group? */
++#define CLONE_NEWIPC 0x08000000 /* New ipcs */
++#define CLONE_KTHREAD 0x10000000 /* clone a kernel thread */
++
++/*
++ * Scheduling policies
++ */
++#define SCHED_NORMAL 0
++#define SCHED_FIFO 1
++#define SCHED_RR 2
++#define SCHED_BATCH 3
++
++#ifdef __KERNEL__
++
++struct sched_param {
++ int sched_priority;
++};
++
++#include <asm/param.h> /* for HZ */
++
++#include <linux/capability.h>
++#include <linux/threads.h>
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <linux/timex.h>
++#include <linux/jiffies.h>
++#include <linux/rbtree.h>
++#include <linux/thread_info.h>
++#include <linux/cpumask.h>
++#include <linux/errno.h>
++#include <linux/nodemask.h>
++
++#include <asm/system.h>
++#include <asm/semaphore.h>
++#include <asm/page.h>
++#include <asm/ptrace.h>
++#include <asm/mmu.h>
++#include <asm/cputime.h>
++
++#include <linux/smp.h>
++#include <linux/sem.h>
++#include <linux/signal.h>
++#include <linux/securebits.h>
++#include <linux/fs_struct.h>
++#include <linux/compiler.h>
++#include <linux/completion.h>
++#include <linux/pid.h>
++#include <linux/percpu.h>
++#include <linux/topology.h>
++#include <linux/seccomp.h>
++#include <linux/rcupdate.h>
++#include <linux/futex.h>
++#include <linux/rtmutex.h>
++
++#include <linux/time.h>
++#include <linux/param.h>
++#include <linux/resource.h>
++#include <linux/timer.h>
++#include <linux/hrtimer.h>
++#include <linux/task_io_accounting.h>
++
++#include <asm/processor.h>
++
++struct exec_domain;
++struct futex_pi_state;
++struct bio;
++
++/*
++ * List of flags we want to share for kernel threads,
++ * if only because they are not used by them anyway.
++ */
++#define CLONE_KERNEL (CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_KTHREAD)
++
++/*
++ * These are the constants used to fake the fixed-point load-average
++ * counting. Some notes:
++ * - 11 bit fractions expand to 22 bits by the multiplies: this gives
++ * a load-average precision of 10 bits integer + 11 bits fractional
++ * - if you want to count load-averages more often, you need more
++ * precision, or rounding will get you. With 2-second counting freq,
++ * the EXP_n values would be 1981, 2034 and 2043 if still using only
++ * 11 bit fractions.
++ */
++extern unsigned long avenrun[]; /* Load averages */
++
++#define FSHIFT 11 /* nr of bits of precision */
++#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */
++#define LOAD_FREQ (5*HZ) /* 5 sec intervals */
++#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */
++#define EXP_5 2014 /* 1/exp(5sec/5min) */
++#define EXP_15 2037 /* 1/exp(5sec/15min) */
++
++#define CALC_LOAD(load,exp,n) \
++ load *= exp; \
++ load += n*(FIXED_1-exp); \
++ load >>= FSHIFT;
++
++extern unsigned long total_forks;
++extern int nr_threads;
++DECLARE_PER_CPU(unsigned long, process_counts);
++extern int nr_processes(void);
++extern unsigned long nr_running(void);
++extern unsigned long nr_uninterruptible(void);
++extern unsigned long nr_active(void);
++extern unsigned long nr_iowait(void);
++extern unsigned long weighted_cpuload(const int cpu);
++
++
++/*
++ * Task state bitmask. NOTE! These bits are also
++ * encoded in fs/proc/array.c: get_task_state().
++ *
++ * We have two separate sets of flags: task->state
++ * is about runnability, while task->exit_state are
++ * about the task exiting. Confusing, but this way
++ * modifying one set can't modify the other one by
++ * mistake.
++ */
++#define TASK_RUNNING 0
++#define TASK_INTERRUPTIBLE 1
++#define TASK_UNINTERRUPTIBLE 2
++#define TASK_STOPPED 4
++#define TASK_TRACED 8
++#define TASK_ONHOLD 16
++/* in tsk->exit_state */
++#define EXIT_ZOMBIE 32
++#define EXIT_DEAD 64
++/* in tsk->state again */
++#define TASK_NONINTERACTIVE 128
++#define TASK_DEAD 256
++
++#define __set_task_state(tsk, state_value) \
++ do { (tsk)->state = (state_value); } while (0)
++#define set_task_state(tsk, state_value) \
++ set_mb((tsk)->state, (state_value))
++
++/*
++ * set_current_state() includes a barrier so that the write of current->state
++ * is correctly serialised wrt the caller's subsequent test of whether to
++ * actually sleep:
++ *
++ * set_current_state(TASK_UNINTERRUPTIBLE);
++ * if (do_i_need_to_sleep())
++ * schedule();
++ *
++ * If the caller does not need such serialisation then use __set_current_state()
++ */
++#define __set_current_state(state_value) \
++ do { current->state = (state_value); } while (0)
++#define set_current_state(state_value) \
++ set_mb(current->state, (state_value))
++
++/* Task command name length */
++#define TASK_COMM_LEN 16
++
++#include <linux/spinlock.h>
++
++/*
++ * This serializes "schedule()" and also protects
++ * the run-queue from deletions/modifications (but
++ * _adding_ to the beginning of the run-queue has
++ * a separate lock).
++ */
++extern rwlock_t tasklist_lock;
++extern spinlock_t mmlist_lock;
++
++struct task_struct;
++
++extern void sched_init(void);
++extern void sched_init_smp(void);
++extern void init_idle(struct task_struct *idle, int cpu);
++
++extern cpumask_t nohz_cpu_mask;
++#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
++extern int select_nohz_load_balancer(int cpu);
++#else
++static inline int select_nohz_load_balancer(int cpu)
++{
++ return 0;
++}
++#endif
++
++/*
++ * Only dump TASK_* tasks. (0 for all tasks)
++ */
++extern void show_state_filter(unsigned long state_filter);
++
++static inline void show_state(void)
++{
++ show_state_filter(0);
++}
++
++extern void show_regs(struct pt_regs *);
++
++/*
++ * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
++ * task), SP is the stack pointer of the first frame that should be shown in the back
++ * trace (or NULL if the entire call-chain of the task should be shown).
++ */
++extern void show_stack(struct task_struct *task, unsigned long *sp);
++
++void io_schedule(void);
++long io_schedule_timeout(long timeout);
++
++extern void cpu_init (void);
++extern void trap_init(void);
++extern void update_process_times(int user);
++extern void scheduler_tick(void);
++
++#ifdef CONFIG_DETECT_SOFTLOCKUP
++extern void softlockup_tick(void);
++extern void spawn_softlockup_task(void);
++extern void touch_softlockup_watchdog(void);
++extern void touch_all_softlockup_watchdogs(void);
++#else
++static inline void softlockup_tick(void)
++{
++}
++static inline void spawn_softlockup_task(void)
++{
++}
++static inline void touch_softlockup_watchdog(void)
++{
++}
++static inline void touch_all_softlockup_watchdogs(void)
++{
++}
++#endif
++
++
++/* Attach to any functions which should be ignored in wchan output. */
++#define __sched __attribute__((__section__(".sched.text")))
++/* Is this address in the __sched functions? */
++extern int in_sched_functions(unsigned long addr);
++
++#define MAX_SCHEDULE_TIMEOUT LONG_MAX
++extern signed long FASTCALL(schedule_timeout(signed long timeout));
++extern signed long schedule_timeout_interruptible(signed long timeout);
++extern signed long schedule_timeout_uninterruptible(signed long timeout);
++asmlinkage void schedule(void);
++
++struct nsproxy;
++
++/* Maximum number of active map areas. This is a random (large) number. */
++#define DEFAULT_MAX_MAP_COUNT 65536
++
++extern int sysctl_max_map_count;
++
++#include <linux/aio.h>
++
++extern unsigned long
++arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
++ unsigned long, unsigned long);
++extern unsigned long
++arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
++ unsigned long len, unsigned long pgoff,
++ unsigned long flags);
++extern void arch_unmap_area(struct mm_struct *, unsigned long);
++extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
++
++#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
++/*
++ * The mm counters are not protected by its page_table_lock,
++ * so must be incremented atomically.
++ */
++typedef atomic_long_t mm_counter_t;
++#define __set_mm_counter(mm, member, value) \
++ atomic_long_set(&(mm)->_##member, value)
++#define get_mm_counter(mm, member) \
++ ((unsigned long)atomic_long_read(&(mm)->_##member))
++
++#else /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
++/*
++ * The mm counters are protected by its page_table_lock,
++ * so can be incremented directly.
++ */
++typedef unsigned long mm_counter_t;
++#define __set_mm_counter(mm, member, value) (mm)->_##member = (value)
++#define get_mm_counter(mm, member) ((mm)->_##member)
++
++#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
++
++#define set_mm_counter(mm, member, value) \
++ vx_ ## member ## pages_sub((mm), (get_mm_counter(mm, member) - value))
++#define add_mm_counter(mm, member, value) \
++ vx_ ## member ## pages_add((mm), (value))
++#define inc_mm_counter(mm, member) vx_ ## member ## pages_inc((mm))
++#define dec_mm_counter(mm, member) vx_ ## member ## pages_dec((mm))
++
++#define get_mm_rss(mm) \
++ (get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
++#define update_hiwater_rss(mm) do { \
++ unsigned long _rss = get_mm_rss(mm); \
++ if ((mm)->hiwater_rss < _rss) \
++ (mm)->hiwater_rss = _rss; \
++} while (0)
++#define update_hiwater_vm(mm) do { \
++ if ((mm)->hiwater_vm < (mm)->total_vm) \
++ (mm)->hiwater_vm = (mm)->total_vm; \
++} while (0)
++
++struct mm_struct {
++ struct vm_area_struct * mmap; /* list of VMAs */
++ struct rb_root mm_rb;
++ struct vm_area_struct * mmap_cache; /* last find_vma result */
++ unsigned long (*get_unmapped_area) (struct file *filp,
++ unsigned long addr, unsigned long len,
++ unsigned long pgoff, unsigned long flags);
++ void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
++ unsigned long mmap_base; /* base of mmap area */
++ unsigned long task_size; /* size of task vm space */
++ unsigned long cached_hole_size; /* if non-zero, the largest hole below free_area_cache */
++ unsigned long free_area_cache; /* first hole of size cached_hole_size or larger */
++ pgd_t * pgd;
++ atomic_t mm_users; /* How many users with user space? */
++ atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */
++ int map_count; /* number of VMAs */
++ struct rw_semaphore mmap_sem;
++ spinlock_t page_table_lock; /* Protects page tables and some counters */
++
++ struct list_head mmlist; /* List of maybe swapped mm's. These are globally strung
++ * together off init_mm.mmlist, and are protected
++ * by mmlist_lock
++ */
++
++ /* Special counters, in some configurations protected by the
++ * page_table_lock, in other configurations by being atomic.
++ */
++ mm_counter_t _file_rss;
++ mm_counter_t _anon_rss;
++
++ unsigned long hiwater_rss; /* High-watermark of RSS usage */
++ unsigned long hiwater_vm; /* High-water virtual memory usage */
++
++ unsigned long total_vm, locked_vm, shared_vm, exec_vm;
++ unsigned long stack_vm, reserved_vm, def_flags, nr_ptes;
++ unsigned long start_code, end_code, start_data, end_data;
++ unsigned long start_brk, brk, start_stack;
++ unsigned long arg_start, arg_end, env_start, env_end;
++
++ unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
++
++ cpumask_t cpu_vm_mask;
++
++ /* Architecture-specific MM context */
++ mm_context_t context;
++ struct vx_info *mm_vx_info;
++
++ /* Swap token stuff */
++ /*
++ * Last value of global fault stamp as seen by this process.
++ * In other words, this value gives an indication of how long
++ * it has been since this task got the token.
++ * Look at mm/thrash.c
++ */
++ unsigned int faultstamp;
++ unsigned int token_priority;
++ unsigned int last_interval;
++
++ unsigned char dumpable:2;
++
++ /* coredumping support */
++ int core_waiters;
++ struct completion *core_startup_done, core_done;
++
++ /* aio bits */
++ rwlock_t ioctx_list_lock;
++ struct kioctx *ioctx_list;
++};
++
++struct sighand_struct {
++ atomic_t count;
++ struct k_sigaction action[_NSIG];
++ spinlock_t siglock;
++ struct list_head signalfd_list;
++};
++
++struct pacct_struct {
++ int ac_flag;
++ long ac_exitcode;
++ unsigned long ac_mem;
++ cputime_t ac_utime, ac_stime;
++ unsigned long ac_minflt, ac_majflt;
++};
++
++/*
++ * NOTE! "signal_struct" does not have its own
++ * locking, because a shared signal_struct always
++ * implies a shared sighand_struct, so locking
++ * sighand_struct is always a proper superset of
++ * the locking of signal_struct.
++ */
++struct signal_struct {
++ atomic_t count;
++ atomic_t live;
++
++ wait_queue_head_t wait_chldexit; /* for wait4() */
++
++ /* current thread group signal load-balancing target: */
++ struct task_struct *curr_target;
++
++ /* shared signal handling: */
++ struct sigpending shared_pending;
++
++ /* thread group exit support */
++ int group_exit_code;
++ /* overloaded:
++ * - notify group_exit_task when ->count is equal to notify_count
++ * - everyone except group_exit_task is stopped during signal delivery
++ * of fatal signals, group_exit_task processes the signal.
++ */
++ struct task_struct *group_exit_task;
++ int notify_count;
++
++ /* thread group stop support, overloads group_exit_code too */
++ int group_stop_count;
++ unsigned int flags; /* see SIGNAL_* flags below */
++
++ /* POSIX.1b Interval Timers */
++ struct list_head posix_timers;
++
++ /* ITIMER_REAL timer for the process */
++ struct hrtimer real_timer;
++ struct task_struct *tsk;
++ ktime_t it_real_incr;
++
++ /* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */
++ cputime_t it_prof_expires, it_virt_expires;
++ cputime_t it_prof_incr, it_virt_incr;
++
++ /* job control IDs */
++ pid_t pgrp;
++ struct pid *tty_old_pgrp;
++
++ union {
++ pid_t session __deprecated;
++ pid_t __session;
++ };
++
++ /* boolean value for session group leader */
++ int leader;
++
++ struct tty_struct *tty; /* NULL if no tty */
++
++ /*
++ * Cumulative resource counters for dead threads in the group,
++ * and for reaped dead child processes forked by this group.
++ * Live threads maintain their own counters and add to these
++ * in __exit_signal, except for the group leader.
++ */
++ cputime_t utime, stime, cutime, cstime;
++ unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
++ unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
++ unsigned long inblock, oublock, cinblock, coublock;
++
++ /*
++ * Cumulative ns of scheduled CPU time for dead threads in the
++ * group, not including a zombie group leader. (This only differs
++ * from jiffies_to_ns(utime + stime) if sched_clock uses something
++ * other than jiffies.)
++ */
++ unsigned long long sched_time;
++
++ /*
++ * We don't bother to synchronize most readers of this at all,
++ * because there is no reader checking a limit that actually needs
++ * to get both rlim_cur and rlim_max atomically, and either one
++ * alone is a single word that can safely be read normally.
++ * getrlimit/setrlimit use task_lock(current->group_leader) to
++ * protect this instead of the siglock, because they really
++ * have no need to disable irqs.
++ */
++ struct rlimit rlim[RLIM_NLIMITS];
++
++ struct list_head cpu_timers[3];
++
++ /* keep the process-shared keyrings here so that they do the right
++ * thing in threads created with CLONE_THREAD */
++#ifdef CONFIG_KEYS
++ struct key *session_keyring; /* keyring inherited over fork */
++ struct key *process_keyring; /* keyring private to this process */
++#endif
++#ifdef CONFIG_BSD_PROCESS_ACCT
++ struct pacct_struct pacct; /* per-process accounting information */
++#endif
++#ifdef CONFIG_TASKSTATS
++ struct taskstats *stats;
++#endif
++};
++
++/* Context switch must be unlocked if interrupts are to be enabled */
++#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
++# define __ARCH_WANT_UNLOCKED_CTXSW
++#endif
++
++/*
++ * Bits in flags field of signal_struct.
++ */
++#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */
++#define SIGNAL_STOP_DEQUEUED 0x00000002 /* stop signal dequeued */
++#define SIGNAL_STOP_CONTINUED 0x00000004 /* SIGCONT since WCONTINUED reap */
++#define SIGNAL_GROUP_EXIT 0x00000008 /* group exit in progress */
++
++
++/*
++ * Priority of a process goes from 0..MAX_PRIO-1, valid RT
++ * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
++ * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
++ * values are inverted: lower p->prio value means higher priority.
++ *
++ * The MAX_USER_RT_PRIO value allows the actual maximum
++ * RT priority to be separate from the value exported to
++ * user-space. This allows kernel threads to set their
++ * priority to a value higher than any user task. Note:
++ * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
++ */
++
++#define MAX_USER_RT_PRIO 100
++#define MAX_RT_PRIO MAX_USER_RT_PRIO
++
++#define MAX_PRIO (MAX_RT_PRIO + 40)
++
++#define rt_prio(prio) unlikely((prio) < MAX_RT_PRIO)
++#define rt_task(p) rt_prio((p)->prio)
++#define batch_task(p) (unlikely((p)->policy == SCHED_BATCH))
++#define is_rt_policy(p) ((p) != SCHED_NORMAL && (p) != SCHED_BATCH)
++#define has_rt_policy(p) unlikely(is_rt_policy((p)->policy))
++
++/*
++ * Some day this will be a full-fledged user tracking system..
++ */
++struct user_struct {
++ atomic_t __count; /* reference count */
++ atomic_t processes; /* How many processes does this user have? */
++ atomic_t files; /* How many open files does this user have? */
++ atomic_t sigpending; /* How many pending signals does this user have? */
++#ifdef CONFIG_INOTIFY_USER
++ atomic_t inotify_watches; /* How many inotify watches does this user have? */
++ atomic_t inotify_devs; /* How many inotify devs does this user have opened? */
++#endif
++ /* protected by mq_lock */
++ unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
++ unsigned long locked_shm; /* How many pages of mlocked shm ? */
++
++#ifdef CONFIG_KEYS
++ struct key *uid_keyring; /* UID specific keyring */
++ struct key *session_keyring; /* UID's default session keyring */
++#endif
++
++ /* Hash table maintenance information */
++ struct list_head uidhash_list;
++ uid_t uid;
++ xid_t xid;
++};
++
++extern struct user_struct *find_user(xid_t, uid_t);
++
++extern struct user_struct root_user;
++#define INIT_USER (&root_user)
++
++struct backing_dev_info;
++struct reclaim_state;
++
++#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
++struct sched_info {
++ /* cumulative counters */
++ unsigned long cpu_time, /* time spent on the cpu */
++ run_delay, /* time spent waiting on a runqueue */
++ pcnt; /* # of timeslices run on this cpu */
++
++ /* timestamps */
++ unsigned long last_arrival, /* when we last ran on a cpu */
++ last_queued; /* when we were last queued to run */
++};
++#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
++
++#ifdef CONFIG_SCHEDSTATS
++extern const struct file_operations proc_schedstat_operations;
++#endif /* CONFIG_SCHEDSTATS */
++
++#ifdef CONFIG_TASK_DELAY_ACCT
++struct task_delay_info {
++ spinlock_t lock;
++ unsigned int flags; /* Private per-task flags */
++
++ /* For each stat XXX, add following, aligned appropriately
++ *
++ * struct timespec XXX_start, XXX_end;
++ * u64 XXX_delay;
++ * u32 XXX_count;
++ *
++ * Atomicity of updates to XXX_delay, XXX_count protected by
++ * single lock above (split into XXX_lock if contention is an issue).
++ */
++
++ /*
++ * XXX_count is incremented on every XXX operation, the delay
++ * associated with the operation is added to XXX_delay.
++ * XXX_delay contains the accumulated delay time in nanoseconds.
++ */
++ struct timespec blkio_start, blkio_end; /* Shared by blkio, swapin */
++ u64 blkio_delay; /* wait for sync block io completion */
++ u64 swapin_delay; /* wait for swapin block io completion */
++ u32 blkio_count; /* total count of the number of sync block */
++ /* io operations performed */
++ u32 swapin_count; /* total count of the number of swapin block */
++ /* io operations performed */
++};
++#endif /* CONFIG_TASK_DELAY_ACCT */
++
++static inline int sched_info_on(void)
++{
++#ifdef CONFIG_SCHEDSTATS
++ return 1;
++#elif defined(CONFIG_TASK_DELAY_ACCT)
++ extern int delayacct_on;
++ return delayacct_on;
++#else
++ return 0;
++#endif
++}
++
++enum idle_type
++{
++ SCHED_IDLE,
++ NOT_IDLE,
++ NEWLY_IDLE,
++ MAX_IDLE_TYPES
++};
++
++/*
++ * sched-domains (multiprocessor balancing) declarations:
++ */
++#define SCHED_LOAD_SCALE 128UL /* increase resolution of load */
++
++#ifdef CONFIG_SMP
++#define SD_LOAD_BALANCE 1 /* Do load balancing on this domain. */
++#define SD_BALANCE_NEWIDLE 2 /* Balance when about to become idle */
++#define SD_BALANCE_EXEC 4 /* Balance on exec */
++#define SD_BALANCE_FORK 8 /* Balance on fork, clone */
++#define SD_WAKE_IDLE 16 /* Wake to idle CPU on task wakeup */
++#define SD_WAKE_AFFINE 32 /* Wake task to waking CPU */
++#define SD_WAKE_BALANCE 64 /* Perform balancing at task wakeup */
++#define SD_SHARE_CPUPOWER 128 /* Domain members share cpu power */
++#define SD_POWERSAVINGS_BALANCE 256 /* Balance for power savings */
++#define SD_SHARE_PKG_RESOURCES 512 /* Domain members share cpu pkg resources */
++#define SD_SERIALIZE 1024 /* Only a single load balancing instance */
++
++#define BALANCE_FOR_MC_POWER \
++ (sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0)
++
++#define BALANCE_FOR_PKG_POWER \
++ ((sched_mc_power_savings || sched_smt_power_savings) ? \
++ SD_POWERSAVINGS_BALANCE : 0)
++
++#define test_sd_parent(sd, flag) ((sd->parent && \
++ (sd->parent->flags & flag)) ? 1 : 0)
++
++
++struct sched_group {
++ struct sched_group *next; /* Must be a circular list */
++ cpumask_t cpumask;
++
++ /*
++ * CPU power of this group, SCHED_LOAD_SCALE being max power for a
++ * single CPU. This is read only (except for setup, hotplug CPU).
++	 * Note: never change cpu_power without recomputing its reciprocal
++ */
++ unsigned int __cpu_power;
++ /*
++ * reciprocal value of cpu_power to avoid expensive divides
++ * (see include/linux/reciprocal_div.h)
++ */
++ u32 reciprocal_cpu_power;
++};
++
++struct sched_domain {
++ /* These fields must be setup */
++ struct sched_domain *parent; /* top domain must be null terminated */
++ struct sched_domain *child; /* bottom domain must be null terminated */
++ struct sched_group *groups; /* the balancing groups of the domain */
++ cpumask_t span; /* span of all CPUs in this domain */
++ unsigned long min_interval; /* Minimum balance interval ms */
++ unsigned long max_interval; /* Maximum balance interval ms */
++ unsigned int busy_factor; /* less balancing by factor if busy */
++ unsigned int imbalance_pct; /* No balance until over watermark */
++ unsigned long long cache_hot_time; /* Task considered cache hot (ns) */
++ unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */
++ unsigned int busy_idx;
++ unsigned int idle_idx;
++ unsigned int newidle_idx;
++ unsigned int wake_idx;
++ unsigned int forkexec_idx;
++ int flags; /* See SD_* */
++
++ /* Runtime fields. */
++ unsigned long last_balance; /* init to jiffies. units in jiffies */
++ unsigned int balance_interval; /* initialise to 1. units in ms. */
++ unsigned int nr_balance_failed; /* initialise to 0 */
++
++#ifdef CONFIG_SCHEDSTATS
++ /* load_balance() stats */
++ unsigned long lb_cnt[MAX_IDLE_TYPES];
++ unsigned long lb_failed[MAX_IDLE_TYPES];
++ unsigned long lb_balanced[MAX_IDLE_TYPES];
++ unsigned long lb_imbalance[MAX_IDLE_TYPES];
++ unsigned long lb_gained[MAX_IDLE_TYPES];
++ unsigned long lb_hot_gained[MAX_IDLE_TYPES];
++ unsigned long lb_nobusyg[MAX_IDLE_TYPES];
++ unsigned long lb_nobusyq[MAX_IDLE_TYPES];
++
++ /* Active load balancing */
++ unsigned long alb_cnt;
++ unsigned long alb_failed;
++ unsigned long alb_pushed;
++
++ /* SD_BALANCE_EXEC stats */
++ unsigned long sbe_cnt;
++ unsigned long sbe_balanced;
++ unsigned long sbe_pushed;
++
++ /* SD_BALANCE_FORK stats */
++ unsigned long sbf_cnt;
++ unsigned long sbf_balanced;
++ unsigned long sbf_pushed;
++
++ /* try_to_wake_up() stats */
++ unsigned long ttwu_wake_remote;
++ unsigned long ttwu_move_affine;
++ unsigned long ttwu_move_balance;
++#endif
++};
++
++extern int partition_sched_domains(cpumask_t *partition1,
++ cpumask_t *partition2);
++
++/*
++ * Maximum cache size the migration-costs auto-tuning code will
++ * search from:
++ */
++extern unsigned int max_cache_size;
++
++#endif /* CONFIG_SMP */
++
++
++struct io_context; /* See blkdev.h */
++struct cpuset;
++
++#define NGROUPS_SMALL 32
++#define NGROUPS_PER_BLOCK ((int)(PAGE_SIZE / sizeof(gid_t)))
++struct group_info {
++ int ngroups;
++ atomic_t usage;
++ gid_t small_block[NGROUPS_SMALL];
++ int nblocks;
++ gid_t *blocks[0];
++};
++
++/*
++ * get_group_info() must be called with the owning task locked (via task_lock())
++ * when task != current, because the vast majority of callers are
++ * looking at current->group_info, which cannot be changed except by the
++ * current task. Changing current->group_info requires the task lock, too.
++ */
++#define get_group_info(group_info) do { \
++ atomic_inc(&(group_info)->usage); \
++} while (0)
++
++#define put_group_info(group_info) do { \
++ if (atomic_dec_and_test(&(group_info)->usage)) \
++ groups_free(group_info); \
++} while (0)
++
++extern struct group_info *groups_alloc(int gidsetsize);
++extern void groups_free(struct group_info *group_info);
++extern int set_current_groups(struct group_info *group_info);
++extern int groups_search(struct group_info *group_info, gid_t grp);
++/* access the groups "array" with this macro */
++#define GROUP_AT(gi, i) \
++ ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
++
++#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
++extern void prefetch_stack(struct task_struct *t);
++#else
++static inline void prefetch_stack(struct task_struct *t) { }
++#endif
++
++struct audit_context; /* See audit.c */
++struct mempolicy;
++struct pipe_inode_info;
++struct uts_namespace;
++
++enum sleep_type {
++ SLEEP_NORMAL,
++ SLEEP_NONINTERACTIVE,
++ SLEEP_INTERACTIVE,
++ SLEEP_INTERRUPTED,
++};
++
++struct prio_array;
++
++struct task_struct {
++ volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
++ void *stack;
++ atomic_t usage;
++ unsigned int flags; /* per process flags, defined below */
++ unsigned int ptrace;
++
++ int lock_depth; /* BKL lock depth */
++
++#ifdef CONFIG_SMP
++#ifdef __ARCH_WANT_UNLOCKED_CTXSW
++ int oncpu;
++#endif
++#endif
++ int load_weight; /* for niceness load balancing purposes */
++ int prio, static_prio, normal_prio;
++ struct list_head run_list;
++ struct prio_array *array;
++
++ unsigned short ioprio;
++#ifdef CONFIG_BLK_DEV_IO_TRACE
++ unsigned int btrace_seq;
++#endif
++ unsigned long sleep_avg;
++ unsigned long long timestamp, last_ran;
++ unsigned long long sched_time; /* sched_clock time spent running */
++ enum sleep_type sleep_type;
++
++ unsigned int policy;
++ cpumask_t cpus_allowed;
++ unsigned int time_slice, first_time_slice;
++
++#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
++ struct sched_info sched_info;
++#endif
++
++ struct list_head tasks;
++ /*
++ * ptrace_list/ptrace_children forms the list of my children
++ * that were stolen by a ptracer.
++ */
++ struct list_head ptrace_children;
++ struct list_head ptrace_list;
++
++ struct mm_struct *mm, *active_mm;
++
++/* task state */
++ struct linux_binfmt *binfmt;
++ int exit_state;
++ int exit_code, exit_signal;
++ int pdeath_signal; /* The signal sent when the parent dies */
++ /* ??? */
++ unsigned int personality;
++ unsigned did_exec:1;
++ pid_t pid;
++ pid_t tgid;
++
++#ifdef CONFIG_CC_STACKPROTECTOR
++ /* Canary value for the -fstack-protector gcc feature */
++ unsigned long stack_canary;
++#endif
++ /*
++ * pointers to (original) parent process, youngest child, younger sibling,
++ * older sibling, respectively. (p->father can be replaced with
++ * p->parent->pid)
++ */
++ struct task_struct *real_parent; /* real parent process (when being debugged) */
++ struct task_struct *parent; /* parent process */
++ /*
++ * children/sibling forms the list of my children plus the
++ * tasks I'm ptracing.
++ */
++ struct list_head children; /* list of my children */
++ struct list_head sibling; /* linkage in my parent's children list */
++ struct task_struct *group_leader; /* threadgroup leader */
++
++ /* PID/PID hash table linkage. */
++ struct pid_link pids[PIDTYPE_MAX];
++ struct list_head thread_group;
++
++ struct completion *vfork_done; /* for vfork() */
++ int __user *set_child_tid; /* CLONE_CHILD_SETTID */
++ int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
++
++ unsigned int rt_priority;
++ cputime_t utime, stime;
++ unsigned long nvcsw, nivcsw; /* context switch counts */
++ struct timespec start_time;
++/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
++ unsigned long min_flt, maj_flt;
++
++ cputime_t it_prof_expires, it_virt_expires;
++ unsigned long long it_sched_expires;
++ struct list_head cpu_timers[3];
++
++/* process credentials */
++ uid_t uid,euid,suid,fsuid;
++ gid_t gid,egid,sgid,fsgid;
++ struct group_info *group_info;
++ kernel_cap_t cap_effective, cap_inheritable, cap_permitted;
++ unsigned keep_capabilities:1;
++ struct user_struct *user;
++#ifdef CONFIG_KEYS
++ struct key *request_key_auth; /* assumed request_key authority */
++ struct key *thread_keyring; /* keyring private to this thread */
++ unsigned char jit_keyring; /* default keyring to attach requested keys to */
++#endif
++ /*
++ * fpu_counter contains the number of consecutive context switches
++ * that the FPU is used. If this is over a threshold, the lazy fpu
++ * saving becomes unlazy to save the trap. This is an unsigned char
++ * so that after 256 times the counter wraps and the behavior turns
++ * lazy again; this to deal with bursty apps that only use FPU for
++ * a short time
++ */
++ unsigned char fpu_counter;
++ int oomkilladj; /* OOM kill score adjustment (bit shift). */
++ char comm[TASK_COMM_LEN]; /* executable name excluding path
++ - access with [gs]et_task_comm (which lock
++ it with task_lock())
++ - initialized normally by flush_old_exec */
++/* file system info */
++ int link_count, total_link_count;
++#ifdef CONFIG_SYSVIPC
++/* ipc stuff */
++ struct sysv_sem sysvsem;
++#endif
++/* CPU-specific state of this task */
++ struct thread_struct thread;
++/* filesystem information */
++ struct fs_struct *fs;
++/* open file information */
++ struct files_struct *files;
++/* namespaces */
++ struct nsproxy *nsproxy;
++/* signal handlers */
++ struct signal_struct *signal;
++ struct sighand_struct *sighand;
++
++ sigset_t blocked, real_blocked;
++ sigset_t saved_sigmask; /* To be restored with TIF_RESTORE_SIGMASK */
++ struct sigpending pending;
++
++ unsigned long sas_ss_sp;
++ size_t sas_ss_size;
++ int (*notifier)(void *priv);
++ void *notifier_data;
++ sigset_t *notifier_mask;
++
++ void *security;
++ struct audit_context *audit_context;
++
++/* vserver context data */
++ struct vx_info *vx_info;
++ struct nx_info *nx_info;
++
++ xid_t xid;
++ nid_t nid;
++ tag_t tag;
++
++ seccomp_t seccomp;
++
++/* Thread group tracking */
++ u32 parent_exec_id;
++ u32 self_exec_id;
++/* Protection of (de-)allocation: mm, files, fs, tty, keyrings */
++ spinlock_t alloc_lock;
++
++ /* Protection of the PI data structures: */
++ spinlock_t pi_lock;
++
++#ifdef CONFIG_RT_MUTEXES
++ /* PI waiters blocked on a rt_mutex held by this task */
++ struct plist_head pi_waiters;
++ /* Deadlock detection and priority inheritance handling */
++ struct rt_mutex_waiter *pi_blocked_on;
++#endif
++
++#ifdef CONFIG_DEBUG_MUTEXES
++ /* mutex deadlock detection */
++ struct mutex_waiter *blocked_on;
++#endif
++#ifdef CONFIG_TRACE_IRQFLAGS
++ unsigned int irq_events;
++ int hardirqs_enabled;
++ unsigned long hardirq_enable_ip;
++ unsigned int hardirq_enable_event;
++ unsigned long hardirq_disable_ip;
++ unsigned int hardirq_disable_event;
++ int softirqs_enabled;
++ unsigned long softirq_disable_ip;
++ unsigned int softirq_disable_event;
++ unsigned long softirq_enable_ip;
++ unsigned int softirq_enable_event;
++ int hardirq_context;
++ int softirq_context;
++#endif
++#ifdef CONFIG_LOCKDEP
++# define MAX_LOCK_DEPTH 30UL
++ u64 curr_chain_key;
++ int lockdep_depth;
++ struct held_lock held_locks[MAX_LOCK_DEPTH];
++ unsigned int lockdep_recursion;
++#endif
++
++/* journalling filesystem info */
++ void *journal_info;
++
++/* stacked block device info */
++ struct bio *bio_list, **bio_tail;
++
++/* VM state */
++ struct reclaim_state *reclaim_state;
++
++ struct backing_dev_info *backing_dev_info;
++
++ struct io_context *io_context;
++
++ unsigned long ptrace_message;
++ siginfo_t *last_siginfo; /* For ptrace use. */
++/*
++ * current io wait handle: wait queue entry to use for io waits
++ * If this thread is processing aio, this points at the waitqueue
++ * inside the currently handled kiocb. It may be NULL (i.e. default
++ * to a stack-based synchronous wait) if it's doing sync IO.
++ */
++ wait_queue_t *io_wait;
++#ifdef CONFIG_TASK_XACCT
++/* i/o counters (bytes read/written, #syscalls) */
++ u64 rchar, wchar, syscr, syscw;
++#endif
++ struct task_io_accounting ioac;
++#if defined(CONFIG_TASK_XACCT)
++ u64 acct_rss_mem1; /* accumulated rss usage */
++ u64 acct_vm_mem1; /* accumulated virtual memory usage */
++ cputime_t acct_stimexpd;/* stime since last update */
++#endif
++#ifdef CONFIG_NUMA
++ struct mempolicy *mempolicy;
++ short il_next;
++#endif
++#ifdef CONFIG_CPUSETS
++ struct cpuset *cpuset;
++ nodemask_t mems_allowed;
++ int cpuset_mems_generation;
++ int cpuset_mem_spread_rotor;
++#endif
++ struct robust_list_head __user *robust_list;
++#ifdef CONFIG_COMPAT
++ struct compat_robust_list_head __user *compat_robust_list;
++#endif
++ struct list_head pi_state_list;
++ struct futex_pi_state *pi_state_cache;
++
++ atomic_t fs_excl; /* holding fs exclusive resources */
++ struct rcu_head rcu;
++
++ /*
++ * cache last used pipe for splice
++ */
++ struct pipe_inode_info *splice_pipe;
++#ifdef CONFIG_TASK_DELAY_ACCT
++ struct task_delay_info *delays;
++#endif
++#ifdef CONFIG_FAULT_INJECTION
++ int make_it_fail;
++#endif
++};
++
++static inline pid_t process_group(struct task_struct *tsk)
++{
++ return tsk->signal->pgrp;
++}
++
++static inline pid_t signal_session(struct signal_struct *sig)
++{
++ return sig->__session;
++}
++
++static inline pid_t process_session(struct task_struct *tsk)
++{
++ return signal_session(tsk->signal);
++}
++
++static inline void set_signal_session(struct signal_struct *sig, pid_t session)
++{
++ sig->__session = session;
++}
++
++static inline struct pid *task_pid(struct task_struct *task)
++{
++ return task->pids[PIDTYPE_PID].pid;
++}
++
++static inline struct pid *task_tgid(struct task_struct *task)
++{
++ return task->group_leader->pids[PIDTYPE_PID].pid;
++}
++
++static inline struct pid *task_pgrp(struct task_struct *task)
++{
++ return task->group_leader->pids[PIDTYPE_PGID].pid;
++}
++
++static inline struct pid *task_session(struct task_struct *task)
++{
++ return task->group_leader->pids[PIDTYPE_SID].pid;
++}
++
++/**
++ * pid_alive - check that a task structure is not stale
++ * @p: Task structure to be checked.
++ *
++ * Test if a process is not yet dead (at most zombie state).
++ * If pid_alive fails, then pointers within the task structure
++ * can be stale and must not be dereferenced.
++ */
++static inline int pid_alive(struct task_struct *p)
++{
++ return p->pids[PIDTYPE_PID].pid != NULL;
++}
++
++/**
++ * is_init - check if a task structure is init
++ * @tsk: Task structure to be checked.
++ *
++ * Check if a task structure is the first user space task the kernel created.
++ */
++static inline int is_init(struct task_struct *tsk)
++{
++ return tsk->pid == 1;
++}
++
++extern struct pid *cad_pid;
++
++extern void free_task(struct task_struct *tsk);
++#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
++
++extern void __put_task_struct(struct task_struct *t);
++
++static inline void put_task_struct(struct task_struct *t)
++{
++ if (atomic_dec_and_test(&t->usage))
++ __put_task_struct(t);
++}
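++
++/* Sketch of the pairing: every get_task_struct(tsk) must be balanced by a
++ * put_task_struct(tsk); the put that drops ->usage to zero ends in
++ * __put_task_struct(), which frees the task. */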
++
++/*
++ * Per process flags
++ */
++#define PF_ALIGNWARN 0x00000001 /* Print alignment warning msgs */
++ /* Not implemented yet, only for 486 */
++#define PF_STARTING 0x00000002 /* being created */
++#define PF_EXITING 0x00000004 /* getting shut down */
++#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
++#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
++#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
++#define PF_DUMPCORE 0x00000200 /* dumped core */
++#define PF_SIGNALED 0x00000400 /* killed by a signal */
++#define PF_MEMALLOC 0x00000800 /* Allocating memory */
++#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
++#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
++#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
++#define PF_FROZEN 0x00010000 /* frozen for system suspend */
++#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
++#define PF_KSWAPD 0x00040000 /* I am kswapd */
++#define PF_SWAPOFF 0x00080000 /* I am in swapoff */
++#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
++#define PF_BORROWED_MM 0x00200000 /* I am a kthread doing use_mm */
++#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
++#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
++#define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */
++#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */
++#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
++#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
++#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */
++
++/*
++ * Only the _current_ task can read/write to tsk->flags, but other
++ * tasks can access tsk->flags in readonly mode for example
++ * with tsk_used_math (like during threaded core dumping).
++ * There is however an exception to this rule during ptrace
++ * or during fork: the ptracer task is allowed to write to the
++ * child->flags of its traced child (same goes for fork, the parent
++ * can write to the child->flags), because we're guaranteed the
++ * child is not running and in turn not changing child->flags
++ * at the same time the parent does it.
++ */
++#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
++#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
++#define clear_used_math() clear_stopped_child_used_math(current)
++#define set_used_math() set_stopped_child_used_math(current)
++#define conditional_stopped_child_used_math(condition, child) \
++ do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
++#define conditional_used_math(condition) \
++ conditional_stopped_child_used_math(condition, current)
++#define copy_to_stopped_child_used_math(child) \
++ do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
++/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
++#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
++#define used_math() tsk_used_math(current)
++
++#ifdef CONFIG_SMP
++extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);
++#else
++static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
++{
++ if (!cpu_isset(0, new_mask))
++ return -EINVAL;
++ return 0;
++}
++#endif
++
++extern unsigned long long sched_clock(void);
++extern unsigned long long
++current_sched_time(const struct task_struct *current_task);
++
++/* sched_exec is called by processes performing an exec */
++#ifdef CONFIG_SMP
++extern void sched_exec(void);
++#else
++#define sched_exec() {}
++#endif
++
++#ifdef CONFIG_HOTPLUG_CPU
++extern void idle_task_exit(void);
++#else
++static inline void idle_task_exit(void) {}
++#endif
++
++extern void sched_idle_next(void);
++
++#ifdef CONFIG_RT_MUTEXES
++extern int rt_mutex_getprio(struct task_struct *p);
++extern void rt_mutex_setprio(struct task_struct *p, int prio);
++extern void rt_mutex_adjust_pi(struct task_struct *p);
++#else
++static inline int rt_mutex_getprio(struct task_struct *p)
++{
++ return p->normal_prio;
++}
++# define rt_mutex_adjust_pi(p) do { } while (0)
++#endif
++
++extern void set_user_nice(struct task_struct *p, long nice);
++extern int task_prio(const struct task_struct *p);
++extern int task_nice(const struct task_struct *p);
++extern int can_nice(const struct task_struct *p, const int nice);
++extern int task_curr(const struct task_struct *p);
++extern int idle_cpu(int cpu);
++extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
++extern struct task_struct *idle_task(int cpu);
++extern struct task_struct *curr_task(int cpu);
++extern void set_curr_task(int cpu, struct task_struct *p);
++
++void yield(void);
++
++/*
++ * The default (Linux) execution domain.
++ */
++extern struct exec_domain default_exec_domain;
++
++union thread_union {
++ struct thread_info thread_info;
++ unsigned long stack[THREAD_SIZE/sizeof(long)];
++};
++
++#ifndef __HAVE_ARCH_KSTACK_END
++static inline int kstack_end(void *addr)
++{
++ /* Reliable end of stack detection:
++ * Some APM bios versions misalign the stack
++ */
++ return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
++}
++#endif
++
++extern union thread_union init_thread_union;
++extern struct task_struct init_task;
++
++extern struct mm_struct init_mm;
++
++#define find_task_by_real_pid(nr) \
++ find_task_by_pid_type(PIDTYPE_REALPID, nr)
++#define find_task_by_pid(nr) \
++ find_task_by_pid_type(PIDTYPE_PID, nr)
++
++extern struct task_struct *find_task_by_pid_type(int type, int pid);
++extern void __set_special_pids(pid_t session, pid_t pgrp);
++
++/* per-UID process charging. */
++extern struct user_struct * alloc_uid(xid_t, uid_t);
++static inline struct user_struct *get_uid(struct user_struct *u)
++{
++ atomic_inc(&u->__count);
++ return u;
++}
++extern void free_uid(struct user_struct *);
++extern void switch_uid(struct user_struct *);
++
++#include <asm/current.h>
++
++extern void do_timer(unsigned long ticks);
++
++extern int FASTCALL(wake_up_state(struct task_struct * tsk, unsigned int state));
++extern int FASTCALL(wake_up_process(struct task_struct * tsk));
++extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,
++ unsigned long clone_flags));
++#ifdef CONFIG_SMP
++ extern void kick_process(struct task_struct *tsk);
++#else
++ static inline void kick_process(struct task_struct *tsk) { }
++#endif
++extern void FASTCALL(sched_fork(struct task_struct * p, int clone_flags));
++extern void FASTCALL(sched_exit(struct task_struct * p));
++
++extern int in_group_p(gid_t);
++extern int in_egroup_p(gid_t);
++
++extern void proc_caches_init(void);
++extern void flush_signals(struct task_struct *);
++extern void ignore_signals(struct task_struct *);
++extern void flush_signal_handlers(struct task_struct *, int force_default);
++extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
++
++static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
++{
++ unsigned long flags;
++ int ret;
++
++ spin_lock_irqsave(&tsk->sighand->siglock, flags);
++ ret = dequeue_signal(tsk, mask, info);
++ spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
++
++ return ret;
++}
++
++extern void block_all_signals(int (*notifier)(void *priv), void *priv,
++ sigset_t *mask);
++extern void unblock_all_signals(void);
++extern void release_task(struct task_struct * p);
++extern int send_sig_info(int, struct siginfo *, struct task_struct *);
++extern int send_group_sig_info(int, struct siginfo *, struct task_struct *);
++extern int force_sigsegv(int, struct task_struct *);
++extern int force_sig_info(int, struct siginfo *, struct task_struct *);
++extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
++extern int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
++extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
++extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32);
++extern int kill_pgrp(struct pid *pid, int sig, int priv);
++extern int kill_pid(struct pid *pid, int sig, int priv);
++extern int kill_proc_info(int, struct siginfo *, pid_t);
++extern void do_notify_parent(struct task_struct *, int);
++extern void force_sig(int, struct task_struct *);
++extern void force_sig_specific(int, struct task_struct *);
++extern int send_sig(int, struct task_struct *, int);
++extern void zap_other_threads(struct task_struct *p);
++extern int kill_proc(pid_t, int, int);
++extern struct sigqueue *sigqueue_alloc(void);
++extern void sigqueue_free(struct sigqueue *);
++extern int send_sigqueue(int, struct sigqueue *, struct task_struct *);
++extern int send_group_sigqueue(int, struct sigqueue *, struct task_struct *);
++extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
++extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);
++
++static inline int kill_cad_pid(int sig, int priv)
++{
++ return kill_pid(cad_pid, sig, priv);
++}
++
++/* These can be the second arg to send_sig_info/send_group_sig_info. */
++#define SEND_SIG_NOINFO ((struct siginfo *) 0)
++#define SEND_SIG_PRIV ((struct siginfo *) 1)
++#define SEND_SIG_FORCED ((struct siginfo *) 2)
++
++static inline int is_si_special(const struct siginfo *info)
++{
++ return info <= SEND_SIG_FORCED;
++}
++
++/* True if we are on the alternate signal stack. */
++
++static inline int on_sig_stack(unsigned long sp)
++{
++ return (sp - current->sas_ss_sp < current->sas_ss_size);
++}
++
++static inline int sas_ss_flags(unsigned long sp)
++{
++ return (current->sas_ss_size == 0 ? SS_DISABLE
++ : on_sig_stack(sp) ? SS_ONSTACK : 0);
++}
++
++/*
++ * Routines for handling mm_structs
++ */
++extern struct mm_struct * mm_alloc(void);
++
++/* mmdrop drops the mm and the page tables */
++extern void FASTCALL(__mmdrop(struct mm_struct *));
++static inline void mmdrop(struct mm_struct * mm)
++{
++ if (atomic_dec_and_test(&mm->mm_count))
++ __mmdrop(mm);
++}
++
++/* mmput gets rid of the mappings and all user-space */
++extern void mmput(struct mm_struct *);
++/* Grab a reference to a task's mm, if it is not already going away */
++extern struct mm_struct *get_task_mm(struct task_struct *task);
++/* Remove the current tasks stale references to the old mm_struct */
++extern void mm_release(struct task_struct *, struct mm_struct *);
++
++extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
++extern void flush_thread(void);
++extern void exit_thread(void);
++
++extern void exit_files(struct task_struct *);
++extern void __cleanup_signal(struct signal_struct *);
++extern void __cleanup_sighand(struct sighand_struct *);
++extern void exit_itimers(struct signal_struct *);
++
++extern NORET_TYPE void do_group_exit(int);
++
++extern void daemonize(const char *, ...);
++extern int allow_signal(int);
++extern int disallow_signal(int);
++
++extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
++extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
++struct task_struct *fork_idle(int);
++
++extern void set_task_comm(struct task_struct *tsk, char *from);
++extern void get_task_comm(char *to, struct task_struct *tsk);
++
++#ifdef CONFIG_SMP
++extern void wait_task_inactive(struct task_struct * p);
++#else
++#define wait_task_inactive(p) do { } while (0)
++#endif
++
++#define remove_parent(p) list_del_init(&(p)->sibling)
++#define add_parent(p) list_add_tail(&(p)->sibling,&(p)->parent->children)
++
++#define next_task(p) list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)
++
++#define for_each_process(p) \
++ for (p = &init_task ; (p = next_task(p)) != &init_task ; )
++
++/*
++ * Careful: do_each_thread/while_each_thread is a double loop so
++ * 'break' will not work as expected - use goto instead.
++ */
++#define do_each_thread(g, t) \
++ for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
++
++#define while_each_thread(g, t) \
++ while ((t = next_thread(t)) != g)
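++
++/* Usage sketch (note the matching macro pair):
++ *	do_each_thread(g, t) {
++ *		... look at task t ...
++ *	} while_each_thread(g, t);
++ */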
++
++/* de_thread depends on thread_group_leader not being a pid based check */
++#define thread_group_leader(p) (p == p->group_leader)
++
++/* Due to the insanities of de_thread it is possible for a process
++ * to have the pid of the thread group leader without actually being
++ * the thread group leader. For iteration through the pids in proc
++ * all we care about is that we have a task with the appropriate
++ * pid; we don't actually care whether we have the right task.
++ */
++static inline int has_group_leader_pid(struct task_struct *p)
++{
++ return p->pid == p->tgid;
++}
++
++static inline struct task_struct *next_thread(const struct task_struct *p)
++{
++ return list_entry(rcu_dereference(p->thread_group.next),
++ struct task_struct, thread_group);
++}
++
++static inline int thread_group_empty(struct task_struct *p)
++{
++ return list_empty(&p->thread_group);
++}
++
++#define delay_group_leader(p) \
++ (thread_group_leader(p) && !thread_group_empty(p))
++
++/*
++ * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
++ * subscriptions and synchronises with wait4(). Also used in procfs. Also
++ * pins the final release of task.io_context. Also protects ->cpuset.
++ *
++ * Nests both inside and outside of read_lock(&tasklist_lock).
++ * It must not be nested with write_lock_irq(&tasklist_lock),
++ * neither inside nor outside.
++ */
++static inline void task_lock(struct task_struct *p)
++{
++ spin_lock(&p->alloc_lock);
++}
++
++static inline void task_unlock(struct task_struct *p)
++{
++ spin_unlock(&p->alloc_lock);
++}
++
++extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
++ unsigned long *flags);
++
++static inline void unlock_task_sighand(struct task_struct *tsk,
++ unsigned long *flags)
++{
++ spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
++}
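++
++/* Pairing sketch: lock_task_sighand() returns NULL if the sighand is already
++ * gone, so callers do:
++ *	sighand = lock_task_sighand(tsk, &flags);
++ *	if (sighand) { ...; unlock_task_sighand(tsk, &flags); }
++ */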
++
++#ifndef __HAVE_THREAD_FUNCTIONS
++
++#define task_thread_info(task) ((struct thread_info *)(task)->stack)
++#define task_stack_page(task) ((task)->stack)
++
++static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
++{
++ *task_thread_info(p) = *task_thread_info(org);
++ task_thread_info(p)->task = p;
++}
++
++static inline unsigned long *end_of_stack(struct task_struct *p)
++{
++ return (unsigned long *)(task_thread_info(p) + 1);
++}
++
++#endif
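++
++/* Layout note (stacks grow down on i386): thread_info sits at the lowest
++ * addresses of the stack area, so end_of_stack() returns the deepest
++ * address the kernel stack may legally reach. */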
++
++/* set thread flags in other task's structures
++ * - see asm/thread_info.h for TIF_xxxx flags available
++ */
++static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
++{
++ set_ti_thread_flag(task_thread_info(tsk), flag);
++}
++
++static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
++{
++ clear_ti_thread_flag(task_thread_info(tsk), flag);
++}
++
++static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
++{
++ return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
++}
++
++static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
++{
++ return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
++}
++
++static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
++{
++ return test_ti_thread_flag(task_thread_info(tsk), flag);
++}
++
++static inline void set_tsk_need_resched(struct task_struct *tsk)
++{
++ set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
++}
++
++static inline void clear_tsk_need_resched(struct task_struct *tsk)
++{
++ clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
++}
++
++static inline int signal_pending(struct task_struct *p)
++{
++ return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
++}
++
++static inline int need_resched(void)
++{
++ return unlikely(test_thread_flag(TIF_NEED_RESCHED));
++}
++
++/*
++ * cond_resched() and cond_resched_lock(): latency reduction via
++ * explicit rescheduling in places that are safe. The return
++ * value indicates whether a reschedule was in fact done.
++ * cond_resched_lock() will drop the spinlock before scheduling,
++ * cond_resched_softirq() will enable bhs before scheduling.
++ */
++extern int cond_resched(void);
++extern int cond_resched_lock(spinlock_t * lock);
++extern int cond_resched_softirq(void);
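++
++/* Usage sketch: long-running kernel loops call cond_resched() once per
++ * iteration so that, on non-preemptible kernels, other runnable tasks
++ * still get the CPU. */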
++
++/*
++ * Does a critical section need to be broken due to another
++ * task waiting?:
++ */
++#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
++# define need_lockbreak(lock) ((lock)->break_lock)
++#else
++# define need_lockbreak(lock) 0
++#endif
++
++/*
++ * Does a critical section need to be broken due to another
++ * task waiting or preemption being signalled:
++ */
++static inline int lock_need_resched(spinlock_t *lock)
++{
++ if (need_lockbreak(lock) || need_resched())
++ return 1;
++ return 0;
++}
++
++/*
++ * Reevaluate whether the task has signals pending delivery.
++ * Wake the task if so.
++ * This is required every time the blocked sigset_t changes.
++ * Callers must hold sighand->siglock.
++ */
++extern void recalc_sigpending_and_wake(struct task_struct *t);
++extern void recalc_sigpending(void);
++
++extern void signal_wake_up(struct task_struct *t, int resume_stopped);
++
++/*
++ * Wrappers for p->thread_info->cpu access. No-op on UP.
++ */
++#ifdef CONFIG_SMP
++
++static inline unsigned int task_cpu(const struct task_struct *p)
++{
++ return task_thread_info(p)->cpu;
++}
++
++static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
++{
++ task_thread_info(p)->cpu = cpu;
++}
++
++#else
++
++static inline unsigned int task_cpu(const struct task_struct *p)
++{
++ return 0;
++}
++
++static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
++{
++}
++
++#endif /* CONFIG_SMP */
++
++#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
++extern void arch_pick_mmap_layout(struct mm_struct *mm);
++#else
++static inline void arch_pick_mmap_layout(struct mm_struct *mm)
++{
++ mm->mmap_base = TASK_UNMAPPED_BASE;
++ mm->get_unmapped_area = arch_get_unmapped_area;
++ mm->unmap_area = arch_unmap_area;
++}
++#endif
++
++extern long sched_setaffinity(pid_t pid, cpumask_t new_mask);
++extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
++
++extern int sched_mc_power_savings, sched_smt_power_savings;
++
++extern void normalize_rt_tasks(void);
++
++#ifdef CONFIG_TASK_XACCT
++static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
++{
++ tsk->rchar += amt;
++}
++
++static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
++{
++ tsk->wchar += amt;
++}
++
++static inline void inc_syscr(struct task_struct *tsk)
++{
++ tsk->syscr++;
++}
++
++static inline void inc_syscw(struct task_struct *tsk)
++{
++ tsk->syscw++;
++}
++#else
++static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
++{
++}
++
++static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
++{
++}
++
++static inline void inc_syscr(struct task_struct *tsk)
++{
++}
++
++static inline void inc_syscw(struct task_struct *tsk)
++{
++}
++#endif
++
++#endif /* __KERNEL__ */
++
++#endif
+diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/kernel/mutex.c linux-2.6.22-590/kernel/mutex.c
+--- linux-2.6.22-580/kernel/mutex.c 2007-07-08 19:32:17.000000000 -0400
++++ linux-2.6.22-590/kernel/mutex.c 2009-02-18 09:57:23.000000000 -0500
+@@ -18,6 +18,17 @@
+ #include <linux/spinlock.h>
+ #include <linux/interrupt.h>
+ #include <linux/debug_locks.h>
++#include <linux/arrays.h>
++
++#undef CONFIG_CHOPSTIX
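++/* NOTE: the #undef above compiles the Chopstix mutex hooks below out of
++ * this file; remove it to re-enable them. */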
++#ifdef CONFIG_CHOPSTIX
++struct event_spec {
++ unsigned long pc;
++ unsigned long dcookie;
++ unsigned count;
++ unsigned char reason;
++};
++#endif
+
+ /*
+ * In the DEBUG case we are using the "NULL fastpath" for mutexes,
+@@ -43,6 +54,9 @@
+ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
+ {
+ atomic_set(&lock->count, 1);
++#ifdef CONFIG_CHOPSTIX
++ lock->owner=NULL;
++#endif
+ spin_lock_init(&lock->wait_lock);
+ INIT_LIST_HEAD(&lock->wait_list);
+
+@@ -88,6 +102,7 @@
+ * The locking fastpath is the 1->0 transition from
+ * 'unlocked' into 'locked' state.
+ */
++
+ __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
+ }
+
+@@ -168,6 +183,27 @@
+ }
+ __set_task_state(task, state);
+
++#ifdef CONFIG_CHOPSTIX
++ if (rec_event) {
++ if (lock->owner) {
++ struct event event;
++ struct event_spec espec;
++ struct task_struct *p = lock->owner->task;
++ /*spin_lock(&p->alloc_lock);*/
++ espec.reason = 0; /* lock */
++ event.event_data = &espec;
++ event.task = p;
++ espec.pc = (unsigned long)lock;
++ event.event_type = 5;
++ (*rec_event)(&event, 1);
++ /*spin_unlock(&p->alloc_lock);*/
++
++ }
++ else
++ BUG();
++ }
++#endif
++
+ /* didn't get the lock, go to sleep: */
+ spin_unlock_mutex(&lock->wait_lock, flags);
+ schedule();
+@@ -177,6 +213,9 @@
+ /* got the lock - rejoice! */
+ mutex_remove_waiter(lock, &waiter, task_thread_info(task));
+ debug_mutex_set_owner(lock, task_thread_info(task));
++#ifdef CONFIG_CHOPSTIX
++ lock->owner = task_thread_info(task);
++#endif
+
+ /* set it to 0 if there are no waiters left: */
+ if (likely(list_empty(&lock->wait_list)))
+@@ -202,6 +241,7 @@
+ mutex_lock_nested(struct mutex *lock, unsigned int subclass)
+ {
+ might_sleep();
++
+ __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
+ }
+
+@@ -211,6 +251,7 @@
+ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
+ {
+ might_sleep();
++
+ return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass);
+ }
+
+@@ -246,6 +287,23 @@
+
+ debug_mutex_wake_waiter(lock, waiter);
+
++#ifdef CONFIG_CHOPSTIX
++ if (rec_event) {
++ if (lock->owner) {
++ struct event event;
++ struct event_spec espec;
++
++ espec.reason = 1; /* unlock */
++ event.event_data = &espec;
++ event.task = lock->owner->task;
++ espec.pc = (unsigned long)lock;
++ event.event_type = 5;
++ (*rec_event)(&event, 1);
++ }
++ else
++ BUG();
++ }
++#endif
+ wake_up_process(waiter->task);
+ }
+
+diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/kernel/sched.c linux-2.6.22-590/kernel/sched.c
+--- linux-2.6.22-580/kernel/sched.c 2009-02-18 09:56:02.000000000 -0500
++++ linux-2.6.22-590/kernel/sched.c 2009-02-18 09:57:23.000000000 -0500
+@@ -10,7 +10,7 @@
+ * 1998-11-19 Implemented schedule_timeout() and related stuff
+ * by Andrea Arcangeli
+ * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
+- * hybrid priority-list and round-robin design with
++ * hybrid priority-list and round-robin design with
+ * an array-switch method of distributing timeslices
+ * and per-CPU runqueues. Cleanups and useful suggestions
+ * by Davide Libenzi, preemptible kernel bits by Robert Love.
+@@ -23,6 +23,7 @@
+ #include <linux/nmi.h>
+ #include <linux/init.h>
+ #include <asm/uaccess.h>
++#include <linux/arrays.h>
+ #include <linux/highmem.h>
+ #include <linux/smp_lock.h>
+ #include <asm/mmu_context.h>
+@@ -59,6 +60,9 @@
+ #include <linux/vs_sched.h>
+ #include <linux/vs_cvirt.h>
+
+#define INTERRUPTIBLE (-1)
++#define RUNNING 0
++
+ /*
+ * Scheduler clock - returns current time in nanosec units.
+ * This is default implementation.
+@@ -431,6 +435,7 @@
+
+ repeat_lock_task:
+ rq = task_rq(p);
++
+ spin_lock(&rq->lock);
+ if (unlikely(rq != task_rq(p))) {
+ spin_unlock(&rq->lock);
+@@ -1741,6 +1746,21 @@
+ * event cannot wake it up and insert it on the runqueue either.
+ */
+ p->state = TASK_RUNNING;
++#ifdef CONFIG_CHOPSTIX
++ /* The jiffy of last interruption */
++ if (p->state & TASK_UNINTERRUPTIBLE)
++ p->last_interrupted = jiffies;
++ else if (p->state & TASK_INTERRUPTIBLE)
++ p->last_interrupted = INTERRUPTIBLE;
++ else
++ p->last_interrupted = RUNNING;
++
++ /* The jiffy of last execution */
++ p->last_ran_j = jiffies;
++#endif
+
+ /*
+ * Make sure we do not leak PI boosting priority to the child:
+@@ -3608,6 +3628,7 @@
+
+ #endif
+
++
+ static inline int interactive_sleep(enum sleep_type sleep_type)
+ {
+ return (sleep_type == SLEEP_INTERACTIVE ||
+@@ -3617,16 +3638,28 @@
+ /*
+ * schedule() is the main scheduler function.
+ */
++
++#ifdef CONFIG_CHOPSTIX
++extern void (*rec_event)(void *,unsigned int);
++struct event_spec {
++ unsigned long pc;
++ unsigned long dcookie;
++ unsigned count;
++ unsigned char reason;
++};
++#endif
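++
++/* Sketch of the contract assumed here: a loadable Chopstix module sets
++ * rec_event to its logger; struct event (declared in linux/arrays.h, which
++ * this patch includes) carries event_type, task and event_data, and the
++ * second argument of rec_event is the sample weight. */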
++
+ asmlinkage void __sched schedule(void)
+ {
+ struct task_struct *prev, *next;
+ struct prio_array *array;
+ struct list_head *queue;
+ unsigned long long now;
+- unsigned long run_time;
++ unsigned long run_time, diff;
+ int cpu, idx, new_prio;
+ long *switch_count;
+ struct rq *rq;
++ int sampling_reason;
+
+ /*
+ * Test if we are atomic. Since do_exit() needs to call into
+@@ -3680,6 +3713,7 @@
+ switch_count = &prev->nivcsw;
+ if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+ switch_count = &prev->nvcsw;
++
+ if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
+ unlikely(signal_pending(prev))))
+ prev->state = TASK_RUNNING;
+@@ -3689,6 +3723,17 @@
+ vx_uninterruptible_inc(prev);
+ }
+ deactivate_task(prev, rq);
++#ifdef CONFIG_CHOPSTIX
++ /* An uninterruptible process just yielded. Record the current jiffy. */
++ if (prev->state & TASK_UNINTERRUPTIBLE)
++ prev->last_interrupted = jiffies;
++ /* An interruptible process just yielded, or it was preempted.
++ * Mark it as interruptible. */
++ else if (prev->state & TASK_INTERRUPTIBLE)
++ prev->last_interrupted = INTERRUPTIBLE;
++#endif
+ }
+ }
+
+@@ -3765,6 +3810,40 @@
+ prev->sleep_avg = 0;
+ prev->timestamp = prev->last_ran = now;
+
++#ifdef CONFIG_CHOPSTIX
++ /* Run only if the Chopstix module has hooked rec_event */
++ if (rec_event) {
++ prev->last_ran_j = jiffies;
++ if (next->last_interrupted != INTERRUPTIBLE) {
++ if (next->last_interrupted != RUNNING) {
++ diff = jiffies - next->last_interrupted;
++ sampling_reason = 0; /* BLOCKING */
++ } else {
++ diff = jiffies - next->last_ran_j;
++ sampling_reason = 1; /* PREEMPTION */
++ }
++
++ if (diff >= HZ/10) {
++ struct event event;
++ struct event_spec espec;
++ unsigned long eip;
++
++ espec.reason = sampling_reason;
++ eip = next->thread.esp & 4095;
++ event.event_data = &espec;
++ event.task = next;
++ espec.pc = eip;
++ event.event_type = 2;
++ /* index in the event array currently set up */
++ /* make sure the counters are loaded in the order we want them to show up */
++ (*rec_event)(&event, diff);
++ }
++ }
++ /* next has been elected to run */
++ next->last_interrupted = RUNNING;
++ }
++#endif
+ sched_info_switch(prev, next);
+ if (likely(prev != next)) {
+ next->timestamp = next->last_ran = now;
+@@ -4664,6 +4743,7 @@
+ get_task_struct(p);
+ read_unlock(&tasklist_lock);
+
++
+ retval = -EPERM;
+ if ((current->euid != p->euid) && (current->euid != p->uid) &&
+ !capable(CAP_SYS_NICE))
+@@ -5032,6 +5112,7 @@
+ jiffies_to_timespec(p->policy == SCHED_FIFO ?
+ 0 : task_timeslice(p), &t);
+ read_unlock(&tasklist_lock);
++
+ retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
+ out_nounlock:
+ return retval;
+@@ -7275,3 +7356,14 @@
+ }
+
+ #endif
++
++#ifdef CONFIG_CHOPSTIX
++void (*rec_event)(void *,unsigned int) = NULL;
++
++/* To support safe calling from asm */
++asmlinkage void rec_event_asm(struct event *event_signature_in, unsigned int count)
++{
++ (*rec_event)(event_signature_in, count);
++}
++EXPORT_SYMBOL(rec_event);
++EXPORT_SYMBOL(in_sched_functions);
++#endif
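++
++/* Module-side usage sketch (hypothetical, not part of this patch):
++ *	static void my_logger(void *event, unsigned int count) { ... }
++ *	rec_event = my_logger;	 on load
++ *	rec_event = NULL;	 on unload
++ */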
+diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/kernel/sched.c.orig linux-2.6.22-590/kernel/sched.c.orig
+--- linux-2.6.22-580/kernel/sched.c.orig 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.22-590/kernel/sched.c.orig 2009-02-18 09:56:02.000000000 -0500
+@@ -0,0 +1,7277 @@
++/*
++ * kernel/sched.c
++ *
++ * Kernel scheduler and related syscalls
++ *
++ * Copyright (C) 1991-2002 Linus Torvalds
++ *
++ * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
++ * make semaphores SMP safe
++ * 1998-11-19 Implemented schedule_timeout() and related stuff
++ * by Andrea Arcangeli
++ * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
++ * hybrid priority-list and round-robin design with
++ * an array-switch method of distributing timeslices
++ * and per-CPU runqueues. Cleanups and useful suggestions
++ * by Davide Libenzi, preemptible kernel bits by Robert Love.
++ * 2003-09-03 Interactivity tuning by Con Kolivas.
++ * 2004-04-02 Scheduler domains code by Nick Piggin
++ */
++
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/nmi.h>
++#include <linux/init.h>
++#include <asm/uaccess.h>
++#include <linux/highmem.h>
++#include <linux/smp_lock.h>
++#include <asm/mmu_context.h>
++#include <linux/interrupt.h>
++#include <linux/capability.h>
++#include <linux/completion.h>
++#include <linux/kernel_stat.h>
++#include <linux/debug_locks.h>
++#include <linux/security.h>
++#include <linux/notifier.h>
++#include <linux/profile.h>
++#include <linux/freezer.h>
++#include <linux/vmalloc.h>
++#include <linux/blkdev.h>
++#include <linux/delay.h>
++#include <linux/smp.h>
++#include <linux/threads.h>
++#include <linux/timer.h>
++#include <linux/rcupdate.h>
++#include <linux/cpu.h>
++#include <linux/cpuset.h>
++#include <linux/percpu.h>
++#include <linux/kthread.h>
++#include <linux/seq_file.h>
++#include <linux/syscalls.h>
++#include <linux/times.h>
++#include <linux/tsacct_kern.h>
++#include <linux/kprobes.h>
++#include <linux/delayacct.h>
++#include <linux/reciprocal_div.h>
++
++#include <asm/tlb.h>
++#include <asm/unistd.h>
++#include <linux/vs_sched.h>
++#include <linux/vs_cvirt.h>
++
++/*
++ * Scheduler clock - returns current time in nanosec units.
++ * This is default implementation.
++ * Architectures and sub-architectures can override this.
++ */
++unsigned long long __attribute__((weak)) sched_clock(void)
++{
++ return (unsigned long long)jiffies * (1000000000 / HZ);
++}
++
++/*
++ * Convert user-nice values [ -20 ... 0 ... 19 ]
++ * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
++ * and back.
++ */
++#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
++#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
++#define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio)
++
++/*
++ * 'User priority' is the nice value converted to something we
++ * can work with better when scaling various scheduler parameters,
++ * it's a [ 0 ... 39 ] range.
++ */
++#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
++#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
++#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
++
++/*
++ * Some helpers for converting nanosecond timing to jiffy resolution
++ */
++#define NS_TO_JIFFIES(TIME) ((TIME) / (1000000000 / HZ))
++#define JIFFIES_TO_NS(TIME) ((TIME) * (1000000000 / HZ))
++
++/*
++ * These are the 'tuning knobs' of the scheduler:
++ *
++ * Minimum timeslice is 5 msecs (or 1 jiffy, whichever is larger),
++ * default timeslice is 100 msecs, maximum timeslice is 800 msecs.
++ * Timeslices get refilled after they expire.
++ */
++#define MIN_TIMESLICE max(5 * HZ / 1000, 1)
++#define DEF_TIMESLICE (100 * HZ / 1000)
++#define ON_RUNQUEUE_WEIGHT 30
++#define CHILD_PENALTY 95
++#define PARENT_PENALTY 100
++#define EXIT_WEIGHT 3
++#define PRIO_BONUS_RATIO 25
++#define MAX_BONUS (MAX_USER_PRIO * PRIO_BONUS_RATIO / 100)
++#define INTERACTIVE_DELTA 2
++#define MAX_SLEEP_AVG (DEF_TIMESLICE * MAX_BONUS)
++#define STARVATION_LIMIT (MAX_SLEEP_AVG)
++#define NS_MAX_SLEEP_AVG (JIFFIES_TO_NS(MAX_SLEEP_AVG))
++
++/*
++ * If a task is 'interactive' then we reinsert it in the active
++ * array after it has expired its current timeslice. (it will not
++ * continue to run immediately, it will still roundrobin with
++ * other interactive tasks.)
++ *
++ * This part scales the interactivity limit depending on niceness.
++ *
++ * We scale it linearly, offset by the INTERACTIVE_DELTA delta.
++ * Here are a few examples of different nice levels:
++ *
++ * TASK_INTERACTIVE(-20): [1,1,1,1,1,1,1,1,1,0,0]
++ * TASK_INTERACTIVE(-10): [1,1,1,1,1,1,1,0,0,0,0]
++ * TASK_INTERACTIVE( 0): [1,1,1,1,0,0,0,0,0,0,0]
++ * TASK_INTERACTIVE( 10): [1,1,0,0,0,0,0,0,0,0,0]
++ * TASK_INTERACTIVE( 19): [0,0,0,0,0,0,0,0,0,0,0]
++ *
++ * (the X axis represents the possible -5 ... 0 ... +5 dynamic
++ * priority range a task can explore, a value of '1' means the
++ * task is rated interactive.)
++ *
++ * I.e. nice +19 tasks can never get 'interactive' enough to be
++ * reinserted into the active array. And only heavily CPU-hog nice -20
++ * tasks will be expired. Default nice 0 tasks are somewhere between,
++ * it takes some effort for them to get interactive, but it's not
++ * too hard.
++ */
++
++#define CURRENT_BONUS(p) \
++ (NS_TO_JIFFIES((p)->sleep_avg) * MAX_BONUS / \
++ MAX_SLEEP_AVG)
++
++#define GRANULARITY (10 * HZ / 1000 ? : 1)
++
++#ifdef CONFIG_SMP
++#define TIMESLICE_GRANULARITY(p) (GRANULARITY * \
++ (1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)) * \
++ num_online_cpus())
++#else
++#define TIMESLICE_GRANULARITY(p) (GRANULARITY * \
++ (1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)))
++#endif
++
++#define SCALE(v1,v1_max,v2_max) \
++ (v1) * (v2_max) / (v1_max)
++
++#define DELTA(p) \
++ (SCALE(TASK_NICE(p) + 20, 40, MAX_BONUS) - 20 * MAX_BONUS / 40 + \
++ INTERACTIVE_DELTA)
++
++#define TASK_INTERACTIVE(p) \
++ ((p)->prio <= (p)->static_prio - DELTA(p))
++
++#define INTERACTIVE_SLEEP(p) \
++ (JIFFIES_TO_NS(MAX_SLEEP_AVG * \
++ (MAX_BONUS / 2 + DELTA((p)) + 1) / MAX_BONUS - 1))
++
++#define TASK_PREEMPTS_CURR(p, rq) \
++ ((p)->prio < (rq)->curr->prio)
++
++#define SCALE_PRIO(x, prio) \
++ max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)
++
++static unsigned int static_prio_timeslice(int static_prio)
++{
++ if (static_prio < NICE_TO_PRIO(0))
++ return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio);
++ else
++ return SCALE_PRIO(DEF_TIMESLICE, static_prio);
++}
++
++#ifdef CONFIG_SMP
++/*
++ * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
++ * Since cpu_power is a 'constant', we can use a reciprocal divide.
++ */
++static inline u32 sg_div_cpu_power(const struct sched_group *sg, u32 load)
++{
++ return reciprocal_divide(load, sg->reciprocal_cpu_power);
++}
++
++/*
++ * Each time a sched group cpu_power is changed,
++ * we must compute its reciprocal value
++ */
++static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val)
++{
++ sg->__cpu_power += val;
++ sg->reciprocal_cpu_power = reciprocal_value(sg->__cpu_power);
++}
++#endif
++
++/*
++ * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
++ * to time slice values: [800ms ... 100ms ... 5ms]
++ *
++ * The higher a thread's priority, the bigger timeslices
++ * it gets during one round of execution. But even the lowest
++ * priority thread gets MIN_TIMESLICE worth of execution time.
++ */
++
++static inline unsigned int task_timeslice(struct task_struct *p)
++{
++ return static_prio_timeslice(p->static_prio);
++}
++
++/*
++ * These are the runqueue data structures:
++ */
++
++struct prio_array {
++ unsigned int nr_active;
++ DECLARE_BITMAP(bitmap, MAX_PRIO+1); /* include 1 bit for delimiter */
++ struct list_head queue[MAX_PRIO];
++};
++
++/*
++ * This is the main, per-CPU runqueue data structure.
++ *
++ * Locking rule: in places that lock multiple runqueues (such as the
++ * load balancing or the thread migration code), the lock acquire
++ * operations must be ordered by ascending &runqueue.
++ */
++struct rq {
++ spinlock_t lock;
++
++ /*
++ * nr_running and cpu_load should be in the same cacheline because
++ * remote CPUs use both these fields when doing load calculation.
++ */
++ unsigned long nr_running;
++ unsigned long raw_weighted_load;
++#ifdef CONFIG_SMP
++ unsigned long cpu_load[3];
++ unsigned char idle_at_tick;
++#ifdef CONFIG_NO_HZ
++ unsigned char in_nohz_recently;
++#endif
++#endif
++ unsigned long long nr_switches;
++
++ /*
++ * This is part of a global counter where only the total sum
++ * over all CPUs matters. A task can increase this counter on
++ * one CPU and if it got migrated afterwards it may decrease
++ * it on another CPU. Always updated under the runqueue lock:
++ */
++ unsigned long nr_uninterruptible;
++
++ unsigned long expired_timestamp;
++ /* Cached timestamp set by update_cpu_clock() */
++ unsigned long long most_recent_timestamp;
++ struct task_struct *curr, *idle;
++ unsigned long next_balance;
++ struct mm_struct *prev_mm;
++ struct prio_array *active, *expired, arrays[2];
++ int best_expired_prio;
++ atomic_t nr_iowait;
++
++#ifdef CONFIG_SMP
++ struct sched_domain *sd;
++
++ /* For active balancing */
++ int active_balance;
++ int push_cpu;
++ int cpu; /* cpu of this runqueue */
++
++ struct task_struct *migration_thread;
++ struct list_head migration_queue;
++#endif
++ unsigned long norm_time;
++ unsigned long idle_time;
++#ifdef CONFIG_VSERVER_IDLETIME
++ int idle_skip;
++#endif
++#ifdef CONFIG_VSERVER_HARDCPU
++ struct list_head hold_queue;
++ unsigned long nr_onhold;
++ int idle_tokens;
++#endif
++
++#ifdef CONFIG_SCHEDSTATS
++ /* latency stats */
++ struct sched_info rq_sched_info;
++
++ /* sys_sched_yield() stats */
++ unsigned long yld_exp_empty;
++ unsigned long yld_act_empty;
++ unsigned long yld_both_empty;
++ unsigned long yld_cnt;
++
++ /* schedule() stats */
++ unsigned long sched_switch;
++ unsigned long sched_cnt;
++ unsigned long sched_goidle;
++
++ /* try_to_wake_up() stats */
++ unsigned long ttwu_cnt;
++ unsigned long ttwu_local;
++#endif
++ struct lock_class_key rq_lock_key;
++};
++
++static DEFINE_PER_CPU(struct rq, runqueues) ____cacheline_aligned_in_smp;
++static DEFINE_MUTEX(sched_hotcpu_mutex);
++
++static inline int cpu_of(struct rq *rq)
++{
++#ifdef CONFIG_SMP
++ return rq->cpu;
++#else
++ return 0;
++#endif
++}
++
++/*
++ * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
++ * See detach_destroy_domains: synchronize_sched for details.
++ *
++ * The domain tree of any CPU may only be accessed from within
++ * preempt-disabled sections.
++ */
++#define for_each_domain(cpu, __sd) \
++ for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
++
++#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
++#define this_rq() (&__get_cpu_var(runqueues))
++#define task_rq(p) cpu_rq(task_cpu(p))
++#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
++
++#ifndef prepare_arch_switch
++# define prepare_arch_switch(next) do { } while (0)
++#endif
++#ifndef finish_arch_switch
++# define finish_arch_switch(prev) do { } while (0)
++#endif
++
++#ifndef __ARCH_WANT_UNLOCKED_CTXSW
++static inline int task_running(struct rq *rq, struct task_struct *p)
++{
++ return rq->curr == p;
++}
++
++static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
++{
++}
++
++static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
++{
++#ifdef CONFIG_DEBUG_SPINLOCK
++ /* this is a valid case when another task releases the spinlock */
++ rq->lock.owner = current;
++#endif
++ /*
++ * If we are tracking spinlock dependencies then we have to
++ * fix up the runqueue lock - which gets 'carried over' from
++ * prev into current:
++ */
++ spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
++
++ spin_unlock_irq(&rq->lock);
++}
++
++#else /* __ARCH_WANT_UNLOCKED_CTXSW */
++static inline int task_running(struct rq *rq, struct task_struct *p)
++{
++#ifdef CONFIG_SMP
++ return p->oncpu;
++#else
++ return rq->curr == p;
++#endif
++}
++
++static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
++{
++#ifdef CONFIG_SMP
++ /*
++ * We can optimise this out completely for !SMP, because the
++ * SMP rebalancing from interrupt is the only thing that cares
++ * here.
++ */
++ next->oncpu = 1;
++#endif
++#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
++ spin_unlock_irq(&rq->lock);
++#else
++ spin_unlock(&rq->lock);
++#endif
++}
++
++static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
++{
++#ifdef CONFIG_SMP
++ /*
++ * After ->oncpu is cleared, the task can be moved to a different CPU.
++ * We must ensure this doesn't happen until the switch is completely
++ * finished.
++ */
++ smp_wmb();
++ prev->oncpu = 0;
++#endif
++#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
++ local_irq_enable();
++#endif
++}
++#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
++
++/*
++ * __task_rq_lock - lock the runqueue a given task resides on.
++ * Must be called with interrupts disabled.
++ */
++static inline struct rq *__task_rq_lock(struct task_struct *p)
++ __acquires(rq->lock)
++{
++ struct rq *rq;
++
++repeat_lock_task:
++ rq = task_rq(p);
++ spin_lock(&rq->lock);
++ if (unlikely(rq != task_rq(p))) {
++ spin_unlock(&rq->lock);
++ goto repeat_lock_task;
++ }
++ return rq;
++}
++
++/*
++ * task_rq_lock - lock the runqueue a given task resides on and disable
++ * interrupts. Note the ordering: we can safely lookup the task_rq without
++ * explicitly disabling preemption.
++ */
++static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
++ __acquires(rq->lock)
++{
++ struct rq *rq;
++
++repeat_lock_task:
++ local_irq_save(*flags);
++ rq = task_rq(p);
++ spin_lock(&rq->lock);
++ if (unlikely(rq != task_rq(p))) {
++ spin_unlock_irqrestore(&rq->lock, *flags);
++ goto repeat_lock_task;
++ }
++ return rq;
++}
++
++static inline void __task_rq_unlock(struct rq *rq)
++ __releases(rq->lock)
++{
++ spin_unlock(&rq->lock);
++}
++
++static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
++ __releases(rq->lock)
++{
++ spin_unlock_irqrestore(&rq->lock, *flags);
++}
++
++#ifdef CONFIG_SCHEDSTATS
++/*
++ * bump this up when changing the output format or the meaning of an existing
++ * format, so that tools can adapt (or abort)
++ */
++#define SCHEDSTAT_VERSION 14
++
++static int show_schedstat(struct seq_file *seq, void *v)
++{
++ int cpu;
++
++ seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
++ seq_printf(seq, "timestamp %lu\n", jiffies);
++ for_each_online_cpu(cpu) {
++ struct rq *rq = cpu_rq(cpu);
++#ifdef CONFIG_SMP
++ struct sched_domain *sd;
++ int dcnt = 0;
++#endif
++
++ /* runqueue-specific stats */
++ seq_printf(seq,
++ "cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu",
++ cpu, rq->yld_both_empty,
++ rq->yld_act_empty, rq->yld_exp_empty, rq->yld_cnt,
++ rq->sched_switch, rq->sched_cnt, rq->sched_goidle,
++ rq->ttwu_cnt, rq->ttwu_local,
++ rq->rq_sched_info.cpu_time,
++ rq->rq_sched_info.run_delay, rq->rq_sched_info.pcnt);
++
++ seq_printf(seq, "\n");
++
++#ifdef CONFIG_SMP
++ /* domain-specific stats */
++ preempt_disable();
++ for_each_domain(cpu, sd) {
++ enum idle_type itype;
++ char mask_str[NR_CPUS];
++
++ cpumask_scnprintf(mask_str, NR_CPUS, sd->span);
++ seq_printf(seq, "domain%d %s", dcnt++, mask_str);
++ for (itype = SCHED_IDLE; itype < MAX_IDLE_TYPES;
++ itype++) {
++ seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu "
++ "%lu",
++ sd->lb_cnt[itype],
++ sd->lb_balanced[itype],
++ sd->lb_failed[itype],
++ sd->lb_imbalance[itype],
++ sd->lb_gained[itype],
++ sd->lb_hot_gained[itype],
++ sd->lb_nobusyq[itype],
++ sd->lb_nobusyg[itype]);
++ }
++ seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu"
++ " %lu %lu %lu\n",
++ sd->alb_cnt, sd->alb_failed, sd->alb_pushed,
++ sd->sbe_cnt, sd->sbe_balanced, sd->sbe_pushed,
++ sd->sbf_cnt, sd->sbf_balanced, sd->sbf_pushed,
++ sd->ttwu_wake_remote, sd->ttwu_move_affine,
++ sd->ttwu_move_balance);
++ }
++ preempt_enable();
++#endif
++ }
++ return 0;
++}
++
++static int schedstat_open(struct inode *inode, struct file *file)
++{
++ unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
++ char *buf = kmalloc(size, GFP_KERNEL);
++ struct seq_file *m;
++ int res;
++
++ if (!buf)
++ return -ENOMEM;
++ res = single_open(file, show_schedstat, NULL);
++ if (!res) {
++ m = file->private_data;
++ m->buf = buf;
++ m->size = size;
++ } else
++ kfree(buf);
++ return res;
++}
++
++const struct file_operations proc_schedstat_operations = {
++ .open = schedstat_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++/*
++ * Expects runqueue lock to be held for atomicity of update
++ */
++static inline void
++rq_sched_info_arrive(struct rq *rq, unsigned long delta_jiffies)
++{
++ if (rq) {
++ rq->rq_sched_info.run_delay += delta_jiffies;
++ rq->rq_sched_info.pcnt++;
++ }
++}
++
++/*
++ * Expects runqueue lock to be held for atomicity of update
++ */
++static inline void
++rq_sched_info_depart(struct rq *rq, unsigned long delta_jiffies)
++{
++ if (rq)
++ rq->rq_sched_info.cpu_time += delta_jiffies;
++}
++# define schedstat_inc(rq, field) do { (rq)->field++; } while (0)
++# define schedstat_add(rq, field, amt) do { (rq)->field += (amt); } while (0)
++#else /* !CONFIG_SCHEDSTATS */
++static inline void
++rq_sched_info_arrive(struct rq *rq, unsigned long delta_jiffies)
++{}
++static inline void
++rq_sched_info_depart(struct rq *rq, unsigned long delta_jiffies)
++{}
++# define schedstat_inc(rq, field) do { } while (0)
++# define schedstat_add(rq, field, amt) do { } while (0)
++#endif
++
++/*
++ * this_rq_lock - lock this runqueue and disable interrupts.
++ */
++static inline struct rq *this_rq_lock(void)
++ __acquires(rq->lock)
++{
++ struct rq *rq;
++
++ local_irq_disable();
++ rq = this_rq();
++ spin_lock(&rq->lock);
++
++ return rq;
++}
++
++#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
++/*
++ * Called when a process is dequeued from the active array and given
++ * the cpu. We should note that with the exception of interactive
++ * tasks, the expired queue will become the active queue after the active
++ * queue is empty, without explicitly dequeuing and requeuing tasks in the
++ * expired queue. (Interactive tasks may be requeued directly to the
++ * active queue, thus delaying tasks in the expired queue from running;
++ * see scheduler_tick()).
++ *
++ * This function is only called from sched_info_arrive(), rather than
++ * dequeue_task(). Even though a task may be queued and dequeued multiple
++ * times as it is shuffled about, we're really interested in knowing how
++ * long it was from the *first* time it was queued to the time that it
++ * finally hit a cpu.
++ */
++static inline void sched_info_dequeued(struct task_struct *t)
++{
++ t->sched_info.last_queued = 0;
++}
++
++/*
++ * Called when a task finally hits the cpu. We can now calculate how
++ * long it was waiting to run. We also note when it began so that we
++ * can keep stats on how long its timeslice is.
++ */
++static void sched_info_arrive(struct task_struct *t)
++{
++ unsigned long now = jiffies, delta_jiffies = 0;
++
++ if (t->sched_info.last_queued)
++ delta_jiffies = now - t->sched_info.last_queued;
++ sched_info_dequeued(t);
++ t->sched_info.run_delay += delta_jiffies;
++ t->sched_info.last_arrival = now;
++ t->sched_info.pcnt++;
++
++ rq_sched_info_arrive(task_rq(t), delta_jiffies);
++}
++
++/*
++ * Called when a process is queued into either the active or expired
++ * array. The time is noted and later used to determine how long the
++ * task had to wait to reach the cpu. Since the expired queue will
++ * become the active queue after active queue is empty, without dequeuing
++ * and requeuing any tasks, we are interested in queuing to either. It
++ * is unusual but not impossible for tasks to be dequeued and immediately
++ * requeued in the same or another array: this can happen in sched_yield(),
++ * set_user_nice(), and even load_balance() as it moves tasks from runqueue
++ * to runqueue.
++ *
++ * This function is only called from enqueue_task(), but also only updates
++ * the timestamp if it is not already set. It's assumed that
++ * sched_info_dequeued() will clear that stamp when appropriate.
++ */
++static inline void sched_info_queued(struct task_struct *t)
++{
++ if (unlikely(sched_info_on()))
++ if (!t->sched_info.last_queued)
++ t->sched_info.last_queued = jiffies;
++}
++
++/*
++ * Called when a process ceases being the active-running process, either
++ * voluntarily or involuntarily. Now we can calculate how long we ran.
++ */
++static inline void sched_info_depart(struct task_struct *t)
++{
++ unsigned long delta_jiffies = jiffies - t->sched_info.last_arrival;
++
++ t->sched_info.cpu_time += delta_jiffies;
++ rq_sched_info_depart(task_rq(t), delta_jiffies);
++}
++
++/*
++ * Called when tasks are switched involuntarily due, typically, to expiring
++ * their time slice. (This may also be called when switching to or from
++ * the idle task.) We are only called when prev != next.
++ */
++static inline void
++__sched_info_switch(struct task_struct *prev, struct task_struct *next)
++{
++ struct rq *rq = task_rq(prev);
++
++ /*
++ * prev now departs the cpu. It's not interesting to record
++ * stats about how efficient we were at scheduling the idle
++ * process, however.
++ */
++ if (prev != rq->idle)
++ sched_info_depart(prev);
++
++ if (next != rq->idle)
++ sched_info_arrive(next);
++}
++static inline void
++sched_info_switch(struct task_struct *prev, struct task_struct *next)
++{
++ if (unlikely(sched_info_on()))
++ __sched_info_switch(prev, next);
++}
++#else
++#define sched_info_queued(t) do { } while (0)
++#define sched_info_switch(t, next) do { } while (0)
++#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
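++
++/*
++ * Worked example of the bookkeeping above (illustrative): a task
++ * queued at jiffy 1000 that first reaches a cpu at jiffy 1012 has
++ * sched_info_arrive() add 12 to run_delay and bump pcnt; if it
++ * departs at jiffy 1042, sched_info_depart() adds 30 to cpu_time.
++ * The per-rq counters move in step via rq_sched_info_arrive() and
++ * rq_sched_info_depart().
++ */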
++
++/*
++ * Adding/removing a task to/from a priority array:
++ */
++static void dequeue_task(struct task_struct *p, struct prio_array *array)
++{
++ BUG_ON(p->state & TASK_ONHOLD);
++ array->nr_active--;
++ list_del(&p->run_list);
++ if (list_empty(array->queue + p->prio))
++ __clear_bit(p->prio, array->bitmap);
++}
++
++static void enqueue_task(struct task_struct *p, struct prio_array *array)
++{
++ BUG_ON(p->state & TASK_ONHOLD);
++ sched_info_queued(p);
++ list_add_tail(&p->run_list, array->queue + p->prio);
++ __set_bit(p->prio, array->bitmap);
++ array->nr_active++;
++ p->array = array;
++}
++
++/*
++ * Put task to the end of the run list without the overhead of dequeue
++ * followed by enqueue.
++ */
++static void requeue_task(struct task_struct *p, struct prio_array *array)
++{
++ BUG_ON(p->state & TASK_ONHOLD);
++ list_move_tail(&p->run_list, array->queue + p->prio);
++}
++
++static inline void
++enqueue_task_head(struct task_struct *p, struct prio_array *array)
++{
++ BUG_ON(p->state & TASK_ONHOLD);
++ list_add(&p->run_list, array->queue + p->prio);
++ __set_bit(p->prio, array->bitmap);
++ array->nr_active++;
++ p->array = array;
++}
++
++/*
++ * __normal_prio - return the priority that is based on the static
++ * priority but is modified by bonuses/penalties.
++ *
++ * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
++ * into the -5 ... 0 ... +5 bonus/penalty range.
++ *
++ * We use 25% of the full 0...39 priority range so that:
++ *
++ * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
++ * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
++ *
++ * Both properties are important to certain workloads.
++ */
++
++static inline int __normal_prio(struct task_struct *p)
++{
++ int bonus, prio;
++
++ bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
++
++ prio = p->static_prio - bonus;
++
++ /* adjust effective priority */
++ prio = vx_adjust_prio(p, prio, MAX_USER_PRIO);
++
++ if (prio < MAX_RT_PRIO)
++ prio = MAX_RT_PRIO;
++ if (prio > MAX_PRIO-1)
++ prio = MAX_PRIO-1;
++ return prio;
++}
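++
++/*
++ * Illustrative numbers (assuming the stock constants, where MAX_BONUS
++ * is 10 and nice 0 maps to static_prio 120): a task with a full sleep
++ * average gets bonus +5 and prio 115, a pure cpu hog gets bonus -5
++ * and prio 125, before vx_adjust_prio() and the clamps above apply.
++ */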
++
++/*
++ * To aid in avoiding the subversion of "niceness" due to uneven distribution
++ * of tasks with abnormal "nice" values across CPUs the contribution that
++ * each task makes to its run queue's load is weighted according to its
++ * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
++ * scaled version of the new time slice allocation that they receive on time
++ * slice expiry etc.
++ */
++
++/*
++ * Assume: static_prio_timeslice(NICE_TO_PRIO(0)) == DEF_TIMESLICE
++ * If static_prio_timeslice() is ever changed to break this assumption then
++ * this code will need modification
++ */
++#define TIME_SLICE_NICE_ZERO DEF_TIMESLICE
++#define LOAD_WEIGHT(lp) \
++ (((lp) * SCHED_LOAD_SCALE) / TIME_SLICE_NICE_ZERO)
++#define PRIO_TO_LOAD_WEIGHT(prio) \
++ LOAD_WEIGHT(static_prio_timeslice(prio))
++#define RTPRIO_TO_LOAD_WEIGHT(rp) \
++ (PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp))
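++
++/*
++ * Worked example (illustrative, assuming the stock constants, where
++ * static_prio_timeslice(NICE_TO_PRIO(0)) == DEF_TIMESLICE): a nice-0
++ * task's weight is LOAD_WEIGHT(DEF_TIMESLICE) == SCHED_LOAD_SCALE;
++ * lower nice values get longer timeslices and thus proportionally
++ * larger weights, and RTPRIO_TO_LOAD_WEIGHT() starts RT tasks above
++ * the largest SCHED_NORMAL weight.
++ */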
++
++static void set_load_weight(struct task_struct *p)
++{
++ if (has_rt_policy(p)) {
++#ifdef CONFIG_SMP
++ if (p == task_rq(p)->migration_thread)
++ /*
++ * The migration thread does the actual balancing.
++ * Giving its load any weight will skew balancing
++ * adversely.
++ */
++ p->load_weight = 0;
++ else
++#endif
++ p->load_weight = RTPRIO_TO_LOAD_WEIGHT(p->rt_priority);
++ } else
++ p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio);
++}
++
++static inline void
++inc_raw_weighted_load(struct rq *rq, const struct task_struct *p)
++{
++ rq->raw_weighted_load += p->load_weight;
++}
++
++static inline void
++dec_raw_weighted_load(struct rq *rq, const struct task_struct *p)
++{
++ rq->raw_weighted_load -= p->load_weight;
++}
++
++static inline void inc_nr_running(struct task_struct *p, struct rq *rq)
++{
++ rq->nr_running++;
++ inc_raw_weighted_load(rq, p);
++}
++
++static inline void dec_nr_running(struct task_struct *p, struct rq *rq)
++{
++ rq->nr_running--;
++ dec_raw_weighted_load(rq, p);
++}
++
++/*
++ * Calculate the expected normal priority: i.e. priority
++ * without taking RT-inheritance into account. Might be
++ * boosted by interactivity modifiers. Changes upon fork,
++ * setprio syscalls, and whenever the interactivity
++ * estimator recalculates.
++ */
++static inline int normal_prio(struct task_struct *p)
++{
++ int prio;
++
++ if (has_rt_policy(p))
++ prio = MAX_RT_PRIO-1 - p->rt_priority;
++ else
++ prio = __normal_prio(p);
++ return prio;
++}
++
++/*
++ * Calculate the current priority, i.e. the priority
++ * taken into account by the scheduler. This value might
++ * be boosted by RT tasks, or might be boosted by
++ * interactivity modifiers. Will be RT if the task got
++ * RT-boosted. If not then it returns p->normal_prio.
++ */
++static int effective_prio(struct task_struct *p)
++{
++ p->normal_prio = normal_prio(p);
++ /*
++ * If we are RT tasks or we were boosted to RT priority,
++ * keep the priority unchanged. Otherwise, update priority
++ * to the normal priority:
++ */
++ if (!rt_prio(p->prio))
++ return p->normal_prio;
++ return p->prio;
++}
++
++#include "sched_mon.h"
++
++
++/*
++ * __activate_task - move a task to the runqueue.
++ */
++static void __activate_task(struct task_struct *p, struct rq *rq)
++{
++ struct prio_array *target = rq->active;
++
++ if (batch_task(p))
++ target = rq->expired;
++ vxm_activate_task(p, rq);
++ enqueue_task(p, target);
++ inc_nr_running(p, rq);
++}
++
++/*
++ * __activate_idle_task - move idle task to the _front_ of runqueue.
++ */
++static inline void __activate_idle_task(struct task_struct *p, struct rq *rq)
++{
++ vxm_activate_idle(p, rq);
++ enqueue_task_head(p, rq->active);
++ inc_nr_running(p, rq);
++}
++
++/*
++ * Recalculate p->normal_prio and p->prio after having slept,
++ * updating the sleep-average too:
++ */
++static int recalc_task_prio(struct task_struct *p, unsigned long long now)
++{
++ /* Caller must always ensure 'now >= p->timestamp' */
++ unsigned long sleep_time = now - p->timestamp;
++
++ if (batch_task(p))
++ sleep_time = 0;
++
++ if (likely(sleep_time > 0)) {
++ /*
++ * This ceiling is set to the lowest priority that would allow
++ * a task to be reinserted into the active array on timeslice
++ * completion.
++ */
++ unsigned long ceiling = INTERACTIVE_SLEEP(p);
++
++ if (p->mm && sleep_time > ceiling && p->sleep_avg < ceiling) {
++ /*
++ * Prevents user tasks from achieving best priority
++ * with a single large enough sleep.
++ */
++ p->sleep_avg = ceiling;
++ /*
++ * Using INTERACTIVE_SLEEP() as a ceiling places a
++ * nice(0) task 1ms sleep away from promotion, and
++ * gives it 700ms to round-robin with no chance of
++ * being demoted. This is more than generous, so
++ * mark this sleep as non-interactive to prevent the
++ * on-runqueue bonus logic from intervening should
++ * this task not receive cpu immediately.
++ */
++ p->sleep_type = SLEEP_NONINTERACTIVE;
++ } else {
++ /*
++ * Tasks waking from uninterruptible sleep are
++ * limited in their sleep_avg rise as they
++ * are likely to be waiting on I/O
++ */
++ if (p->sleep_type == SLEEP_NONINTERACTIVE && p->mm) {
++ if (p->sleep_avg >= ceiling)
++ sleep_time = 0;
++ else if (p->sleep_avg + sleep_time >=
++ ceiling) {
++ p->sleep_avg = ceiling;
++ sleep_time = 0;
++ }
++ }
++
++ /*
++ * This code gives a bonus to interactive tasks.
++ *
++ * The boost works by updating the 'average sleep time'
++ * value here, based on ->timestamp. The more time a
++ * task spends sleeping, the higher the average gets -
++ * and the higher the priority boost gets as well.
++ */
++ p->sleep_avg += sleep_time;
++
++ }
++ if (p->sleep_avg > NS_MAX_SLEEP_AVG)
++ p->sleep_avg = NS_MAX_SLEEP_AVG;
++ }
++
++ return effective_prio(p);
++}
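++
++/*
++ * Example of the ceiling above (illustrative): a user task that
++ * sleeps for ten seconds is not credited with the full sleep; its
++ * sleep_avg is capped at INTERACTIVE_SLEEP(p) and the sleep is marked
++ * SLEEP_NONINTERACTIVE, so a single long sleep cannot buy the maximum
++ * interactive bonus in one step.
++ */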
++
++/*
++ * activate_task - move a task to the runqueue and do priority recalculation
++ *
++ * Update all the scheduling statistics stuff. (sleep average
++ * calculation, priority modifiers, etc.)
++ */
++static void activate_task(struct task_struct *p, struct rq *rq, int local)
++{
++ unsigned long long now;
++
++ if (rt_task(p))
++ goto out;
++
++ now = sched_clock();
++#ifdef CONFIG_SMP
++ if (!local) {
++ /* Compensate for drifting sched_clock */
++ struct rq *this_rq = this_rq();
++ now = (now - this_rq->most_recent_timestamp)
++ + rq->most_recent_timestamp;
++ }
++#endif
++
++ /*
++ * Sleep time is in units of nanosecs, so shift by 20 to get a
++ * milliseconds-range estimation of the amount of time that the task
++ * spent sleeping:
++ */
++ if (unlikely(prof_on == SLEEP_PROFILING)) {
++ if (p->state == TASK_UNINTERRUPTIBLE)
++ profile_hits(SLEEP_PROFILING, (void *)get_wchan(p),
++ (now - p->timestamp) >> 20);
++ }
++
++ p->prio = recalc_task_prio(p, now);
++
++ /*
++ * This checks to make sure it's not an uninterruptible task
++ * that is now waking up.
++ */
++ if (p->sleep_type == SLEEP_NORMAL) {
++ /*
++ * Tasks which were woken up by interrupts (i.e. hw events)
++ * are most likely of interactive nature. So we give them
++ * the credit of extending their sleep time to the period
++ * of time they spend on the runqueue, waiting for execution
++ * on a CPU, first time around:
++ */
++ if (in_interrupt())
++ p->sleep_type = SLEEP_INTERRUPTED;
++ else {
++ /*
++ * Normal first-time wakeups get a credit too for
++ * on-runqueue time, but it will be weighted down:
++ */
++ p->sleep_type = SLEEP_INTERACTIVE;
++ }
++ }
++ p->timestamp = now;
++out:
++ vx_activate_task(p);
++ __activate_task(p, rq);
++}
++
++/*
++ * __deactivate_task - remove a task from the runqueue.
++ */
++static void __deactivate_task(struct task_struct *p, struct rq *rq)
++{
++ dec_nr_running(p, rq);
++ dequeue_task(p, p->array);
++ vxm_deactivate_task(p, rq);
++ p->array = NULL;
++}
++
++static inline
++void deactivate_task(struct task_struct *p, struct rq *rq)
++{
++ vx_deactivate_task(p);
++ __deactivate_task(p, rq);
++}
++
++#include "sched_hard.h"
++
++/*
++ * resched_task - mark a task 'to be rescheduled now'.
++ *
++ * On UP this means the setting of the need_resched flag, on SMP it
++ * might also involve a cross-CPU call to trigger the scheduler on
++ * the target CPU.
++ */
++#ifdef CONFIG_SMP
++
++#ifndef tsk_is_polling
++#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
++#endif
++
++static void resched_task(struct task_struct *p)
++{
++ int cpu;
++
++ assert_spin_locked(&task_rq(p)->lock);
++
++ if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
++ return;
++
++ set_tsk_thread_flag(p, TIF_NEED_RESCHED);
++
++ cpu = task_cpu(p);
++ if (cpu == smp_processor_id())
++ return;
++
++ /* NEED_RESCHED must be visible before we test polling */
++ smp_mb();
++ if (!tsk_is_polling(p))
++ smp_send_reschedule(cpu);
++}
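++
++/*
++ * Ordering sketch for the barrier above (illustrative): a polling
++ * idle loop watches TIF_NEED_RESCHED instead of taking an IPI. The
++ * smp_mb() orders our NEED_RESCHED store before the polling test,
++ * pairing with the idle loop clearing TIF_POLLING_NRFLAG before it
++ * re-checks the flag, so either we observe !polling and send the IPI,
++ * or the idle cpu observes NEED_RESCHED; the wakeup cannot be lost.
++ */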
++
++static void resched_cpu(int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long flags;
++
++ if (!spin_trylock_irqsave(&rq->lock, flags))
++ return;
++ resched_task(cpu_curr(cpu));
++ spin_unlock_irqrestore(&rq->lock, flags);
++}
++#else
++static inline void resched_task(struct task_struct *p)
++{
++ assert_spin_locked(&task_rq(p)->lock);
++ set_tsk_need_resched(p);
++}
++#endif
++
++/**
++ * task_curr - is this task currently executing on a CPU?
++ * @p: the task in question.
++ */
++inline int task_curr(const struct task_struct *p)
++{
++ return cpu_curr(task_cpu(p)) == p;
++}
++
++/* Used instead of source_load when we know the type == 0 */
++unsigned long weighted_cpuload(const int cpu)
++{
++ return cpu_rq(cpu)->raw_weighted_load;
++}
++
++#ifdef CONFIG_SMP
++struct migration_req {
++ struct list_head list;
++
++ struct task_struct *task;
++ int dest_cpu;
++
++ struct completion done;
++};
++
++/*
++ * The task's runqueue lock must be held.
++ * Returns true if you have to wait for migration thread.
++ */
++static int
++migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
++{
++ struct rq *rq = task_rq(p);
++
++ vxm_migrate_task(p, rq, dest_cpu);
++ /*
++ * If the task is not on a runqueue (and not running), then
++ * it is sufficient to simply update the task's cpu field.
++ */
++ if (!p->array && !task_running(rq, p)) {
++ set_task_cpu(p, dest_cpu);
++ return 0;
++ }
++
++ init_completion(&req->done);
++ req->task = p;
++ req->dest_cpu = dest_cpu;
++ list_add(&req->list, &rq->migration_queue);
++
++ return 1;
++}
++
++/*
++ * wait_task_inactive - wait for a thread to unschedule.
++ *
++ * The caller must ensure that the task *will* unschedule sometime soon,
++ * else this function might spin for a *long* time. This function can't
++ * be called with interrupts off, or it may introduce deadlock with
++ * smp_call_function() if an IPI is sent by the same process we are
++ * waiting to become inactive.
++ */
++void wait_task_inactive(struct task_struct *p)
++{
++ unsigned long flags;
++ struct rq *rq;
++ struct prio_array *array;
++ int running;
++
++repeat:
++ /*
++ * We do the initial early heuristics without holding
++ * any task-queue locks at all. We'll only try to get
++ * the runqueue lock when things look like they will
++ * work out!
++ */
++ rq = task_rq(p);
++
++ /*
++ * If the task is actively running on another CPU
++ * still, just relax and busy-wait without holding
++ * any locks.
++ *
++ * NOTE! Since we don't hold any locks, it's not
++ * even certain that "rq" stays the right runqueue!
++ * But we don't care, since "task_running()" will
++ * return false if the runqueue has changed and p
++ * is actually now running somewhere else!
++ */
++ while (task_running(rq, p))
++ cpu_relax();
++
++ /*
++ * Ok, time to look more closely! We need the rq
++ * lock now, to be *sure*. If we're wrong, we'll
++ * just go back and repeat.
++ */
++ rq = task_rq_lock(p, &flags);
++ running = task_running(rq, p);
++ array = p->array;
++ task_rq_unlock(rq, &flags);
++
++ /*
++ * Was it really running after all now that we
++ * checked with the proper locks actually held?
++ *
++ * Oops. Go back and try again..
++ */
++ if (unlikely(running)) {
++ cpu_relax();
++ goto repeat;
++ }
++
++ /*
++ * It's not enough that it's not actively running,
++ * it must be off the runqueue _entirely_, and not
++ * preempted!
++ *
++ * So if it was still runnable (but just not actively
++ * running right now), it's preempted, and we should
++ * yield - it could be a while.
++ */
++ if (unlikely(array)) {
++ yield();
++ goto repeat;
++ }
++
++ /*
++ * Ahh, all good. It wasn't running, and it wasn't
++ * runnable, which means that it will never become
++ * running in the future either. We're all done!
++ */
++}
++
++/***
++ * kick_process - kick a running thread to enter/exit the kernel
++ * @p: the to-be-kicked thread
++ *
++ * Cause a process which is running on another CPU to enter
++ * kernel-mode, without any delay. (to get signals handled.)
++ *
++ * NOTE: this function doesn't have to take the runqueue lock,
++ * because all it wants to ensure is that the remote task enters
++ * the kernel. If the IPI races and the task has been migrated
++ * to another CPU then no harm is done and the purpose has been
++ * achieved as well.
++ */
++void kick_process(struct task_struct *p)
++{
++ int cpu;
++
++ preempt_disable();
++ cpu = task_cpu(p);
++ if ((cpu != smp_processor_id()) && task_curr(p))
++ smp_send_reschedule(cpu);
++ preempt_enable();
++}
++
++/*
++ * Return a low guess at the load of a migration-source cpu weighted
++ * according to the scheduling class and "nice" value.
++ *
++ * We want to under-estimate the load of migration sources, to
++ * balance conservatively.
++ */
++static inline unsigned long source_load(int cpu, int type)
++{
++ struct rq *rq = cpu_rq(cpu);
++
++ if (type == 0)
++ return rq->raw_weighted_load;
++
++ return min(rq->cpu_load[type-1], rq->raw_weighted_load);
++}
++
++/*
++ * Return a high guess at the load of a migration-target cpu weighted
++ * according to the scheduling class and "nice" value.
++ */
++static inline unsigned long target_load(int cpu, int type)
++{
++ struct rq *rq = cpu_rq(cpu);
++
++ if (type == 0)
++ return rq->raw_weighted_load;
++
++ return max(rq->cpu_load[type-1], rq->raw_weighted_load);
++}
++
++/*
++ * Return the average load per task on the cpu's run queue
++ */
++static inline unsigned long cpu_avg_load_per_task(int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long n = rq->nr_running;
++
++ return n ? rq->raw_weighted_load / n : SCHED_LOAD_SCALE;
++}
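++
++/*
++ * Example (illustrative, assuming a nice-0 weight of
++ * SCHED_LOAD_SCALE): three runnable nice-0 tasks give a
++ * raw_weighted_load of 3 * SCHED_LOAD_SCALE, so the per-task average
++ * is SCHED_LOAD_SCALE; an empty runqueue reports SCHED_LOAD_SCALE as
++ * a neutral default instead of dividing by zero.
++ */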
++
++/*
++ * find_idlest_group finds and returns the least busy CPU group within the
++ * domain.
++ */
++static struct sched_group *
++find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
++{
++ struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
++ unsigned long min_load = ULONG_MAX, this_load = 0;
++ int load_idx = sd->forkexec_idx;
++ int imbalance = 100 + (sd->imbalance_pct-100)/2;
++
++ do {
++ unsigned long load, avg_load;
++ int local_group;
++ int i;
++
++ /* Skip over this group if it has no CPUs allowed */
++ if (!cpus_intersects(group->cpumask, p->cpus_allowed))
++ goto nextgroup;
++
++ local_group = cpu_isset(this_cpu, group->cpumask);
++
++ /* Tally up the load of all CPUs in the group */
++ avg_load = 0;
++
++ for_each_cpu_mask(i, group->cpumask) {
++ /* Bias balancing toward cpus of our domain */
++ if (local_group)
++ load = source_load(i, load_idx);
++ else
++ load = target_load(i, load_idx);
++
++ avg_load += load;
++ }
++
++ /* Adjust by relative CPU power of the group */
++ avg_load = sg_div_cpu_power(group,
++ avg_load * SCHED_LOAD_SCALE);
++
++ if (local_group) {
++ this_load = avg_load;
++ this = group;
++ } else if (avg_load < min_load) {
++ min_load = avg_load;
++ idlest = group;
++ }
++nextgroup:
++ group = group->next;
++ } while (group != sd->groups);
++
++ if (!idlest || 100*this_load < imbalance*min_load)
++ return NULL;
++ return idlest;
++}
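++
++/*
++ * Worked example of the cutoff above (illustrative, using the common
++ * default of imbalance_pct == 125): imbalance becomes
++ * 100 + 25/2 = 112, so the idlest group is only returned when
++ * 100 * this_load >= 112 * min_load, i.e. when the local group is at
++ * least 12% more loaded than the best remote candidate.
++ */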
++
++/*
++ * find_idlest_cpu - find the idlest cpu among the cpus in group.
++ */
++static int
++find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
++{
++ cpumask_t tmp;
++ unsigned long load, min_load = ULONG_MAX;
++ int idlest = -1;
++ int i;
++
++ /* Traverse only the allowed CPUs */
++ cpus_and(tmp, group->cpumask, p->cpus_allowed);
++
++ for_each_cpu_mask(i, tmp) {
++ load = weighted_cpuload(i);
++
++ if (load < min_load || (load == min_load && i == this_cpu)) {
++ min_load = load;
++ idlest = i;
++ }
++ }
++
++ return idlest;
++}
++
++/*
++ * sched_balance_self: balance the current task (running on cpu) in domains
++ * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
++ * SD_BALANCE_EXEC.
++ *
++ * Balance, ie. select the least loaded group.
++ *
++ * Returns the target CPU number, or the same CPU if no balancing is needed.
++ *
++ * preempt must be disabled.
++ */
++static int sched_balance_self(int cpu, int flag)
++{
++ struct task_struct *t = current;
++ struct sched_domain *tmp, *sd = NULL;
++
++ for_each_domain(cpu, tmp) {
++ /*
++ * If power savings logic is enabled for a domain, stop there.
++ */
++ if (tmp->flags & SD_POWERSAVINGS_BALANCE)
++ break;
++ if (tmp->flags & flag)
++ sd = tmp;
++ }
++
++ while (sd) {
++ cpumask_t span;
++ struct sched_group *group;
++ int new_cpu, weight;
++
++ if (!(sd->flags & flag)) {
++ sd = sd->child;
++ continue;
++ }
++
++ span = sd->span;
++ group = find_idlest_group(sd, t, cpu);
++ if (!group) {
++ sd = sd->child;
++ continue;
++ }
++
++ new_cpu = find_idlest_cpu(group, t, cpu);
++ if (new_cpu == -1 || new_cpu == cpu) {
++ /* Now try balancing at a lower domain level of cpu */
++ sd = sd->child;
++ continue;
++ }
++
++ /* Now try balancing at a lower domain level of new_cpu */
++ cpu = new_cpu;
++ sd = NULL;
++ weight = cpus_weight(span);
++ for_each_domain(cpu, tmp) {
++ if (weight <= cpus_weight(tmp->span))
++ break;
++ if (tmp->flags & flag)
++ sd = tmp;
++ }
++ /* while loop will break here if sd == NULL */
++ }
++
++ return cpu;
++}
++
++#endif /* CONFIG_SMP */
++
++/*
++ * wake_idle() will wake a task on an idle cpu if task->cpu is
++ * not idle and an idle cpu is available. The span of cpus to
++ * search starts with cpus closest then further out as needed,
++ * so we always favor a closer, idle cpu.
++ *
++ * Returns the CPU we should wake onto.
++ */
++#if defined(ARCH_HAS_SCHED_WAKE_IDLE)
++static int wake_idle(int cpu, struct task_struct *p)
++{
++ cpumask_t tmp;
++ struct sched_domain *sd;
++ int i;
++
++ /*
++ * If it is idle, then it is the best cpu to run this task.
++ *
++ * This cpu is also the best if it already has more than one task.
++ * Siblings must also be busy (in most cases), as they didn't already
++ * pick up the extra load from this cpu, so we need not check the
++ * sibling runqueue info. This avoids the checks and the cache miss
++ * penalties associated with doing so.
++ */
++ if (idle_cpu(cpu) || cpu_rq(cpu)->nr_running > 1)
++ return cpu;
++
++ for_each_domain(cpu, sd) {
++ if (sd->flags & SD_WAKE_IDLE) {
++ cpus_and(tmp, sd->span, p->cpus_allowed);
++ for_each_cpu_mask(i, tmp) {
++ if (idle_cpu(i))
++ return i;
++ }
++ }
++ else
++ break;
++ }
++ return cpu;
++}
++#else
++static inline int wake_idle(int cpu, struct task_struct *p)
++{
++ return cpu;
++}
++#endif
++
++/***
++ * try_to_wake_up - wake up a thread
++ * @p: the to-be-woken-up thread
++ * @state: the mask of task states that can be woken
++ * @sync: do a synchronous wakeup?
++ *
++ * Put it on the run-queue if it's not already there. The "current"
++ * thread is always on the run-queue (except when the actual
++ * re-schedule is in progress), and as such you're allowed to do
++ * the simpler "current->state = TASK_RUNNING" to mark yourself
++ * runnable without the overhead of this.
++ *
++ * returns failure only if the task is already active.
++ */
++static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
++{
++ int cpu, this_cpu, success = 0;
++ unsigned long flags;
++ long old_state;
++ struct rq *rq;
++#ifdef CONFIG_SMP
++ struct sched_domain *sd, *this_sd = NULL;
++ unsigned long load, this_load;
++ int new_cpu;
++#endif
++
++ rq = task_rq_lock(p, &flags);
++ old_state = p->state;
++
++ /* we need to unhold suspended tasks */
++ if (old_state & TASK_ONHOLD) {
++ vx_unhold_task(p, rq);
++ old_state = p->state;
++ }
++ if (!(old_state & state))
++ goto out;
++
++ if (p->array)
++ goto out_running;
++
++ cpu = task_cpu(p);
++ this_cpu = smp_processor_id();
++
++#ifdef CONFIG_SMP
++ if (unlikely(task_running(rq, p)))
++ goto out_activate;
++
++ new_cpu = cpu;
++
++ schedstat_inc(rq, ttwu_cnt);
++ if (cpu == this_cpu) {
++ schedstat_inc(rq, ttwu_local);
++ goto out_set_cpu;
++ }
++
++ for_each_domain(this_cpu, sd) {
++ if (cpu_isset(cpu, sd->span)) {
++ schedstat_inc(sd, ttwu_wake_remote);
++ this_sd = sd;
++ break;
++ }
++ }
++
++ if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
++ goto out_set_cpu;
++
++ /*
++ * Check for affine wakeup and passive balancing possibilities.
++ */
++ if (this_sd) {
++ int idx = this_sd->wake_idx;
++ unsigned int imbalance;
++
++ imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;
++
++ load = source_load(cpu, idx);
++ this_load = target_load(this_cpu, idx);
++
++ new_cpu = this_cpu; /* Wake to this CPU if we can */
++
++ if (this_sd->flags & SD_WAKE_AFFINE) {
++ unsigned long tl = this_load;
++ unsigned long tl_per_task;
++
++ tl_per_task = cpu_avg_load_per_task(this_cpu);
++
++ /*
++ * If sync wakeup then subtract the (maximum possible)
++ * effect of the currently running task from the load
++ * of the current CPU:
++ */
++ if (sync)
++ tl -= current->load_weight;
++
++ if ((tl <= load &&
++ tl + target_load(cpu, idx) <= tl_per_task) ||
++ 100*(tl + p->load_weight) <= imbalance*load) {
++ /*
++ * This domain has SD_WAKE_AFFINE and
++ * p is cache cold in this domain, and
++ * there is no bad imbalance.
++ */
++ schedstat_inc(this_sd, ttwu_move_affine);
++ goto out_set_cpu;
++ }
++ }
++
++ /*
++ * Start passive balancing when half the imbalance_pct
++ * limit is reached.
++ */
++ if (this_sd->flags & SD_WAKE_BALANCE) {
++ if (imbalance*this_load <= 100*load) {
++ schedstat_inc(this_sd, ttwu_move_balance);
++ goto out_set_cpu;
++ }
++ }
++ }
++
++ new_cpu = cpu; /* Could not wake to this_cpu. Wake to cpu instead */
++out_set_cpu:
++ new_cpu = wake_idle(new_cpu, p);
++ if (new_cpu != cpu) {
++ set_task_cpu(p, new_cpu);
++ task_rq_unlock(rq, &flags);
++ /* might preempt at this point */
++ rq = task_rq_lock(p, &flags);
++ old_state = p->state;
++ if (!(old_state & state))
++ goto out;
++ if (p->array)
++ goto out_running;
++
++ this_cpu = smp_processor_id();
++ cpu = task_cpu(p);
++ }
++
++out_activate:
++#endif /* CONFIG_SMP */
++ if (old_state == TASK_UNINTERRUPTIBLE) {
++ rq->nr_uninterruptible--;
++ vx_uninterruptible_dec(p);
++ /*
++ * Tasks on involuntary sleep don't earn
++ * sleep_avg beyond just interactive state.
++ */
++ p->sleep_type = SLEEP_NONINTERACTIVE;
++ } else
++ /*
++ * Tasks that have marked their sleep as noninteractive get
++ * woken up with their sleep average not weighted in an
++ * interactive way.
++ */
++ if (old_state & TASK_NONINTERACTIVE)
++ p->sleep_type = SLEEP_NONINTERACTIVE;
++
++ activate_task(p, rq, cpu == this_cpu);
++ /*
++ * Sync wakeups (i.e. those types of wakeups where the waker
++ * has indicated that it will leave the CPU in short order)
++ * don't trigger a preemption, if the woken up task will run on
++ * this cpu. (in this case the 'I will reschedule' promise of
++ * the waker guarantees that the freshly woken up task is going
++ * to be considered on this CPU.)
++ */
++ if (!sync || cpu != this_cpu) {
++ if (TASK_PREEMPTS_CURR(p, rq))
++ resched_task(rq->curr);
++ }
++ success = 1;
++
++out_running:
++ p->state = TASK_RUNNING;
++out:
++ task_rq_unlock(rq, &flags);
++
++ return success;
++}
++
++int fastcall wake_up_process(struct task_struct *p)
++{
++ return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
++ TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
++}
++EXPORT_SYMBOL(wake_up_process);
++
++int fastcall wake_up_state(struct task_struct *p, unsigned int state)
++{
++ return try_to_wake_up(p, state, 0);
++}
++
++static void task_running_tick(struct rq *rq, struct task_struct *p, int cpu);
++/*
++ * Perform scheduler related setup for a newly forked process p.
++ * p is forked by current.
++ */
++void fastcall sched_fork(struct task_struct *p, int clone_flags)
++{
++ int cpu = get_cpu();
++
++#ifdef CONFIG_SMP
++ cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
++#endif
++ set_task_cpu(p, cpu);
++
++ /*
++ * We mark the process as running here, but have not actually
++ * inserted it onto the runqueue yet. This guarantees that
++ * nobody will actually run it, and a signal or other external
++ * event cannot wake it up and insert it on the runqueue either.
++ */
++ p->state = TASK_RUNNING;
++
++ /*
++ * Make sure we do not leak PI boosting priority to the child:
++ */
++ p->prio = current->normal_prio;
++
++ INIT_LIST_HEAD(&p->run_list);
++ p->array = NULL;
++#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
++ if (unlikely(sched_info_on()))
++ memset(&p->sched_info, 0, sizeof(p->sched_info));
++#endif
++#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
++ p->oncpu = 0;
++#endif
++#ifdef CONFIG_PREEMPT
++ /* Want to start with kernel preemption disabled. */
++ task_thread_info(p)->preempt_count = 1;
++#endif
++ /*
++ * Share the timeslice between parent and child, thus the
++ * total amount of pending timeslices in the system doesn't change,
++ * resulting in more scheduling fairness.
++ */
++ local_irq_disable();
++ p->time_slice = (current->time_slice + 1) >> 1;
++ /*
++ * The remainder of the first timeslice might be recovered by
++ * the parent if the child exits early enough.
++ */
++ p->first_time_slice = 1;
++ current->time_slice >>= 1;
++ p->timestamp = sched_clock();
++ if (unlikely(!current->time_slice)) {
++ /*
++ * This case is rare, it happens when the parent has only
++ * a single jiffy left from its timeslice. Taking the
++ * runqueue lock is not a problem.
++ */
++ current->time_slice = 1;
++ task_running_tick(cpu_rq(cpu), current, cpu);
++ }
++ local_irq_enable();
++ put_cpu();
++}
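++
++/*
++ * Split example (illustrative): a parent holding 7 ticks leaves
++ * sched_fork() with 7 >> 1 == 3 while the child starts with
++ * (7 + 1) >> 1 == 4; the total stays at 7, so forking cannot mint
++ * new timeslices. first_time_slice lets sched_exit() return the
++ * child's remainder to the parent if the child exits early.
++ */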
++
++/*
++ * wake_up_new_task - wake up a newly created task for the first time.
++ *
++ * This function will do some initial scheduler statistics housekeeping
++ * that must be done for every newly created context, then puts the task
++ * on the runqueue and wakes it.
++ */
++void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
++{
++ struct rq *rq, *this_rq;
++ unsigned long flags;
++ int this_cpu, cpu;
++
++ rq = task_rq_lock(p, &flags);
++ BUG_ON(p->state != TASK_RUNNING);
++ this_cpu = smp_processor_id();
++ cpu = task_cpu(p);
++
++ /*
++ * We decrease the sleep average of forking parents
++ * and children as well, to keep max-interactive tasks
++ * from forking tasks that are max-interactive. The parent
++ * (current) is done further down, under its lock.
++ */
++ p->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(p) *
++ CHILD_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
++
++ p->prio = effective_prio(p);
++
++ vx_activate_task(p);
++ if (likely(cpu == this_cpu)) {
++ if (!(clone_flags & CLONE_VM)) {
++ /*
++ * The VM isn't cloned, so we're in a good position to
++ * do child-runs-first in anticipation of an exec. This
++ * usually avoids a lot of COW overhead.
++ */
++ if (unlikely(!current->array))
++ __activate_task(p, rq);
++ else {
++ p->prio = current->prio;
++ BUG_ON(p->state & TASK_ONHOLD);
++ p->normal_prio = current->normal_prio;
++ list_add_tail(&p->run_list, &current->run_list);
++ p->array = current->array;
++ p->array->nr_active++;
++ inc_nr_running(p, rq);
++ }
++ set_need_resched();
++ } else
++ /* Run child last */
++ __activate_task(p, rq);
++ /*
++ * We skip the following code due to cpu == this_cpu
++ *
++ * task_rq_unlock(rq, &flags);
++ * this_rq = task_rq_lock(current, &flags);
++ */
++ this_rq = rq;
++ } else {
++ this_rq = cpu_rq(this_cpu);
++
++ /*
++ * Not the local CPU - must adjust timestamp. This should
++ * get optimised away in the !CONFIG_SMP case.
++ */
++ p->timestamp = (p->timestamp - this_rq->most_recent_timestamp)
++ + rq->most_recent_timestamp;
++ __activate_task(p, rq);
++ if (TASK_PREEMPTS_CURR(p, rq))
++ resched_task(rq->curr);
++
++ /*
++ * Parent and child are on different CPUs, now get the
++ * parent runqueue to update the parent's ->sleep_avg:
++ */
++ task_rq_unlock(rq, &flags);
++ this_rq = task_rq_lock(current, &flags);
++ }
++ current->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(current) *
++ PARENT_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
++ task_rq_unlock(this_rq, &flags);
++}
++
++/*
++ * Potentially available exiting-child timeslices are
++ * retrieved here - this way the parent does not get
++ * penalized for creating too many threads.
++ *
++ * (this cannot be used to 'generate' timeslices
++ * artificially, because any timeslice recovered here
++ * was given away by the parent in the first place.)
++ */
++void fastcall sched_exit(struct task_struct *p)
++{
++ unsigned long flags;
++ struct rq *rq;
++
++ /*
++ * If the child was a (relative-) CPU hog then decrease
++ * the sleep_avg of the parent as well.
++ */
++ rq = task_rq_lock(p->parent, &flags);
++ if (p->first_time_slice && task_cpu(p) == task_cpu(p->parent)) {
++ p->parent->time_slice += p->time_slice;
++ if (unlikely(p->parent->time_slice > task_timeslice(p)))
++ p->parent->time_slice = task_timeslice(p);
++ }
++ if (p->sleep_avg < p->parent->sleep_avg)
++ p->parent->sleep_avg = p->parent->sleep_avg /
++ (EXIT_WEIGHT + 1) * EXIT_WEIGHT + p->sleep_avg /
++ (EXIT_WEIGHT + 1);
++ task_rq_unlock(rq, &flags);
++}
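++
++/*
++ * Averaging example (illustrative, assuming EXIT_WEIGHT == 3): a
++ * parent with sleep_avg 800 reaping a cpu-hog child with sleep_avg
++ * 200 ends up with 800 / 4 * 3 + 200 / 4 == 650, pulling the parent's
++ * interactivity estimate toward the child's behaviour without
++ * discarding its own history.
++ */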
++
++/**
++ * prepare_task_switch - prepare to switch tasks
++ * @rq: the runqueue preparing to switch
++ * @next: the task we are going to switch to.
++ *
++ * This is called with the rq lock held and interrupts off. It must
++ * be paired with a subsequent finish_task_switch after the context
++ * switch.
++ *
++ * prepare_task_switch sets up locking and calls architecture specific
++ * hooks.
++ */
++static inline void prepare_task_switch(struct rq *rq, struct task_struct *next)
++{
++ prepare_lock_switch(rq, next);
++ prepare_arch_switch(next);
++}
++
++/**
++ * finish_task_switch - clean up after a task-switch
++ * @rq: runqueue associated with task-switch
++ * @prev: the thread we just switched away from.
++ *
++ * finish_task_switch must be called after the context switch, paired
++ * with a prepare_task_switch call before the context switch.
++ * finish_task_switch will reconcile locking set up by prepare_task_switch,
++ * and do any other architecture-specific cleanup actions.
++ *
++ * Note that we may have delayed dropping an mm in context_switch(). If
++ * so, we finish that here outside of the runqueue lock. (Doing it
++ * with the lock held can cause deadlocks; see schedule() for
++ * details.)
++ */
++static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
++ __releases(rq->lock)
++{
++ struct mm_struct *mm = rq->prev_mm;
++ long prev_state;
++
++ rq->prev_mm = NULL;
++
++ /*
++ * A task struct has one reference for the use as "current".
++ * If a task dies, then it sets TASK_DEAD in tsk->state and calls
++ * schedule one last time. The schedule call will never return, and
++ * the scheduled task must drop that reference.
++ * The test for TASK_DEAD must occur while the runqueue locks are
++ * still held, otherwise prev could be scheduled on another cpu, die
++ * there before we look at prev->state, and then the reference would
++ * be dropped twice.
++ * Manfred Spraul <manfred@colorfullife.com>
++ */
++ prev_state = prev->state;
++ finish_arch_switch(prev);
++ finish_lock_switch(rq, prev);
++ if (mm)
++ mmdrop(mm);
++ if (unlikely(prev_state == TASK_DEAD)) {
++ /*
++ * Remove function-return probe instances associated with this
++ * task and put them back on the free list.
++ */
++ kprobe_flush_task(prev);
++ put_task_struct(prev);
++ }
++}
++
++/**
++ * schedule_tail - first thing a freshly forked thread must call.
++ * @prev: the thread we just switched away from.
++ */
++asmlinkage void schedule_tail(struct task_struct *prev)
++ __releases(rq->lock)
++{
++ struct rq *rq = this_rq();
++
++ finish_task_switch(rq, prev);
++#ifdef __ARCH_WANT_UNLOCKED_CTXSW
++ /* In this case, finish_task_switch does not reenable preemption */
++ preempt_enable();
++#endif
++ if (current->set_child_tid)
++ put_user(current->pid, current->set_child_tid);
++}
++
++/*
++ * context_switch - switch to the new MM and the new
++ * thread's register state.
++ */
++static inline struct task_struct *
++context_switch(struct rq *rq, struct task_struct *prev,
++ struct task_struct *next)
++{
++ struct mm_struct *mm = next->mm;
++ struct mm_struct *oldmm = prev->active_mm;
++
++ /*
++ * For paravirt, this is coupled with an exit in switch_to to
++ * combine the page table reload and the switch backend into
++ * one hypercall.
++ */
++ arch_enter_lazy_cpu_mode();
++
++ if (!mm) {
++ next->active_mm = oldmm;
++ atomic_inc(&oldmm->mm_count);
++ enter_lazy_tlb(oldmm, next);
++ } else
++ switch_mm(oldmm, mm, next);
++
++ if (!prev->mm) {
++ prev->active_mm = NULL;
++ WARN_ON(rq->prev_mm);
++ rq->prev_mm = oldmm;
++ }
++ /*
++ * The runqueue lock will be released by the next
++ * task (which is an invalid locking op, but in the case
++ * of the scheduler it's an obvious special-case), so we
++ * do an early lockdep release here:
++ */
++#ifndef __ARCH_WANT_UNLOCKED_CTXSW
++ spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
++#endif
++
++ /* Here we just switch the register state and the stack. */
++ switch_to(prev, next, prev);
++
++ return prev;
++}
++
++/*
++ * nr_running, nr_uninterruptible and nr_context_switches:
++ *
++ * externally visible scheduler statistics: current number of runnable
++ * threads, current number of uninterruptible-sleeping threads, total
++ * number of context switches performed since bootup.
++ */
++unsigned long nr_running(void)
++{
++ unsigned long i, sum = 0;
++
++ for_each_online_cpu(i)
++ sum += cpu_rq(i)->nr_running;
++
++ return sum;
++}
++
++unsigned long nr_uninterruptible(void)
++{
++ unsigned long i, sum = 0;
++
++ for_each_possible_cpu(i)
++ sum += cpu_rq(i)->nr_uninterruptible;
++
++ /*
++ * Since we read the counters lockless, it might be slightly
++ * inaccurate. Do not allow it to go below zero though:
++ */
++ if (unlikely((long)sum < 0))
++ sum = 0;
++
++ return sum;
++}
++
++unsigned long long nr_context_switches(void)
++{
++ int i;
++ unsigned long long sum = 0;
++
++ for_each_possible_cpu(i)
++ sum += cpu_rq(i)->nr_switches;
++
++ return sum;
++}
++
++unsigned long nr_iowait(void)
++{
++ unsigned long i, sum = 0;
++
++ for_each_possible_cpu(i)
++ sum += atomic_read(&cpu_rq(i)->nr_iowait);
++
++ return sum;
++}
++
++unsigned long nr_active(void)
++{
++ unsigned long i, running = 0, uninterruptible = 0;
++
++ for_each_online_cpu(i) {
++ running += cpu_rq(i)->nr_running;
++ uninterruptible += cpu_rq(i)->nr_uninterruptible;
++ }
++
++ if (unlikely((long)uninterruptible < 0))
++ uninterruptible = 0;
++
++ return running + uninterruptible;
++}
++
++#ifdef CONFIG_SMP
++
++/*
++ * Is this task likely cache-hot:
++ */
++static inline int
++task_hot(struct task_struct *p, unsigned long long now, struct sched_domain *sd)
++{
++ return (long long)(now - p->last_ran) < (long long)sd->cache_hot_time;
++}
++
++/*
++ * double_rq_lock - safely lock two runqueues
++ *
++ * Note this does not disable interrupts like task_rq_lock,
++ * you need to do so manually before calling.
++ */
++static void double_rq_lock(struct rq *rq1, struct rq *rq2)
++ __acquires(rq1->lock)
++ __acquires(rq2->lock)
++{
++ BUG_ON(!irqs_disabled());
++ if (rq1 == rq2) {
++ spin_lock(&rq1->lock);
++ __acquire(rq2->lock); /* Fake it out ;) */
++ } else {
++ if (rq1 < rq2) {
++ spin_lock(&rq1->lock);
++ spin_lock(&rq2->lock);
++ } else {
++ spin_lock(&rq2->lock);
++ spin_lock(&rq1->lock);
++ }
++ }
++}
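++
++/*
++ * Ordering sketch (illustrative): if CPU0 balances rq A against rq B
++ * while CPU1 balances B against A, both sides take the lower-addressed
++ * lock first, so they serialize on one lock rather than each holding
++ * half of the other's pair (the classic ABBA deadlock).
++ */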
++
++/*
++ * double_rq_unlock - safely unlock two runqueues
++ *
++ * Note this does not restore interrupts like task_rq_unlock,
++ * you need to do so manually after calling.
++ */
++static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
++ __releases(rq1->lock)
++ __releases(rq2->lock)
++{
++ spin_unlock(&rq1->lock);
++ if (rq1 != rq2)
++ spin_unlock(&rq2->lock);
++ else
++ __release(rq2->lock);
++}
++
++/*
++ * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
++ */
++static void double_lock_balance(struct rq *this_rq, struct rq *busiest)
++ __releases(this_rq->lock)
++ __acquires(busiest->lock)
++ __acquires(this_rq->lock)
++{
++ if (unlikely(!irqs_disabled())) {
++ /* printk() doesn't work well under rq->lock */
++ spin_unlock(&this_rq->lock);
++ BUG_ON(1);
++ }
++ if (unlikely(!spin_trylock(&busiest->lock))) {
++ if (busiest < this_rq) {
++ spin_unlock(&this_rq->lock);
++ spin_lock(&busiest->lock);
++ spin_lock(&this_rq->lock);
++ } else
++ spin_lock(&busiest->lock);
++ }
++}
++
++/*
++ * If dest_cpu is allowed for this process, migrate the task to it.
++ * This is accomplished by forcing the cpu_allowed mask to only
++ * allow dest_cpu, which will force the task onto dest_cpu. Then
++ * the cpu_allowed mask is restored.
++ */
++static void sched_migrate_task(struct task_struct *p, int dest_cpu)
++{
++ struct migration_req req;
++ unsigned long flags;
++ struct rq *rq;
++
++ rq = task_rq_lock(p, &flags);
++ if (!cpu_isset(dest_cpu, p->cpus_allowed)
++ || unlikely(cpu_is_offline(dest_cpu)))
++ goto out;
++
++ /* force the process onto the specified CPU */
++ if (migrate_task(p, dest_cpu, &req)) {
++ /* Need to wait for migration thread (might exit: take ref). */
++ struct task_struct *mt = rq->migration_thread;
++
++ get_task_struct(mt);
++ task_rq_unlock(rq, &flags);
++ wake_up_process(mt);
++ put_task_struct(mt);
++ wait_for_completion(&req.done);
++
++ return;
++ }
++out:
++ task_rq_unlock(rq, &flags);
++}
++
++/*
++ * sched_exec - execve() is a valuable balancing opportunity, because at
++ * this point the task has the smallest effective memory and cache footprint.
++ */
++void sched_exec(void)
++{
++ int new_cpu, this_cpu = get_cpu();
++ new_cpu = sched_balance_self(this_cpu, SD_BALANCE_EXEC);
++ put_cpu();
++ if (new_cpu != this_cpu)
++ sched_migrate_task(current, new_cpu);
++}
++
++/*
++ * pull_task - move a task from a remote runqueue to the local runqueue.
++ * Both runqueues must be locked.
++ */
++static void pull_task(struct rq *src_rq, struct prio_array *src_array,
++ struct task_struct *p, struct rq *this_rq,
++ struct prio_array *this_array, int this_cpu)
++{
++ dequeue_task(p, src_array);
++ dec_nr_running(p, src_rq);
++ set_task_cpu(p, this_cpu);
++ inc_nr_running(p, this_rq);
++ enqueue_task(p, this_array);
++ p->timestamp = (p->timestamp - src_rq->most_recent_timestamp)
++ + this_rq->most_recent_timestamp;
++ /*
++ * Note that idle threads have a prio of MAX_PRIO, so this test
++ * will always be true for them.
++ */
++ if (TASK_PREEMPTS_CURR(p, this_rq))
++ resched_task(this_rq->curr);
++}
++
++/*
++ * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
++ */
++static
++int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
++ struct sched_domain *sd, enum idle_type idle,
++ int *all_pinned)
++{
++ /*
++ * We do not migrate tasks that are:
++ * 1) running (obviously), or
++ * 2) cannot be migrated to this CPU due to cpus_allowed, or
++ * 3) are cache-hot on their current CPU.
++ */
++ if (!cpu_isset(this_cpu, p->cpus_allowed))
++ return 0;
++ *all_pinned = 0;
++
++ if (task_running(rq, p))
++ return 0;
++
++ /*
++ * Aggressive migration if:
++ * 1) task is cache cold, or
++ * 2) too many balance attempts have failed.
++ */
++
++ if (sd->nr_balance_failed > sd->cache_nice_tries) {
++#ifdef CONFIG_SCHEDSTATS
++ if (task_hot(p, rq->most_recent_timestamp, sd))
++ schedstat_inc(sd, lb_hot_gained[idle]);
++#endif
++ return 1;
++ }
++
++ if (task_hot(p, rq->most_recent_timestamp, sd))
++ return 0;
++ return 1;
++}
++
++#define rq_best_prio(rq) min((rq)->curr->prio, (rq)->best_expired_prio)
++
++/*
++ * move_tasks tries to move up to max_nr_move tasks and max_load_move weighted
++ * load from busiest to this_rq, as part of a balancing operation within
++ * "domain". Returns the number of tasks moved.
++ *
++ * Called with both runqueues locked.
++ */
++static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
++ unsigned long max_nr_move, unsigned long max_load_move,
++ struct sched_domain *sd, enum idle_type idle,
++ int *all_pinned)
++{
++ int idx, pulled = 0, pinned = 0, this_best_prio, best_prio,
++ best_prio_seen, skip_for_load;
++ struct prio_array *array, *dst_array;
++ struct list_head *head, *curr;
++ struct task_struct *tmp;
++ long rem_load_move;
++
++ if (max_nr_move == 0 || max_load_move == 0)
++ goto out;
++
++ rem_load_move = max_load_move;
++ pinned = 1;
++ this_best_prio = rq_best_prio(this_rq);
++ best_prio = rq_best_prio(busiest);
++ /*
++ * Enable handling of the case where there is more than one task
++ * with the best priority. If the current running task is one
++ * of those with prio==best_prio we know it won't be moved
++ * and therefore it's safe to override the skip (based on load) of
++ * any task we find with that prio.
++ */
++ best_prio_seen = best_prio == busiest->curr->prio;
++
++ /*
++ * We first consider expired tasks. Those will likely not be
++ * executed in the near future, and they are most likely to
++ * be cache-cold, thus switching CPUs has the least effect
++ * on them.
++ */
++ if (busiest->expired->nr_active) {
++ array = busiest->expired;
++ dst_array = this_rq->expired;
++ } else {
++ array = busiest->active;
++ dst_array = this_rq->active;
++ }
++
++new_array:
++ /* Start searching at priority 0: */
++ idx = 0;
++skip_bitmap:
++ if (!idx)
++ idx = sched_find_first_bit(array->bitmap);
++ else
++ idx = find_next_bit(array->bitmap, MAX_PRIO, idx);
++ if (idx >= MAX_PRIO) {
++ if (array == busiest->expired && busiest->active->nr_active) {
++ array = busiest->active;
++ dst_array = this_rq->active;
++ goto new_array;
++ }
++ goto out;
++ }
++
++ head = array->queue + idx;
++ curr = head->prev;
++skip_queue:
++ tmp = list_entry(curr, struct task_struct, run_list);
++
++ curr = curr->prev;
++
++ /*
++ * To help distribute high priority tasks across CPUs we don't
++ * skip a task if it will be the highest priority task (i.e. smallest
++ * prio value) on its new queue, regardless of its load weight.
++ */
++ skip_for_load = tmp->load_weight > rem_load_move;
++ if (skip_for_load && idx < this_best_prio)
++ skip_for_load = !best_prio_seen && idx == best_prio;
++ if (skip_for_load ||
++ !can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) {
++
++ best_prio_seen |= idx == best_prio;
++ if (curr != head)
++ goto skip_queue;
++ idx++;
++ goto skip_bitmap;
++ }
++
++ pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu);
++ pulled++;
++ rem_load_move -= tmp->load_weight;
++
++ /*
++ * We only want to steal up to the prescribed number of tasks
++ * and the prescribed amount of weighted load.
++ */
++ if (pulled < max_nr_move && rem_load_move > 0) {
++ if (idx < this_best_prio)
++ this_best_prio = idx;
++ if (curr != head)
++ goto skip_queue;
++ idx++;
++ goto skip_bitmap;
++ }
++out:
++ /*
++ * Right now, this is the only place pull_task() is called,
++ * so we can safely collect pull_task() stats here rather than
++ * inside pull_task().
++ */
++ schedstat_add(sd, lb_gained[idle], pulled);
++
++ if (all_pinned)
++ *all_pinned = pinned;
++ return pulled;
++}
++
++/*
++ * find_busiest_group finds and returns the busiest CPU group within the
++ * domain. It calculates and returns the amount of weighted load which
++ * should be moved to restore balance via the imbalance parameter.
++ */
++static struct sched_group *
++find_busiest_group(struct sched_domain *sd, int this_cpu,
++ unsigned long *imbalance, enum idle_type idle, int *sd_idle,
++ cpumask_t *cpus, int *balance)
++{
++ struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
++ unsigned long max_load, avg_load, total_load, this_load, total_pwr;
++ unsigned long max_pull;
++ unsigned long busiest_load_per_task, busiest_nr_running;
++ unsigned long this_load_per_task, this_nr_running;
++ int load_idx;
++#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
++ int power_savings_balance = 1;
++ unsigned long leader_nr_running = 0, min_load_per_task = 0;
++ unsigned long min_nr_running = ULONG_MAX;
++ struct sched_group *group_min = NULL, *group_leader = NULL;
++#endif
++
++ max_load = this_load = total_load = total_pwr = 0;
++ busiest_load_per_task = busiest_nr_running = 0;
++ this_load_per_task = this_nr_running = 0;
++ if (idle == NOT_IDLE)
++ load_idx = sd->busy_idx;
++ else if (idle == NEWLY_IDLE)
++ load_idx = sd->newidle_idx;
++ else
++ load_idx = sd->idle_idx;
++
++ do {
++ unsigned long load, group_capacity;
++ int local_group;
++ int i;
++ unsigned int balance_cpu = -1, first_idle_cpu = 0;
++ unsigned long sum_nr_running, sum_weighted_load;
++
++ local_group = cpu_isset(this_cpu, group->cpumask);
++
++ if (local_group)
++ balance_cpu = first_cpu(group->cpumask);
++
++ /* Tally up the load of all CPUs in the group */
++ sum_weighted_load = sum_nr_running = avg_load = 0;
++
++ for_each_cpu_mask(i, group->cpumask) {
++ struct rq *rq;
++
++ if (!cpu_isset(i, *cpus))
++ continue;
++
++ rq = cpu_rq(i);
++
++ if (*sd_idle && !idle_cpu(i))
++ *sd_idle = 0;
++
++ /* Bias balancing toward cpus of our domain */
++ if (local_group) {
++ if (idle_cpu(i) && !first_idle_cpu) {
++ first_idle_cpu = 1;
++ balance_cpu = i;
++ }
++
++ load = target_load(i, load_idx);
++ } else
++ load = source_load(i, load_idx);
++
++ avg_load += load;
++ sum_nr_running += rq->nr_running;
++ sum_weighted_load += rq->raw_weighted_load;
++ }
++
++ /*
++ * The first idle cpu, or failing that the first cpu (busiest) in
++ * this sched group, is eligible for doing load balancing at this
++ * and above domains.
++ */
++ if (local_group && balance_cpu != this_cpu && balance) {
++ *balance = 0;
++ goto ret;
++ }
++
++ total_load += avg_load;
++ total_pwr += group->__cpu_power;
++
++ /* Adjust by relative CPU power of the group */
++ avg_load = sg_div_cpu_power(group,
++ avg_load * SCHED_LOAD_SCALE);
++
++ group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
++
++ if (local_group) {
++ this_load = avg_load;
++ this = group;
++ this_nr_running = sum_nr_running;
++ this_load_per_task = sum_weighted_load;
++ } else if (avg_load > max_load &&
++ sum_nr_running > group_capacity) {
++ max_load = avg_load;
++ busiest = group;
++ busiest_nr_running = sum_nr_running;
++ busiest_load_per_task = sum_weighted_load;
++ }
++
++#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
++ /*
++ * Busy processors will not participate in power savings
++ * balance.
++ */
++ if (idle == NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
++ goto group_next;
++
++ /*
++ * If the local group is idle or completely loaded,
++ * there is no need to do power savings balance at this domain
++ */
++ if (local_group && (this_nr_running >= group_capacity ||
++ !this_nr_running))
++ power_savings_balance = 0;
++
++ /*
++ * If a group is already running at full capacity or idle,
++ * don't include that group in power savings calculations
++ */
++ if (!power_savings_balance || sum_nr_running >= group_capacity
++ || !sum_nr_running)
++ goto group_next;
++
++ /*
++ * Calculate the group which has the least non-idle load.
++ * This is the group from where we need to pick up the load
++ * for saving power
++ */
++ if ((sum_nr_running < min_nr_running) ||
++ (sum_nr_running == min_nr_running &&
++ first_cpu(group->cpumask) <
++ first_cpu(group_min->cpumask))) {
++ group_min = group;
++ min_nr_running = sum_nr_running;
++ min_load_per_task = sum_weighted_load /
++ sum_nr_running;
++ }
++
++ /*
++ * Calculate the group which is nearly at its
++ * capacity but still has some room to pick up load
++ * from other groups and save more power
++ */
++ if (sum_nr_running <= group_capacity - 1) {
++ if (sum_nr_running > leader_nr_running ||
++ (sum_nr_running == leader_nr_running &&
++ first_cpu(group->cpumask) >
++ first_cpu(group_leader->cpumask))) {
++ group_leader = group;
++ leader_nr_running = sum_nr_running;
++ }
++ }
++group_next:
++#endif
++ group = group->next;
++ } while (group != sd->groups);
++
++ if (!busiest || this_load >= max_load || busiest_nr_running == 0)
++ goto out_balanced;
++
++ avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
++
++ if (this_load >= avg_load ||
++ 100*max_load <= sd->imbalance_pct*this_load)
++ goto out_balanced;
++
++ busiest_load_per_task /= busiest_nr_running;
++ /*
++ * We're trying to get all the cpus to the average_load, so we don't
++ * want to push ourselves above the average load, nor do we wish to
++ * reduce the max loaded cpu below the average load, as either of these
++ * actions would just result in more rebalancing later, and ping-pong
++ * tasks around. Thus we look for the minimum possible imbalance.
++ * Negative imbalances (*we* are more loaded than anyone else) will
++ * be counted as no imbalance for these purposes -- we can't fix that
++ * by pulling tasks to us. Be careful of negative numbers as they'll
++ * appear as very large values with unsigned longs.
++ */
++ if (max_load <= busiest_load_per_task)
++ goto out_balanced;
++
++ /*
++ * In the presence of smp nice balancing, certain scenarios can have
++ * max load less than avg load (as we skip the groups at or below
++ * their cpu_power while calculating max_load).
++ */
++ if (max_load < avg_load) {
++ *imbalance = 0;
++ goto small_imbalance;
++ }
++
++ /* Don't want to pull so many tasks that a group would go idle */
++ max_pull = min(max_load - avg_load, max_load - busiest_load_per_task);
++
++ /* How much load to actually move to equalise the imbalance */
++ *imbalance = min(max_pull * busiest->__cpu_power,
++ (avg_load - this_load) * this->__cpu_power)
++ / SCHED_LOAD_SCALE;
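++
++ /*
++ * Worked example (illustrative): with equal group powers of
++ * SCHED_LOAD_SCALE, max_load 300, this_load 100, avg_load 200 and
++ * busiest_load_per_task 100, max_pull is min(100, 200) == 100 and
++ * *imbalance comes out as 100: moving that much weighted load
++ * lands both sides on the average.
++ */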
++
++ /*
++ * If *imbalance is less than the average load per runnable task,
++ * there is no guarantee that any tasks will be moved, so we
++ * consider bumping its value to force at least one task to be
++ * moved.
++ */
++ if (*imbalance < busiest_load_per_task) {
++ unsigned long tmp, pwr_now, pwr_move;
++ unsigned int imbn;
++
++small_imbalance:
++ pwr_move = pwr_now = 0;
++ imbn = 2;
++ if (this_nr_running) {
++ this_load_per_task /= this_nr_running;
++ if (busiest_load_per_task > this_load_per_task)
++ imbn = 1;
++ } else
++ this_load_per_task = SCHED_LOAD_SCALE;
++
++ if (max_load - this_load >= busiest_load_per_task * imbn) {
++ *imbalance = busiest_load_per_task;
++ return busiest;
++ }
++
++ /*
++ * OK, we don't have enough imbalance to justify moving tasks,
++ * however we may be able to increase total CPU power used by
++ * moving them.
++ */
++
++ pwr_now += busiest->__cpu_power *
++ min(busiest_load_per_task, max_load);
++ pwr_now += this->__cpu_power *
++ min(this_load_per_task, this_load);
++ pwr_now /= SCHED_LOAD_SCALE;
++
++ /* Amount of load we'd subtract */
++ tmp = sg_div_cpu_power(busiest,
++ busiest_load_per_task * SCHED_LOAD_SCALE);
++ if (max_load > tmp)
++ pwr_move += busiest->__cpu_power *
++ min(busiest_load_per_task, max_load - tmp);
++
++ /* Amount of load we'd add */
++ if (max_load * busiest->__cpu_power <
++ busiest_load_per_task * SCHED_LOAD_SCALE)
++ tmp = sg_div_cpu_power(this,
++ max_load * busiest->__cpu_power);
++ else
++ tmp = sg_div_cpu_power(this,
++ busiest_load_per_task * SCHED_LOAD_SCALE);
++ pwr_move += this->__cpu_power *
++ min(this_load_per_task, this_load + tmp);
++ pwr_move /= SCHED_LOAD_SCALE;
++
++ /* Move if we gain throughput */
++ if (pwr_move <= pwr_now)
++ goto out_balanced;
++
++ *imbalance = busiest_load_per_task;
++ }
++
++ return busiest;
++
++out_balanced:
++#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
++ if (idle == NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
++ goto ret;
++
++ if (this == group_leader && group_leader != group_min) {
++ *imbalance = min_load_per_task;
++ return group_min;
++ }
++#endif
++ret:
++ *imbalance = 0;
++ return NULL;
++}
++
++/*
++ * find_busiest_queue - find the busiest runqueue among the cpus in group.
++ */
++static struct rq *
++find_busiest_queue(struct sched_group *group, enum idle_type idle,
++ unsigned long imbalance, cpumask_t *cpus)
++{
++ struct rq *busiest = NULL, *rq;
++ unsigned long max_load = 0;
++ int i;
++
++ for_each_cpu_mask(i, group->cpumask) {
++
++ if (!cpu_isset(i, *cpus))
++ continue;
++
++ rq = cpu_rq(i);
++
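++ /*
++ * A queue with a single task heavier than the requested
++ * imbalance is no use to us: pulling its only runnable task
++ * would overshoot and just push the excess load the other way.
++ */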
++ if (rq->nr_running == 1 && rq->raw_weighted_load > imbalance)
++ continue;
++
++ if (rq->raw_weighted_load > max_load) {
++ max_load = rq->raw_weighted_load;
++ busiest = rq;
++ }
++ }
++
++ return busiest;
++}
++
++/*
++ * Max backoff if we encounter pinned tasks. Pretty arbitrary value;
++ * it just needs to be large enough.
++ */
++#define MAX_PINNED_INTERVAL 512
++
++static inline unsigned long minus_1_or_zero(unsigned long n)
++{
++ return n > 0 ? n - 1 : 0;
++}
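++
++/*
++ * minus_1_or_zero() caps, below, the number of tasks move_tasks() may
++ * pull at nr_running - 1, so a single balancing pass never drains the
++ * busiest queue completely idle.
++ */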
++
++/*
++ * Check this_cpu to ensure it is balanced within domain. Attempt to move
++ * tasks if there is an imbalance.
++ */
++static int load_balance(int this_cpu, struct rq *this_rq,
++ struct sched_domain *sd, enum idle_type idle,
++ int *balance)
++{
++ int nr_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
++ struct sched_group *group;
++ unsigned long imbalance;
++ struct rq *busiest;
++ cpumask_t cpus = CPU_MASK_ALL;
++ unsigned long flags;
++
++ /*
++ * When the power savings policy is enabled for the parent domain, an
++ * idle sibling can pick up load irrespective of busy siblings. In this
++ * case, let the state of the idle sibling percolate up as IDLE, instead
++ * of portraying it as NOT_IDLE.
++ */
++ if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
++ !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
++ sd_idle = 1;
++
++ schedstat_inc(sd, lb_cnt[idle]);
++
++redo:
++ group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
++ &cpus, balance);
++
++ if (*balance == 0)
++ goto out_balanced;
++
++ if (!group) {
++ schedstat_inc(sd, lb_nobusyg[idle]);
++ goto out_balanced;
++ }
++
++ busiest = find_busiest_queue(group, idle, imbalance, &cpus);
++ if (!busiest) {
++ schedstat_inc(sd, lb_nobusyq[idle]);
++ goto out_balanced;
++ }
++
++ BUG_ON(busiest == this_rq);
++
++ schedstat_add(sd, lb_imbalance[idle], imbalance);
++
++ nr_moved = 0;
++ if (busiest->nr_running > 1) {
++ /*
++ * Attempt to move tasks. If find_busiest_group has found
++ * an imbalance but busiest->nr_running <= 1, the group is
++ * still unbalanced. nr_moved simply stays zero, so it is
++ * correctly treated as an imbalance.
++ */
++ local_irq_save(flags);
++ double_rq_lock(this_rq, busiest);
++ nr_moved = move_tasks(this_rq, this_cpu, busiest,
++ minus_1_or_zero(busiest->nr_running),
++ imbalance, sd, idle, &all_pinned);
++ double_rq_unlock(this_rq, busiest);
++ local_irq_restore(flags);
++
++ /*
++ * some other cpu did the load balance for us.
++ */
++ if (nr_moved && this_cpu != smp_processor_id())
++ resched_cpu(this_cpu);
++
++ /* All tasks on this runqueue were pinned by CPU affinity */
++ if (unlikely(all_pinned)) {
++ cpu_clear(cpu_of(busiest), cpus);
++ if (!cpus_empty(cpus))
++ goto redo;
++ goto out_balanced;
++ }
++ }
++
++ if (!nr_moved) {
++ schedstat_inc(sd, lb_failed[idle]);
++ sd->nr_balance_failed++;
++
++ if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
++
++ spin_lock_irqsave(&busiest->lock, flags);
++
++ /* don't kick the migration_thread if the curr
++ * task on the busiest cpu can't be moved to this_cpu
++ */
++ if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) {
++ spin_unlock_irqrestore(&busiest->lock, flags);
++ all_pinned = 1;
++ goto out_one_pinned;
++ }
++
++ if (!busiest->active_balance) {
++ busiest->active_balance = 1;
++ busiest->push_cpu = this_cpu;
++ active_balance = 1;
++ }
++ spin_unlock_irqrestore(&busiest->lock, flags);
++ if (active_balance)
++ wake_up_process(busiest->migration_thread);
++
++ /*
++ * We've kicked active balancing, reset the failure
++ * counter.
++ */
++ sd->nr_balance_failed = sd->cache_nice_tries+1;
++ }
++ } else
++ sd->nr_balance_failed = 0;
++
++ if (likely(!active_balance)) {
++ /* We were unbalanced, so reset the balancing interval */
++ sd->balance_interval = sd->min_interval;
++ } else {
++ /*
++ * If we've begun active balancing, start to back off. This
++ * case may not be covered by the all_pinned logic if there
++ * is only 1 task on the busy runqueue (because we don't call
++ * move_tasks).
++ */
++ if (sd->balance_interval < sd->max_interval)
++ sd->balance_interval *= 2;
++ }
++
++ if (!nr_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
++ !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
++ return -1;
++ return nr_moved;
++
++out_balanced:
++ schedstat_inc(sd, lb_balanced[idle]);
++
++ sd->nr_balance_failed = 0;
++
++out_one_pinned:
++ /* tune up the balancing interval */
++ if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
++ (sd->balance_interval < sd->max_interval))
++ sd->balance_interval *= 2;
++
++ if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
++ !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
++ return -1;
++ return 0;
++}
++
++/*
++ * Check this_cpu to ensure it is balanced within domain. Attempt to move
++ * tasks if there is an imbalance.
++ *
++ * Called from schedule when this_rq is about to become idle (NEWLY_IDLE).
++ * this_rq is locked.
++ */
++static int
++load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
++{
++ struct sched_group *group;
++ struct rq *busiest = NULL;
++ unsigned long imbalance;
++ int nr_moved = 0;
++ int sd_idle = 0;
++ cpumask_t cpus = CPU_MASK_ALL;
++
++ /*
++ * When the power savings policy is enabled for the parent domain, an
++ * idle sibling can pick up load irrespective of busy siblings. In this
++ * case, let the state of the idle sibling percolate up as IDLE, instead
++ * of portraying it as NOT_IDLE.
++ */
++ if (sd->flags & SD_SHARE_CPUPOWER &&
++ !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
++ sd_idle = 1;
++
++ schedstat_inc(sd, lb_cnt[NEWLY_IDLE]);
++redo:
++ group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE,
++ &sd_idle, &cpus, NULL);
++ if (!group) {
++ schedstat_inc(sd, lb_nobusyg[NEWLY_IDLE]);
++ goto out_balanced;
++ }
++
++ busiest = find_busiest_queue(group, NEWLY_IDLE, imbalance,
++ &cpus);
++ if (!busiest) {
++ schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]);
++ goto out_balanced;
++ }
++
++ BUG_ON(busiest == this_rq);
++
++ schedstat_add(sd, lb_imbalance[NEWLY_IDLE], imbalance);
++
++ nr_moved = 0;
++ if (busiest->nr_running > 1) {
++ /* Attempt to move tasks */
++ double_lock_balance(this_rq, busiest);
++ nr_moved = move_tasks(this_rq, this_cpu, busiest,
++ minus_1_or_zero(busiest->nr_running),
++ imbalance, sd, NEWLY_IDLE, NULL);
++ spin_unlock(&busiest->lock);
++
++ if (!nr_moved) {
++ cpu_clear(cpu_of(busiest), cpus);
++ if (!cpus_empty(cpus))
++ goto redo;
++ }
++ }
++
++ if (!nr_moved) {
++ schedstat_inc(sd, lb_failed[NEWLY_IDLE]);
++ if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
++ !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
++ return -1;
++ } else
++ sd->nr_balance_failed = 0;
++
++ return nr_moved;
++
++out_balanced:
++ schedstat_inc(sd, lb_balanced[NEWLY_IDLE]);
++ if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
++ !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
++ return -1;
++ sd->nr_balance_failed = 0;
++
++ return 0;
++}
++
++/*
++ * idle_balance is called by schedule() if this_cpu is about to become
++ * idle. Attempts to pull tasks from other CPUs.
++ */
++static void idle_balance(int this_cpu, struct rq *this_rq)
++{
++ struct sched_domain *sd;
++ int pulled_task = 0;
++ unsigned long next_balance = jiffies + 60 * HZ;
++
++ for_each_domain(this_cpu, sd) {
++ unsigned long interval;
++
++ if (!(sd->flags & SD_LOAD_BALANCE))
++ continue;
++
++ if (sd->flags & SD_BALANCE_NEWIDLE)
++ /* If we've pulled tasks over, stop searching: */
++ pulled_task = load_balance_newidle(this_cpu,
++ this_rq, sd);
++
++ interval = msecs_to_jiffies(sd->balance_interval);
++ if (time_after(next_balance, sd->last_balance + interval))
++ next_balance = sd->last_balance + interval;
++ if (pulled_task)
++ break;
++ }
++ if (!pulled_task)
++ /*
++ * We are going idle. next_balance may be set based on
++ * a busy processor. So reset next_balance.
++ */
++ this_rq->next_balance = next_balance;
++}
++
++/*
++ * active_load_balance is run by migration threads. It pushes running tasks
++ * off the busiest CPU onto idle CPUs. It requires at least 1 task to be
++ * running on each physical CPU where possible, and avoids physical /
++ * logical imbalances.
++ *
++ * Called with busiest_rq locked.
++ */
++static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
++{
++ int target_cpu = busiest_rq->push_cpu;
++ struct sched_domain *sd;
++ struct rq *target_rq;
++
++ /* Is there any task to move? */
++ if (busiest_rq->nr_running <= 1)
++ return;
++
++ target_rq = cpu_rq(target_cpu);
++
++ /*
++ * This condition is "impossible", if it occurs
++ * we need to fix it. Originally reported by
++ * Bjorn Helgaas on a 128-cpu setup.
++ */
++ BUG_ON(busiest_rq == target_rq);
++
++ /* move a task from busiest_rq to target_rq */
++ double_lock_balance(busiest_rq, target_rq);
++
++ /* Search for an sd spanning us and the target CPU. */
++ for_each_domain(target_cpu, sd) {
++ if ((sd->flags & SD_LOAD_BALANCE) &&
++ cpu_isset(busiest_cpu, sd->span))
++ break;
++ }
++
++ if (likely(sd)) {
++ schedstat_inc(sd, alb_cnt);
++
++ if (move_tasks(target_rq, target_cpu, busiest_rq, 1,
++ RTPRIO_TO_LOAD_WEIGHT(100), sd, SCHED_IDLE,
++ NULL))
++ schedstat_inc(sd, alb_pushed);
++ else
++ schedstat_inc(sd, alb_failed);
++ }
++ spin_unlock(&target_rq->lock);
++}
++
++static void update_load(struct rq *this_rq)
++{
++ unsigned long this_load;
++ unsigned int i, scale;
++
++ this_load = this_rq->raw_weighted_load;
++
++ /* Update our load: */
++ for (i = 0, scale = 1; i < 3; i++, scale += scale) {
++ unsigned long old_load, new_load;
++
++ /* scale is effectively 1 << i now, and >> i divides by scale */
++
++ old_load = this_rq->cpu_load[i];
++ new_load = this_load;
++ /*
++ * Round up the averaging division if load is increasing. This
++ * prevents us from getting stuck on 9 if the load is 10, for
++ * example.
++ */
++ if (new_load > old_load)
++ new_load += scale-1;
++ this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
++ }
++}
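++
++/*
++ * Worked example of the averaging above: for i = 1 (scale = 2), with
++ * old_load = 9 and this_load = 10 the increase is rounded up to
++ * new_load = 11, giving cpu_load[1] = (9*1 + 11) >> 1 = 10. Without
++ * the rounding it would stay stuck at (9 + 10) >> 1 = 9.
++ */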
++
++#ifdef CONFIG_NO_HZ
++static struct {
++ atomic_t load_balancer;
++ cpumask_t cpu_mask;
++} nohz ____cacheline_aligned = {
++ .load_balancer = ATOMIC_INIT(-1),
++ .cpu_mask = CPU_MASK_NONE,
++};
++
++/*
++ * This routine tries to nominate an ilb (idle load balancing) owner
++ * among the cpus whose ticks are stopped. The ilb owner does idle
++ * load balancing on behalf of all those cpus. If all the cpus in the
++ * system go into this tickless mode, there will be no ilb owner (as
++ * there is no need for one) and all the cpus will sleep till the next
++ * wakeup event arrives.
++ *
++ * For the ilb owner the tick is not stopped, and this tick is used
++ * for idle load balancing. The ilb owner remains part of
++ * nohz.cpu_mask.
++ *
++ * While stopping its tick, a cpu becomes the ilb owner if there is no
++ * other owner, and stays the owner until that cpu becomes busy or
++ * until all cpus in the system stop their ticks, at which point no
++ * ilb owner is needed.
++ *
++ * When the ilb owner becomes busy, it nominates another owner during
++ * the next busy scheduler_tick().
++ *
++ * Returns 1 if the calling cpu is (or becomes) the ilb owner, and 0
++ * otherwise.
++ */
++int select_nohz_load_balancer(int stop_tick)
++{
++ int cpu = smp_processor_id();
++
++ if (stop_tick) {
++ cpu_set(cpu, nohz.cpu_mask);
++ cpu_rq(cpu)->in_nohz_recently = 1;
++
++ /*
++ * If we are going offline and still the leader, give up!
++ */
++ if (cpu_is_offline(cpu) &&
++ atomic_read(&nohz.load_balancer) == cpu) {
++ if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
++ BUG();
++ return 0;
++ }
++
++ /* time for ilb owner also to sleep */
++ if (cpus_weight(nohz.cpu_mask) == num_online_cpus()) {
++ if (atomic_read(&nohz.load_balancer) == cpu)
++ atomic_set(&nohz.load_balancer, -1);
++ return 0;
++ }
++
++ if (atomic_read(&nohz.load_balancer) == -1) {
++ /* make me the ilb owner */
++ if (atomic_cmpxchg(&nohz.load_balancer, -1, cpu) == -1)
++ return 1;
++ } else if (atomic_read(&nohz.load_balancer) == cpu)
++ return 1;
++ } else {
++ if (!cpu_isset(cpu, nohz.cpu_mask))
++ return 0;
++
++ cpu_clear(cpu, nohz.cpu_mask);
++
++ if (atomic_read(&nohz.load_balancer) == cpu)
++ if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
++ BUG();
++ }
++ return 0;
++}
++#endif
++
++static DEFINE_SPINLOCK(balancing);
++
++/*
++ * It checks each scheduling domain to see if it is due to be balanced,
++ * and initiates a balancing operation if so.
++ *
++ * Balancing parameters are set up in arch_init_sched_domains.
++ */
++static inline void rebalance_domains(int cpu, enum idle_type idle)
++{
++ int balance = 1;
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long interval;
++ struct sched_domain *sd;
++ /* Earliest time when we have to do rebalance again */
++ unsigned long next_balance = jiffies + 60*HZ;
++
++ for_each_domain(cpu, sd) {
++ if (!(sd->flags & SD_LOAD_BALANCE))
++ continue;
++
++ interval = sd->balance_interval;
++ if (idle != SCHED_IDLE)
++ interval *= sd->busy_factor;
++
++ /* scale ms to jiffies */
++ interval = msecs_to_jiffies(interval);
++ if (unlikely(!interval))
++ interval = 1;
++
++ if (sd->flags & SD_SERIALIZE) {
++ if (!spin_trylock(&balancing))
++ goto out;
++ }
++
++ if (time_after_eq(jiffies, sd->last_balance + interval)) {
++ if (load_balance(cpu, rq, sd, idle, &balance)) {
++ /*
++ * We've pulled tasks over so either we're no
++ * longer idle, or one of our SMT siblings is
++ * not idle.
++ */
++ idle = NOT_IDLE;
++ }
++ sd->last_balance = jiffies;
++ }
++ if (sd->flags & SD_SERIALIZE)
++ spin_unlock(&balancing);
++out:
++ if (time_after(next_balance, sd->last_balance + interval))
++ next_balance = sd->last_balance + interval;
++
++ /*
++ * Stop the load balance at this level. There is another
++ * CPU in our sched group which is doing load balancing more
++ * actively.
++ */
++ if (!balance)
++ break;
++ }
++ rq->next_balance = next_balance;
++}
++
++/*
++ * run_rebalance_domains is triggered when needed from the scheduler tick.
++ * In the CONFIG_NO_HZ case, the idle load balance owner will do the
++ * rebalancing for all the cpus for whom scheduler ticks are stopped.
++ */
++static void run_rebalance_domains(struct softirq_action *h)
++{
++ int local_cpu = smp_processor_id();
++ struct rq *local_rq = cpu_rq(local_cpu);
++ enum idle_type idle = local_rq->idle_at_tick ? SCHED_IDLE : NOT_IDLE;
++
++ rebalance_domains(local_cpu, idle);
++
++#ifdef CONFIG_NO_HZ
++ /*
++ * If this cpu is the owner for idle load balancing, then do the
++ * balancing on behalf of the other idle cpus whose ticks are
++ * stopped.
++ */
++ if (local_rq->idle_at_tick &&
++ atomic_read(&nohz.load_balancer) == local_cpu) {
++ cpumask_t cpus = nohz.cpu_mask;
++ struct rq *rq;
++ int balance_cpu;
++
++ cpu_clear(local_cpu, cpus);
++ for_each_cpu_mask(balance_cpu, cpus) {
++ /*
++ * If this cpu gets work to do, stop the load balancing
++ * work being done for other cpus. Next load
++ * balancing owner will pick it up.
++ */
++ if (need_resched())
++ break;
++
++ rebalance_domains(balance_cpu, SCHED_IDLE);
++
++ rq = cpu_rq(balance_cpu);
++ if (time_after(local_rq->next_balance, rq->next_balance))
++ local_rq->next_balance = rq->next_balance;
++ }
++ }
++#endif
++}
++
++/*
++ * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
++ *
++ * In case of CONFIG_NO_HZ, this is the place where we nominate a new
++ * idle load balancing owner or decide to stop the periodic load balancing,
++ * if the whole system is idle.
++ */
++static inline void trigger_load_balance(int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++#ifdef CONFIG_NO_HZ
++ /*
++ * If we were in the nohz mode recently and busy at the current
++ * scheduler tick, then check if we need to nominate a new idle
++ * load balancer.
++ */
++ if (rq->in_nohz_recently && !rq->idle_at_tick) {
++ rq->in_nohz_recently = 0;
++
++ if (atomic_read(&nohz.load_balancer) == cpu) {
++ cpu_clear(cpu, nohz.cpu_mask);
++ atomic_set(&nohz.load_balancer, -1);
++ }
++
++ if (atomic_read(&nohz.load_balancer) == -1) {
++ /*
++ * simple selection for now: Nominate the
++ * first cpu in the nohz list to be the next
++ * ilb owner.
++ *
++ * TBD: Traverse the sched domains and nominate
++ * the nearest cpu in the nohz.cpu_mask.
++ */
++ int ilb = first_cpu(nohz.cpu_mask);
++
++ if (ilb != NR_CPUS)
++ resched_cpu(ilb);
++ }
++ }
++
++ /*
++ * If this cpu is idle and doing idle load balancing for all the
++ * cpus with ticks stopped, is it time for that to stop?
++ */
++ if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu &&
++ cpus_weight(nohz.cpu_mask) == num_online_cpus()) {
++ resched_cpu(cpu);
++ return;
++ }
++
++ /*
++ * If this cpu is idle and the idle load balancing is done by
++ * someone else, there is no need to raise the SCHED_SOFTIRQ.
++ */
++ if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu &&
++ cpu_isset(cpu, nohz.cpu_mask))
++ return;
++#endif
++ if (time_after_eq(jiffies, rq->next_balance))
++ raise_softirq(SCHED_SOFTIRQ);
++}
++#else
++/*
++ * on UP we do not need to balance between CPUs:
++ */
++static inline void idle_balance(int cpu, struct rq *rq)
++{
++}
++#endif
++
++DEFINE_PER_CPU(struct kernel_stat, kstat);
++
++EXPORT_PER_CPU_SYMBOL(kstat);
++
++/*
++ * This is called on clock ticks and on context switches.
++ * Bank in p->sched_time the ns elapsed since the last tick or switch.
++ */
++static inline void
++update_cpu_clock(struct task_struct *p, struct rq *rq, unsigned long long now)
++{
++ p->sched_time += now - p->last_ran;
++ p->last_ran = rq->most_recent_timestamp = now;
++}
++
++/*
++ * Return current->sched_time plus any more ns on the sched_clock
++ * that have not yet been banked.
++ */
++unsigned long long current_sched_time(const struct task_struct *p)
++{
++ unsigned long long ns;
++ unsigned long flags;
++
++ local_irq_save(flags);
++ ns = p->sched_time + sched_clock() - p->last_ran;
++ local_irq_restore(flags);
++
++ return ns;
++}
++
++/*
++ * We place interactive tasks back into the active array, if possible.
++ *
++ * To guarantee that this does not starve expired tasks we ignore the
++ * interactivity of a task if the first expired task had to wait more
++ * than a 'reasonable' amount of time. This deadline timeout is
++ * load-dependent, as the frequency of array switches decreases with
++ * increasing number of running tasks. We also ignore the interactivity
++ * if a better static_prio task has expired:
++ */
++static inline int expired_starving(struct rq *rq)
++{
++ if (rq->curr->static_prio > rq->best_expired_prio)
++ return 1;
++ if (!STARVATION_LIMIT || !rq->expired_timestamp)
++ return 0;
++ if (jiffies - rq->expired_timestamp > STARVATION_LIMIT * rq->nr_running)
++ return 1;
++ return 0;
++}
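++
++/*
++ * E.g. with four runnable tasks the deadline above works out to
++ * 4 * STARVATION_LIMIT jiffies: once the first expired task has waited
++ * that long, interactive tasks are no longer placed back into the
++ * active array.
++ */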
++
++/*
++ * Account user cpu time to a process.
++ * @p: the process that the cpu time gets accounted to
++ * @cputime: the cpu time spent in user space since the last update
++ */
++void account_user_time(struct task_struct *p, cputime_t cputime)
++{
++ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
++ struct vx_info *vxi = p->vx_info; /* p is _always_ current */
++ cputime64_t tmp;
++ int nice = (TASK_NICE(p) > 0);
++
++ p->utime = cputime_add(p->utime, cputime);
++ vx_account_user(vxi, cputime, nice);
++
++ /* Add user time to cpustat. */
++ tmp = cputime_to_cputime64(cputime);
++ if (nice)
++ cpustat->nice = cputime64_add(cpustat->nice, tmp);
++ else
++ cpustat->user = cputime64_add(cpustat->user, tmp);
++}
++
++/*
++ * Account system cpu time to a process.
++ * @p: the process that the cpu time gets accounted to
++ * @hardirq_offset: the offset to subtract from hardirq_count()
++ * @cputime: the cpu time spent in kernel space since the last update
++ */
++void account_system_time(struct task_struct *p, int hardirq_offset,
++ cputime_t cputime)
++{
++ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
++ struct vx_info *vxi = p->vx_info; /* p is _always_ current */
++ struct rq *rq = this_rq();
++ cputime64_t tmp;
++
++ p->stime = cputime_add(p->stime, cputime);
++ vx_account_system(vxi, cputime, (p == rq->idle));
++
++ /* Add system time to cpustat. */
++ tmp = cputime_to_cputime64(cputime);
++ if (hardirq_count() - hardirq_offset)
++ cpustat->irq = cputime64_add(cpustat->irq, tmp);
++ else if (softirq_count())
++ cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
++ else if (p != rq->idle)
++ cpustat->system = cputime64_add(cpustat->system, tmp);
++ else if (atomic_read(&rq->nr_iowait) > 0)
++ cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
++ else
++ cpustat->idle = cputime64_add(cpustat->idle, tmp);
++ /* Account for system time used */
++ acct_update_integrals(p);
++}
++
++/*
++ * Account for involuntary wait time.
++ * @p: the process from which the cpu time has been stolen
++ * @steal: the cpu time spent in involuntary wait
++ */
++void account_steal_time(struct task_struct *p, cputime_t steal)
++{
++ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
++ cputime64_t tmp = cputime_to_cputime64(steal);
++ struct rq *rq = this_rq();
++
++ if (p == rq->idle) {
++ p->stime = cputime_add(p->stime, steal);
++ if (atomic_read(&rq->nr_iowait) > 0)
++ cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
++ else
++ cpustat->idle = cputime64_add(cpustat->idle, tmp);
++ } else
++ cpustat->steal = cputime64_add(cpustat->steal, tmp);
++}
++
++static void task_running_tick(struct rq *rq, struct task_struct *p, int cpu)
++{
++ if (p->array != rq->active) {
++ /* Task has expired but was not scheduled yet */
++ set_tsk_need_resched(p);
++ return;
++ }
++ spin_lock(&rq->lock);
++ /*
++ * The task was running during this tick - update the
++ * time slice counter. Note: we do not update a thread's
++ * priority until it either goes to sleep or uses up its
++ * timeslice. This makes it possible for interactive tasks
++ * to use up their timeslices at their highest priority levels.
++ */
++ if (rt_task(p)) {
++ /*
++ * RR tasks need a special form of timeslice management.
++ * FIFO tasks have no timeslices.
++ */
++ if ((p->policy == SCHED_RR) && !--p->time_slice) {
++ p->time_slice = task_timeslice(p);
++ p->first_time_slice = 0;
++ set_tsk_need_resched(p);
++
++ /* put it at the end of the queue: */
++ requeue_task(p, rq->active);
++ }
++ goto out_unlock;
++ }
++ if (vx_need_resched(p, --p->time_slice, cpu)) {
++ dequeue_task(p, rq->active);
++ set_tsk_need_resched(p);
++ p->prio = effective_prio(p);
++ p->time_slice = task_timeslice(p);
++ p->first_time_slice = 0;
++
++ if (!rq->expired_timestamp)
++ rq->expired_timestamp = jiffies;
++ if (!TASK_INTERACTIVE(p) || expired_starving(rq)) {
++ enqueue_task(p, rq->expired);
++ if (p->static_prio < rq->best_expired_prio)
++ rq->best_expired_prio = p->static_prio;
++ } else
++ enqueue_task(p, rq->active);
++ } else {
++ /*
++ * Prevent a too long timeslice allowing a task to monopolize
++ * the CPU. We do this by splitting up the timeslice into
++ * smaller pieces.
++ *
++ * Note: this does not mean the task's timeslices expire or
++ * get lost in any way, they just might be preempted by
++ * another task of equal priority. (one with higher
++ * priority would have preempted this task already.) We
++ * requeue this task to the end of the list on this priority
++ * level, which is in essence a round-robin of tasks with
++ * equal priority.
++ *
++ * This only applies to tasks in the interactive
++ * delta range with at least TIMESLICE_GRANULARITY to requeue.
++ */
++ if (TASK_INTERACTIVE(p) && !((task_timeslice(p) -
++ p->time_slice) % TIMESLICE_GRANULARITY(p)) &&
++ (p->time_slice >= TIMESLICE_GRANULARITY(p)) &&
++ (p->array == rq->active)) {
++
++ requeue_task(p, rq->active);
++ set_tsk_need_resched(p);
++ }
++ }
++out_unlock:
++ spin_unlock(&rq->lock);
++}
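++
++/*
++ * Worked example of the TIMESLICE_GRANULARITY rotation above, with
++ * illustrative numbers: an interactive task with a 100-tick timeslice
++ * and a 20-tick granularity is requeued after 20, 40, 60 and 80 ticks
++ * of CPU time, i.e. whenever the consumed time is an exact multiple of
++ * the granularity and at least one granularity unit of slice remains.
++ */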
++
++/*
++ * This function gets called by the timer code, with HZ frequency.
++ * We call it with interrupts disabled.
++ *
++ * It also gets called by the fork code, when changing the parent's
++ * timeslices.
++ */
++void scheduler_tick(void)
++{
++ unsigned long long now = sched_clock();
++ struct task_struct *p = current;
++ int cpu = smp_processor_id();
++ int idle_at_tick = idle_cpu(cpu);
++ struct rq *rq = cpu_rq(cpu);
++
++ update_cpu_clock(p, rq, now);
++ vxm_sync(now, cpu);
++
++ if (idle_at_tick)
++ vx_idle_resched(rq);
++ else
++ task_running_tick(rq, p, cpu);
++#ifdef CONFIG_SMP
++ update_load(rq);
++ rq->idle_at_tick = idle_at_tick;
++ trigger_load_balance(cpu);
++#endif
++}
++
++#if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
++
++void fastcall add_preempt_count(int val)
++{
++ /*
++ * Underflow?
++ */
++ if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
++ return;
++ preempt_count() += val;
++ /*
++ * Spinlock count overflowing soon?
++ */
++ DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
++ PREEMPT_MASK - 10);
++}
++EXPORT_SYMBOL(add_preempt_count);
++
++void fastcall sub_preempt_count(int val)
++{
++ /*
++ * Underflow?
++ */
++ if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
++ return;
++ /*
++ * Is the spinlock portion underflowing?
++ */
++ if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
++ !(preempt_count() & PREEMPT_MASK)))
++ return;
++
++ preempt_count() -= val;
++}
++EXPORT_SYMBOL(sub_preempt_count);
++
++#endif
++
++static inline int interactive_sleep(enum sleep_type sleep_type)
++{
++ return (sleep_type == SLEEP_INTERACTIVE ||
++ sleep_type == SLEEP_INTERRUPTED);
++}
++
++/*
++ * schedule() is the main scheduler function.
++ */
++asmlinkage void __sched schedule(void)
++{
++ struct task_struct *prev, *next;
++ struct prio_array *array;
++ struct list_head *queue;
++ unsigned long long now;
++ unsigned long run_time;
++ int cpu, idx, new_prio;
++ long *switch_count;
++ struct rq *rq;
++
++ /*
++ * Test if we are atomic. Since do_exit() needs to call into
++ * schedule() atomically, we ignore that path for now.
++ * Otherwise, whine if we are scheduling when we should not be.
++ */
++ if (unlikely(in_atomic() && !current->exit_state)) {
++ printk(KERN_ERR "BUG: scheduling while atomic: "
++ "%s/0x%08x/%d\n",
++ current->comm, preempt_count(), current->pid);
++ debug_show_held_locks(current);
++ if (irqs_disabled())
++ print_irqtrace_events(current);
++ dump_stack();
++ }
++ profile_hit(SCHED_PROFILING, __builtin_return_address(0));
++
++need_resched:
++ preempt_disable();
++ prev = current;
++ release_kernel_lock(prev);
++need_resched_nonpreemptible:
++ rq = this_rq();
++
++ /*
++ * The idle thread is not allowed to schedule!
++ * Remove this check after it has been exercised a bit.
++ */
++ if (unlikely(prev == rq->idle) && prev->state != TASK_RUNNING) {
++ printk(KERN_ERR "bad: scheduling from the idle thread!\n");
++ dump_stack();
++ }
++
++ schedstat_inc(rq, sched_cnt);
++ now = sched_clock();
++ if (likely((long long)(now - prev->timestamp) < NS_MAX_SLEEP_AVG)) {
++ run_time = now - prev->timestamp;
++ if (unlikely((long long)(now - prev->timestamp) < 0))
++ run_time = 0;
++ } else
++ run_time = NS_MAX_SLEEP_AVG;
++
++ /*
++ * Tasks are charged proportionately less run_time at high sleep_avg
++ * to delay them losing their interactive status
++ */
++ run_time /= (CURRENT_BONUS(prev) ? : 1);
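++ /*
++ * E.g. a maximally interactive task (CURRENT_BONUS == MAX_BONUS,
++ * which works out to 10 with the stock interactivity constants)
++ * is charged only a tenth of its run_time here; the ?: guards the
++ * division when the bonus is zero.
++ */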
++
++ spin_lock_irq(&rq->lock);
++
++ switch_count = &prev->nivcsw;
++ if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
++ switch_count = &prev->nvcsw;
++ if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
++ unlikely(signal_pending(prev))))
++ prev->state = TASK_RUNNING;
++ else {
++ if (prev->state == TASK_UNINTERRUPTIBLE) {
++ rq->nr_uninterruptible++;
++ vx_uninterruptible_inc(prev);
++ }
++ deactivate_task(prev, rq);
++ }
++ }
++
++ cpu = smp_processor_id();
++ vx_set_rq_time(rq, jiffies);
++try_unhold:
++ vx_try_unhold(rq, cpu);
++pick_next:
++
++ if (unlikely(!rq->nr_running)) {
++ /* can we skip idle time? */
++ if (vx_try_skip(rq, cpu))
++ goto try_unhold;
++
++ idle_balance(cpu, rq);
++ if (!rq->nr_running) {
++ next = rq->idle;
++ rq->expired_timestamp = 0;
++ goto switch_tasks;
++ }
++ }
++
++ array = rq->active;
++ if (unlikely(!array->nr_active)) {
++ /*
++ * Switch the active and expired arrays.
++ */
++ schedstat_inc(rq, sched_switch);
++ rq->active = rq->expired;
++ rq->expired = array;
++ array = rq->active;
++ rq->expired_timestamp = 0;
++ rq->best_expired_prio = MAX_PRIO;
++ }
++
++ idx = sched_find_first_bit(array->bitmap);
++ queue = array->queue + idx;
++ next = list_entry(queue->next, struct task_struct, run_list);
++
++ /* check before we schedule this context */
++ if (!vx_schedule(next, rq, cpu))
++ goto pick_next;
++
++ if (!rt_task(next) && interactive_sleep(next->sleep_type)) {
++ unsigned long long delta = now - next->timestamp;
++ if (unlikely((long long)(now - next->timestamp) < 0))
++ delta = 0;
++
++ if (next->sleep_type == SLEEP_INTERACTIVE)
++ delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;
++
++ array = next->array;
++ new_prio = recalc_task_prio(next, next->timestamp + delta);
++
++ if (unlikely(next->prio != new_prio)) {
++ dequeue_task(next, array);
++ next->prio = new_prio;
++ enqueue_task(next, array);
++ }
++ }
++ next->sleep_type = SLEEP_NORMAL;
++switch_tasks:
++ if (next == rq->idle)
++ schedstat_inc(rq, sched_goidle);
++ prefetch(next);
++ prefetch_stack(next);
++ clear_tsk_need_resched(prev);
++ rcu_qsctr_inc(task_cpu(prev));
++
++ update_cpu_clock(prev, rq, now);
++
++ prev->sleep_avg -= run_time;
++ if ((long)prev->sleep_avg <= 0)
++ prev->sleep_avg = 0;
++ prev->timestamp = prev->last_ran = now;
++
++ sched_info_switch(prev, next);
++ if (likely(prev != next)) {
++ next->timestamp = next->last_ran = now;
++ rq->nr_switches++;
++ rq->curr = next;
++ ++*switch_count;
++
++ prepare_task_switch(rq, next);
++ prev = context_switch(rq, prev, next);
++ barrier();
++ /*
++ * this_rq must be evaluated again because prev may have moved
++ * CPUs since it called schedule(), thus the 'rq' on its stack
++ * frame will be invalid.
++ */
++ finish_task_switch(this_rq(), prev);
++ } else
++ spin_unlock_irq(&rq->lock);
++
++ prev = current;
++ if (unlikely(reacquire_kernel_lock(prev) < 0))
++ goto need_resched_nonpreemptible;
++ preempt_enable_no_resched();
++ if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
++ goto need_resched;
++}
++EXPORT_SYMBOL(schedule);
++
++#ifdef CONFIG_PREEMPT
++/*
++ * This is the entry point to schedule() from in-kernel preemption
++ * off of preempt_enable. Kernel preemption off of return from
++ * interrupt is handled by preempt_schedule_irq() below, which calls
++ * schedule() directly.
++ */
++asmlinkage void __sched preempt_schedule(void)
++{
++ struct thread_info *ti = current_thread_info();
++#ifdef CONFIG_PREEMPT_BKL
++ struct task_struct *task = current;
++ int saved_lock_depth;
++#endif
++ /*
++ * If there is a non-zero preempt_count or interrupts are disabled,
++ * we do not want to preempt the current task. Just return..
++ */
++ if (likely(ti->preempt_count || irqs_disabled()))
++ return;
++
++need_resched:
++ add_preempt_count(PREEMPT_ACTIVE);
++ /*
++ * We keep the big kernel semaphore locked, but we
++ * clear ->lock_depth so that schedule() doesn't
++ * auto-release the semaphore:
++ */
++#ifdef CONFIG_PREEMPT_BKL
++ saved_lock_depth = task->lock_depth;
++ task->lock_depth = -1;
++#endif
++ schedule();
++#ifdef CONFIG_PREEMPT_BKL
++ task->lock_depth = saved_lock_depth;
++#endif
++ sub_preempt_count(PREEMPT_ACTIVE);
++
++ /* we could miss a preemption opportunity between schedule and now */
++ barrier();
++ if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
++ goto need_resched;
++}
++EXPORT_SYMBOL(preempt_schedule);
++
++/*
++ * This is the entry point to schedule() from kernel preemption
++ * off of irq context.
++ * Note that this is called and returns with irqs disabled. This
++ * protects us against recursive calling from irq context.
++ */
++asmlinkage void __sched preempt_schedule_irq(void)
++{
++ struct thread_info *ti = current_thread_info();
++#ifdef CONFIG_PREEMPT_BKL
++ struct task_struct *task = current;
++ int saved_lock_depth;
++#endif
++ /* Catch callers which need to be fixed */
++ BUG_ON(ti->preempt_count || !irqs_disabled());
++
++need_resched:
++ add_preempt_count(PREEMPT_ACTIVE);
++ /*
++ * We keep the big kernel semaphore locked, but we
++ * clear ->lock_depth so that schedule() doesn't
++ * auto-release the semaphore:
++ */
++#ifdef CONFIG_PREEMPT_BKL
++ saved_lock_depth = task->lock_depth;
++ task->lock_depth = -1;
++#endif
++ local_irq_enable();
++ schedule();
++ local_irq_disable();
++#ifdef CONFIG_PREEMPT_BKL
++ task->lock_depth = saved_lock_depth;
++#endif
++ sub_preempt_count(PREEMPT_ACTIVE);
++
++ /* we could miss a preemption opportunity between schedule and now */
++ barrier();
++ if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
++ goto need_resched;
++}
++
++#endif /* CONFIG_PREEMPT */
++
++int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
++ void *key)
++{
++ return try_to_wake_up(curr->private, mode, sync);
++}
++EXPORT_SYMBOL(default_wake_function);
++
++/*
++ * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
++ * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
++ * number) then we wake all the non-exclusive tasks and one exclusive task.
++ *
++ * There are circumstances in which we can try to wake a task which has already
++ * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
++ * zero in this (rare) case, and we handle it by continuing to scan the queue.
++ */
++static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
++ int nr_exclusive, int sync, void *key)
++{
++ struct list_head *tmp, *next;
++
++ list_for_each_safe(tmp, next, &q->task_list) {
++ wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);
++ unsigned flags = curr->flags;
++
++ if (curr->func(curr, mode, sync, key) &&
++ (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
++ break;
++ }
++}
++
++/**
++ * __wake_up - wake up threads blocked on a waitqueue.
++ * @q: the waitqueue
++ * @mode: which threads
++ * @nr_exclusive: how many wake-one or wake-many threads to wake up
++ * @key: is directly passed to the wakeup function
++ */
++void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode,
++ int nr_exclusive, void *key)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&q->lock, flags);
++ __wake_up_common(q, mode, nr_exclusive, 0, key);
++ spin_unlock_irqrestore(&q->lock, flags);
++}
++EXPORT_SYMBOL(__wake_up);
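++
++/*
++ * Typical use is via the wake_up*() macros from <linux/wait.h>, e.g.
++ * wake_up(&q) is essentially
++ * __wake_up(&q, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, NULL):
++ * wake all non-exclusive waiters plus one exclusive waiter.
++ */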
++
++/*
++ * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
++ */
++void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
++{
++ __wake_up_common(q, mode, 1, 0, NULL);
++}
++
++/**
++ * __wake_up_sync - wake up threads blocked on a waitqueue.
++ * @q: the waitqueue
++ * @mode: which threads
++ * @nr_exclusive: how many wake-one or wake-many threads to wake up
++ *
++ * The sync wakeup differs in that the waker knows that it will schedule
++ * away soon, so while the target thread will be woken up, it will not
++ * be migrated to another CPU - ie. the two threads are 'synchronized'
++ * with each other. This can prevent needless bouncing between CPUs.
++ *
++ * On UP it can prevent extra preemption.
++ */
++void fastcall
++__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
++{
++ unsigned long flags;
++ int sync = 1;
++
++ if (unlikely(!q))
++ return;
++
++ if (unlikely(!nr_exclusive))
++ sync = 0;
++
++ spin_lock_irqsave(&q->lock, flags);
++ __wake_up_common(q, mode, nr_exclusive, sync, NULL);
++ spin_unlock_irqrestore(&q->lock, flags);
++}
++EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
++
++void fastcall complete(struct completion *x)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&x->wait.lock, flags);
++ x->done++;
++ __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
++ 1, 0, NULL);
++ spin_unlock_irqrestore(&x->wait.lock, flags);
++}
++EXPORT_SYMBOL(complete);
++
++void fastcall complete_all(struct completion *x)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&x->wait.lock, flags);
++ x->done += UINT_MAX/2;
++ __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
++ 0, 0, NULL);
++ spin_unlock_irqrestore(&x->wait.lock, flags);
++}
++EXPORT_SYMBOL(complete_all);
++
++void fastcall __sched wait_for_completion(struct completion *x)
++{
++ might_sleep();
++
++ spin_lock_irq(&x->wait.lock);
++ if (!x->done) {
++ DECLARE_WAITQUEUE(wait, current);
++
++ wait.flags |= WQ_FLAG_EXCLUSIVE;
++ __add_wait_queue_tail(&x->wait, &wait);
++ do {
++ __set_current_state(TASK_UNINTERRUPTIBLE);
++ spin_unlock_irq(&x->wait.lock);
++ schedule();
++ spin_lock_irq(&x->wait.lock);
++ } while (!x->done);
++ __remove_wait_queue(&x->wait, &wait);
++ }
++ x->done--;
++ spin_unlock_irq(&x->wait.lock);
++}
++EXPORT_SYMBOL(wait_for_completion);
++
++unsigned long fastcall __sched
++wait_for_completion_timeout(struct completion *x, unsigned long timeout)
++{
++ might_sleep();
++
++ spin_lock_irq(&x->wait.lock);
++ if (!x->done) {
++ DECLARE_WAITQUEUE(wait, current);
++
++ wait.flags |= WQ_FLAG_EXCLUSIVE;
++ __add_wait_queue_tail(&x->wait, &wait);
++ do {
++ __set_current_state(TASK_UNINTERRUPTIBLE);
++ spin_unlock_irq(&x->wait.lock);
++ timeout = schedule_timeout(timeout);
++ spin_lock_irq(&x->wait.lock);
++ if (!timeout) {
++ __remove_wait_queue(&x->wait, &wait);
++ goto out;
++ }
++ } while (!x->done);
++ __remove_wait_queue(&x->wait, &wait);
++ }
++ x->done--;
++out:
++ spin_unlock_irq(&x->wait.lock);
++ return timeout;
++}
++EXPORT_SYMBOL(wait_for_completion_timeout);
++
++int fastcall __sched wait_for_completion_interruptible(struct completion *x)
++{
++ int ret = 0;
++
++ might_sleep();
++
++ spin_lock_irq(&x->wait.lock);
++ if (!x->done) {
++ DECLARE_WAITQUEUE(wait, current);
++
++ wait.flags |= WQ_FLAG_EXCLUSIVE;
++ __add_wait_queue_tail(&x->wait, &wait);
++ do {
++ if (signal_pending(current)) {
++ ret = -ERESTARTSYS;
++ __remove_wait_queue(&x->wait, &wait);
++ goto out;
++ }
++ __set_current_state(TASK_INTERRUPTIBLE);
++ spin_unlock_irq(&x->wait.lock);
++ schedule();
++ spin_lock_irq(&x->wait.lock);
++ } while (!x->done);
++ __remove_wait_queue(&x->wait, &wait);
++ }
++ x->done--;
++out:
++ spin_unlock_irq(&x->wait.lock);
++
++ return ret;
++}
++EXPORT_SYMBOL(wait_for_completion_interruptible);
++
++unsigned long fastcall __sched
++wait_for_completion_interruptible_timeout(struct completion *x,
++ unsigned long timeout)
++{
++ might_sleep();
++
++ spin_lock_irq(&x->wait.lock);
++ if (!x->done) {
++ DECLARE_WAITQUEUE(wait, current);
++
++ wait.flags |= WQ_FLAG_EXCLUSIVE;
++ __add_wait_queue_tail(&x->wait, &wait);
++ do {
++ if (signal_pending(current)) {
++ timeout = -ERESTARTSYS;
++ __remove_wait_queue(&x->wait, &wait);
++ goto out;
++ }
++ __set_current_state(TASK_INTERRUPTIBLE);
++ spin_unlock_irq(&x->wait.lock);
++ timeout = schedule_timeout(timeout);
++ spin_lock_irq(&x->wait.lock);
++ if (!timeout) {
++ __remove_wait_queue(&x->wait, &wait);
++ goto out;
++ }
++ } while (!x->done);
++ __remove_wait_queue(&x->wait, &wait);
++ }
++ x->done--;
++out:
++ spin_unlock_irq(&x->wait.lock);
++ return timeout;
++}
++EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
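++
++/*
++ * Sketch of the usual pattern served by the helpers above (the
++ * completion name is illustrative only):
++ *
++ *	DECLARE_COMPLETION(setup_done);
++ *	...
++ *	wait_for_completion(&setup_done);
++ *
++ * while the thread doing the work calls complete(&setup_done), which
++ * bumps x->done and wakes one exclusive waiter.
++ */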
++
++
++#define SLEEP_ON_VAR \
++ unsigned long flags; \
++ wait_queue_t wait; \
++ init_waitqueue_entry(&wait, current);
++
++#define SLEEP_ON_HEAD \
++ spin_lock_irqsave(&q->lock, flags); \
++ __add_wait_queue(q, &wait); \
++ spin_unlock(&q->lock);
++
++#define SLEEP_ON_TAIL \
++ spin_lock_irq(&q->lock); \
++ __remove_wait_queue(q, &wait); \
++ spin_unlock_irqrestore(&q->lock, flags);
++
++void fastcall __sched interruptible_sleep_on(wait_queue_head_t *q)
++{
++ SLEEP_ON_VAR
++
++ current->state = TASK_INTERRUPTIBLE;
++
++ SLEEP_ON_HEAD
++ schedule();
++ SLEEP_ON_TAIL
++}
++EXPORT_SYMBOL(interruptible_sleep_on);
++
++long fastcall __sched
++interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
++{
++ SLEEP_ON_VAR
++
++ current->state = TASK_INTERRUPTIBLE;
++
++ SLEEP_ON_HEAD
++ timeout = schedule_timeout(timeout);
++ SLEEP_ON_TAIL
++
++ return timeout;
++}
++EXPORT_SYMBOL(interruptible_sleep_on_timeout);
++
++void fastcall __sched sleep_on(wait_queue_head_t *q)
++{
++ SLEEP_ON_VAR
++
++ current->state = TASK_UNINTERRUPTIBLE;
++
++ SLEEP_ON_HEAD
++ schedule();
++ SLEEP_ON_TAIL
++}
++EXPORT_SYMBOL(sleep_on);
++
++long fastcall __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
++{
++ SLEEP_ON_VAR
++
++ current->state = TASK_UNINTERRUPTIBLE;
++
++ SLEEP_ON_HEAD
++ timeout = schedule_timeout(timeout);
++ SLEEP_ON_TAIL
++
++ return timeout;
++}
++
++EXPORT_SYMBOL(sleep_on_timeout);
++
++#ifdef CONFIG_RT_MUTEXES
++
++/*
++ * rt_mutex_setprio - set the current priority of a task
++ * @p: task
++ * @prio: prio value (kernel-internal form)
++ *
++ * This function changes the 'effective' priority of a task. It does
++ * not touch ->normal_prio like __setscheduler().
++ *
++ * Used by the rt_mutex code to implement priority inheritance logic.
++ */
++void rt_mutex_setprio(struct task_struct *p, int prio)
++{
++ struct prio_array *array;
++ unsigned long flags;
++ struct rq *rq;
++ int oldprio;
++
++ BUG_ON(prio < 0 || prio > MAX_PRIO);
++
++ rq = task_rq_lock(p, &flags);
++
++ oldprio = p->prio;
++ array = p->array;
++ if (array)
++ dequeue_task(p, array);
++ p->prio = prio;
++
++ if (array) {
++ /*
++ * If changing to an RT priority then queue it
++ * in the active array!
++ */
++ if (rt_task(p))
++ array = rq->active;
++ enqueue_task(p, array);
++ /*
++ * Reschedule if we are currently running on this runqueue and
++ * our priority decreased, or if we are not currently running on
++ * this runqueue and our priority is higher than the current's
++ */
++ if (task_running(rq, p)) {
++ if (p->prio > oldprio)
++ resched_task(rq->curr);
++ } else if (TASK_PREEMPTS_CURR(p, rq))
++ resched_task(rq->curr);
++ }
++ task_rq_unlock(rq, &flags);
++}
++
++#endif
++
++void set_user_nice(struct task_struct *p, long nice)
++{
++ struct prio_array *array;
++ int old_prio, delta;
++ unsigned long flags;
++ struct rq *rq;
++
++ if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
++ return;
++ /*
++ * We have to be careful: if called from sys_setpriority(),
++ * the task might be in the middle of scheduling on another CPU.
++ */
++ rq = task_rq_lock(p, &flags);
++ /*
++ * The RT priorities are set via sched_setscheduler(), but we still
++ * allow the 'normal' nice value to be set - but as expected
++ * it won't have any effect on scheduling until the task
++ * becomes SCHED_NORMAL/SCHED_BATCH:
++ */
++ if (has_rt_policy(p)) {
++ p->static_prio = NICE_TO_PRIO(nice);
++ goto out_unlock;
++ }
++ array = p->array;
++ if (array) {
++ dequeue_task(p, array);
++ dec_raw_weighted_load(rq, p);
++ }
++
++ p->static_prio = NICE_TO_PRIO(nice);
++ set_load_weight(p);
++ old_prio = p->prio;
++ p->prio = effective_prio(p);
++ delta = p->prio - old_prio;
++
++ if (array) {
++ enqueue_task(p, array);
++ inc_raw_weighted_load(rq, p);
++ /*
++ * If the task increased its priority or is running and
++ * lowered its priority, then reschedule its CPU:
++ */
++ if (delta < 0 || (delta > 0 && task_running(rq, p)))
++ resched_task(rq->curr);
++ }
++out_unlock:
++ task_rq_unlock(rq, &flags);
++}
++EXPORT_SYMBOL(set_user_nice);
++
++/*
++ * can_nice - check if a task can reduce its nice value
++ * @p: task
++ * @nice: nice value
++ */
++int can_nice(const struct task_struct *p, const int nice)
++{
++ /* convert nice value [19,-20] to rlimit style value [1,40] */
++ int nice_rlim = 20 - nice;
++
++ return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
++ capable(CAP_SYS_NICE));
++}
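++
++/*
++ * E.g. lowering nice to 0 requires an RLIMIT_NICE of at least 20;
++ * the extremes map as nice 19 -> 1 and nice -20 -> 40.
++ */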
++
++#ifdef __ARCH_WANT_SYS_NICE
++
++/*
++ * sys_nice - change the priority of the current process.
++ * @increment: priority increment
++ *
++ * sys_setpriority is a more generic, but much slower function that
++ * does similar things.
++ */
++asmlinkage long sys_nice(int increment)
++{
++ long nice, retval;
++
++ /*
++ * Setpriority might change our priority at the same moment.
++ * We don't have to worry. Conceptually one call occurs first
++ * and we have a single winner.
++ */
++ if (increment < -40)
++ increment = -40;
++ if (increment > 40)
++ increment = 40;
++
++ nice = PRIO_TO_NICE(current->static_prio) + increment;
++ if (nice < -20)
++ nice = -20;
++ if (nice > 19)
++ nice = 19;
++
++ if (increment < 0 && !can_nice(current, nice))
++ return vx_flags(VXF_IGNEG_NICE, 0) ? 0 : -EPERM;
++
++ retval = security_task_setnice(current, nice);
++ if (retval)
++ return retval;
++
++ set_user_nice(current, nice);
++ return 0;
++}
++
++#endif
++
++/**
++ * task_prio - return the priority value of a given task.
++ * @p: the task in question.
++ *
++ * This is the priority value as seen by users in /proc.
++ * RT tasks are offset by -200. Normal tasks are centered
++ * around 0, value goes from -16 to +15.
++ */
++int task_prio(const struct task_struct *p)
++{
++ return p->prio - MAX_RT_PRIO;
++}
++
++/**
++ * task_nice - return the nice value of a given task.
++ * @p: the task in question.
++ */
++int task_nice(const struct task_struct *p)
++{
++ return TASK_NICE(p);
++}
++EXPORT_SYMBOL_GPL(task_nice);
++
++/**
++ * idle_cpu - is a given cpu idle currently?
++ * @cpu: the processor in question.
++ */
++int idle_cpu(int cpu)
++{
++ return cpu_curr(cpu) == cpu_rq(cpu)->idle;
++}
++
++/**
++ * idle_task - return the idle task for a given cpu.
++ * @cpu: the processor in question.
++ */
++struct task_struct *idle_task(int cpu)
++{
++ return cpu_rq(cpu)->idle;
++}
++
++/**
++ * find_process_by_pid - find a process with a matching PID value.
++ * @pid: the pid in question.
++ */
++static inline struct task_struct *find_process_by_pid(pid_t pid)
++{
++ return pid ? find_task_by_pid(pid) : current;
++}
++
++/* Actually do priority change: must hold rq lock. */
++static void __setscheduler(struct task_struct *p, int policy, int prio)
++{
++ BUG_ON(p->array);
++
++ p->policy = policy;
++ p->rt_priority = prio;
++ p->normal_prio = normal_prio(p);
++ /* we are holding p->pi_lock already */
++ p->prio = rt_mutex_getprio(p);
++ /*
++ * SCHED_BATCH tasks are treated as perpetual CPU hogs:
++ */
++ if (policy == SCHED_BATCH)
++ p->sleep_avg = 0;
++ set_load_weight(p);
++}
++
++/**
++ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * NOTE that the task may be already dead.
++ */
++int sched_setscheduler(struct task_struct *p, int policy,
++ struct sched_param *param)
++{
++ int retval, oldprio, oldpolicy = -1;
++ struct prio_array *array;
++ unsigned long flags;
++ struct rq *rq;
++
++ /* may grab non-irq protected spin_locks */
++ BUG_ON(in_interrupt());
++recheck:
++ /* double check policy once rq lock held */
++ if (policy < 0)
++ policy = oldpolicy = p->policy;
++ else if (policy != SCHED_FIFO && policy != SCHED_RR &&
++ policy != SCHED_NORMAL && policy != SCHED_BATCH)
++ return -EINVAL;
++ /*
++ * Valid priorities for SCHED_FIFO and SCHED_RR are
++ * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL and
++ * SCHED_BATCH is 0.
++ */
++ if (param->sched_priority < 0 ||
++ (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
++ (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
++ return -EINVAL;
++ if (is_rt_policy(policy) != (param->sched_priority != 0))
++ return -EINVAL;
++
++ /*
++ * Allow unprivileged RT tasks to decrease priority:
++ */
++ if (!capable(CAP_SYS_NICE)) {
++ if (is_rt_policy(policy)) {
++ unsigned long rlim_rtprio;
++ unsigned long flags;
++
++ if (!lock_task_sighand(p, &flags))
++ return -ESRCH;
++ rlim_rtprio = p->signal->rlim[RLIMIT_RTPRIO].rlim_cur;
++ unlock_task_sighand(p, &flags);
++
++ /* can't set/change the rt policy */
++ if (policy != p->policy && !rlim_rtprio)
++ return -EPERM;
++
++ /* can't increase priority */
++ if (param->sched_priority > p->rt_priority &&
++ param->sched_priority > rlim_rtprio)
++ return -EPERM;
++ }
++
++ /* can't change other user's priorities */
++ if ((current->euid != p->euid) &&
++ (current->euid != p->uid))
++ return -EPERM;
++ }
++
++ retval = security_task_setscheduler(p, policy, param);
++ if (retval)
++ return retval;
++ /*
++ * make sure no PI-waiters arrive (or leave) while we are
++ * changing the priority of the task:
++ */
++ spin_lock_irqsave(&p->pi_lock, flags);
++ /*
++ * To be able to change p->policy safely, the appropriate
++ * runqueue lock must be held.
++ */
++ rq = __task_rq_lock(p);
++ /* recheck policy now with rq lock held */
++ if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
++ policy = oldpolicy = -1;
++ __task_rq_unlock(rq);
++ spin_unlock_irqrestore(&p->pi_lock, flags);
++ goto recheck;
++ }
++ array = p->array;
++ if (array)
++ deactivate_task(p, rq);
++ oldprio = p->prio;
++ __setscheduler(p, policy, param->sched_priority);
++ if (array) {
++ vx_activate_task(p);
++ __activate_task(p, rq);
++ /*
++ * Reschedule if we are currently running on this runqueue and
++ * our priority decreased, or if we are not currently running on
++ * this runqueue and our priority is higher than the current's
++ */
++ if (task_running(rq, p)) {
++ if (p->prio > oldprio)
++ resched_task(rq->curr);
++ } else if (TASK_PREEMPTS_CURR(p, rq))
++ resched_task(rq->curr);
++ }
++ __task_rq_unlock(rq);
++ spin_unlock_irqrestore(&p->pi_lock, flags);
++
++ rt_mutex_adjust_pi(p);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(sched_setscheduler);
++
++static int
++do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
++{
++ struct sched_param lparam;
++ struct task_struct *p;
++ int retval;
++
++ if (!param || pid < 0)
++ return -EINVAL;
++ if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
++ return -EFAULT;
++
++ rcu_read_lock();
++ retval = -ESRCH;
++ p = find_process_by_pid(pid);
++ if (p != NULL)
++ retval = sched_setscheduler(p, policy, &lparam);
++ rcu_read_unlock();
++
++ return retval;
++}
++
++/**
++ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
++ * @pid: the pid in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ */
++asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
++ struct sched_param __user *param)
++{
++ /* negative values for policy are not valid */
++ if (policy < 0)
++ return -EINVAL;
++
++ return do_sched_setscheduler(pid, policy, param);
++}
++
++/**
++ * sys_sched_setparam - set/change the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the new RT priority.
++ */
++asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
++{
++ return do_sched_setscheduler(pid, -1, param);
++}
++
++/**
++ * sys_sched_getscheduler - get the policy (scheduling class) of a thread
++ * @pid: the pid in question.
++ */
++asmlinkage long sys_sched_getscheduler(pid_t pid)
++{
++ struct task_struct *p;
++ int retval = -EINVAL;
++
++ if (pid < 0)
++ goto out_nounlock;
++
++ retval = -ESRCH;
++ read_lock(&tasklist_lock);
++ p = find_process_by_pid(pid);
++ if (p) {
++ retval = security_task_getscheduler(p);
++ if (!retval)
++ retval = p->policy;
++ }
++ read_unlock(&tasklist_lock);
++
++out_nounlock:
++ return retval;
++}
++
++/**
++ * sys_sched_getparam - get the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the RT priority.
++ */
++asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
++{
++ struct sched_param lp;
++ struct task_struct *p;
++ int retval = -EINVAL;
++
++ if (!param || pid < 0)
++ goto out_nounlock;
++
++ read_lock(&tasklist_lock);
++ p = find_process_by_pid(pid);
++ retval = -ESRCH;
++ if (!p)
++ goto out_unlock;
++
++ retval = security_task_getscheduler(p);
++ if (retval)
++ goto out_unlock;
++
++ lp.sched_priority = p->rt_priority;
++ read_unlock(&tasklist_lock);
++
++ /*
++ * This one might sleep; we cannot do it with a spinlock held ...
++ */
++ retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
++
++out_nounlock:
++ return retval;
++
++out_unlock:
++ read_unlock(&tasklist_lock);
++ return retval;
++}
++
++long sched_setaffinity(pid_t pid, cpumask_t new_mask)
++{
++ cpumask_t cpus_allowed;
++ struct task_struct *p;
++ int retval;
++
++ mutex_lock(&sched_hotcpu_mutex);
++ read_lock(&tasklist_lock);
++
++ p = find_process_by_pid(pid);
++ if (!p) {
++ read_unlock(&tasklist_lock);
++ mutex_unlock(&sched_hotcpu_mutex);
++ return -ESRCH;
++ }
++
++ /*
++ * It is not safe to call set_cpus_allowed with the
++ * tasklist_lock held. We will bump the task_struct's
++ * usage count and then drop tasklist_lock.
++ */
++ get_task_struct(p);
++ read_unlock(&tasklist_lock);
++
++ retval = -EPERM;
++ if ((current->euid != p->euid) && (current->euid != p->uid) &&
++ !capable(CAP_SYS_NICE))
++ goto out_unlock;
++
++ retval = security_task_setscheduler(p, 0, NULL);
++ if (retval)
++ goto out_unlock;
++
++ cpus_allowed = cpuset_cpus_allowed(p);
++ cpus_and(new_mask, new_mask, cpus_allowed);
++ retval = set_cpus_allowed(p, new_mask);
++
++out_unlock:
++ put_task_struct(p);
++ mutex_unlock(&sched_hotcpu_mutex);
++ return retval;
++}
++
++static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
++ cpumask_t *new_mask)
++{
++ if (len < sizeof(cpumask_t)) {
++ memset(new_mask, 0, sizeof(cpumask_t));
++ } else if (len > sizeof(cpumask_t)) {
++ len = sizeof(cpumask_t);
++ }
++ return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
++}
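++
++/*
++ * Note that a user buffer shorter than the kernel cpumask is accepted:
++ * the copied bytes fill the start of the mask and the remainder stays
++ * zero, e.g. a 4-byte mask passed to a kernel built with NR_CPUS > 32.
++ */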
++
++/**
++ * sys_sched_setaffinity - set the cpu affinity of a process
++ * @pid: pid of the process
++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
++ * @user_mask_ptr: user-space pointer to the new cpu mask
++ */
++asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
++ unsigned long __user *user_mask_ptr)
++{
++ cpumask_t new_mask;
++ int retval;
++
++ retval = get_user_cpu_mask(user_mask_ptr, len, &new_mask);
++ if (retval)
++ return retval;
++
++ return sched_setaffinity(pid, new_mask);
++}
++
++/*
++ * Represents all cpus present in the system.
++ * In systems capable of hotplug, this map could dynamically grow
++ * as new cpus are detected in the system via any platform-specific
++ * method, such as ACPI.
++ */
++
++cpumask_t cpu_present_map __read_mostly;
++EXPORT_SYMBOL(cpu_present_map);
++
++#ifndef CONFIG_SMP
++cpumask_t cpu_online_map __read_mostly = CPU_MASK_ALL;
++EXPORT_SYMBOL(cpu_online_map);
++
++cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
++EXPORT_SYMBOL(cpu_possible_map);
++#endif
++
++long sched_getaffinity(pid_t pid, cpumask_t *mask)
++{
++ struct task_struct *p;
++ int retval;
++
++ mutex_lock(&sched_hotcpu_mutex);
++ read_lock(&tasklist_lock);
++
++ retval = -ESRCH;
++ p = find_process_by_pid(pid);
++ if (!p)
++ goto out_unlock;
++
++ retval = security_task_getscheduler(p);
++ if (retval)
++ goto out_unlock;
++
++ cpus_and(*mask, p->cpus_allowed, cpu_online_map);
++
++out_unlock:
++ read_unlock(&tasklist_lock);
++ mutex_unlock(&sched_hotcpu_mutex);
++ if (retval)
++ return retval;
++
++ return 0;
++}
++
++/**
++ * sys_sched_getaffinity - get the cpu affinity of a process
++ * @pid: pid of the process
++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
++ * @user_mask_ptr: user-space pointer to hold the current cpu mask
++ */
++asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
++ unsigned long __user *user_mask_ptr)
++{
++ int ret;
++ cpumask_t mask;
++
++ if (len < sizeof(cpumask_t))
++ return -EINVAL;
++
++ ret = sched_getaffinity(pid, &mask);
++ if (ret < 0)
++ return ret;
++
++ if (copy_to_user(user_mask_ptr, &mask, sizeof(cpumask_t)))
++ return -EFAULT;
++
++ return sizeof(cpumask_t);
++}
++
++/**
++ * sys_sched_yield - yield the current processor to other threads.
++ *
++ * This function yields the current CPU by moving the calling thread
++ * to the expired array. If there are no other threads running on this
++ * CPU then this function will return.
++ */
++asmlinkage long sys_sched_yield(void)
++{
++ struct rq *rq = this_rq_lock();
++ struct prio_array *array = current->array, *target = rq->expired;
++
++ schedstat_inc(rq, yld_cnt);
++ /*
++ * We implement yielding by moving the task into the expired
++ * queue.
++ *
++ * (special rule: RT tasks will just round-robin in the active
++ * array.)
++ */
++ if (rt_task(current))
++ target = rq->active;
++
++ if (array->nr_active == 1) {
++ schedstat_inc(rq, yld_act_empty);
++ if (!rq->expired->nr_active)
++ schedstat_inc(rq, yld_both_empty);
++ } else if (!rq->expired->nr_active)
++ schedstat_inc(rq, yld_exp_empty);
++
++ if (array != target) {
++ dequeue_task(current, array);
++ enqueue_task(current, target);
++ } else
++ /*
++ * requeue_task is cheaper so perform that if possible.
++ */
++ requeue_task(current, array);
++
++ /*
++ * Since we are going to call schedule() anyway, there's
++ * no need to preempt or enable interrupts:
++ */
++ __release(rq->lock);
++ spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
++ _raw_spin_unlock(&rq->lock);
++ preempt_enable_no_resched();
++
++ schedule();
++
++ return 0;
++}
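++
++/*
++ * Example: the common userspace pattern is a cooperative busy-wait that
++ * yields between polls (the flag polled on is illustrative):
++ *
++ * while (!done)
++ * sched_yield();
++ */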
++
++static void __cond_resched(void)
++{
++#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
++ __might_sleep(__FILE__, __LINE__);
++#endif
++ /*
++ * The BKS might be reacquired before we have dropped
++ * PREEMPT_ACTIVE, which could trigger a second
++ * cond_resched() call.
++ */
++ do {
++ add_preempt_count(PREEMPT_ACTIVE);
++ schedule();
++ sub_preempt_count(PREEMPT_ACTIVE);
++ } while (need_resched());
++}
++
++int __sched cond_resched(void)
++{
++ if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) &&
++ system_state == SYSTEM_RUNNING) {
++ __cond_resched();
++ return 1;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(cond_resched);
++
++/*
++ * cond_resched_lock() - if a reschedule is pending, drop the given lock,
++ * call schedule, and on return reacquire the lock.
++ *
++ * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
++ * operations here to prevent schedule() from being called twice (once via
++ * spin_unlock(), once by hand).
++ */
++int cond_resched_lock(spinlock_t *lock)
++{
++ int ret = 0;
++
++ if (need_lockbreak(lock)) {
++ spin_unlock(lock);
++ cpu_relax();
++ ret = 1;
++ spin_lock(lock);
++ }
++ if (need_resched() && system_state == SYSTEM_RUNNING) {
++ spin_release(&lock->dep_map, 1, _THIS_IP_);
++ _raw_spin_unlock(lock);
++ preempt_enable_no_resched();
++ __cond_resched();
++ ret = 1;
++ spin_lock(lock);
++ }
++ return ret;
++}
++EXPORT_SYMBOL(cond_resched_lock);
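++
++/*
++ * Example: the typical caller is a long list walk under a spinlock that
++ * offers to reschedule between elements. A sketch with illustrative
++ * lock/list/item names (each iteration re-reads the list head, so it is
++ * safe for the lock to be dropped and retaken by cond_resched_lock()):
++ *
++ * spin_lock(&my_lock);
++ * while (!list_empty(&my_list)) {
++ * struct my_item *item =
++ * list_entry(my_list.next, struct my_item, list);
++ *
++ * list_del(&item->list);
++ * process(item);
++ * cond_resched_lock(&my_lock);
++ * }
++ * spin_unlock(&my_lock);
++ */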
++
++int __sched cond_resched_softirq(void)
++{
++ BUG_ON(!in_softirq());
++
++ if (need_resched() && system_state == SYSTEM_RUNNING) {
++ local_bh_enable();
++ __cond_resched();
++ local_bh_disable();
++ return 1;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(cond_resched_softirq);
++
++/**
++ * yield - yield the current processor to other threads.
++ *
++ * This is a shortcut for kernel-space yielding - it marks the
++ * thread runnable and calls sys_sched_yield().
++ */
++void __sched yield(void)
++{
++ set_current_state(TASK_RUNNING);
++ sys_sched_yield();
++}
++EXPORT_SYMBOL(yield);
++
++/*
++ * This task is about to go to sleep on IO. Increment rq->nr_iowait so
++ * that process accounting knows that this is a task in IO wait state.
++ *
++ * But don't do that if it is a deliberate, throttling IO wait (this task
++ * has set its backing_dev_info: the queue against which it should throttle)
++ */
++void __sched io_schedule(void)
++{
++ struct rq *rq = &__raw_get_cpu_var(runqueues);
++
++ delayacct_blkio_start();
++ atomic_inc(&rq->nr_iowait);
++ schedule();
++ atomic_dec(&rq->nr_iowait);
++ delayacct_blkio_end();
++}
++EXPORT_SYMBOL(io_schedule);
++
++long __sched io_schedule_timeout(long timeout)
++{
++ struct rq *rq = &__raw_get_cpu_var(runqueues);
++ long ret;
++
++ delayacct_blkio_start();
++ atomic_inc(&rq->nr_iowait);
++ ret = schedule_timeout(timeout);
++ atomic_dec(&rq->nr_iowait);
++ delayacct_blkio_end();
++ return ret;
++}
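++
++/*
++ * Example: callers use these from a wait loop so the sleep is accounted
++ * as iowait. A sketch waiting on an illustrative completion flag, with a
++ * 100 msec bound per sleep:
++ *
++ * set_current_state(TASK_UNINTERRUPTIBLE);
++ * while (!my_io_done) {
++ * io_schedule_timeout(msecs_to_jiffies(100));
++ * set_current_state(TASK_UNINTERRUPTIBLE);
++ * }
++ * __set_current_state(TASK_RUNNING);
++ */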
++
++/**
++ * sys_sched_get_priority_max - return maximum RT priority.
++ * @policy: scheduling class.
++ *
++ * this syscall returns the maximum rt_priority that can be used
++ * by a given scheduling class.
++ */
++asmlinkage long sys_sched_get_priority_max(int policy)
++{
++ int ret = -EINVAL;
++
++ switch (policy) {
++ case SCHED_FIFO:
++ case SCHED_RR:
++ ret = MAX_USER_RT_PRIO-1;
++ break;
++ case SCHED_NORMAL:
++ case SCHED_BATCH:
++ ret = 0;
++ break;
++ }
++ return ret;
++}
++
++/**
++ * sys_sched_get_priority_min - return minimum RT priority.
++ * @policy: scheduling class.
++ *
++ * this syscall returns the minimum rt_priority that can be used
++ * by a given scheduling class.
++ */
++asmlinkage long sys_sched_get_priority_min(int policy)
++{
++ int ret = -EINVAL;
++
++ switch (policy) {
++ case SCHED_FIFO:
++ case SCHED_RR:
++ ret = 1;
++ break;
++ case SCHED_NORMAL:
++ case SCHED_BATCH:
++ ret = 0;
++ }
++ return ret;
++}
++
++/**
++ * sys_sched_rr_get_interval - return the default timeslice of a process.
++ * @pid: pid of the process.
++ * @interval: userspace pointer to the timeslice value.
++ *
++ * this syscall writes the default timeslice value of a given process
++ * into the user-space timespec buffer. A value of '0' means infinity.
++ */
++asmlinkage
++long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
++{
++ struct task_struct *p;
++ int retval = -EINVAL;
++ struct timespec t;
++
++ if (pid < 0)
++ goto out_nounlock;
++
++ retval = -ESRCH;
++ read_lock(&tasklist_lock);
++ p = find_process_by_pid(pid);
++ if (!p)
++ goto out_unlock;
++
++ retval = security_task_getscheduler(p);
++ if (retval)
++ goto out_unlock;
++
++ jiffies_to_timespec(p->policy == SCHED_FIFO ?
++ 0 : task_timeslice(p), &t);
++ read_unlock(&tasklist_lock);
++ retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
++out_nounlock:
++ return retval;
++out_unlock:
++ read_unlock(&tasklist_lock);
++ return retval;
++}
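++
++/*
++ * Example: querying the timeslice from userspace (pid 0 again meaning
++ * the calling task):
++ *
++ * struct timespec ts;
++ *
++ * if (sched_rr_get_interval(0, &ts) == 0)
++ * printf("timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
++ */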
++
++static const char stat_nam[] = "RSDTtZX";
++
++static void show_task(struct task_struct *p)
++{
++ unsigned long free = 0;
++ unsigned state;
++
++ state = p->state ? __ffs(p->state) + 1 : 0;
++ printk("%-13.13s %c", p->comm,
++ state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
++#if (BITS_PER_LONG == 32)
++ if (state == TASK_RUNNING)
++ printk(" running ");
++ else
++ printk(" %08lX ", thread_saved_pc(p));
++#else
++ if (state == TASK_RUNNING)
++ printk(" running task ");
++ else
++ printk(" %016lx ", thread_saved_pc(p));
++#endif
++#ifdef CONFIG_DEBUG_STACK_USAGE
++ {
++ unsigned long *n = end_of_stack(p);
++ while (!*n)
++ n++;
++ free = (unsigned long)n - (unsigned long)end_of_stack(p);
++ }
++#endif
++ printk("%5lu %5d %6d", free, p->pid, p->parent->pid);
++ if (!p->mm)
++ printk(" (L-TLB)\n");
++ else
++ printk(" (NOTLB)\n");
++
++ if (state != TASK_RUNNING)
++ show_stack(p, NULL);
++}
++
++void show_state_filter(unsigned long state_filter)
++{
++ struct task_struct *g, *p;
++
++#if (BITS_PER_LONG == 32)
++ printk("\n"
++ "                         free                        sibling\n");
++ printk("  task             PC    stack   pid father child younger older\n");
++#else
++ printk("\n"
++ "                                 free                        sibling\n");
++ printk("  task                 PC        stack   pid father child younger older\n");
++#endif
++ read_lock(&tasklist_lock);
++ do_each_thread(g, p) {
++ /*
++ * reset the NMI-timeout, since listing all tasks on a slow
++ * console might take a lot of time:
++ */
++ touch_nmi_watchdog();
++ if (!state_filter || (p->state & state_filter))
++ show_task(p);
++ } while_each_thread(g, p);
++
++ touch_all_softlockup_watchdogs();
++
++ read_unlock(&tasklist_lock);
++ /*
++ * Only show locks if all tasks are dumped:
++ */
++ if (state_filter == -1)
++ debug_show_all_locks();
++}
++
++/**
++ * init_idle - set up an idle thread for a given CPU
++ * @idle: task in question
++ * @cpu: cpu the idle task belongs to
++ *
++ * NOTE: this function does not set the idle thread's NEED_RESCHED
++ * flag, to make booting more robust.
++ */
++void __cpuinit init_idle(struct task_struct *idle, int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long flags;
++
++ idle->timestamp = sched_clock();
++ idle->sleep_avg = 0;
++ idle->array = NULL;
++ idle->prio = idle->normal_prio = MAX_PRIO;
++ idle->state = TASK_RUNNING;
++ idle->cpus_allowed = cpumask_of_cpu(cpu);
++ set_task_cpu(idle, cpu);
++
++ spin_lock_irqsave(&rq->lock, flags);
++ rq->curr = rq->idle = idle;
++#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
++ idle->oncpu = 1;
++#endif
++ spin_unlock_irqrestore(&rq->lock, flags);
++
++ /* Set the preempt count _outside_ the spinlocks! */
++#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
++ task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
++#else
++ task_thread_info(idle)->preempt_count = 0;
++#endif
++}
++
++/*
++ * In a system that switches off the HZ timer nohz_cpu_mask
++ * indicates which cpus entered this state. This is used
++ * in the rcu update to wait only for active cpus. For system
++ * which do not switch off the HZ timer nohz_cpu_mask should
++ * always be CPU_MASK_NONE.
++ */
++cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
++
++#ifdef CONFIG_SMP
++/*
++ * This is how migration works:
++ *
++ * 1) we queue a struct migration_req structure in the source CPU's
++ * runqueue and wake up that CPU's migration thread.
++ * 2) we wait on the request's completion => thread blocks.
++ * 3) migration thread wakes up (implicitly it forces the migrated
++ * thread off the CPU)
++ * 4) it gets the migration request and checks whether the migrated
++ * task is still in the wrong runqueue.
++ * 5) if it's in the wrong runqueue then the migration thread removes
++ * it and puts it into the right queue.
++ * 6) the migration thread completes the request.
++ * 7) we wake up and the migration is done.
++ */
++
++/*
++ * Change a given task's CPU affinity. Migrate the thread to a
++ * proper CPU and schedule it away if the CPU it's executing on
++ * is removed from the allowed bitmask.
++ *
++ * NOTE: the caller must have a valid reference to the task, the
++ * task must not exit() & deallocate itself prematurely. The
++ * call is not atomic; no spinlocks may be held.
++ */
++int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
++{
++ struct migration_req req;
++ unsigned long flags;
++ struct rq *rq;
++ int ret = 0;
++
++ rq = task_rq_lock(p, &flags);
++ if (!cpus_intersects(new_mask, cpu_online_map)) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ p->cpus_allowed = new_mask;
++ /* Can the task run on the task's current CPU? If so, we're done */
++ if (cpu_isset(task_cpu(p), new_mask))
++ goto out;
++
++ if (migrate_task(p, any_online_cpu(new_mask), &req)) {
++ /* Need help from migration thread: drop lock and wait. */
++ task_rq_unlock(rq, &flags);
++ wake_up_process(rq->migration_thread);
++ wait_for_completion(&req.done);
++ tlb_migrate_finish(p->mm);
++ return 0;
++ }
++out:
++ task_rq_unlock(rq, &flags);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(set_cpus_allowed);
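++
++/*
++ * Example: the save/set/restore pattern used by in-kernel callers (and
++ * by measure_one() below) to run a stretch of code on one specific CPU:
++ *
++ * cpumask_t saved_mask = current->cpus_allowed;
++ *
++ * set_cpus_allowed(current, cpumask_of_cpu(cpu));
++ * ... do the per-CPU work ...
++ * set_cpus_allowed(current, saved_mask);
++ */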
++
++/*
++ * Move (not current) task off this cpu, onto dest cpu. We're doing
++ * this because either it can't run here any more (set_cpus_allowed()
++ * away from this CPU, or CPU going down), or because we're
++ * attempting to rebalance this task on exec (sched_exec).
++ *
++ * So we race with normal scheduler movements, but that's OK, as long
++ * as the task is no longer on this CPU.
++ *
++ * Returns non-zero if task was successfully migrated.
++ */
++static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
++{
++ struct rq *rq_dest, *rq_src;
++ int ret = 0;
++
++ if (unlikely(cpu_is_offline(dest_cpu)))
++ return ret;
++
++ rq_src = cpu_rq(src_cpu);
++ rq_dest = cpu_rq(dest_cpu);
++
++ double_rq_lock(rq_src, rq_dest);
++ /* Already moved. */
++ if (task_cpu(p) != src_cpu)
++ goto out;
++ /* Affinity changed (again). */
++ if (!cpu_isset(dest_cpu, p->cpus_allowed))
++ goto out;
++
++ set_task_cpu(p, dest_cpu);
++ if (p->array) {
++ /*
++ * Sync timestamp with rq_dest's before activating.
++ * The same thing could be achieved by doing this step
++ * afterwards, and pretending it was a local activate.
++ * This way is cleaner and logically correct.
++ */
++ p->timestamp = p->timestamp - rq_src->most_recent_timestamp
++ + rq_dest->most_recent_timestamp;
++ deactivate_task(p, rq_src);
++ vx_activate_task(p);
++ __activate_task(p, rq_dest);
++ if (TASK_PREEMPTS_CURR(p, rq_dest))
++ resched_task(rq_dest->curr);
++ }
++ ret = 1;
++out:
++ double_rq_unlock(rq_src, rq_dest);
++ return ret;
++}
++
++/*
++ * migration_thread - this is a highprio system thread that performs
++ * thread migration by bumping thread off CPU then 'pushing' onto
++ * another runqueue.
++ */
++static int migration_thread(void *data)
++{
++ int cpu = (long)data;
++ struct rq *rq;
++
++ rq = cpu_rq(cpu);
++ BUG_ON(rq->migration_thread != current);
++
++ set_current_state(TASK_INTERRUPTIBLE);
++ while (!kthread_should_stop()) {
++ struct migration_req *req;
++ struct list_head *head;
++
++ try_to_freeze();
++
++ spin_lock_irq(&rq->lock);
++
++ if (cpu_is_offline(cpu)) {
++ spin_unlock_irq(&rq->lock);
++ goto wait_to_die;
++ }
++
++ if (rq->active_balance) {
++ active_load_balance(rq, cpu);
++ rq->active_balance = 0;
++ }
++
++ head = &rq->migration_queue;
++
++ if (list_empty(head)) {
++ spin_unlock_irq(&rq->lock);
++ schedule();
++ set_current_state(TASK_INTERRUPTIBLE);
++ continue;
++ }
++ req = list_entry(head->next, struct migration_req, list);
++ list_del_init(head->next);
++
++ spin_unlock(&rq->lock);
++ __migrate_task(req->task, cpu, req->dest_cpu);
++ local_irq_enable();
++
++ complete(&req->done);
++ }
++ __set_current_state(TASK_RUNNING);
++ return 0;
++
++wait_to_die:
++ /* Wait for kthread_stop */
++ set_current_state(TASK_INTERRUPTIBLE);
++ while (!kthread_should_stop()) {
++ schedule();
++ set_current_state(TASK_INTERRUPTIBLE);
++ }
++ __set_current_state(TASK_RUNNING);
++ return 0;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++/*
++ * Figure out where a task on a dead CPU should go; use force if necessary.
++ * NOTE: interrupts should be disabled by the caller
++ */
++static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
++{
++ unsigned long flags;
++ cpumask_t mask;
++ struct rq *rq;
++ int dest_cpu;
++
++restart:
++ /* On same node? */
++ mask = node_to_cpumask(cpu_to_node(dead_cpu));
++ cpus_and(mask, mask, p->cpus_allowed);
++ dest_cpu = any_online_cpu(mask);
++
++ /* On any allowed CPU? */
++ if (dest_cpu == NR_CPUS)
++ dest_cpu = any_online_cpu(p->cpus_allowed);
++
++ /* No more Mr. Nice Guy. */
++ if (dest_cpu == NR_CPUS) {
++ rq = task_rq_lock(p, &flags);
++ cpus_setall(p->cpus_allowed);
++ dest_cpu = any_online_cpu(p->cpus_allowed);
++ task_rq_unlock(rq, &flags);
++
++ /*
++ * Don't tell them about moving exiting tasks or
++ * kernel threads (both mm NULL), since they never
++ * leave kernel.
++ */
++ if (p->mm && printk_ratelimit())
++ printk(KERN_INFO "process %d (%s) no "
++ "longer affine to cpu%d\n",
++ p->pid, p->comm, dead_cpu);
++ }
++ if (!__migrate_task(p, dead_cpu, dest_cpu))
++ goto restart;
++}
++
++/*
++ * While a dead CPU has no uninterruptible tasks queued at this point,
++ * it might still have a nonzero ->nr_uninterruptible counter, because
++ * for performance reasons the counter is not strictly tracking tasks to
++ * their home CPUs. So we just add the counter to another CPU's counter,
++ * to keep the global sum constant after CPU-down:
++ */
++static void migrate_nr_uninterruptible(struct rq *rq_src)
++{
++ struct rq *rq_dest = cpu_rq(any_online_cpu(CPU_MASK_ALL));
++ unsigned long flags;
++
++ local_irq_save(flags);
++ double_rq_lock(rq_src, rq_dest);
++ rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
++ rq_src->nr_uninterruptible = 0;
++ double_rq_unlock(rq_src, rq_dest);
++ local_irq_restore(flags);
++}
++
++/* Run through task list and migrate tasks from the dead cpu. */
++static void migrate_live_tasks(int src_cpu)
++{
++ struct task_struct *p, *t;
++
++ write_lock_irq(&tasklist_lock);
++
++ do_each_thread(t, p) {
++ if (p == current)
++ continue;
++
++ if (task_cpu(p) == src_cpu)
++ move_task_off_dead_cpu(src_cpu, p);
++ } while_each_thread(t, p);
++
++ write_unlock_irq(&tasklist_lock);
++}
++
++/* Schedules idle task to be the next runnable task on current CPU.
++ * It does so by boosting its priority to highest possible and adding it to
++ * the _front_ of the runqueue. Used by CPU offline code.
++ */
++void sched_idle_next(void)
++{
++ int this_cpu = smp_processor_id();
++ struct rq *rq = cpu_rq(this_cpu);
++ struct task_struct *p = rq->idle;
++ unsigned long flags;
++
++ /* cpu has to be offline */
++ BUG_ON(cpu_online(this_cpu));
++
++ /*
++ * Strictly not necessary, since the rest of the CPUs are stopped by now
++ * and interrupts are disabled on the current CPU.
++ */
++ spin_lock_irqsave(&rq->lock, flags);
++
++ __setscheduler(p, SCHED_FIFO, MAX_RT_PRIO-1);
++
++ /* Add idle task to the _front_ of its priority queue: */
++ __activate_idle_task(p, rq);
++
++ spin_unlock_irqrestore(&rq->lock, flags);
++}
++
++/*
++ * Ensures that the idle task is using init_mm right before its cpu goes
++ * offline.
++ */
++void idle_task_exit(void)
++{
++ struct mm_struct *mm = current->active_mm;
++
++ BUG_ON(cpu_online(smp_processor_id()));
++
++ if (mm != &init_mm)
++ switch_mm(mm, &init_mm, current);
++ mmdrop(mm);
++}
++
++/* called under rq->lock with disabled interrupts */
++static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
++{
++ struct rq *rq = cpu_rq(dead_cpu);
++
++ /* Must be exiting, otherwise would be on tasklist. */
++ BUG_ON(p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD);
++
++ /* Cannot have done final schedule yet: would have vanished. */
++ BUG_ON(p->state == TASK_DEAD);
++
++ get_task_struct(p);
++
++ /*
++ * Drop lock around migration; if someone else moves it,
++ * that's OK. No task can be added to this CPU, so iteration is
++ * fine.
++ * NOTE: interrupts should be left disabled --dev@
++ */
++ spin_unlock(&rq->lock);
++ move_task_off_dead_cpu(dead_cpu, p);
++ spin_lock(&rq->lock);
++
++ put_task_struct(p);
++}
++
++/* release_task() removes task from tasklist, so we won't find dead tasks. */
++static void migrate_dead_tasks(unsigned int dead_cpu)
++{
++ struct rq *rq = cpu_rq(dead_cpu);
++ unsigned int arr, i;
++
++ for (arr = 0; arr < 2; arr++) {
++ for (i = 0; i < MAX_PRIO; i++) {
++ struct list_head *list = &rq->arrays[arr].queue[i];
++
++ while (!list_empty(list))
++ migrate_dead(dead_cpu, list_entry(list->next,
++ struct task_struct, run_list));
++ }
++ }
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++/*
++ * migration_call - callback triggered by CPU hotplug events.
++ * Here we start up the migration thread for a new CPU, and tear it
++ * down again when the CPU goes away.
++ */
++static int __cpuinit
++migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
++{
++ struct task_struct *p;
++ int cpu = (long)hcpu;
++ unsigned long flags;
++ struct rq *rq;
++
++ switch (action) {
++ case CPU_LOCK_ACQUIRE:
++ mutex_lock(&sched_hotcpu_mutex);
++ break;
++
++ case CPU_UP_PREPARE:
++ case CPU_UP_PREPARE_FROZEN:
++ p = kthread_create(migration_thread, hcpu, "migration/%d",cpu);
++ if (IS_ERR(p))
++ return NOTIFY_BAD;
++ p->flags |= PF_NOFREEZE;
++ kthread_bind(p, cpu);
++ /* Must be high prio: stop_machine expects to yield to it. */
++ rq = task_rq_lock(p, &flags);
++ __setscheduler(p, SCHED_FIFO, MAX_RT_PRIO-1);
++ task_rq_unlock(rq, &flags);
++ cpu_rq(cpu)->migration_thread = p;
++ break;
++
++ case CPU_ONLINE:
++ case CPU_ONLINE_FROZEN:
++ /* Strictly unnecessary, as the first user will wake it. */
++ wake_up_process(cpu_rq(cpu)->migration_thread);
++ break;
++
++#ifdef CONFIG_HOTPLUG_CPU
++ case CPU_UP_CANCELED:
++ case CPU_UP_CANCELED_FROZEN:
++ if (!cpu_rq(cpu)->migration_thread)
++ break;
++ /* Unbind it from offline cpu so it can run. Fall thru. */
++ kthread_bind(cpu_rq(cpu)->migration_thread,
++ any_online_cpu(cpu_online_map));
++ kthread_stop(cpu_rq(cpu)->migration_thread);
++ cpu_rq(cpu)->migration_thread = NULL;
++ break;
++
++ case CPU_DEAD:
++ case CPU_DEAD_FROZEN:
++ migrate_live_tasks(cpu);
++ rq = cpu_rq(cpu);
++ kthread_stop(rq->migration_thread);
++ rq->migration_thread = NULL;
++ /* Idle task back to normal (off runqueue, low prio) */
++ rq = task_rq_lock(rq->idle, &flags);
++ deactivate_task(rq->idle, rq);
++ rq->idle->static_prio = MAX_PRIO;
++ __setscheduler(rq->idle, SCHED_NORMAL, 0);
++ migrate_dead_tasks(cpu);
++ task_rq_unlock(rq, &flags);
++ migrate_nr_uninterruptible(rq);
++ BUG_ON(rq->nr_running != 0);
++
++ /* No need to migrate the tasks: it was best-effort if
++ * they didn't take sched_hotcpu_mutex. Just wake up
++ * the requestors. */
++ spin_lock_irq(&rq->lock);
++ while (!list_empty(&rq->migration_queue)) {
++ struct migration_req *req;
++
++ req = list_entry(rq->migration_queue.next,
++ struct migration_req, list);
++ list_del_init(&req->list);
++ complete(&req->done);
++ }
++ spin_unlock_irq(&rq->lock);
++ break;
++#endif
++ case CPU_LOCK_RELEASE:
++ mutex_unlock(&sched_hotcpu_mutex);
++ break;
++ }
++ return NOTIFY_OK;
++}
++
++/* Register at highest priority so that task migration (migrate_all_tasks)
++ * happens before everything else.
++ */
++static struct notifier_block __cpuinitdata migration_notifier = {
++ .notifier_call = migration_call,
++ .priority = 10
++};
++
++int __init migration_init(void)
++{
++ void *cpu = (void *)(long)smp_processor_id();
++ int err;
++
++ /* Start one for the boot CPU: */
++ err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
++ BUG_ON(err == NOTIFY_BAD);
++ migration_call(&migration_notifier, CPU_ONLINE, cpu);
++ register_cpu_notifier(&migration_notifier);
++
++ return 0;
++}
++#endif
++
++#ifdef CONFIG_SMP
++
++/* Number of possible processor ids */
++int nr_cpu_ids __read_mostly = NR_CPUS;
++EXPORT_SYMBOL(nr_cpu_ids);
++
++#undef SCHED_DOMAIN_DEBUG
++#ifdef SCHED_DOMAIN_DEBUG
++static void sched_domain_debug(struct sched_domain *sd, int cpu)
++{
++ int level = 0;
++
++ if (!sd) {
++ printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
++ return;
++ }
++
++ printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
++
++ do {
++ int i;
++ char str[NR_CPUS];
++ struct sched_group *group = sd->groups;
++ cpumask_t groupmask;
++
++ cpumask_scnprintf(str, NR_CPUS, sd->span);
++ cpus_clear(groupmask);
++
++ printk(KERN_DEBUG);
++ for (i = 0; i < level + 1; i++)
++ printk(" ");
++ printk("domain %d: ", level);
++
++ if (!(sd->flags & SD_LOAD_BALANCE)) {
++ printk("does not load-balance\n");
++ if (sd->parent)
++ printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
++ " has parent");
++ break;
++ }
++
++ printk("span %s\n", str);
++
++ if (!cpu_isset(cpu, sd->span))
++ printk(KERN_ERR "ERROR: domain->span does not contain "
++ "CPU%d\n", cpu);
++ if (!cpu_isset(cpu, group->cpumask))
++ printk(KERN_ERR "ERROR: domain->groups does not contain"
++ " CPU%d\n", cpu);
++
++ printk(KERN_DEBUG);
++ for (i = 0; i < level + 2; i++)
++ printk(" ");
++ printk("groups:");
++ do {
++ if (!group) {
++ printk("\n");
++ printk(KERN_ERR "ERROR: group is NULL\n");
++ break;
++ }
++
++ if (!group->__cpu_power) {
++ printk("\n");
++ printk(KERN_ERR "ERROR: domain->cpu_power not "
++ "set\n");
++ }
++
++ if (!cpus_weight(group->cpumask)) {
++ printk("\n");
++ printk(KERN_ERR "ERROR: empty group\n");
++ }
++
++ if (cpus_intersects(groupmask, group->cpumask)) {
++ printk("\n");
++ printk(KERN_ERR "ERROR: repeated CPUs\n");
++ }
++
++ cpus_or(groupmask, groupmask, group->cpumask);
++
++ cpumask_scnprintf(str, NR_CPUS, group->cpumask);
++ printk(" %s", str);
++
++ group = group->next;
++ } while (group != sd->groups);
++ printk("\n");
++
++ if (!cpus_equal(sd->span, groupmask))
++ printk(KERN_ERR "ERROR: groups don't span "
++ "domain->span\n");
++
++ level++;
++ sd = sd->parent;
++ if (!sd)
++ continue;
++
++ if (!cpus_subset(groupmask, sd->span))
++ printk(KERN_ERR "ERROR: parent span is not a superset "
++ "of domain->span\n");
++
++ } while (sd);
++}
++#else
++# define sched_domain_debug(sd, cpu) do { } while (0)
++#endif
++
++static int sd_degenerate(struct sched_domain *sd)
++{
++ if (cpus_weight(sd->span) == 1)
++ return 1;
++
++ /* Following flags need at least 2 groups */
++ if (sd->flags & (SD_LOAD_BALANCE |
++ SD_BALANCE_NEWIDLE |
++ SD_BALANCE_FORK |
++ SD_BALANCE_EXEC |
++ SD_SHARE_CPUPOWER |
++ SD_SHARE_PKG_RESOURCES)) {
++ if (sd->groups != sd->groups->next)
++ return 0;
++ }
++
++ /* Following flags don't use groups */
++ if (sd->flags & (SD_WAKE_IDLE |
++ SD_WAKE_AFFINE |
++ SD_WAKE_BALANCE))
++ return 0;
++
++ return 1;
++}
++
++static int
++sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
++{
++ unsigned long cflags = sd->flags, pflags = parent->flags;
++
++ if (sd_degenerate(parent))
++ return 1;
++
++ if (!cpus_equal(sd->span, parent->span))
++ return 0;
++
++ /* Does parent contain flags not in child? */
++ /* WAKE_BALANCE is a subset of WAKE_AFFINE */
++ if (cflags & SD_WAKE_AFFINE)
++ pflags &= ~SD_WAKE_BALANCE;
++ /* Flags needing groups don't count if only 1 group in parent */
++ if (parent->groups == parent->groups->next) {
++ pflags &= ~(SD_LOAD_BALANCE |
++ SD_BALANCE_NEWIDLE |
++ SD_BALANCE_FORK |
++ SD_BALANCE_EXEC |
++ SD_SHARE_CPUPOWER |
++ SD_SHARE_PKG_RESOURCES);
++ }
++ if (~cflags & pflags)
++ return 0;
++
++ return 1;
++}
++
++/*
++ * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
++ * hold the hotplug lock.
++ */
++static void cpu_attach_domain(struct sched_domain *sd, int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ struct sched_domain *tmp;
++
++ /* Remove the sched domains which do not contribute to scheduling. */
++ for (tmp = sd; tmp; tmp = tmp->parent) {
++ struct sched_domain *parent = tmp->parent;
++ if (!parent)
++ break;
++ if (sd_parent_degenerate(tmp, parent)) {
++ tmp->parent = parent->parent;
++ if (parent->parent)
++ parent->parent->child = tmp;
++ }
++ }
++
++ if (sd && sd_degenerate(sd)) {
++ sd = sd->parent;
++ if (sd)
++ sd->child = NULL;
++ }
++
++ sched_domain_debug(sd, cpu);
++
++ rcu_assign_pointer(rq->sd, sd);
++}
++
++/* cpus with isolated domains */
++static cpumask_t cpu_isolated_map = CPU_MASK_NONE;
++
++/* Setup the mask of cpus configured for isolated domains */
++static int __init isolated_cpu_setup(char *str)
++{
++ int ints[NR_CPUS], i;
++
++ str = get_options(str, ARRAY_SIZE(ints), ints);
++ cpus_clear(cpu_isolated_map);
++ for (i = 1; i <= ints[0]; i++)
++ if (ints[i] < NR_CPUS)
++ cpu_set(ints[i], cpu_isolated_map);
++ return 1;
++}
++
++__setup ("isolcpus=", isolated_cpu_setup);
++
++/*
++ * init_sched_build_groups takes the cpumask we wish to span, and a pointer
++ * to a function which identifies what group (along with its sched group) a
++ * CPU belongs to. The return value of group_fn must be >= 0 and < NR_CPUS
++ * (due to the fact that we keep track of groups covered with a cpumask_t).
++ *
++ * init_sched_build_groups will build a circular linked list of the groups
++ * covered by the given span, and will set each group's ->cpumask correctly,
++ * and ->cpu_power to 0.
++ */
++static void
++init_sched_build_groups(cpumask_t span, const cpumask_t *cpu_map,
++ int (*group_fn)(int cpu, const cpumask_t *cpu_map,
++ struct sched_group **sg))
++{
++ struct sched_group *first = NULL, *last = NULL;
++ cpumask_t covered = CPU_MASK_NONE;
++ int i;
++
++ for_each_cpu_mask(i, span) {
++ struct sched_group *sg;
++ int group = group_fn(i, cpu_map, &sg);
++ int j;
++
++ if (cpu_isset(i, covered))
++ continue;
++
++ sg->cpumask = CPU_MASK_NONE;
++ sg->__cpu_power = 0;
++
++ for_each_cpu_mask(j, span) {
++ if (group_fn(j, cpu_map, NULL) != group)
++ continue;
++
++ cpu_set(j, covered);
++ cpu_set(j, sg->cpumask);
++ }
++ if (!first)
++ first = sg;
++ if (last)
++ last->next = sg;
++ last = sg;
++ }
++ last->next = first;
++}
++
++#define SD_NODES_PER_DOMAIN 16
++
++/*
++ * Self-tuning task migration cost measurement between source and target CPUs.
++ *
++ * This is done by measuring the cost of manipulating buffers of varying
++ * sizes. For a given buffer-size here are the steps that are taken:
++ *
++ * 1) the source CPU reads+dirties a shared buffer
++ * 2) the target CPU reads+dirties the same shared buffer
++ *
++ * We measure how long they take, in the following 4 scenarios:
++ *
++ * - source: CPU1, target: CPU2 | cost1
++ * - source: CPU2, target: CPU1 | cost2
++ * - source: CPU1, target: CPU1 | cost3
++ * - source: CPU2, target: CPU2 | cost4
++ *
++ * We then calculate the cost1+cost2-cost3-cost4 difference - this is
++ * the cost of migration.
++ *
++ * We then start off from a small buffer-size and iterate up to larger
++ * buffer sizes, in 5% steps - measuring each buffer-size separately, and
++ * doing a maximum search for the cost. (The maximum cost for a migration
++ * normally occurs when the working set size is around the effective cache
++ * size.)
++ */
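++
++/*
++ * Worked example with illustrative numbers: if for one buffer size the
++ * cross-CPU passes cost cost1 = 900 usecs and cost2 = 880 usecs, while
++ * the local passes cost cost3 = 300 usecs and cost4 = 320 usecs, the
++ * estimated migration cost at that working-set size is:
++ *
++ * cost1 + cost2 - cost3 - cost4 = 900 + 880 - 300 - 320 = 1160 usecs
++ *
++ * i.e. roughly 580 usecs per direction.
++ */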
++#define SEARCH_SCOPE 2
++#define MIN_CACHE_SIZE (64*1024U)
++#define DEFAULT_CACHE_SIZE (5*1024*1024U)
++#define ITERATIONS 1
++#define SIZE_THRESH 130
++#define COST_THRESH 130
++
++/*
++ * The migration cost is a function of 'domain distance'. Domain
++ * distance is the number of steps a CPU has to iterate down its
++ * domain tree to share a domain with the other CPU. The farther
++ * two CPUs are from each other, the larger the distance gets.
++ *
++ * Note that we use the distance only to cache measurement results,
++ * the distance value is not used numerically otherwise. When two
++ * CPUs have the same distance it is assumed that the migration
++ * cost is the same. (this is a simplification but quite practical)
++ */
++#define MAX_DOMAIN_DISTANCE 32
++
++static unsigned long long migration_cost[MAX_DOMAIN_DISTANCE] =
++ { [ 0 ... MAX_DOMAIN_DISTANCE-1 ] =
++/*
++ * Architectures may override the migration cost and thus avoid
++ * boot-time calibration. Unit is nanoseconds. Mostly useful for
++ * virtualized hardware:
++ */
++#ifdef CONFIG_DEFAULT_MIGRATION_COST
++ CONFIG_DEFAULT_MIGRATION_COST
++#else
++ -1LL
++#endif
++};
++
++/*
++ * Allow override of migration cost - in units of microseconds.
++ * E.g. migration_cost=1000,2000,3000 will set up a level-1 cost
++ * of 1 msec, level-2 cost of 2 msecs and level3 cost of 3 msecs:
++ */
++static int __init migration_cost_setup(char *str)
++{
++ int ints[MAX_DOMAIN_DISTANCE+1], i;
++
++ str = get_options(str, ARRAY_SIZE(ints), ints);
++
++ printk("#ints: %d\n", ints[0]);
++ for (i = 1; i <= ints[0]; i++) {
++ migration_cost[i-1] = (unsigned long long)ints[i]*1000;
++ printk("migration_cost[%d]: %Ld\n", i-1, migration_cost[i-1]);
++ }
++ return 1;
++}
++
++__setup ("migration_cost=", migration_cost_setup);
++
++/*
++ * Global multiplier (divisor) for migration-cutoff values,
++ * in percentiles. E.g. use a value of 150 to get 1.5 times
++ * longer cache-hot cutoff times.
++ *
++ * (We scale it from 100 to 128 to make long long handling easier.)
++ */
++
++#define MIGRATION_FACTOR_SCALE 128
++
++static unsigned int migration_factor = MIGRATION_FACTOR_SCALE;
++
++static int __init setup_migration_factor(char *str)
++{
++ get_option(&str, &migration_factor);
++ migration_factor = migration_factor * MIGRATION_FACTOR_SCALE / 100;
++ return 1;
++}
++
++__setup("migration_factor=", setup_migration_factor);
++
++/*
++ * Estimated distance of two CPUs, measured via the number of domains
++ * we have to pass for the two CPUs to be in the same span:
++ */
++static unsigned long domain_distance(int cpu1, int cpu2)
++{
++ unsigned long distance = 0;
++ struct sched_domain *sd;
++
++ for_each_domain(cpu1, sd) {
++ WARN_ON(!cpu_isset(cpu1, sd->span));
++ if (cpu_isset(cpu2, sd->span))
++ return distance;
++ distance++;
++ }
++ if (distance >= MAX_DOMAIN_DISTANCE) {
++ WARN_ON(1);
++ distance = MAX_DOMAIN_DISTANCE-1;
++ }
++
++ return distance;
++}
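++
++/*
++ * Example: two CPUs that already share their lowest-level domain (e.g.
++ * SMT siblings) get distance 0; every further level that has to be
++ * walked before the spans meet adds one, so CPUs on different NUMA
++ * nodes that only meet in the allnodes domain get the largest distance.
++ */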
++
++static unsigned int migration_debug;
++
++static int __init setup_migration_debug(char *str)
++{
++ get_option(&str, &migration_debug);
++ return 1;
++}
++
++__setup("migration_debug=", setup_migration_debug);
++
++/*
++ * Maximum cache-size that the scheduler should try to measure.
++ * Architectures with larger caches should tune this up during
++ * bootup. Gets used in the domain-setup code (i.e. during SMP
++ * bootup).
++ */
++unsigned int max_cache_size;
++
++static int __init setup_max_cache_size(char *str)
++{
++ get_option(&str, &max_cache_size);
++ return 1;
++}
++
++__setup("max_cache_size=", setup_max_cache_size);
++
++/*
++ * Dirty a big buffer in a hard-to-predict (for the L2 cache) way. This
++ * is the operation that is timed, so we try to generate unpredictable
++ * cachemisses that still end up filling the L2 cache:
++ */
++static void touch_cache(void *__cache, unsigned long __size)
++{
++ unsigned long size = __size / sizeof(long);
++ unsigned long chunk1 = size / 3;
++ unsigned long chunk2 = 2 * size / 3;
++ unsigned long *cache = __cache;
++ int i;
++
++ for (i = 0; i < size/6; i += 8) {
++ switch (i % 6) {
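++ /* Intentional fall-through: each case also touches the lines
++ * of the cases below it, which makes the resulting access
++ * pattern harder for the prefetcher to predict. */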
++ case 0: cache[i]++;
++ case 1: cache[size-1-i]++;
++ case 2: cache[chunk1-i]++;
++ case 3: cache[chunk1+i]++;
++ case 4: cache[chunk2-i]++;
++ case 5: cache[chunk2+i]++;
++ }
++ }
++}
++
++/*
++ * Measure the cache-cost of one task migration. Returns in units of nsec.
++ */
++static unsigned long long
++measure_one(void *cache, unsigned long size, int source, int target)
++{
++ cpumask_t mask, saved_mask;
++ unsigned long long t0, t1, t2, t3, cost;
++
++ saved_mask = current->cpus_allowed;
++
++ /*
++ * Flush source caches to RAM and invalidate them:
++ */
++ sched_cacheflush();
++
++ /*
++ * Migrate to the source CPU:
++ */
++ mask = cpumask_of_cpu(source);
++ set_cpus_allowed(current, mask);
++ WARN_ON(smp_processor_id() != source);
++
++ /*
++ * Dirty the working set:
++ */
++ t0 = sched_clock();
++ touch_cache(cache, size);
++ t1 = sched_clock();
++
++ /*
++ * Migrate to the target CPU, dirty the L2 cache and access
++ * the shared buffer (which represents the working set
++ * of a migrated task):
++ */
++ mask = cpumask_of_cpu(target);
++ set_cpus_allowed(current, mask);
++ WARN_ON(smp_processor_id() != target);
++
++ t2 = sched_clock();
++ touch_cache(cache, size);
++ t3 = sched_clock();
++
++ cost = t1-t0 + t3-t2;
++
++ if (migration_debug >= 2)
++ printk("[%d->%d]: %8Ld %8Ld %8Ld => %10Ld.\n",
++ source, target, t1-t0, t2-t1, t3-t2, cost);
++ /*
++ * Flush target caches to RAM and invalidate them:
++ */
++ sched_cacheflush();
++
++ set_cpus_allowed(current, saved_mask);
++
++ return cost;
++}
++
++/*
++ * Measure a series of task migrations and return the average
++ * result. Since this code runs early during bootup the system
++ * is 'undisturbed' and the average latency makes sense.
++ *
++ * The algorithm in essence auto-detects the relevant cache-size,
++ * so it will properly detect different cachesizes for different
++ * cache-hierarchies, depending on how the CPUs are connected.
++ *
++ * Architectures can prime the upper limit of the search range via
++ * max_cache_size, otherwise the search range defaults to 10MB...64K.
++ */
++static unsigned long long
++measure_cost(int cpu1, int cpu2, void *cache, unsigned int size)
++{
++ unsigned long long cost1, cost2;
++ int i;
++
++ /*
++ * Measure the migration cost of 'size' bytes, averaged over
++ * 2*ITERATIONS runs per direction:
++ *
++ * (We perturb the cache size by a small (0..4k)
++ * value to compensate for size/alignment-related artifacts.
++ * We also subtract the cost of the operation done on
++ * the same CPU.)
++ */
++ cost1 = 0;
++
++ /*
++ * dry run, to make sure we start off cache-cold on cpu1,
++ * and to get any vmalloc pagefaults in advance:
++ */
++ measure_one(cache, size, cpu1, cpu2);
++ for (i = 0; i < ITERATIONS; i++)
++ cost1 += measure_one(cache, size - i * 1024, cpu1, cpu2);
++
++ measure_one(cache, size, cpu2, cpu1);
++ for (i = 0; i < ITERATIONS; i++)
++ cost1 += measure_one(cache, size - i * 1024, cpu2, cpu1);
++
++ /*
++ * (We measure the non-migrating [cached] cost on both
++ * cpu1 and cpu2, to handle CPUs with different speeds)
++ */
++ cost2 = 0;
++
++ measure_one(cache, size, cpu1, cpu1);
++ for (i = 0; i < ITERATIONS; i++)
++ cost2 += measure_one(cache, size - i * 1024, cpu1, cpu1);
++
++ measure_one(cache, size, cpu2, cpu2);
++ for (i = 0; i < ITERATIONS; i++)
++ cost2 += measure_one(cache, size - i * 1024, cpu2, cpu2);
++
++ /*
++ * Get the per-iteration migration cost:
++ */
++ do_div(cost1, 2 * ITERATIONS);
++ do_div(cost2, 2 * ITERATIONS);
++
++ return cost1 - cost2;
++}
++
++static unsigned long long measure_migration_cost(int cpu1, int cpu2)
++{
++ unsigned long long max_cost = 0, fluct = 0, avg_fluct = 0;
++ unsigned int max_size, size, size_found = 0;
++ long long cost = 0, prev_cost;
++ void *cache;
++
++ /*
++ * Search from max_cache_size*SEARCH_SCOPE down to 64K - the real
++ * relevant cache size has to lie somewhere in between.
++ */
++ if (max_cache_size) {
++ max_size = max(max_cache_size * SEARCH_SCOPE, MIN_CACHE_SIZE);
++ size = max(max_cache_size / SEARCH_SCOPE, MIN_CACHE_SIZE);
++ } else {
++ /*
++ * Since we have no estimate of the relevant
++ * search range, fall back to wide defaults:
++ */
++ max_size = DEFAULT_CACHE_SIZE * SEARCH_SCOPE;
++ size = MIN_CACHE_SIZE;
++ }
++
++ if (!cpu_online(cpu1) || !cpu_online(cpu2)) {
++ printk("cpu %d and %d not both online!\n", cpu1, cpu2);
++ return 0;
++ }
++
++ /*
++ * Allocate the working set:
++ */
++ cache = vmalloc(max_size);
++ if (!cache) {
++ printk("could not vmalloc %d bytes for cache!\n", 2 * max_size);
++ return 1000000; /* return 1 msec on very small boxen */
++ }
++
++ while (size <= max_size) {
++ prev_cost = cost;
++ cost = measure_cost(cpu1, cpu2, cache, size);
++
++ /*
++ * Update the max:
++ */
++ if (cost > 0) {
++ if (max_cost < cost) {
++ max_cost = cost;
++ size_found = size;
++ }
++ }
++ /*
++ * Calculate average fluctuation, we use this to prevent
++ * noise from triggering an early break out of the loop:
++ */
++ fluct = abs(cost - prev_cost);
++ avg_fluct = (avg_fluct + fluct)/2;
++
++ if (migration_debug)
++ printk("-> [%d][%d][%7d] %3ld.%ld [%3ld.%ld] (%ld): "
++ "(%8Ld %8Ld)\n",
++ cpu1, cpu2, size,
++ (long)cost / 1000000,
++ ((long)cost / 100000) % 10,
++ (long)max_cost / 1000000,
++ ((long)max_cost / 100000) % 10,
++ domain_distance(cpu1, cpu2),
++ cost, avg_fluct);
++
++ /*
++ * If we iterated at least 20% past the previous maximum,
++ * and the cost has dropped by more than 20% already,
++ * (taking fluctuations into account) then we assume to
++ * have found the maximum and break out of the loop early:
++ */
++ if (size_found && (size*100 > size_found*SIZE_THRESH))
++ if (cost+avg_fluct <= 0 ||
++ max_cost*100 > (cost+avg_fluct)*COST_THRESH) {
++
++ if (migration_debug)
++ printk("-> found max.\n");
++ break;
++ }
++ /*
++ * Increase the cachesize in 10% steps:
++ */
++ size = size * 10 / 9;
++ }
++
++ if (migration_debug)
++ printk("[%d][%d] working set size found: %d, cost: %Ld\n",
++ cpu1, cpu2, size_found, max_cost);
++
++ vfree(cache);
++
++ /*
++ * A task is considered 'cache cold' if at least 2 times
++ * the worst-case cost of migration has passed.
++ *
++ * (this limit is only listened to if the load-balancing
++ * situation is 'nice' - if there is a large imbalance we
++ * ignore it for the sake of CPU utilization and
++ * processing fairness.)
++ */
++ return 2 * max_cost * migration_factor / MIGRATION_FACTOR_SCALE;
++}
++
++static void calibrate_migration_costs(const cpumask_t *cpu_map)
++{
++ int cpu1 = -1, cpu2 = -1, cpu, orig_cpu = raw_smp_processor_id();
++ unsigned long j0, j1, distance, max_distance = 0;
++ struct sched_domain *sd;
++
++ j0 = jiffies;
++
++ /*
++ * First pass - calculate the cacheflush times:
++ */
++ for_each_cpu_mask(cpu1, *cpu_map) {
++ for_each_cpu_mask(cpu2, *cpu_map) {
++ if (cpu1 == cpu2)
++ continue;
++ distance = domain_distance(cpu1, cpu2);
++ max_distance = max(max_distance, distance);
++ /*
++ * No result cached yet?
++ */
++ if (migration_cost[distance] == -1LL)
++ migration_cost[distance] =
++ measure_migration_cost(cpu1, cpu2);
++ }
++ }
++ /*
++ * Second pass - update the sched domain hierarchy with
++ * the new cache-hot-time estimations:
++ */
++ for_each_cpu_mask(cpu, *cpu_map) {
++ distance = 0;
++ for_each_domain(cpu, sd) {
++ sd->cache_hot_time = migration_cost[distance];
++ distance++;
++ }
++ }
++ /*
++ * Print the matrix:
++ */
++ if (migration_debug)
++ printk("migration: max_cache_size: %d, cpu: %d MHz:\n",
++ max_cache_size,
++#ifdef CONFIG_X86
++ cpu_khz/1000
++#else
++ -1
++#endif
++ );
++ if (system_state == SYSTEM_BOOTING && num_online_cpus() > 1) {
++ printk("migration_cost=");
++ for (distance = 0; distance <= max_distance; distance++) {
++ if (distance)
++ printk(",");
++ printk("%ld", (long)migration_cost[distance] / 1000);
++ }
++ printk("\n");
++ }
++ j1 = jiffies;
++ if (migration_debug)
++ printk("migration: %ld seconds\n", (j1-j0) / HZ);
++
++ /*
++ * Move back to the original CPU. NUMA-Q gets confused
++ * if we migrate to another quad during bootup.
++ */
++ if (raw_smp_processor_id() != orig_cpu) {
++ cpumask_t mask = cpumask_of_cpu(orig_cpu),
++ saved_mask = current->cpus_allowed;
++
++ set_cpus_allowed(current, mask);
++ set_cpus_allowed(current, saved_mask);
++ }
++}
++
++#ifdef CONFIG_NUMA
++
++/**
++ * find_next_best_node - find the next node to include in a sched_domain
++ * @node: node whose sched_domain we're building
++ * @used_nodes: nodes already in the sched_domain
++ *
++ * Find the next node to include in a given scheduling domain. Simply
++ * finds the closest node not already in the @used_nodes map.
++ *
++ * Should use nodemask_t.
++ */
++static int find_next_best_node(int node, unsigned long *used_nodes)
++{
++ int i, n, val, min_val, best_node = 0;
++
++ min_val = INT_MAX;
++
++ for (i = 0; i < MAX_NUMNODES; i++) {
++ /* Start at @node */
++ n = (node + i) % MAX_NUMNODES;
++
++ if (!nr_cpus_node(n))
++ continue;
++
++ /* Skip already used nodes */
++ if (test_bit(n, used_nodes))
++ continue;
++
++ /* Simple min distance search */
++ val = node_distance(node, n);
++
++ if (val < min_val) {
++ min_val = val;
++ best_node = n;
++ }
++ }
++
++ set_bit(best_node, used_nodes);
++ return best_node;
++}
++
++/**
++ * sched_domain_node_span - get a cpumask for a node's sched_domain
++ * @node: node whose cpumask we're constructing
++ *
++ * Given a node, construct a good cpumask for its sched_domain to span. It
++ * should be one that prevents unnecessary balancing, but also spreads tasks
++ * out optimally.
++ */
++static cpumask_t sched_domain_node_span(int node)
++{
++ DECLARE_BITMAP(used_nodes, MAX_NUMNODES);
++ cpumask_t span, nodemask;
++ int i;
++
++ cpus_clear(span);
++ bitmap_zero(used_nodes, MAX_NUMNODES);
++
++ nodemask = node_to_cpumask(node);
++ cpus_or(span, span, nodemask);
++ set_bit(node, used_nodes);
++
++ for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
++ int next_node = find_next_best_node(node, used_nodes);
++
++ nodemask = node_to_cpumask(next_node);
++ cpus_or(span, span, nodemask);
++ }
++
++ return span;
++}
++#endif
++
++int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
++
++/*
++ * SMT sched-domains:
++ */
++#ifdef CONFIG_SCHED_SMT
++static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
++static DEFINE_PER_CPU(struct sched_group, sched_group_cpus);
++
++static int cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map,
++ struct sched_group **sg)
++{
++ if (sg)
++ *sg = &per_cpu(sched_group_cpus, cpu);
++ return cpu;
++}
++#endif
++
++/*
++ * multi-core sched-domains:
++ */
++#ifdef CONFIG_SCHED_MC
++static DEFINE_PER_CPU(struct sched_domain, core_domains);
++static DEFINE_PER_CPU(struct sched_group, sched_group_core);
++#endif
++
++#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
++static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
++ struct sched_group **sg)
++{
++ int group;
++ cpumask_t mask = cpu_sibling_map[cpu];
++ cpus_and(mask, mask, *cpu_map);
++ group = first_cpu(mask);
++ if (sg)
++ *sg = &per_cpu(sched_group_core, group);
++ return group;
++}
++#elif defined(CONFIG_SCHED_MC)
++static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
++ struct sched_group **sg)
++{
++ if (sg)
++ *sg = &per_cpu(sched_group_core, cpu);
++ return cpu;
++}
++#endif
++
++static DEFINE_PER_CPU(struct sched_domain, phys_domains);
++static DEFINE_PER_CPU(struct sched_group, sched_group_phys);
++
++static int cpu_to_phys_group(int cpu, const cpumask_t *cpu_map,
++ struct sched_group **sg)
++{
++ int group;
++#ifdef CONFIG_SCHED_MC
++ cpumask_t mask = cpu_coregroup_map(cpu);
++ cpus_and(mask, mask, *cpu_map);
++ group = first_cpu(mask);
++#elif defined(CONFIG_SCHED_SMT)
++ cpumask_t mask = cpu_sibling_map[cpu];
++ cpus_and(mask, mask, *cpu_map);
++ group = first_cpu(mask);
++#else
++ group = cpu;
++#endif
++ if (sg)
++ *sg = &per_cpu(sched_group_phys, group);
++ return group;
++}
++
++#ifdef CONFIG_NUMA
++/*
++ * The init_sched_build_groups can't handle what we want to do with node
++ * groups, so roll our own. Now each node has its own list of groups which
++ * gets dynamically allocated.
++ */
++static DEFINE_PER_CPU(struct sched_domain, node_domains);
++static struct sched_group **sched_group_nodes_bycpu[NR_CPUS];
++
++static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
++static DEFINE_PER_CPU(struct sched_group, sched_group_allnodes);
++
++static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map,
++ struct sched_group **sg)
++{
++ cpumask_t nodemask = node_to_cpumask(cpu_to_node(cpu));
++ int group;
++
++ cpus_and(nodemask, nodemask, *cpu_map);
++ group = first_cpu(nodemask);
++
++ if (sg)
++ *sg = &per_cpu(sched_group_allnodes, group);
++ return group;
++}
++
++static void init_numa_sched_groups_power(struct sched_group *group_head)
++{
++ struct sched_group *sg = group_head;
++ int j;
++
++ if (!sg)
++ return;
++next_sg:
++ for_each_cpu_mask(j, sg->cpumask) {
++ struct sched_domain *sd;
++
++ sd = &per_cpu(phys_domains, j);
++ if (j != first_cpu(sd->groups->cpumask)) {
++ /*
++ * Only add "power" once for each
++ * physical package.
++ */
++ continue;
++ }
++
++ sg_inc_cpu_power(sg, sd->groups->__cpu_power);
++ }
++ sg = sg->next;
++ if (sg != group_head)
++ goto next_sg;
++}
++#endif
++
++#ifdef CONFIG_NUMA
++/* Free memory allocated for various sched_group structures */
++static void free_sched_groups(const cpumask_t *cpu_map)
++{
++ int cpu, i;
++
++ for_each_cpu_mask(cpu, *cpu_map) {
++ struct sched_group **sched_group_nodes
++ = sched_group_nodes_bycpu[cpu];
++
++ if (!sched_group_nodes)
++ continue;
++
++ for (i = 0; i < MAX_NUMNODES; i++) {
++ cpumask_t nodemask = node_to_cpumask(i);
++ struct sched_group *oldsg, *sg = sched_group_nodes[i];
++
++ cpus_and(nodemask, nodemask, *cpu_map);
++ if (cpus_empty(nodemask))
++ continue;
++
++ if (sg == NULL)
++ continue;
++ sg = sg->next;
++next_sg:
++ oldsg = sg;
++ sg = sg->next;
++ kfree(oldsg);
++ if (oldsg != sched_group_nodes[i])
++ goto next_sg;
++ }
++ kfree(sched_group_nodes);
++ sched_group_nodes_bycpu[cpu] = NULL;
++ }
++}
++#else
++static void free_sched_groups(const cpumask_t *cpu_map)
++{
++}
++#endif
++
++/*
++ * Initialize sched groups cpu_power.
++ *
++ * cpu_power indicates the capacity of sched group, which is used while
++ * distributing the load between different sched groups in a sched domain.
++ * Typically cpu_power for all the groups in a sched domain will be the same
++ * unless there are asymmetries in the topology. If there are asymmetries,
++ * the group having more cpu_power will pick up more load than the group
++ * having less cpu_power.
++ *
++ * cpu_power will be a multiple of SCHED_LOAD_SCALE. This multiple represents
++ * the maximum number of tasks a group can handle in the presence of other idle
++ * or lightly loaded groups in the same sched domain.
++ */
++static void init_sched_groups_power(int cpu, struct sched_domain *sd)
++{
++ struct sched_domain *child;
++ struct sched_group *group;
++
++ WARN_ON(!sd || !sd->groups);
++
++ if (cpu != first_cpu(sd->groups->cpumask))
++ return;
++
++ child = sd->child;
++
++ sd->groups->__cpu_power = 0;
++
++ /*
++ * For perf policy, if the groups in child domain share resources
++ * (for example cores sharing some portions of the cache hierarchy
++ * or SMT), then set this domain groups cpu_power such that each group
++ * can handle only one task, when there are other idle groups in the
++ * same sched domain.
++ */
++ if (!child || (!(sd->flags & SD_POWERSAVINGS_BALANCE) &&
++ (child->flags &
++ (SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES)))) {
++ sg_inc_cpu_power(sd->groups, SCHED_LOAD_SCALE);
++ return;
++ }
++
++ /*
++ * add cpu_power of each child group to this groups cpu_power
++ */
++ group = child->groups;
++ do {
++ sg_inc_cpu_power(sd->groups, group->__cpu_power);
++ group = group->next;
++ } while (group != child->groups);
++}
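++
++/*
++ * Worked example, on a hypothetical package of two cores sharing a
++ * cache: the child domain carries SD_SHARE_PKG_RESOURCES, so under the
++ * default perf policy the package-level group is clamped to
++ * SCHED_LOAD_SCALE and the balancer spreads one task per package before
++ * doubling up. With SD_POWERSAVINGS_BALANCE set, the child groups are
++ * summed instead (2*SCHED_LOAD_SCALE here), so tasks get packed onto
++ * one package and the other packages can stay idle.
++ */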
++
++/*
++ * Build sched domains for a given set of cpus and attach the sched domains
++ * to the individual cpus
++ */
++static int build_sched_domains(const cpumask_t *cpu_map)
++{
++ int i;
++ struct sched_domain *sd;
++#ifdef CONFIG_NUMA
++ struct sched_group **sched_group_nodes = NULL;
++ int sd_allnodes = 0;
++
++ /*
++ * Allocate the per-node list of sched groups
++ */
++ sched_group_nodes = kzalloc(sizeof(struct sched_group*)*MAX_NUMNODES,
++ GFP_KERNEL);
++ if (!sched_group_nodes) {
++ printk(KERN_WARNING "Can not alloc sched group node list\n");
++ return -ENOMEM;
++ }
++ sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
++#endif
++
++ /*
++ * Set up domains for cpus specified by the cpu_map.
++ */
++ for_each_cpu_mask(i, *cpu_map) {
++ struct sched_domain *sd = NULL, *p;
++ cpumask_t nodemask = node_to_cpumask(cpu_to_node(i));
++
++ cpus_and(nodemask, nodemask, *cpu_map);
++
++#ifdef CONFIG_NUMA
++ if (cpus_weight(*cpu_map)
++ > SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) {
++ sd = &per_cpu(allnodes_domains, i);
++ *sd = SD_ALLNODES_INIT;
++ sd->span = *cpu_map;
++ cpu_to_allnodes_group(i, cpu_map, &sd->groups);
++ p = sd;
++ sd_allnodes = 1;
++ } else
++ p = NULL;
++
++ sd = &per_cpu(node_domains, i);
++ *sd = SD_NODE_INIT;
++ sd->span = sched_domain_node_span(cpu_to_node(i));
++ sd->parent = p;
++ if (p)
++ p->child = sd;
++ cpus_and(sd->span, sd->span, *cpu_map);
++#endif
++
++ p = sd;
++ sd = &per_cpu(phys_domains, i);
++ *sd = SD_CPU_INIT;
++ sd->span = nodemask;
++ sd->parent = p;
++ if (p)
++ p->child = sd;
++ cpu_to_phys_group(i, cpu_map, &sd->groups);
++
++#ifdef CONFIG_SCHED_MC
++ p = sd;
++ sd = &per_cpu(core_domains, i);
++ *sd = SD_MC_INIT;
++ sd->span = cpu_coregroup_map(i);
++ cpus_and(sd->span, sd->span, *cpu_map);
++ sd->parent = p;
++ p->child = sd;
++ cpu_to_core_group(i, cpu_map, &sd->groups);
++#endif
++
++#ifdef CONFIG_SCHED_SMT
++ p = sd;
++ sd = &per_cpu(cpu_domains, i);
++ *sd = SD_SIBLING_INIT;
++ sd->span = cpu_sibling_map[i];
++ cpus_and(sd->span, sd->span, *cpu_map);
++ sd->parent = p;
++ p->child = sd;
++ cpu_to_cpu_group(i, cpu_map, &sd->groups);
++#endif
++ }
++
++#ifdef CONFIG_SCHED_SMT
++ /* Set up CPU (sibling) groups */
++ for_each_cpu_mask(i, *cpu_map) {
++ cpumask_t this_sibling_map = cpu_sibling_map[i];
++ cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
++ if (i != first_cpu(this_sibling_map))
++ continue;
++
++ init_sched_build_groups(this_sibling_map, cpu_map, &cpu_to_cpu_group);
++ }
++#endif
++
++#ifdef CONFIG_SCHED_MC
++ /* Set up multi-core groups */
++ for_each_cpu_mask(i, *cpu_map) {
++ cpumask_t this_core_map = cpu_coregroup_map(i);
++ cpus_and(this_core_map, this_core_map, *cpu_map);
++ if (i != first_cpu(this_core_map))
++ continue;
++ init_sched_build_groups(this_core_map, cpu_map, &cpu_to_core_group);
++ }
++#endif
++
++
++ /* Set up physical groups */
++ for (i = 0; i < MAX_NUMNODES; i++) {
++ cpumask_t nodemask = node_to_cpumask(i);
++
++ cpus_and(nodemask, nodemask, *cpu_map);
++ if (cpus_empty(nodemask))
++ continue;
++
++ init_sched_build_groups(nodemask, cpu_map, &cpu_to_phys_group);
++ }
++
++#ifdef CONFIG_NUMA
++ /* Set up node groups */
++ if (sd_allnodes)
++ init_sched_build_groups(*cpu_map, cpu_map, &cpu_to_allnodes_group);
++
++ for (i = 0; i < MAX_NUMNODES; i++) {
++ /* Set up node groups */
++ struct sched_group *sg, *prev;
++ cpumask_t nodemask = node_to_cpumask(i);
++ cpumask_t domainspan;
++ cpumask_t covered = CPU_MASK_NONE;
++ int j;
++
++ cpus_and(nodemask, nodemask, *cpu_map);
++ if (cpus_empty(nodemask)) {
++ sched_group_nodes[i] = NULL;
++ continue;
++ }
++
++ domainspan = sched_domain_node_span(i);
++ cpus_and(domainspan, domainspan, *cpu_map);
++
++ sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i);
++ if (!sg) {
++ printk(KERN_WARNING "Can not alloc domain group for "
++ "node %d\n", i);
++ goto error;
++ }
++ sched_group_nodes[i] = sg;
++ for_each_cpu_mask(j, nodemask) {
++ struct sched_domain *sd;
++ sd = &per_cpu(node_domains, j);
++ sd->groups = sg;
++ }
++ sg->__cpu_power = 0;
++ sg->cpumask = nodemask;
++ sg->next = sg;
++ cpus_or(covered, covered, nodemask);
++ prev = sg;
++
++ for (j = 0; j < MAX_NUMNODES; j++) {
++ cpumask_t tmp, notcovered;
++ int n = (i + j) % MAX_NUMNODES;
++
++ cpus_complement(notcovered, covered);
++ cpus_and(tmp, notcovered, *cpu_map);
++ cpus_and(tmp, tmp, domainspan);
++ if (cpus_empty(tmp))
++ break;
++
++ nodemask = node_to_cpumask(n);
++ cpus_and(tmp, tmp, nodemask);
++ if (cpus_empty(tmp))
++ continue;
++
++ sg = kmalloc_node(sizeof(struct sched_group),
++ GFP_KERNEL, i);
++ if (!sg) {
++ printk(KERN_WARNING
++ "Can not alloc domain group for node %d\n", j);
++ goto error;
++ }
++ sg->__cpu_power = 0;
++ sg->cpumask = tmp;
++ sg->next = prev->next;
++ cpus_or(covered, covered, tmp);
++ prev->next = sg;
++ prev = sg;
++ }
++ }
++#endif
++
++ /* Calculate CPU power for physical packages and nodes */
++#ifdef CONFIG_SCHED_SMT
++ for_each_cpu_mask(i, *cpu_map) {
++ sd = &per_cpu(cpu_domains, i);
++ init_sched_groups_power(i, sd);
++ }
++#endif
++#ifdef CONFIG_SCHED_MC
++ for_each_cpu_mask(i, *cpu_map) {
++ sd = &per_cpu(core_domains, i);
++ init_sched_groups_power(i, sd);
++ }
++#endif
++
++ for_each_cpu_mask(i, *cpu_map) {
++ sd = &per_cpu(phys_domains, i);
++ init_sched_groups_power(i, sd);
++ }
++
++#ifdef CONFIG_NUMA
++ for (i = 0; i < MAX_NUMNODES; i++)
++ init_numa_sched_groups_power(sched_group_nodes[i]);
++
++ if (sd_allnodes) {
++ struct sched_group *sg;
++
++ cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map, &sg);
++ init_numa_sched_groups_power(sg);
++ }
++#endif
++
++ /* Attach the domains */
++ for_each_cpu_mask(i, *cpu_map) {
++ struct sched_domain *sd;
++#ifdef CONFIG_SCHED_SMT
++ sd = &per_cpu(cpu_domains, i);
++#elif defined(CONFIG_SCHED_MC)
++ sd = &per_cpu(core_domains, i);
++#else
++ sd = &per_cpu(phys_domains, i);
++#endif
++ cpu_attach_domain(sd, i);
++ }
++ /*
++ * Tune cache-hot values:
++ */
++ calibrate_migration_costs(cpu_map);
++
++ return 0;
++
++#ifdef CONFIG_NUMA
++error:
++ free_sched_groups(cpu_map);
++ return -ENOMEM;
++#endif
++}
++/*
++ * Set up scheduler domains and groups. Callers must hold the hotplug lock.
++ */
++static int arch_init_sched_domains(const cpumask_t *cpu_map)
++{
++ cpumask_t cpu_default_map;
++ int err;
++
++ /*
++ * Setup mask for cpus without special case scheduling requirements.
++ * For now this just excludes isolated cpus, but could be used to
++ * exclude other special cases in the future.
++ */
++ cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map);
++
++ err = build_sched_domains(&cpu_default_map);
++
++ return err;
++}
++
++static void arch_destroy_sched_domains(const cpumask_t *cpu_map)
++{
++ free_sched_groups(cpu_map);
++}
++
++/*
++ * Detach sched domains from a group of cpus specified in cpu_map
++ * These cpus will now be attached to the NULL domain
++ */
++static void detach_destroy_domains(const cpumask_t *cpu_map)
++{
++ int i;
++
++ for_each_cpu_mask(i, *cpu_map)
++ cpu_attach_domain(NULL, i);
++ synchronize_sched();
++ arch_destroy_sched_domains(cpu_map);
++}
++
++/*
++ * Partition sched domains as specified by the cpumasks below.
++ * This attaches all cpus from the cpumasks to the NULL domain,
++ * waits for an RCU quiescent period, recalculates the sched
++ * domain information and then attaches the cpus back to the
++ * correct sched domains.
++ * Call with the hotplug lock held.
++ */
++int partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
++{
++ cpumask_t change_map;
++ int err = 0;
++
++ cpus_and(*partition1, *partition1, cpu_online_map);
++ cpus_and(*partition2, *partition2, cpu_online_map);
++ cpus_or(change_map, *partition1, *partition2);
++
++ /* Detach sched domains from all of the affected cpus */
++ detach_destroy_domains(&change_map);
++ if (!cpus_empty(*partition1))
++ err = build_sched_domains(partition1);
++ if (!err && !cpus_empty(*partition2))
++ err = build_sched_domains(partition2);
++
++ return err;
++}
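++
++/*
++ * Usage sketch (hypothetical caller; the names part1/part2 are
++ * illustrative): split the online cpus into two disjoint balancing
++ * partitions while holding the hotplug lock.
++ *
++ *	cpumask_t part1 = CPU_MASK_NONE, part2 = CPU_MASK_NONE;
++ *
++ *	cpu_set(0, part1);
++ *	cpu_set(1, part1);
++ *	cpus_andnot(part2, cpu_online_map, part1);
++ *	if (partition_sched_domains(&part1, &part2))
++ *		printk(KERN_WARNING "sched: domain rebuild failed\n");
++ */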
++
++#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
++int arch_reinit_sched_domains(void)
++{
++ int err;
++
++ mutex_lock(&sched_hotcpu_mutex);
++ detach_destroy_domains(&cpu_online_map);
++ err = arch_init_sched_domains(&cpu_online_map);
++ mutex_unlock(&sched_hotcpu_mutex);
++
++ return err;
++}
++
++static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
++{
++ int ret;
++
++ if (buf[0] != '0' && buf[0] != '1')
++ return -EINVAL;
++
++ if (smt)
++ sched_smt_power_savings = (buf[0] == '1');
++ else
++ sched_mc_power_savings = (buf[0] == '1');
++
++ ret = arch_reinit_sched_domains();
++
++ return ret ? ret : count;
++}
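++
++/*
++ * Contract sketch: only buf[0] is examined, so the "0\n"/"1\n" strings a
++ * sysfs write delivers are accepted; anything else fails before any
++ * domain rebuild is attempted:
++ *
++ *	sched_power_savings_store("1\n", 2, 0)	rebuilds, returns count (2)
++ *	sched_power_savings_store("2\n", 2, 0)	returns -EINVAL
++ */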
++
++int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
++{
++ int err = 0;
++
++#ifdef CONFIG_SCHED_SMT
++ if (smt_capable())
++ err = sysfs_create_file(&cls->kset.kobj,
++ &attr_sched_smt_power_savings.attr);
++#endif
++#ifdef CONFIG_SCHED_MC
++ if (!err && mc_capable())
++ err = sysfs_create_file(&cls->kset.kobj,
++ &attr_sched_mc_power_savings.attr);
++#endif
++ return err;
++}
++#endif
++
++#ifdef CONFIG_SCHED_MC
++static ssize_t sched_mc_power_savings_show(struct sys_device *dev, char *page)
++{
++ return sprintf(page, "%u\n", sched_mc_power_savings);
++}
++static ssize_t sched_mc_power_savings_store(struct sys_device *dev,
++ const char *buf, size_t count)
++{
++ return sched_power_savings_store(buf, count, 0);
++}
++SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show,
++ sched_mc_power_savings_store);
++#endif
++
++#ifdef CONFIG_SCHED_SMT
++static ssize_t sched_smt_power_savings_show(struct sys_device *dev, char *page)
++{
++ return sprintf(page, "%u\n", sched_smt_power_savings);
++}
++static ssize_t sched_smt_power_savings_store(struct sys_device *dev,
++ const char *buf, size_t count)
++{
++ return sched_power_savings_store(buf, count, 1);
++}
++SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show,
++ sched_smt_power_savings_store);
++#endif
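++
++/*
++ * With the entries created by sched_create_sysfs_power_savings_entries()
++ * above, these knobs surface as (assuming the usual sysfs mount):
++ *
++ *	/sys/devices/system/cpu/sched_mc_power_savings
++ *	/sys/devices/system/cpu/sched_smt_power_savings
++ */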
++
++/*
++ * Force a reinitialization of the sched domains hierarchy. The domains
++ * and groups cannot be updated in place without racing with the balancing
++ * code, so we temporarily attach all running cpus to the NULL domain
++ * which will prevent rebalancing while the sched domains are recalculated.
++ */
++static int update_sched_domains(struct notifier_block *nfb,
++ unsigned long action, void *hcpu)
++{
++ switch (action) {
++ case CPU_UP_PREPARE:
++ case CPU_UP_PREPARE_FROZEN:
++ case CPU_DOWN_PREPARE:
++ case CPU_DOWN_PREPARE_FROZEN:
++ detach_destroy_domains(&cpu_online_map);
++ return NOTIFY_OK;
++
++ case CPU_UP_CANCELED:
++ case CPU_UP_CANCELED_FROZEN:
++ case CPU_DOWN_FAILED:
++ case CPU_DOWN_FAILED_FROZEN:
++ case CPU_ONLINE:
++ case CPU_ONLINE_FROZEN:
++ case CPU_DEAD:
++ case CPU_DEAD_FROZEN:
++ /*
++ * Fall through and re-initialise the domains.
++ */
++ break;
++ default:
++ return NOTIFY_DONE;
++ }
++
++ /* The hotplug lock is already held by cpu_up/cpu_down */
++ arch_init_sched_domains(&cpu_online_map);
++
++ return NOTIFY_OK;
++}
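++
++/*
++ * Event-flow sketch for one cpu going offline through this notifier
++ * (success path):
++ *
++ *	CPU_DOWN_PREPARE -> detach_destroy_domains(&cpu_online_map)
++ *			    every cpu now sits on the NULL domain, so no
++ *			    balancing runs during the transition
++ *	CPU_DEAD	 -> arch_init_sched_domains(&cpu_online_map)
++ *			    domains rebuilt without the departed cpu
++ */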
++
++void __init sched_init_smp(void)
++{
++ cpumask_t non_isolated_cpus;
++
++ mutex_lock(&sched_hotcpu_mutex);
++ arch_init_sched_domains(&cpu_online_map);
++ cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
++ if (cpus_empty(non_isolated_cpus))
++ cpu_set(smp_processor_id(), non_isolated_cpus);
++ mutex_unlock(&sched_hotcpu_mutex);
++ /* XXX: Theoretical race here - CPU may be hotplugged now */
++ hotcpu_notifier(update_sched_domains, 0);
++
++ /* Move init over to a non-isolated CPU */
++ if (set_cpus_allowed(current, non_isolated_cpus) < 0)
++ BUG();
++}
++#else
++void __init sched_init_smp(void)
++{
++}
++#endif /* CONFIG_SMP */
++
++int in_sched_functions(unsigned long addr)
++{
++ /* Linker adds these: start and end of __sched functions */
++ extern char __sched_text_start[], __sched_text_end[];
++
++ return in_lock_functions(addr) ||
++ (addr >= (unsigned long)__sched_text_start
++ && addr < (unsigned long)__sched_text_end);
++}
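++
++/*
++ * get_wchan()-style usage sketch: when walking a sleeping task's saved
++ * return addresses, frames inside the scheduler are skipped so the
++ * reported wait channel is meaningful (eip here is one saved address):
++ *
++ *	if (!in_sched_functions(eip))
++ *		return eip;
++ */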
++
++void __init sched_init(void)
++{
++ int i, j, k;
++ int highest_cpu = 0;
++
++ for_each_possible_cpu(i) {
++ struct prio_array *array;
++ struct rq *rq;
++
++ rq = cpu_rq(i);
++ spin_lock_init(&rq->lock);
++ lockdep_set_class(&rq->lock, &rq->rq_lock_key);
++ rq->nr_running = 0;
++ rq->active = rq->arrays;
++ rq->expired = rq->arrays + 1;
++ rq->best_expired_prio = MAX_PRIO;
++
++#ifdef CONFIG_SMP
++ rq->sd = NULL;
++ for (j = 1; j < 3; j++)
++ rq->cpu_load[j] = 0;
++ rq->active_balance = 0;
++ rq->push_cpu = 0;
++ rq->cpu = i;
++ rq->migration_thread = NULL;
++ INIT_LIST_HEAD(&rq->migration_queue);
++#endif
++ atomic_set(&rq->nr_iowait, 0);
++#ifdef CONFIG_VSERVER_HARDCPU
++ INIT_LIST_HEAD(&rq->hold_queue);
++ rq->nr_onhold = 0;
++#endif
++ for (j = 0; j < 2; j++) {
++ array = rq->arrays + j;
++ for (k = 0; k < MAX_PRIO; k++) {
++ INIT_LIST_HEAD(array->queue + k);
++ __clear_bit(k, array->bitmap);
++ }
++ /* delimiter for bitsearch */
++ __set_bit(MAX_PRIO, array->bitmap);
++ }
++ highest_cpu = i;
++ }
++
++ set_load_weight(&init_task);
++
++#ifdef CONFIG_SMP
++ nr_cpu_ids = highest_cpu + 1;
++ open_softirq(SCHED_SOFTIRQ, run_rebalance_domains, NULL);
++#endif
++
++#ifdef CONFIG_RT_MUTEXES
++ plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
++#endif
++
++ /*
++ * The boot idle thread does lazy MMU switching as well:
++ */
++ atomic_inc(&init_mm.mm_count);
++ enter_lazy_tlb(&init_mm, current);
++
++ /*
++ * Make us the idle thread. Technically, schedule() should not be
++ * called from this thread; however, somewhere below it might be.
++ * Because we are the idle thread, we just pick up running again
++ * when this runqueue becomes "idle".
++ */
++ init_idle(current, smp_processor_id());
++}
++
++#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
++void __might_sleep(char *file, int line)
++{
++#ifdef in_atomic
++ static unsigned long prev_jiffy; /* ratelimiting */
++
++ if ((in_atomic() || irqs_disabled()) &&
++ system_state == SYSTEM_RUNNING && !oops_in_progress) {
++ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++ return;
++ prev_jiffy = jiffies;
++ printk(KERN_ERR "BUG: sleeping function called from invalid"
++ " context at %s:%d\n", file, line);
++ printk("in_atomic():%d, irqs_disabled():%d\n",
++ in_atomic(), irqs_disabled());
++ debug_show_held_locks(current);
++ if (irqs_disabled())
++ print_irqtrace_events(current);
++ dump_stack();
++ }
++#endif
++}
++EXPORT_SYMBOL(__might_sleep);
++#endif
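++
++/*
++ * __might_sleep() is normally reached through the might_sleep() macro,
++ * placed at the top of any function that can block; a minimal sketch
++ * (my_buf_alloc is hypothetical):
++ *
++ *	void *my_buf_alloc(size_t len)
++ *	{
++ *		might_sleep();
++ *		return kmalloc(len, GFP_KERNEL);
++ *	}
++ */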
++
++#ifdef CONFIG_MAGIC_SYSRQ
++void normalize_rt_tasks(void)
++{
++ struct prio_array *array;
++ struct task_struct *g, *p;
++ unsigned long flags;
++ struct rq *rq;
++
++ read_lock_irq(&tasklist_lock);
++
++ do_each_thread(g, p) {
++ if (!rt_task(p))
++ continue;
++
++ spin_lock_irqsave(&p->pi_lock, flags);
++ rq = __task_rq_lock(p);
++
++ array = p->array;
++ if (array)
++ deactivate_task(p, task_rq(p));
++ __setscheduler(p, SCHED_NORMAL, 0);
++ if (array) {
++ vx_activate_task(p);
++ __activate_task(p, task_rq(p));
++ resched_task(rq->curr);
++ }
++
++ __task_rq_unlock(rq);
++ spin_unlock_irqrestore(&p->pi_lock, flags);
++ } while_each_thread(g, p);
++
++ read_unlock_irq(&tasklist_lock);
++}
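++
++/*
++ * normalize_rt_tasks() is reached via the magic SysRq 'n' key (see
++ * drivers/char/sysrq.c), handing every RT task back to SCHED_NORMAL
++ * when a runaway real-time workload has wedged the box.
++ */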
++
++#endif /* CONFIG_MAGIC_SYSRQ */
++
++#ifdef CONFIG_IA64
++/*
++ * These functions are only useful for the IA64 MCA handling.
++ *
++ * They can only be called when the whole system has been
++ * stopped - every CPU needs to be quiescent, and no scheduling
++ * activity can take place. Using them for anything else would
++ * be a serious bug, and as a result, they aren't even visible
++ * under any other configuration.
++ */
++
++/**
++ * curr_task - return the current task for a given cpu.
++ * @cpu: the processor in question.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ */
++struct task_struct *curr_task(int cpu)
++{
++ return cpu_curr(cpu);
++}
++
++/**
++ * set_curr_task - set the current task for a given cpu.
++ * @cpu: the processor in question.
++ * @p: the task pointer to set.
++ *
++ * Description: This function must only be used when non-maskable interrupts
++ * are serviced on a separate stack. It allows the architecture to switch the
++ * notion of the current task on a cpu in a non-blocking manner. This function
++ * must be called with all CPUs synchronized and interrupts disabled; the
++ * caller must save the original value of the current task (see
++ * curr_task() above) and restore that value before reenabling interrupts and
++ * re-starting the system.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ */
++void set_curr_task(int cpu, struct task_struct *p)
++{
++ cpu_curr(cpu) = p;
++}
++
++#endif
+diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/kernel/timer.S linux-2.6.22-590/kernel/timer.S
+--- linux-2.6.22-580/kernel/timer.S 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.22-590/kernel/timer.S 2009-02-18 09:57:23.000000000 -0500
+@@ -0,0 +1,32311 @@
++ .file "timer.c"
++ .section .debug_abbrev,"",@progbits
++.Ldebug_abbrev0:
++ .section .debug_info,"",@progbits
++.Ldebug_info0:
++ .section .debug_line,"",@progbits
++.Ldebug_line0:
++ .text
++.Ltext0:
++.globl __round_jiffies
++ .type __round_jiffies, @function
++__round_jiffies:
++.LFB883:
++ .file 1 "kernel/timer.c"
++ .loc 1 138 0
++.LVL0:
++ pushl %edi
++.LCFI0:
++ pushl %esi
++.LCFI1:
++ .loc 1 150 0
++ leal (%edx,%edx,2), %esi
++ .loc 1 152 0
++ movl $250, %edx
++.LVL1:
++ .loc 1 150 0
++ leal (%eax,%esi), %ecx
++.LVL2:
++ .loc 1 152 0
++ movl %edx, %edi
++ .loc 1 138 0
++ pushl %ebx
++.LCFI2:
++ .loc 1 152 0
++ xorl %edx, %edx
++ .loc 1 138 0
++ movl %eax, %ebx
++ .loc 1 152 0
++ movl %ecx, %eax
++.LVL3:
++ divl %edi
++ .loc 1 138 0
++ subl $4, %esp
++.LCFI3:
++ .loc 1 160 0
++ cmpl $61, %edx
++ jg .L2
++ .loc 1 161 0
++ movl %ecx, %eax
++.LVL4:
++ subl %edx, %eax
++ jmp .L4
++.LVL5:
++.L2:
++ .loc 1 163 0
++ subl %edx, %ecx
++ leal 250(%ecx), %eax
++.LVL6:
++.L4:
++ .loc 1 166 0
++ movl %eax, %edx
++.LVL7:
++ .loc 1 168 0
++ movl jiffies, %eax
++.LVL8:
++ .loc 1 166 0
++ subl %esi, %edx
++ .loc 1 168 0
++ cmpl %eax, %edx
++ cmova %edx, %ebx
++ .loc 1 171 0
++ popl %edx
++.LVL9:
++ movl %ebx, %eax
++ popl %ebx
++.LVL10:
++ popl %esi
++ popl %edi
++ ret
++.LFE883:
++ .size __round_jiffies, .-__round_jiffies
++.globl __round_jiffies_relative
++ .type __round_jiffies_relative, @function
++__round_jiffies_relative:
++.LFB884:
++ .loc 1 195 0
++.LVL11:
++ .loc 1 202 0
++ movl jiffies, %ecx
++ addl %ecx, %eax
++.LVL12:
++ call __round_jiffies
++.LVL13:
++ movl jiffies, %edx
++ subl %edx, %eax
++ .loc 1 203 0
++ ret
++.LFE884:
++ .size __round_jiffies_relative, .-__round_jiffies_relative
++.globl round_jiffies
++ .type round_jiffies, @function
++round_jiffies:
++.LFB885:
++ .loc 1 222 0
++.LVL14:
++.LBB179:
++ .loc 1 223 0
++#APP
++ movl %fs:per_cpu__cpu_number,%edx
++.LVL15:
++#NO_APP
++.LBE179:
++ jmp __round_jiffies
++.LVL16:
++.LFE885:
++ .size round_jiffies, .-round_jiffies
++.globl round_jiffies_relative
++ .type round_jiffies_relative, @function
++round_jiffies_relative:
++.LFB886:
++ .loc 1 243 0
++.LVL17:
++.LBB180:
++ .loc 1 244 0
++#APP
++ movl %fs:per_cpu__cpu_number,%edx
++.LVL18:
++#NO_APP
++.LBE180:
++ jmp __round_jiffies_relative
++.LVL19:
++.LFE886:
++ .size round_jiffies_relative, .-round_jiffies_relative
++ .type internal_add_timer, @function
++internal_add_timer:
++.LFB888:
++ .loc 1 258 0
++.LVL20:
++ pushl %esi
++.LCFI4:
++ pushl %ebx
++.LCFI5:
++ .loc 1 258 0
++ movl %eax, %ebx
++ .loc 1 259 0
++ movl 8(%edx), %eax
++.LVL21:
++ .loc 1 260 0
++ movl 8(%ebx), %esi
++ movl %eax, %ecx
++.LVL22:
++ subl %esi, %ecx
++ .loc 1 263 0
++ cmpl $255, %ecx
++ jbe .L28
++ .loc 1 266 0
++ cmpl $16383, %ecx
++ ja .L19
++.LBB181:
++ .loc 1 268 0
++ shrl $5, %eax
++ andl $504, %eax
++ leal 2060(%eax,%ebx), %ecx
++.LVL23:
++ jmp .L18
++.LVL24:
++.L19:
++.LBE181:
++ .loc 1 269 0
++ cmpl $1048575, %ecx
++ ja .L21
++.LBB182:
++ .loc 1 271 0
++ shrl $11, %eax
++ andl $504, %eax
++ leal 2572(%eax,%ebx), %ecx
++.LVL25:
++ jmp .L18
++.LVL26:
++.L21:
++.LBE182:
++ .loc 1 272 0
++ cmpl $67108863, %ecx
++ ja .L23
++.LBB183:
++ .loc 1 274 0
++ shrl $17, %eax
++ andl $504, %eax
++ leal 3084(%eax,%ebx), %ecx
++.LVL27:
++ jmp .L18
++.LVL28:
++.L23:
++.LBE183:
++ .loc 1 275 0
++ testl %ecx, %ecx
++ jns .L25
++ .loc 1 280 0
++ movl %esi, %eax
++.LVL29:
++.L28:
++.LVL30:
++ andl $255, %eax
++ leal 12(%ebx,%eax,8), %ecx
++.LVL31:
++ jmp .L18
++.LVL32:
++.L25:
++.LBB184:
++ .loc 1 291 0
++ shrl $26, %eax
++ leal 3596(%ebx,%eax,8), %ecx
++.LVL33:
++.L18:
++.LBE184:
++.LBB185:
++.LBB186:
++ .file 2 "include/linux/list.h"
++ .loc 2 86 0
++ movl 4(%ecx), %eax
++.LVL34:
++.LBB187:
++.LBB188:
++ .loc 2 48 0
++ movl %ecx, (%edx)
++ .loc 2 47 0
++ movl %edx, 4(%ecx)
++ .loc 2 50 0
++ movl %edx, (%eax)
++.LBE188:
++.LBE187:
++.LBE186:
++.LBE185:
++ .loc 1 297 0
++ popl %ebx
++.LVL35:
++.LBB189:
++.LBB190:
++.LBB191:
++.LBB192:
++ .loc 2 49 0
++ movl %eax, 4(%edx)
++.LBE192:
++.LBE191:
++.LBE190:
++.LBE189:
++ .loc 1 297 0
++ popl %esi
++ ret
++.LFE888:
++ .size internal_add_timer, .-internal_add_timer
++.globl init_timer
++ .type init_timer, @function
++init_timer:
++.LFB889:
++ .loc 1 319 0
++.LVL36:
++ .loc 1 320 0
++ movl $0, (%eax)
++.LBB193:
++ .loc 1 321 0
++ movl $per_cpu__tvec_bases, %edx
++.LBB194:
++#APP
++ movl %fs:per_cpu__this_cpu_off,%ecx
++.LVL37:
++#NO_APP
++.LBE194:
++.LBE193:
++ movl (%edx,%ecx), %edx
++ movl %edx, 20(%eax)
++ .loc 1 327 0
++ ret
++.LFE889:
++ .size init_timer, .-init_timer
++.globl init_timer_deferrable
++ .type init_timer_deferrable, @function
++init_timer_deferrable:
++.LFB890:
++ .loc 1 331 0
++.LVL38:
++ pushl %ebx
++.LCFI6:
++ .loc 1 331 0
++ movl %eax, %ebx
++ .loc 1 332 0
++ call init_timer
++.LVL39:
++.LBB197:
++.LBB198:
++ .loc 1 106 0
++ orl $1, 20(%ebx)
++.LBE198:
++.LBE197:
++ .loc 1 334 0
++ popl %ebx
++.LVL40:
++ ret
++.LFE890:
++ .size init_timer_deferrable, .-init_timer_deferrable
++ .section .rodata.str1.1,"aMS",@progbits,1
++.LC0:
++ .string "kernel/timer.c"
++ .text
++ .type cascade, @function
++cascade:
++.LFB899:
++ .loc 1 581 0
++.LVL41:
++ pushl %edi
++.LCFI7:
++ movl %eax, %edi
++ pushl %esi
++.LCFI8:
++ movl %ecx, %esi
++ pushl %ebx
++.LCFI9:
++ subl $8, %esp
++.LCFI10:
++ .loc 1 581 0
++ leal (%edx,%ecx,8), %eax
++.LVL42:
++.LBB199:
++.LBB200:
++.LBB201:
++.LBB202:
++ .loc 2 218 0
++ movl (%eax), %edx
++.LVL43:
++ .loc 2 219 0
++ movl %esp, 4(%edx)
++ .loc 2 218 0
++ movl %edx, (%esp)
++ .loc 2 220 0
++ movl 4(%eax), %edx
++ .loc 2 221 0
++ movl %esp, (%edx)
++.LBE202:
++.LBE201:
++.LBB203:
++.LBB204:
++ .loc 2 32 0
++ movl %eax, (%eax)
++.LBE204:
++.LBE203:
++.LBB205:
++.LBB206:
++ .loc 2 220 0
++ movl %edx, 4(%esp)
++.LBE206:
++.LBE205:
++.LBE200:
++.LBE199:
++.LBB207:
++ .loc 1 592 0
++ movl (%esp), %edx
++.LVL44:
++.LBE207:
++.LBB208:
++.LBB209:
++.LBB210:
++.LBB211:
++ .loc 2 33 0
++ movl %eax, 4(%eax)
++.LBE211:
++.LBE210:
++.LBE209:
++.LBE208:
++.LBB212:
++ .loc 1 592 0
++ movl (%edx), %ebx
++.LVL45:
++ jmp .L34
++.L35:
++.LBE212:
++ .loc 1 593 0
++ movl 20(%edx), %eax
++.LVL46:
++ andl $-2, %eax
++ cmpl %edi, %eax
++ je .L36
++#APP
++ 1: ud2
++.pushsection __bug_table,"a"
++2: .long 1b, .LC0
++ .word 593, 0
++ .org 2b+12
++.popsection
++#NO_APP
++.L38:
++ jmp .L38
++.L36:
++ .loc 1 594 0
++ movl %edi, %eax
++ call internal_add_timer
++.LVL47:
++.LBB213:
++ .loc 1 592 0
++ movl %ebx, %edx
++.LVL48:
++ movl (%ebx), %ebx
++.LVL49:
++.L34:
++.LBE213:
++ cmpl %esp, %edx
++ jne .L35
++.LVL50:
++ .loc 1 598 0
++ popl %ecx
++.LVL51:
++ movl %esi, %eax
++.LVL52:
++ popl %ebx
++.LVL53:
++ popl %ebx
++ popl %esi
++.LVL54:
++ popl %edi
++.LVL55:
++ ret
++.LFE899:
++ .size cascade, .-cascade
++ .section .rodata.str1.1
++.LC1:
++ .string "WARNING: at %s:%d %s()\n"
++ .section .init.text,"ax",@progbits
++ .type timer_cpu_notify, @function
++timer_cpu_notify:
++.LFB923:
++ .loc 1 1336 0
++.LVL56:
++ pushl %ebp
++.LCFI11:
++ pushl %edi
++.LCFI12:
++ pushl %esi
++.LCFI13:
++ movl %ecx, %esi
++ pushl %ebx
++.LCFI14:
++ subl $16, %esp
++.LCFI15:
++ .loc 1 1338 0
++ cmpl $3, %edx
++ je .L43
++.LVL57:
++ cmpl $19, %edx
++ movl $1, %eax
++.LVL58:
++ jne .L44
++.LVL59:
++.L43:
++.LBB240:
++.LBB241:
++ .loc 1 1238 0
++ cmpb $0, tvec_base_done.19028(%esi)
++ jne .L45
++.LBB242:
++ .loc 1 1241 0
++ cmpb $0, boot_done.19029
++ je .L47
++.L48:
++.LBB243:
++.LBB244:
++.LBB245:
++ .file 3 "include/linux/slab_def.h"
++ .loc 3 49 0
++ movl malloc_sizes+100, %eax
++.LVL60:
++ movl $208, %edx
++.LVL61:
++ call kmem_cache_alloc
++ movl %eax, %edi
++.LBE245:
++.LBE244:
++.LBE243:
++ .loc 1 1247 0
++ movl $32770, %eax
++ testl %edi, %edi
++ je .L44
++.LVL62:
++ .loc 1 1251 0
++ movl %edi, %eax
++.LVL63:
++ .loc 1 1245 0
++ movl %edi, %ebx
++.LVL64:
++ .loc 1 1251 0
++ andl $1, %eax
++ je .L51
++.LBB246:
++ .loc 1 1252 0
++ movl $__func__.19031, 12(%esp)
++ movl $1252, 8(%esp)
++ movl $.LC0, 4(%esp)
++ movl $.LC1, (%esp)
++ call printk
++ call dump_stack
++.LBE246:
++ .loc 1 1253 0
++ movl %edi, %eax
++ call kfree
++ movl $32770, %eax
++ jmp .L44
++.LVL65:
++.L51:
++.LBB247:
++.LBB248:
++.LBB249:
++ .file 4 "include/asm/string.h"
++ .loc 4 447 0
++ movl $1056, %ecx
++.LVL66:
++#APP
++ rep ; stosl
++.LVL67:
++#NO_APP
++.LBE249:
++.LBE248:
++.LBE247:
++.LBE242:
++.LBE241:
++ .loc 1 1257 0
++ movl __per_cpu_offset(,%esi,4), %edx
++.LBB250:
++.LBB251:
++.LBB252:
++ movl $per_cpu__tvec_bases, %eax
++.LBE252:
++.LBE251:
++.LBE250:
++ movl %ebx, (%eax,%edx)
++ jmp .L53
++.LVL68:
++.L47:
++.LBB253:
++.LBB254:
++ .loc 1 1265 0
++ movb $1, boot_done.19029
++ movl $boot_tvec_bases, %ebx
++.LVL69:
++.L53:
++ .loc 1 1268 0
++ movb $1, tvec_base_done.19028(%esi)
++ jmp .L54
++.LVL70:
++.L45:
++.LBE254:
++.LBE253:
++ .loc 1 1270 0
++ movl __per_cpu_offset(,%esi,4), %edx
++.LVL71:
++.LBB255:
++.LBB256:
++ movl $per_cpu__tvec_bases, %eax
++.LVL72:
++.LBE256:
++.LBE255:
++ movl (%eax,%edx), %ebx
++.LVL73:
++.L54:
++ .loc 1 1273 0
++ movl $1, (%ebx)
++ xorl %ebp, %ebp
++.LVL74:
++ leal 2048(%ebx), %edx
++.LVL75:
++ leal 2560(%ebx), %esi
++.LVL76:
++ leal 3072(%ebx), %edi
++.LVL77:
++ leal 3584(%ebx), %ecx
++.LVL78:
++.L55:
++ leal 12(%ecx), %eax
++.LVL79:
++.LBB257:
++ .loc 1 1276 0
++ incl %ebp
++.LBB258:
++.LBB259:
++ .loc 2 32 0
++ movl %eax, 1548(%edx)
++.LBE259:
++.LBE258:
++ .loc 1 1276 0
++ addl $8, %ecx
++.LBB260:
++.LBB261:
++ .loc 2 33 0
++ movl %eax, 1552(%edx)
++.LBE261:
++.LBE260:
++ .loc 1 1278 0
++ leal 12(%edi), %eax
++.LVL80:
++ .loc 1 1276 0
++ addl $8, %edi
++.LBB262:
++.LBB263:
++ .loc 2 32 0
++ movl %eax, 1036(%edx)
++ .loc 2 33 0
++ movl %eax, 1040(%edx)
++.LBE263:
++.LBE262:
++ .loc 1 1279 0
++ leal 12(%esi), %eax
++.LVL81:
++ .loc 1 1276 0
++ addl $8, %esi
++.LBB264:
++.LBB265:
++ .loc 2 32 0
++ movl %eax, 524(%edx)
++ .loc 2 33 0
++ movl %eax, 528(%edx)
++.LBE265:
++.LBE264:
++ .loc 1 1280 0
++ leal 12(%edx), %eax
++.LVL82:
++.LBB266:
++.LBB267:
++ .loc 2 32 0
++ movl %eax, 12(%edx)
++ .loc 2 33 0
++ movl %eax, 16(%edx)
++.LBE267:
++.LBE266:
++ .loc 1 1276 0
++ addl $8, %edx
++ cmpl $64, %ebp
++ jne .L55
++ xorl %ecx, %ecx
++.LVL83:
++ movl %ebx, %edx
++.LVL84:
++.L57:
++ leal 12(%edx), %eax
++.LVL85:
++ .loc 1 1282 0
++ incl %ecx
++.LBB268:
++.LBB269:
++ .loc 2 32 0
++ movl %eax, 12(%edx)
++ .loc 2 33 0
++ movl %eax, 16(%edx)
++.LBE269:
++.LBE268:
++ .loc 1 1282 0
++ addl $8, %edx
++ cmpl $256, %ecx
++ jne .L57
++ .loc 1 1285 0
++ movl jiffies, %eax
++.LVL86:
++ movl %eax, 8(%ebx)
++ movl $1, %eax
++.LVL87:
++.L44:
++.LBE257:
++.LBE240:
++ .loc 1 1354 0
++ addl $16, %esp
++ popl %ebx
++.LVL88:
++ popl %esi
++.LVL89:
++ popl %edi
++.LVL90:
++ popl %ebp
++.LVL91:
++ ret
++.LFE923:
++ .size timer_cpu_notify, .-timer_cpu_notify
++.globl init_timers
++ .type init_timers, @function
++init_timers:
++.LFB924:
++ .loc 1 1362 0
++ .loc 1 1364 0
++ movl $3, %edx
++ movl $timers_nb, %eax
++.LBB273:
++#APP
++ movl %fs:per_cpu__cpu_number,%ecx
++.LVL92:
++#NO_APP
++.LBE273:
++ call timer_cpu_notify
++.LVL93:
++.LVL94:
++ .loc 1 1368 0
++ cmpl $32770, %eax
++ jne .L65
++#APP
++ 1: ud2
++.pushsection __bug_table,"a"
++2: .long 1b, .LC0
++ .word 1368, 0
++ .org 2b+12
++.popsection
++#NO_APP
++.L67:
++ jmp .L67
++.L65:
++ .loc 1 1369 0
++ movl $timers_nb, %eax
++.LVL95:
++ call register_cpu_notifier
++ .loc 1 1370 0
++ xorl %ecx, %ecx
++ movl $run_timer_softirq, %edx
++ movl $1, %eax
++ jmp open_softirq
++.LFE924:
++ .size init_timers, .-init_timers
++ .section .rodata.str1.1
++.LC2:
++ .string "<4>huh, entered %p with preempt_count %08x, exited with %08x?\n"
++ .text
++ .type run_timer_softirq, @function
++run_timer_softirq:
++.LFB904:
++ .loc 1 872 0
++.LVL96:
++ pushl %edi
++.LCFI16:
++.LBB322:
++ .loc 1 873 0
++ movl $per_cpu__tvec_bases, %eax
++.LVL97:
++.LBE322:
++ .loc 1 872 0
++ pushl %esi
++.LCFI17:
++ pushl %ebx
++.LCFI18:
++ subl $24, %esp
++.LCFI19:
++.LBB323:
++ .loc 1 873 0
++.LBB324:
++#APP
++ movl %fs:per_cpu__this_cpu_off,%edx
++.LVL98:
++#NO_APP
++.LBE324:
++.LBE323:
++ movl (%eax,%edx), %esi
++.LVL99:
++ .loc 1 875 0
++ call hrtimer_run_queues
++.LVL100:
++ .loc 1 877 0
++ movl jiffies, %eax
++ cmpl 8(%esi), %eax
++ js .L85
++.LBB325:
++.LBB326:
++ .loc 1 613 0
++ movl %esi, %eax
++ call _spin_lock_irq
++ jmp .L72
++.LVL101:
++.L73:
++.LBB327:
++ .loc 1 622 0
++ movl %ecx, %ebx
++.LVL102:
++ andl $255, %ebx
++ jne .L74
++.LBE327:
++.LBE326:
++ shrl $8, %ecx
++ movl %esi, %eax
++ andl $63, %ecx
++ leal 2060(%esi), %edx
++.LVL103:
++ call cascade
++.LBB328:
++.LBB329:
++ testl %eax, %eax
++ jne .L74
++.LBE329:
++.LBE328:
++ movl 8(%esi), %ecx
++ leal 2572(%esi), %edx
++ movl %esi, %eax
++ shrl $14, %ecx
++ andl $63, %ecx
++ call cascade
++.LBB330:
++.LBB331:
++ testl %eax, %eax
++ jne .L74
++.LBE331:
++.LBE330:
++ movl 8(%esi), %ecx
++ leal 3084(%esi), %edx
++ movl %esi, %eax
++ shrl $20, %ecx
++ andl $63, %ecx
++ call cascade
++.LBB332:
++.LBB333:
++ testl %eax, %eax
++ jne .L74
++ .loc 1 626 0
++ movl 8(%esi), %ecx
++ leal 3596(%esi), %edx
++ movl %esi, %eax
++ shrl $26, %ecx
++ call cascade
++.LVL104:
++.L74:
++ .loc 1 627 0
++ incl 8(%esi)
++ leal (%esi,%ebx,8), %ecx
++.LBB334:
++.LBB335:
++.LBB336:
++.LBB337:
++ .loc 2 219 0
++ leal 16(%esp), %ebx
++.LVL105:
++ .loc 2 218 0
++ movl 12(%ecx), %eax
++.LBE337:
++.LBE336:
++.LBE335:
++.LBE334:
++ .loc 1 627 0
++ leal 12(%ecx), %edx
++.LBB338:
++.LBB339:
++.LBB340:
++.LBB341:
++ .loc 2 219 0
++ movl %ebx, 4(%eax)
++ .loc 2 218 0
++ movl %eax, 16(%esp)
++ .loc 2 220 0
++ movl 4(%edx), %eax
++ movl %eax, 20(%esp)
++ .loc 2 221 0
++ movl %ebx, (%eax)
++.LBE341:
++.LBE340:
++.LBB342:
++.LBB343:
++ .loc 2 33 0
++ movl %edx, 4(%edx)
++ .loc 2 32 0
++ movl %edx, 12(%ecx)
++ jmp .L79
++.L80:
++.LBE343:
++.LBE342:
++.LBE339:
++.LBE338:
++.LBB344:
++ .loc 1 634 0
++ movl 12(%ebx), %edi
++ .loc 1 635 0
++ movl 16(%ebx), %eax
++.LVL106:
++.LBB345:
++.LBB346:
++ .loc 1 253 0
++ movl %ebx, 4(%esi)
++.LBE346:
++.LBE345:
++.LBB347:
++.LBB348:
++ .loc 1 342 0
++ movl (%ebx), %ecx
++.LVL107:
++ movl 4(%ebx), %edx
++.LVL108:
++.LBB349:
++.LBB350:
++ .loc 2 157 0
++ movl %edx, 4(%ecx)
++ .loc 2 158 0
++ movl %ecx, (%edx)
++.LBE350:
++.LBE349:
++ .loc 1 345 0
++ movl $2097664, 4(%ebx)
++ .loc 1 344 0
++ movl $0, (%ebx)
++.LBE348:
++.LBE347:
++.LBB351:
++.LBB352:
++ .file 5 "include/asm/spinlock.h"
++ .loc 5 108 0
++#APP
++ movb $1,(%esi)
++#NO_APP
++.LBE352:
++.LBE351:
++.LBB353:
++.LBB354:
++.LBB355:
++.LBB356:
++ .file 6 "include/asm/irqflags.h"
++ .loc 6 36 0
++#APP
++ sti
++#NO_APP
++.LBE356:
++.LBE355:
++.LBE354:
++.LBE353:
++.LBB357:
++ .loc 1 643 0
++ movl %esp, %edx
++.LVL109:
++ andl $-8192, %edx
++ movl 20(%edx), %ebx
++.LVL110:
++ .loc 1 644 0
++ call *%edi
++.LVL111:
++ .loc 1 645 0
++ movl %esp, %eax
++ andl $-8192, %eax
++ movl 20(%eax), %eax
++ cmpl %eax, %ebx
++ je .L81
++ .loc 1 646 0
++ movl %ebx, 8(%esp)
++ movl %edi, 4(%esp)
++ movl %eax, 12(%esp)
++ movl $.LC2, (%esp)
++ call printk
++ .loc 1 651 0
++#APP
++ 1: ud2
++.pushsection __bug_table,"a"
++2: .long 1b, .LC0
++ .word 651, 0
++ .org 2b+12
++.popsection
++#NO_APP
++.L83:
++ jmp .L83
++.L81:
++.LBE357:
++ .loc 1 654 0
++ movl %esi, %eax
++ call _spin_lock_irq
++.LVL112:
++.L79:
++.LBE344:
++.LBB358:
++.LBB359:
++ .loc 2 300 0
++ movl 16(%esp), %ebx
++.LVL113:
++.LBE359:
++.LBE358:
++ .loc 1 629 0
++ leal 16(%esp), %eax
++ cmpl %eax, %ebx
++ jne .L80
++.L72:
++.LBE333:
++ .loc 1 614 0
++ movl jiffies, %eax
++ movl 8(%esi), %ecx
++ cmpl %ecx, %eax
++ jns .L73
++.LBB360:
++.LBB361:
++ .loc 1 253 0
++ movl $0, 4(%esi)
++.LBE361:
++.LBE360:
++.LBB362:
++.LBB363:
++ .loc 5 108 0
++#APP
++ movb $1,(%esi)
++#NO_APP
++.LBE363:
++.LBE362:
++.LBB364:
++.LBB365:
++.LBB366:
++.LBB367:
++ .loc 6 36 0
++#APP
++ sti
++#NO_APP
++.L85:
++.LBE367:
++.LBE366:
++.LBE365:
++.LBE364:
++.LBE332:
++.LBE325:
++ .loc 1 879 0
++ addl $24, %esp
++ popl %ebx
++ popl %esi
++.LVL114:
++ popl %edi
++.LVL115:
++ ret
++.LFE904:
++ .size run_timer_softirq, .-run_timer_softirq
++.globl do_sysinfo
++ .type do_sysinfo, @function
++do_sysinfo:
++.LFB920:
++ .loc 1 1132 0
++.LVL116:
++ pushl %edi
++.LCFI20:
++.LBB368:
++.LBB369:
++.LBB370:
++ .loc 4 447 0
++ movl $16, %ecx
++.LBE370:
++.LBE369:
++.LBE368:
++ .loc 1 1132 0
++ pushl %ebx
++.LCFI21:
++ movl %eax, %ebx
++ subl $8, %esp
++.LCFI22:
++.LBB371:
++.LBB372:
++.LBB373:
++ .loc 4 447 0
++ xorl %eax, %eax
++.LVL117:
++ movl %ebx, %edi
++.LVL118:
++#APP
++ rep ; stosl
++.LVL119:
++.LVL120:
++#NO_APP
++.L87:
++.LBE373:
++.LBE372:
++.LBE371:
++.LBB374:
++.LBB375:
++.LBB376:
++ .file 7 "include/linux/seqlock.h"
++ .loc 7 88 0
++ movl xtime_lock, %edi
++.LVL121:
++ .loc 7 89 0
++#APP
++ 661:
++ lock; addl $0,0(%esp)
++662:
++.section .altinstructions,"a"
++ .align 4
++ .long 661b
++ .long 663f
++ .byte 26
++ .byte 662b-661b
++ .byte 664f-663f
++.previous
++.section .altinstr_replacement,"ax"
++663:
++ lfence
++664:
++.previous
++#NO_APP
++.LBE376:
++.LBE375:
++ .loc 1 1150 0
++ movl %esp, %eax
++ call getnstimeofday
++.LVL122:
++ .loc 1 1152 0
++ movl wall_to_monotonic+4, %eax
++ .loc 1 1151 0
++ movl wall_to_monotonic, %edx
++ .loc 1 1152 0
++ addl 4(%esp), %eax
++ .loc 1 1151 0
++ addl (%esp), %edx
++ .loc 1 1153 0
++ cmpl $999999999, %eax
++ .loc 1 1151 0
++ movl %edx, (%esp)
++ .loc 1 1152 0
++ movl %eax, 4(%esp)
++ .loc 1 1153 0
++ jle .L88
++ .loc 1 1154 0
++ subl $1000000000, %eax
++ movl %eax, 4(%esp)
++ .loc 1 1155 0
++ leal 1(%edx), %eax
++ movl %eax, (%esp)
++.L88:
++ .loc 1 1157 0
++ xorl %eax, %eax
++ cmpl $0, 4(%esp)
++ setne %al
++ addl (%esp), %eax
++ movl %eax, (%ebx)
++ .loc 1 1159 0
++ movl avenrun, %eax
++ sall $5, %eax
++ movl %eax, 4(%ebx)
++ .loc 1 1160 0
++ movl avenrun+4, %eax
++ sall $5, %eax
++ movl %eax, 8(%ebx)
++ .loc 1 1161 0
++ movl avenrun+8, %eax
++ sall $5, %eax
++ movl %eax, 12(%ebx)
++ .loc 1 1163 0
++ movl nr_threads, %eax
++ movw %ax, 40(%ebx)
++.LBE374:
++.LBB377:
++.LBB378:
++ .loc 7 103 0
++#APP
++ 661:
++ lock; addl $0,0(%esp)
++662:
++.section .altinstructions,"a"
++ .align 4
++ .long 661b
++ .long 663f
++ .byte 26
++ .byte 662b-661b
++ .byte 664f-663f
++.previous
++.section .altinstr_replacement,"ax"
++663:
++ lfence
++664:
++.previous
++#NO_APP
++.LBE378:
++.LBE377:
++ .loc 1 1164 0
++ movl %edi, %eax
++ xorl xtime_lock, %edi
++ andl $1, %eax
++ orl %edi, %eax
++ jne .L87
++ .loc 1 1166 0
++ movl %ebx, %eax
++ call si_meminfo
++ .loc 1 1167 0
++ movl %ebx, %eax
++ call si_swapinfo
++ .loc 1 1178 0
++ movl 16(%ebx), %eax
++ movl 32(%ebx), %ecx
++ leal (%eax,%ecx), %edx
++.LVL123:
++ .loc 1 1179 0
++ cmpl %eax, %edx
++ jb .L91
++ cmpl %ecx, %edx
++ jb .L91
++ .loc 1 1182 0
++ movl 52(%ebx), %eax
++.LVL124:
++ xorl %ecx, %ecx
++.LVL125:
++ jmp .L94
++.LVL126:
++.L95:
++ .loc 1 1187 0
++ leal (%edx,%edx), %edi
++.LVL127:
++ .loc 1 1188 0
++ cmpl %edx, %edi
++ jb .L91
++ .loc 1 1184 0
++ incl %ecx
++ .loc 1 1185 0
++ movl %edi, %edx
++ shrl %eax
++.LVL128:
++.L94:
++ .loc 1 1183 0
++ cmpl $1, %eax
++ ja .L95
++ .loc 1 1200 0
++ sall %cl, 16(%ebx)
++ .loc 1 1201 0
++ sall %cl, 20(%ebx)
++ .loc 1 1202 0
++ sall %cl, 24(%ebx)
++ .loc 1 1203 0
++ sall %cl, 28(%ebx)
++ .loc 1 1204 0
++ sall %cl, 32(%ebx)
++ .loc 1 1205 0
++ sall %cl, 36(%ebx)
++ .loc 1 1206 0
++ sall %cl, 44(%ebx)
++ .loc 1 1207 0
++ sall %cl, 48(%ebx)
++ .loc 1 1199 0
++ movl $1, 52(%ebx)
++.L91:
++ .loc 1 1211 0
++ popl %edi
++.LVL129:
++ xorl %eax, %eax
++.LVL130:
++ popl %edx
++.LVL131:
++ popl %ebx
++.LVL132:
++ popl %edi
++ ret
++.LFE920:
++ .size do_sysinfo, .-do_sysinfo
++.globl sys_sysinfo
++ .type sys_sysinfo, @function
++sys_sysinfo:
++.LFB921:
++ .loc 1 1214 0
++.LVL133:
++ pushl %ebx
++.LCFI23:
++ subl $64, %esp
++.LCFI24:
++ .loc 1 1217 0
++ movl %esp, %eax
++ call do_sysinfo
++ .loc 1 1219 0
++ movl $64, %ecx
++ movl 72(%esp), %eax
++ movl %esp, %edx
++ call copy_to_user
++ cmpl $1, %eax
++ sbbl %eax, %eax
++ .loc 1 1223 0
++ addl $64, %esp
++ popl %ebx
++ .loc 1 1219 0
++ notl %eax
++ andl $-14, %eax
++ .loc 1 1223 0
++ ret
++.LFE921:
++ .size sys_sysinfo, .-sys_sysinfo
++ .type process_timeout, @function
++process_timeout:
++.LFB915:
++ .loc 1 1025 0
++.LVL134:
++ .loc 1 1026 0
++ jmp wake_up_process
++.LVL135:
++.LFE915:
++ .size process_timeout, .-process_timeout
++.globl sys_alarm
++ .type sys_alarm, @function
++sys_alarm:
++.LFB908:
++ .loc 1 919 0
++.LVL136:
++ .loc 1 919 0
++ movl 4(%esp), %eax
++ .loc 1 920 0
++ jmp alarm_setitimer
++.LFE908:
++ .size sys_alarm, .-sys_alarm
++.globl do_timer
++ .type do_timer, @function
++do_timer:
++.LFB907:
++ .loc 1 907 0
++.LVL137:
++ pushl %ebp
++.LCFI25:
++ .loc 1 908 0
++ xorl %edx, %edx
++.LVL138:
++ .loc 1 907 0
++ pushl %edi
++.LCFI26:
++ pushl %esi
++.LCFI27:
++ pushl %ebx
++.LCFI28:
++ movl %eax, %ebx
++ subl $4, %esp
++.LCFI29:
++ .loc 1 908 0
++ addl %eax, jiffies_64
++ adcl %edx, jiffies_64+4
++.LBB385:
++.LBB386:
++ .loc 1 896 0
++ call update_wall_time
++.LVL139:
++.LBB387:
++.LBB388:
++ .loc 1 856 0
++ movl count.18791, %eax
++ subl %ebx, %eax
++ .loc 1 857 0
++ testl %eax, %eax
++ .loc 1 856 0
++ movl %eax, count.18791
++ .loc 1 857 0
++ jns .L115
++.LBB389:
++ .loc 1 832 0
++ call nr_active
++.LBE389:
++ .loc 1 862 0
++ movl count.18791, %esi
++ movl avenrun, %ebx
++.LVL140:
++ movl avenrun+4, %ecx
++.LVL141:
++.LBB390:
++.LBB391:
++ .loc 1 832 0
++ sall $11, %eax
++.LBE391:
++.LBE390:
++ .loc 1 860 0
++ imull $164, %eax, %edx
++ .loc 1 861 0
++ imull $34, %eax, %ebp
++ .loc 1 862 0
++ imull $11, %eax, %edi
++ .loc 1 860 0
++ movl %edx, (%esp)
++ movl avenrun+8, %edx
++.L113:
++ .loc 1 861 0
++ imull $2014, %ecx, %eax
++.LVL142:
++ .loc 1 860 0
++ imull $1884, %ebx, %ebx
++ .loc 1 861 0
++ leal (%eax,%ebp), %ecx
++ .loc 1 862 0
++ imull $2037, %edx, %eax
++ .loc 1 860 0
++ addl (%esp), %ebx
++ .loc 1 861 0
++ shrl $11, %ecx
++ .loc 1 862 0
++ leal (%eax,%edi), %edx
++ .loc 1 860 0
++ shrl $11, %ebx
++ .loc 1 862 0
++ shrl $11, %edx
++ .loc 1 864 0
++ addl $1250, %esi
++ js .L113
++ movl %edx, avenrun+8
++ movl %ecx, avenrun+4
++ movl %ebx, avenrun
++ movl %esi, count.18791
++.LVL143:
++.L115:
++.LBE388:
++.LBE387:
++.LBE386:
++.LBE385:
++ .loc 1 910 0
++ popl %ecx
++ popl %ebx
++.LVL144:
++ popl %esi
++ popl %edi
++ popl %ebp
++ ret
++.LFE907:
++ .size do_timer, .-do_timer
++.globl run_local_timers
++ .type run_local_timers, @function
++run_local_timers:
++.LFB905:
++ .loc 1 885 0
++ .loc 1 886 0
++ movl $1, %eax
++ call raise_softirq
++ .loc 1 887 0
++ jmp softlockup_tick
++.LFE905:
++ .size run_local_timers, .-run_local_timers
++ .type lock_timer_base, @function
++lock_timer_base:
++.LFB892:
++ .loc 1 363 0
++.LVL145:
++ pushl %ebp
++.LCFI30:
++ movl %edx, %ebp
++ pushl %edi
++.LCFI31:
++ movl %eax, %edi
++ pushl %esi
++.LCFI32:
++ pushl %ebx
++.LCFI33:
++.LVL146:
++.L123:
++.LBB392:
++ .loc 1 367 0
++ movl 20(%edi), %ebx
++ .loc 1 369 0
++ movl %ebx, %esi
++ andl $-2, %esi
++ je .L124
++ .loc 1 370 0
++ movl %esi, %eax
++ call _spin_lock_irqsave
++ movl %eax, (%ebp)
++ .loc 1 371 0
++ cmpl 20(%edi), %ebx
++ je .L129
++ .loc 1 374 0
++ movl %eax, %edx
++ movl %esi, %eax
++ call _spin_unlock_irqrestore
++.LVL147:
++.L124:
++.LBB393:
++.LBB394:
++ .file 8 "include/asm/processor.h"
++ .loc 8 497 0
++#APP
++ rep;nop
++#NO_APP
++ jmp .L123
++.LVL148:
++.L129:
++.LBE394:
++.LBE393:
++.LBE392:
++ .loc 1 378 0
++ popl %ebx
++.LVL149:
++ movl %esi, %eax
++ popl %esi
++.LVL150:
++ popl %edi
++.LVL151:
++ popl %ebp
++.LVL152:
++ ret
++.LFE892:
++ .size lock_timer_base, .-lock_timer_base
++.globl try_to_del_timer_sync
++ .type try_to_del_timer_sync, @function
++try_to_del_timer_sync:
++.LFB897:
++ .loc 1 527 0
++.LVL153:
++ pushl %esi
++.LCFI34:
++ .loc 1 534 0
++ orl $-1, %esi
++.LVL154:
++ .loc 1 527 0
++ pushl %ebx
++.LCFI35:
++ movl %eax, %ebx
++ subl $4, %esp
++.LCFI36:
++ .loc 1 532 0
++ movl %esp, %edx
++ call lock_timer_base
++.LVL155:
++.LVL156:
++ .loc 1 534 0
++ cmpl %ebx, 4(%eax)
++.LVL157:
++ .loc 1 532 0
++ movl %eax, %ecx
++.LVL158:
++ .loc 1 534 0
++ je .L133
++ .loc 1 538 0
++ xorl %esi, %esi
++ cmpl $0, (%ebx)
++ je .L133
++.LBB395:
++.LBB396:
++ .loc 1 342 0
++ movl (%ebx), %edx
++.LVL159:
++ .loc 1 345 0
++ movw $1, %si
++.LVL160:
++ .loc 1 342 0
++ movl 4(%ebx), %eax
++.LVL161:
++.LBB397:
++.LBB398:
++ .loc 2 157 0
++ movl %eax, 4(%edx)
++ .loc 2 158 0
++ movl %edx, (%eax)
++.LBE398:
++.LBE397:
++ .loc 1 345 0
++ movl $2097664, 4(%ebx)
++ .loc 1 344 0
++ movl $0, (%ebx)
++.LVL162:
++.L133:
++.LVL163:
++.LBE396:
++.LBE395:
++ .loc 1 543 0
++ movl (%esp), %edx
++.LVL164:
++ movl %ecx, %eax
++.LVL165:
++ call _spin_unlock_irqrestore
++.LVL166:
++ .loc 1 546 0
++ movl %esi, %eax
++ popl %ebx
++.LVL167:
++ popl %ebx
++ popl %esi
++.LVL168:
++ ret
++.LFE897:
++ .size try_to_del_timer_sync, .-try_to_del_timer_sync
++.globl del_timer_sync
++ .type del_timer_sync, @function
++del_timer_sync:
++.LFB898:
++ .loc 1 568 0
++.LVL169:
++ pushl %ebx
++.LCFI37:
++ movl %eax, %ebx
++.LVL170:
++.L139:
++.LBB399:
++ .loc 1 570 0
++ movl %ebx, %eax
++.LVL171:
++ call try_to_del_timer_sync
++.LVL172:
++ .loc 1 571 0
++ testl %eax, %eax
++ jns .L143
++.LBB400:
++.LBB401:
++ .loc 8 497 0
++#APP
++ rep;nop
++#NO_APP
++ jmp .L139
++.L143:
++.LBE401:
++.LBE400:
++.LBE399:
++ .loc 1 575 0
++ popl %ebx
++.LVL173:
++ ret
++.LFE898:
++ .size del_timer_sync, .-del_timer_sync
++.globl __mod_timer
++ .type __mod_timer, @function
++__mod_timer:
++.LFB893:
++ .loc 1 381 0
++.LVL174:
++ pushl %ebp
++.LCFI38:
++ movl %edx, %ebp
++ pushl %edi
++.LCFI39:
++ pushl %esi
++.LCFI40:
++ pushl %ebx
++.LCFI41:
++ movl %eax, %ebx
++ subl $8, %esp
++.LCFI42:
++ .loc 1 387 0
++ cmpl $0, 12(%eax)
++ jne .L145
++.LVL175:
++#APP
++ 1: ud2
++.pushsection __bug_table,"a"
++2: .long 1b, .LC0
++ .word 387, 0
++ .org 2b+12
++.popsection
++#NO_APP
++.L147:
++ jmp .L147
++.L145:
++ .loc 1 389 0
++ leal 4(%esp), %edx
++ call lock_timer_base
++.LVL176:
++ .loc 1 391 0
++ cmpl $0, (%ebx)
++ .loc 1 389 0
++ movl %eax, %esi
++.LVL177:
++ .loc 1 391 0
++ movl $0, (%esp)
++.LVL178:
++ je .L150
++.LBB402:
++.LBB403:
++ .loc 1 342 0
++ movl 4(%ebx), %eax
++.LVL179:
++ movl (%ebx), %edx
++.LVL180:
++.LBB404:
++.LBB405:
++ .loc 2 157 0
++ movl %eax, 4(%edx)
++ .loc 2 158 0
++ movl %edx, (%eax)
++.LBE405:
++.LBE404:
++ .loc 1 345 0
++ movl $2097664, 4(%ebx)
++ movl $1, (%esp)
++.L150:
++.LBE403:
++.LBE402:
++.LBB406:
++.LBB407:
++ .loc 1 396 0
++#APP
++ movl %fs:per_cpu__this_cpu_off,%edx
++.LVL181:
++#NO_APP
++.LBE407:
++ movl $per_cpu__tvec_bases, %eax
++.LVL182:
++.LBE406:
++ movl (%eax,%edx), %edi
++.LVL183:
++ .loc 1 398 0
++ cmpl %edi, %esi
++ je .L151
++ .loc 1 406 0
++ cmpl %ebx, 4(%esi)
++ je .L151
++.LBB408:
++.LBB409:
++ .loc 1 113 0
++ andl $1, 20(%ebx)
++.LBE409:
++.LBE408:
++.LBB410:
++.LBB411:
++ .loc 5 108 0
++#APP
++ movb $1,(%esi)
++#NO_APP
++.LBE411:
++.LBE410:
++ .loc 1 411 0
++ movl %edi, %eax
++.LBB412:
++.LBB413:
++ .loc 1 113 0
++ movl %edi, %esi
++.LBE413:
++.LBE412:
++ .loc 1 411 0
++ call _spin_lock
++.LVL184:
++.LBB414:
++.LBB415:
++ .loc 1 113 0
++ movl 20(%ebx), %eax
++ andl $1, %eax
++ orl %edi, %eax
++ movl %eax, 20(%ebx)
++.LVL185:
++.L151:
++.LBE415:
++.LBE414:
++ .loc 1 416 0
++ movl %ebp, 8(%ebx)
++ .loc 1 417 0
++ movl %ebx, %edx
++.LVL186:
++ movl %esi, %eax
++ call internal_add_timer
++ .loc 1 418 0
++ movl %esi, %eax
++.LVL187:
++ movl 4(%esp), %edx
++ call _spin_unlock_irqrestore
++ .loc 1 421 0
++ movl (%esp), %eax
++ popl %esi
++.LVL188:
++ popl %edi
++.LVL189:
++ popl %ebx
++.LVL190:
++ popl %esi
++ popl %edi
++ popl %ebp
++.LVL191:
++ ret
++.LFE893:
++ .size __mod_timer, .-__mod_timer
++ .section .rodata.str1.1
++.LC3:
++ .string "<3>schedule_timeout: wrong timeout value %lx\n"
++ .section .sched.text,"ax",@progbits
++.globl schedule_timeout
++ .type schedule_timeout, @function
++schedule_timeout:
++.LFB916:
++ .loc 1 1056 0
++.LVL192:
++ pushl %esi
++.LCFI43:
++ pushl %ebx
++.LCFI44:
++ movl %eax, %ebx
++ subl $32, %esp
++.LCFI45:
++ .loc 1 1060 0
++ cmpl $2147483647, %eax
++ jne .L156
++ .loc 1 1070 0
++ call schedule
++.LVL193:
++ jmp .L158
++.LVL194:
++.L156:
++ .loc 1 1080 0
++ testl %eax, %eax
++ jns .L159
++ .loc 1 1081 0
++ movl %eax, 4(%esp)
++ movl $.LC3, (%esp)
++ call printk
++.LVL195:
++ .loc 1 1083 0
++ call dump_stack
++.LBB416:
++.LBB417:
++.LBB418:
++ .file 9 "include/asm/current.h"
++ .loc 9 12 0
++#APP
++ movl %fs:per_cpu__current_task,%eax
++.LVL196:
++#NO_APP
++.LBE418:
++.LBE417:
++.LBE416:
++ .loc 1 1084 0
++ movl $0, (%eax)
++ jmp .L158
++.LVL197:
++.L159:
++ .loc 1 1089 0
++ movl jiffies, %esi
++.LBB419:
++.LBB420:
++ .file 10 "include/linux/timer.h"
++ .loc 10 48 0
++ leal 8(%esp), %ebx
++ .loc 10 46 0
++ movl $process_timeout, 20(%esp)
++.LBE420:
++.LBE419:
++ .loc 1 1089 0
++ leal (%eax,%esi), %esi
++.LVL198:
++.LBB421:
++.LBB422:
++.LBB423:
++ .loc 9 12 0
++#APP
++ movl %fs:per_cpu__current_task,%eax
++.LVL199:
++#NO_APP
++.LBE423:
++.LBE422:
++.LBE421:
++.LBB424:
++.LBB425:
++ .loc 10 47 0
++ movl %eax, 24(%esp)
++ .loc 10 48 0
++ movl %ebx, %eax
++.LVL200:
++ call init_timer
++.LBE425:
++.LBE424:
++ .loc 1 1092 0
++ movl %esi, %edx
++ movl %ebx, %eax
++ call __mod_timer
++ .loc 1 1093 0
++ call schedule
++ .loc 1 1094 0
++ movl %ebx, %eax
++ .loc 1 1096 0
++ movl %esi, %ebx
++.LVL201:
++ .loc 1 1094 0
++ call del_timer_sync
++ .loc 1 1096 0
++ movl jiffies, %eax
++ subl %eax, %ebx
++.LVL202:
++.L158:
++ .loc 1 1098 0
++ xorl %eax, %eax
++.LVL203:
++ testl %ebx, %ebx
++ cmovns %ebx, %eax
++ .loc 1 1100 0
++ addl $32, %esp
++ popl %ebx
++.LVL204:
++ popl %esi
++.LVL205:
++ ret
++.LFE916:
++ .size schedule_timeout, .-schedule_timeout
++.globl schedule_timeout_uninterruptible
++ .type schedule_timeout_uninterruptible, @function
++schedule_timeout_uninterruptible:
++.LFB918:
++ .loc 1 1115 0
++.LVL206:
++.LBB426:
++.LBB427:
++.LBB428:
++ .loc 9 12 0
++#APP
++ movl %fs:per_cpu__current_task,%edx
++.LVL207:
++#NO_APP
++.LBE428:
++.LBE427:
++.LBE426:
++ .loc 1 1116 0
++ movl $2, (%edx)
++ .loc 1 1117 0
++ jmp schedule_timeout
++.LVL208:
++.LFE918:
++ .size schedule_timeout_uninterruptible, .-schedule_timeout_uninterruptible
++ .text
++.globl msleep
++ .type msleep, @function
++msleep:
++.LFB925:
++ .loc 1 1566 0
++.LVL209:
++ .loc 1 1567 0
++ call msecs_to_jiffies
++.LVL210:
++ incl %eax
++.LVL211:
++ jmp .L165
++.L166:
++ .loc 1 1570 0
++ call schedule_timeout_uninterruptible
++.LVL212:
++.L165:
++ .loc 1 1569 0
++ testl %eax, %eax
++ jne .L166
++ .loc 1 1571 0
++ ret
++.LFE925:
++ .size msleep, .-msleep
++ .section .sched.text
++.globl schedule_timeout_interruptible
++ .type schedule_timeout_interruptible, @function
++schedule_timeout_interruptible:
++.LFB917:
++ .loc 1 1108 0
++.LVL213:
++.LBB429:
++.LBB430:
++.LBB431:
++ .loc 9 12 0
++#APP
++ movl %fs:per_cpu__current_task,%edx
++.LVL214:
++#NO_APP
++.LBE431:
++.LBE430:
++.LBE429:
++ .loc 1 1109 0
++ movl $1, (%edx)
++ .loc 1 1110 0
++ jmp schedule_timeout
++.LVL215:
++.LFE917:
++ .size schedule_timeout_interruptible, .-schedule_timeout_interruptible
++ .text
++.globl msleep_interruptible
++ .type msleep_interruptible, @function
++msleep_interruptible:
++.LFB926:
++ .loc 1 1580 0
++.LVL216:
++ .loc 1 1581 0
++ call msecs_to_jiffies
++.LVL217:
++ leal 1(%eax), %edx
++.LVL218:
++ jmp .L172
++.L173:
++ .loc 1 1584 0
++ movl %edx, %eax
++ call schedule_timeout_interruptible
++.LVL219:
++ movl %eax, %edx
++.LVL220:
++.L172:
++ .loc 1 1583 0
++ testl %edx, %edx
++ je .L174
++.LBB445:
++.LBB446:
++.LBB447:
++ .loc 9 12 0
++#APP
++ movl %fs:per_cpu__current_task,%eax
++.LVL221:
++#NO_APP
++.LBE447:
++.LBE446:
++.LBE445:
++.LBB448:
++.LBB449:
++.LBB450:
++.LBB451:
++ .file 11 "include/linux/sched.h"
++ .loc 11 1569 0
++ movl 4(%eax), %eax
++.LVL222:
++.LBB452:
++.LBB453:
++.LBB454:
++.LBB455:
++ .file 12 "include/asm/bitops.h"
++ .loc 12 246 0
++ movl 8(%eax), %eax
++.LBE455:
++.LBE454:
++.LBE453:
++.LBE452:
++.LBE451:
++.LBE450:
++.LBE449:
++.LBE448:
++ .loc 1 1583 0
++ testb $4, %al
++ je .L173
++.L174:
++ .loc 1 1585 0
++ movl %edx, %eax
++ jmp jiffies_to_msecs
++.LVL223:
++.LFE926:
++ .size msleep_interruptible, .-msleep_interruptible
++.globl update_process_times
++ .type update_process_times, @function
++update_process_times:
++.LFB901:
++ .loc 1 811 0
++.LVL224:
++ pushl %edi
++.LCFI46:
++ movl %eax, %edi
++ pushl %esi
++.LCFI47:
++ pushl %ebx
++.LCFI48:
++.LBB460:
++ .loc 1 813 0
++#APP
++ movl %fs:per_cpu__cpu_number,%esi
++.LVL225:
++#NO_APP
++.LBE460:
++.LBB461:
++.LBB462:
++.LBB463:
++ .loc 9 12 0
++#APP
++ movl %fs:per_cpu__current_task,%ebx
++.LVL226:
++#NO_APP
++.LBE463:
++.LBE462:
++.LBE461:
++ .loc 1 816 0
++ testl %eax, %eax
++ je .L178
++ .loc 1 817 0
++ movl $1, %edx
++ movl %ebx, %eax
++.LVL227:
++ call account_user_time
++ jmp .L180
++.LVL228:
++.L178:
++ .loc 1 819 0
++ movl $1, %ecx
++ movl $65536, %edx
++ movl %ebx, %eax
++.LVL229:
++ call account_system_time
++.L180:
++ .loc 1 820 0
++ call run_local_timers
++ .loc 1 821 0
++ movl %esi, %eax
++ call rcu_pending
++ testl %eax, %eax
++ je .L181
++ .loc 1 822 0
++ movl %edi, %edx
++ movl %esi, %eax
++ call rcu_check_callbacks
++.L181:
++ .loc 1 823 0
++ call scheduler_tick
++ .loc 1 824 0
++ movl %ebx, %eax
++ .loc 1 825 0
++ popl %ebx
++.LVL230:
++ popl %esi
++.LVL231:
++ popl %edi
++.LVL232:
++ .loc 1 824 0
++ jmp run_posix_cpu_timers
++.LVL233:
++.LFE901:
++ .size update_process_times, .-update_process_times
++.globl sys_getpid
++ .type sys_getpid, @function
++sys_getpid:
++.LFB909:
++ .loc 1 957 0
++ pushl %ebx
++.LCFI49:
++ subl $40, %esp
++.LCFI50:
++ .loc 1 959 0
++ movl rec_event, %ebx
++ testl %ebx, %ebx
++ je .L185
++.LBB474:
++ .loc 1 964 0
++ movl $666, 36(%esp)
++ .loc 1 966 0
++ leal 24(%esp), %eax
++.LBB475:
++.LBB476:
++.LBB477:
++ .loc 9 12 0
++#APP
++ movl %fs:per_cpu__current_task,%edx
++.LVL234:
++#NO_APP
++.LBE477:
++.LBE476:
++.LBE475:
++ .loc 1 965 0
++ movl 468(%edx), %ecx
++.LVL235:
++ .loc 1 966 0
++ movl %eax, 8(%esp)
++ .loc 1 972 0
++ movl %esp, %eax
++ .loc 1 967 0
++ movl %edx, 20(%esp)
++ .loc 1 972 0
++ movl $1, %edx
++.LVL236:
++ .loc 1 969 0
++ movl $7, 16(%esp)
++ .loc 1 965 0
++ andl $4095, %ecx
++ .loc 1 968 0
++ movl %ecx, 24(%esp)
++ .loc 1 972 0
++ call *%ebx
++.LVL237:
++.L185:
++.LBE474:
++.LBB478:
++.LBB479:
++.LBB480:
++ .loc 9 12 0
++#APP
++ movl %fs:per_cpu__current_task,%eax
++.LVL238:
++#NO_APP
++ movl 176(%eax), %eax
++.LVL239:
++.LBE480:
++.LBE479:
++.LBE478:
++ .loc 1 977 0
++ addl $40, %esp
++ popl %ebx
++ ret
++.LFE909:
++ .size sys_getpid, .-sys_getpid
++.globl sys_getppid
++ .type sys_getppid, @function
++sys_getppid:
++.LFB910:
++ .loc 1 986 0
++.LBB485:
++.LBB486:
++.LBB487:
++.LBB488:
++ .loc 9 12 0
++#APP
++ movl %fs:per_cpu__current_task,%eax
++.LVL240:
++#NO_APP
++ movl 180(%eax), %eax
++.LVL241:
++ movl 176(%eax), %eax
++.LBE488:
++.LBE487:
++.LBE486:
++.LBE485:
++ .loc 1 996 0
++ ret
++.LFE910:
++ .size sys_getppid, .-sys_getppid
++.globl sys_getuid
++ .type sys_getuid, @function
++sys_getuid:
++.LFB911:
++ .loc 1 999 0
++.LBB492:
++.LBB493:
++.LBB494:
++ .loc 9 12 0
++#APP
++ movl %fs:per_cpu__current_task,%eax
++.LVL242:
++#NO_APP
++ movl 340(%eax), %eax
++.LVL243:
++.LBE494:
++.LBE493:
++.LBE492:
++ .loc 1 1002 0
++ ret
++.LFE911:
++ .size sys_getuid, .-sys_getuid
++.globl sys_geteuid
++ .type sys_geteuid, @function
++sys_geteuid:
++.LFB912:
++ .loc 1 1005 0
++.LBB498:
++.LBB499:
++.LBB500:
++ .loc 9 12 0
++#APP
++ movl %fs:per_cpu__current_task,%eax
++.LVL244:
++#NO_APP
++ movl 344(%eax), %eax
++.LVL245:
++.LBE500:
++.LBE499:
++.LBE498:
++ .loc 1 1008 0
++ ret
++.LFE912:
++ .size sys_geteuid, .-sys_geteuid
++.globl sys_getgid
++ .type sys_getgid, @function
++sys_getgid:
++.LFB913:
++ .loc 1 1011 0
++.LBB504:
++.LBB505:
++.LBB506:
++ .loc 9 12 0
++#APP
++ movl %fs:per_cpu__current_task,%eax
++.LVL246:
++#NO_APP
++ movl 356(%eax), %eax
++.LVL247:
++.LBE506:
++.LBE505:
++.LBE504:
++ .loc 1 1014 0
++ ret
++.LFE913:
++ .size sys_getgid, .-sys_getgid
++.globl sys_getegid
++ .type sys_getegid, @function
++sys_getegid:
++.LFB914:
++ .loc 1 1017 0
++.LBB510:
++.LBB511:
++.LBB512:
++ .loc 9 12 0
++#APP
++ movl %fs:per_cpu__current_task,%eax
++.LVL248:
++#NO_APP
++ movl 360(%eax), %eax
++.LVL249:
++.LBE512:
++.LBE511:
++.LBE510:
++ .loc 1 1020 0
++ ret
++.LFE914:
++ .size sys_getegid, .-sys_getegid
++.globl sys_gettid
++ .type sys_gettid, @function
++sys_gettid:
++.LFB919:
++ .loc 1 1123 0
++.LBB516:
++.LBB517:
++.LBB518:
++ .loc 9 12 0
++#APP
++ movl %fs:per_cpu__current_task,%eax
++.LVL250:
++#NO_APP
++ movl 172(%eax), %eax
++.LVL251:
++.LBE518:
++.LBE517:
++.LBE516:
++ .loc 1 1125 0
++ ret
++.LFE919:
++ .size sys_gettid, .-sys_gettid
++.globl mod_timer
++ .type mod_timer, @function
++mod_timer:
++.LFB895:
++ .loc 1 467 0
++.LVL252:
++ .loc 1 468 0
++ cmpl $0, 12(%eax)
++ jne .L201
++#APP
++ 1: ud2
++.pushsection __bug_table,"a"
++2: .long 1b, .LC0
++ .word 468, 0
++ .org 2b+12
++.popsection
++#NO_APP
++.L203:
++ jmp .L203
++.L201:
++ .loc 1 476 0
++ cmpl %edx, 8(%eax)
++ jne .L204
++ cmpl $0, (%eax)
++ jne .L209
++.L204:
++ .loc 1 479 0
++ jmp __mod_timer
++.LVL253:
++.L209:
++ .loc 1 480 0
++ movl $1, %eax
++.LVL254:
++ ret
++.LFE895:
++ .size mod_timer, .-mod_timer
++.globl del_timer
++ .type del_timer, @function
++del_timer:
++.LFB896:
++ .loc 1 496 0
++.LVL255:
++ pushl %esi
++.LCFI51:
++ .loc 1 502 0
++ xorl %esi, %esi
++.LVL256:
++ .loc 1 496 0
++ pushl %ebx
++.LCFI52:
++ movl %eax, %ebx
++ subl $4, %esp
++.LCFI53:
++ .loc 1 502 0
++ cmpl $0, (%eax)
++ je .L213
++ .loc 1 503 0
++ movl %esp, %edx
++ call lock_timer_base
++.LVL257:
++ .loc 1 504 0
++ cmpl $0, (%ebx)
++ .loc 1 503 0
++ movl %eax, %ecx
++.LVL258:
++ .loc 1 504 0
++ je .L216
++.LBB533:
++.LBB534:
++ .loc 1 342 0
++ movl (%ebx), %edx
++.LVL259:
++ .loc 1 345 0
++ movw $1, %si
++.LVL260:
++ .loc 1 342 0
++ movl 4(%ebx), %eax
++.LVL261:
++.LBB535:
++.LBB536:
++ .loc 2 157 0
++ movl %eax, 4(%edx)
++ .loc 2 158 0
++ movl %edx, (%eax)
++.LBE536:
++.LBE535:
++ .loc 1 345 0
++ movl $2097664, 4(%ebx)
++ .loc 1 344 0
++ movl $0, (%ebx)
++.LVL262:
++.L216:
++.LVL263:
++.LBE534:
++.LBE533:
++ .loc 1 508 0
++ movl (%esp), %edx
++.LVL264:
++ movl %ecx, %eax
++.LVL265:
++ call _spin_unlock_irqrestore
++.LVL266:
++.L213:
++ .loc 1 512 0
++ popl %edx
++ movl %esi, %eax
++ popl %ebx
++.LVL267:
++ popl %esi
++.LVL268:
++ ret
++.LFE896:
++ .size del_timer, .-del_timer
++.globl add_timer_on
++ .type add_timer_on, @function
++add_timer_on:
++.LFB894:
++ .loc 1 433 0
++.LVL269:
++ pushl %edi
++.LCFI54:
++ .loc 1 434 0
++ movl __per_cpu_offset(,%edx,4), %edx
++.LVL270:
++ .loc 1 433 0
++ pushl %esi
++.LCFI55:
++ movl %eax, %esi
++ pushl %ebx
++.LCFI56:
++.LBB546:
++ .loc 1 434 0
++ movl $per_cpu__tvec_bases, %eax
++.LVL271:
++.LBE546:
++ .loc 1 438 0
++ cmpl $0, (%esi)
++ .loc 1 434 0
++ movl (%eax,%edx), %edi
++.LVL272:
++ .loc 1 438 0
++ jne .L226
++ cmpl $0, 12(%esi)
++ jne .L222
++.L226:
++#APP
++ 1: ud2
++.pushsection __bug_table,"a"
++2: .long 1b, .LC0
++ .word 438, 0
++ .org 2b+12
++.popsection
++#NO_APP
++.L224:
++ jmp .L224
++.L222:
++ .loc 1 439 0
++ movl %edi, %eax
++ call _spin_lock_irqsave
++ .loc 1 441 0
++ movl %esi, %edx
++ .loc 1 439 0
++ movl %eax, %ebx
++.LVL273:
++.LBB547:
++.LBB548:
++ .loc 1 113 0
++ movl 20(%esi), %eax
++ andl $1, %eax
++ orl %edi, %eax
++ movl %eax, 20(%esi)
++.LBE548:
++.LBE547:
++ .loc 1 441 0
++ movl %edi, %eax
++ call internal_add_timer
++ .loc 1 442 0
++ movl %ebx, %edx
++ movl %edi, %eax
++ .loc 1 443 0
++ popl %ebx
++.LVL274:
++ popl %esi
++.LVL275:
++ popl %edi
++.LVL276:
++ .loc 1 442 0
++ jmp _spin_unlock_irqrestore
++.LVL277:
++.LFE894:
++ .size add_timer_on, .-add_timer_on
++.globl jiffies_64
++ .section .data.cacheline_aligned,"aw",@progbits
++ .align 128
++ .type jiffies_64, @object
++ .size jiffies_64, 8
++jiffies_64:
++ .long -75000
++ .long 0
++ .section __ksymtab,"a",@progbits
++ .align 4
++ .type __ksymtab_jiffies_64, @object
++ .size __ksymtab_jiffies_64, 8
++__ksymtab_jiffies_64:
++ .long jiffies_64
++ .long __kstrtab_jiffies_64
++ .align 4
++ .type __ksymtab_boot_tvec_bases, @object
++ .size __ksymtab_boot_tvec_bases, 8
++__ksymtab_boot_tvec_bases:
++ .long boot_tvec_bases
++ .long __kstrtab_boot_tvec_bases
++ .section __ksymtab_gpl,"a",@progbits
++ .align 4
++ .type __ksymtab___round_jiffies, @object
++ .size __ksymtab___round_jiffies, 8
++__ksymtab___round_jiffies:
++ .long __round_jiffies
++ .long __kstrtab___round_jiffies
++ .align 4
++ .type __ksymtab___round_jiffies_relative, @object
++ .size __ksymtab___round_jiffies_relative, 8
++__ksymtab___round_jiffies_relative:
++ .long __round_jiffies_relative
++ .long __kstrtab___round_jiffies_relative
++ .align 4
++ .type __ksymtab_round_jiffies, @object
++ .size __ksymtab_round_jiffies, 8
++__ksymtab_round_jiffies:
++ .long round_jiffies
++ .long __kstrtab_round_jiffies
++ .align 4
++ .type __ksymtab_round_jiffies_relative, @object
++ .size __ksymtab_round_jiffies_relative, 8
++__ksymtab_round_jiffies_relative:
++ .long round_jiffies_relative
++ .long __kstrtab_round_jiffies_relative
++ .section __ksymtab
++ .align 4
++ .type __ksymtab_init_timer, @object
++ .size __ksymtab_init_timer, 8
++__ksymtab_init_timer:
++ .long init_timer
++ .long __kstrtab_init_timer
++ .align 4
++ .type __ksymtab_init_timer_deferrable, @object
++ .size __ksymtab_init_timer_deferrable, 8
++__ksymtab_init_timer_deferrable:
++ .long init_timer_deferrable
++ .long __kstrtab_init_timer_deferrable
++ .align 4
++ .type __ksymtab___mod_timer, @object
++ .size __ksymtab___mod_timer, 8
++__ksymtab___mod_timer:
++ .long __mod_timer
++ .long __kstrtab___mod_timer
++ .align 4
++ .type __ksymtab_mod_timer, @object
++ .size __ksymtab_mod_timer, 8
++__ksymtab_mod_timer:
++ .long mod_timer
++ .long __kstrtab_mod_timer
++ .align 4
++ .type __ksymtab_del_timer, @object
++ .size __ksymtab_del_timer, 8
++__ksymtab_del_timer:
++ .long del_timer
++ .long __kstrtab_del_timer
++ .align 4
++ .type __ksymtab_try_to_del_timer_sync, @object
++ .size __ksymtab_try_to_del_timer_sync, 8
++__ksymtab_try_to_del_timer_sync:
++ .long try_to_del_timer_sync
++ .long __kstrtab_try_to_del_timer_sync
++ .align 4
++ .type __ksymtab_del_timer_sync, @object
++ .size __ksymtab_del_timer_sync, 8
++__ksymtab_del_timer_sync:
++ .long del_timer_sync
++ .long __kstrtab_del_timer_sync
++ .align 4
++ .type __ksymtab_avenrun, @object
++ .size __ksymtab_avenrun, 8
++__ksymtab_avenrun:
++ .long avenrun
++ .long __kstrtab_avenrun
++ .align 4
++ .type __ksymtab_schedule_timeout, @object
++ .size __ksymtab_schedule_timeout, 8
++__ksymtab_schedule_timeout:
++ .long schedule_timeout
++ .long __kstrtab_schedule_timeout
++ .align 4
++ .type __ksymtab_schedule_timeout_interruptible, @object
++ .size __ksymtab_schedule_timeout_interruptible, 8
++__ksymtab_schedule_timeout_interruptible:
++ .long schedule_timeout_interruptible
++ .long __kstrtab_schedule_timeout_interruptible
++ .align 4
++ .type __ksymtab_schedule_timeout_uninterruptible, @object
++ .size __ksymtab_schedule_timeout_uninterruptible, 8
++__ksymtab_schedule_timeout_uninterruptible:
++ .long schedule_timeout_uninterruptible
++ .long __kstrtab_schedule_timeout_uninterruptible
++ .align 4
++ .type __ksymtab_msleep, @object
++ .size __ksymtab_msleep, 8
++__ksymtab_msleep:
++ .long msleep
++ .long __kstrtab_msleep
++ .align 4
++ .type __ksymtab_msleep_interruptible, @object
++ .size __ksymtab_msleep_interruptible, 8
++__ksymtab_msleep_interruptible:
++ .long msleep_interruptible
++ .long __kstrtab_msleep_interruptible
++ .section .init.data,"aw",@progbits
++ .align 4
++ .type timers_nb, @object
++ .size timers_nb, 12
++timers_nb:
++ .long timer_cpu_notify
++ .zero 8
++ .section .data.percpu,"aw",@progbits
++ .align 4
++ .type per_cpu__tvec_bases, @object
++ .size per_cpu__tvec_bases, 4
++per_cpu__tvec_bases:
++ .long boot_tvec_bases
++ .local boot_done.19029
++ .comm boot_done.19029,1,1
++ .section .rodata
++ .type __func__.19031, @object
++ .size __func__.19031, 16
++__func__.19031:
++ .string "init_timers_cpu"
++ .local tvec_base_done.19028
++ .comm tvec_base_done.19028,32,32
++ .data
++ .align 4
++ .type count.18791, @object
++ .size count.18791, 4
++count.18791:
++ .long 1250
++.globl boot_tvec_bases
++ .bss
++ .align 128
++ .type boot_tvec_bases, @object
++ .size boot_tvec_bases, 4224
++boot_tvec_bases:
++ .zero 4224
++.globl avenrun
++ .align 4
++ .type avenrun, @object
++ .size avenrun, 12
++avenrun:
++ .zero 12
++.globl rec_event
++ .align 4
++ .type rec_event, @object
++ .size rec_event, 4
++rec_event:
++ .zero 4
++ .section __ksymtab_strings,"a",@progbits
++ .type __kstrtab_jiffies_64, @object
++ .size __kstrtab_jiffies_64, 11
++__kstrtab_jiffies_64:
++ .string "jiffies_64"
++ .type __kstrtab_boot_tvec_bases, @object
++ .size __kstrtab_boot_tvec_bases, 16
++__kstrtab_boot_tvec_bases:
++ .string "boot_tvec_bases"
++ .type __kstrtab___round_jiffies, @object
++ .size __kstrtab___round_jiffies, 16
++__kstrtab___round_jiffies:
++ .string "__round_jiffies"
++ .type __kstrtab___round_jiffies_relative, @object
++ .size __kstrtab___round_jiffies_relative, 25
++__kstrtab___round_jiffies_relative:
++ .string "__round_jiffies_relative"
++ .type __kstrtab_round_jiffies, @object
++ .size __kstrtab_round_jiffies, 14
++__kstrtab_round_jiffies:
++ .string "round_jiffies"
++ .type __kstrtab_round_jiffies_relative, @object
++ .size __kstrtab_round_jiffies_relative, 23
++__kstrtab_round_jiffies_relative:
++ .string "round_jiffies_relative"
++ .type __kstrtab_init_timer, @object
++ .size __kstrtab_init_timer, 11
++__kstrtab_init_timer:
++ .string "init_timer"
++ .type __kstrtab_init_timer_deferrable, @object
++ .size __kstrtab_init_timer_deferrable, 22
++__kstrtab_init_timer_deferrable:
++ .string "init_timer_deferrable"
++ .type __kstrtab___mod_timer, @object
++ .size __kstrtab___mod_timer, 12
++__kstrtab___mod_timer:
++ .string "__mod_timer"
++ .type __kstrtab_mod_timer, @object
++ .size __kstrtab_mod_timer, 10
++__kstrtab_mod_timer:
++ .string "mod_timer"
++ .type __kstrtab_del_timer, @object
++ .size __kstrtab_del_timer, 10
++__kstrtab_del_timer:
++ .string "del_timer"
++ .type __kstrtab_try_to_del_timer_sync, @object
++ .size __kstrtab_try_to_del_timer_sync, 22
++__kstrtab_try_to_del_timer_sync:
++ .string "try_to_del_timer_sync"
++ .type __kstrtab_del_timer_sync, @object
++ .size __kstrtab_del_timer_sync, 15
++__kstrtab_del_timer_sync:
++ .string "del_timer_sync"
++ .type __kstrtab_avenrun, @object
++ .size __kstrtab_avenrun, 8
++__kstrtab_avenrun:
++ .string "avenrun"
++ .type __kstrtab_schedule_timeout, @object
++ .size __kstrtab_schedule_timeout, 17
++__kstrtab_schedule_timeout:
++ .string "schedule_timeout"
++ .type __kstrtab_schedule_timeout_interruptible, @object
++ .size __kstrtab_schedule_timeout_interruptible, 31
++__kstrtab_schedule_timeout_interruptible:
++ .string "schedule_timeout_interruptible"
++ .align 32
++ .type __kstrtab_schedule_timeout_uninterruptible, @object
++ .size __kstrtab_schedule_timeout_uninterruptible, 33
++__kstrtab_schedule_timeout_uninterruptible:
++ .string "schedule_timeout_uninterruptible"
++ .type __kstrtab_msleep, @object
++ .size __kstrtab_msleep, 7
++__kstrtab_msleep:
++ .string "msleep"
++ .type __kstrtab_msleep_interruptible, @object
++ .size __kstrtab_msleep_interruptible, 21
++__kstrtab_msleep_interruptible:
++ .string "msleep_interruptible"
++ .weak xtime_lock
++ .section .debug_frame,"",@progbits
++.Lframe0:
++ .long .LECIE0-.LSCIE0
++.LSCIE0:
++ .long 0xffffffff
++ .byte 0x1
++ .string ""
++ .uleb128 0x1
++ .sleb128 -4
++ .byte 0x8
++ .byte 0xc
++ .uleb128 0x4
++ .uleb128 0x4
++ .byte 0x88
++ .uleb128 0x1
++ .align 4
++.LECIE0:
++.LSFDE0:
++ .long .LEFDE0-.LASFDE0
++.LASFDE0:
++ .long .Lframe0
++ .long .LFB883
++ .long .LFE883-.LFB883
++ .byte 0x4
++ .long .LCFI0-.LFB883
++ .byte 0xe
++ .uleb128 0x8
++ .byte 0x4
++ .long .LCFI1-.LCFI0
++ .byte 0xe
++ .uleb128 0xc
++ .byte 0x86
++ .uleb128 0x3
++ .byte 0x87
++ .uleb128 0x2
++ .byte 0x4
++ .long .LCFI2-.LCFI1
++ .byte 0xe
++ .uleb128 0x10
++ .byte 0x83
++ .uleb128 0x4
++ .byte 0x4
++ .long .LCFI3-.LCFI2
++ .byte 0xe
++ .uleb128 0x14
++ .align 4
++.LEFDE0:
++.LSFDE2:
++ .long .LEFDE2-.LASFDE2
++.LASFDE2:
++ .long .Lframe0
++ .long .LFB884
++ .long .LFE884-.LFB884
++ .align 4
++.LEFDE2:
++.LSFDE4:
++ .long .LEFDE4-.LASFDE4
++.LASFDE4:
++ .long .Lframe0
++ .long .LFB885
++ .long .LFE885-.LFB885
++ .align 4
++.LEFDE4:
++.LSFDE6:
++ .long .LEFDE6-.LASFDE6
++.LASFDE6:
++ .long .Lframe0
++ .long .LFB886
++ .long .LFE886-.LFB886
++ .align 4
++.LEFDE6:
++.LSFDE8:
++ .long .LEFDE8-.LASFDE8
++.LASFDE8:
++ .long .Lframe0
++ .long .LFB888
++ .long .LFE888-.LFB888
++ .byte 0x4
++ .long .LCFI4-.LFB888
++ .byte 0xe
++ .uleb128 0x8
++ .byte 0x4
++ .long .LCFI5-.LCFI4
++ .byte 0xe
++ .uleb128 0xc
++ .byte 0x83
++ .uleb128 0x3
++ .byte 0x86
++ .uleb128 0x2
++ .align 4
++.LEFDE8:
++.LSFDE10:
++ .long .LEFDE10-.LASFDE10
++.LASFDE10:
++ .long .Lframe0
++ .long .LFB889
++ .long .LFE889-.LFB889
++ .align 4
++.LEFDE10:
++.LSFDE12:
++ .long .LEFDE12-.LASFDE12
++.LASFDE12:
++ .long .Lframe0
++ .long .LFB890
++ .long .LFE890-.LFB890
++ .byte 0x4
++ .long .LCFI6-.LFB890
++ .byte 0xe
++ .uleb128 0x8
++ .byte 0x83
++ .uleb128 0x2
++ .align 4
++.LEFDE12:
++.LSFDE14:
++ .long .LEFDE14-.LASFDE14
++.LASFDE14:
++ .long .Lframe0
++ .long .LFB899
++ .long .LFE899-.LFB899
++ .byte 0x4
++ .long .LCFI7-.LFB899
++ .byte 0xe
++ .uleb128 0x8
++ .byte 0x87
++ .uleb128 0x2
++ .byte 0x4
++ .long .LCFI8-.LCFI7
++ .byte 0xe
++ .uleb128 0xc
++ .byte 0x86
++ .uleb128 0x3
++ .byte 0x4
++ .long .LCFI9-.LCFI8
++ .byte 0xe
++ .uleb128 0x10
++ .byte 0x4
++ .long .LCFI10-.LCFI9
++ .byte 0xe
++ .uleb128 0x18
++ .byte 0x83
++ .uleb128 0x4
++ .align 4
++.LEFDE14:
++.LSFDE16:
++ .long .LEFDE16-.LASFDE16
++.LASFDE16:
++ .long .Lframe0
++ .long .LFB923
++ .long .LFE923-.LFB923
++ .byte 0x4
++ .long .LCFI11-.LFB923
++ .byte 0xe
++ .uleb128 0x8
++ .byte 0x4
++ .long .LCFI12-.LCFI11
++ .byte 0xe
++ .uleb128 0xc
++ .byte 0x4
++ .long .LCFI13-.LCFI12
++ .byte 0xe
++ .uleb128 0x10
++ .byte 0x86
++ .uleb128 0x4
++ .byte 0x87
++ .uleb128 0x3
++ .byte 0x85
++ .uleb128 0x2
++ .byte 0x4
++ .long .LCFI14-.LCFI13
++ .byte 0xe
++ .uleb128 0x14
++ .byte 0x4
++ .long .LCFI15-.LCFI14
++ .byte 0xe
++ .uleb128 0x24
++ .byte 0x83
++ .uleb128 0x5
++ .align 4
++.LEFDE16:
++.LSFDE18:
++ .long .LEFDE18-.LASFDE18
++.LASFDE18:
++ .long .Lframe0
++ .long .LFB924
++ .long .LFE924-.LFB924
++ .align 4
++.LEFDE18:
++.LSFDE20:
++ .long .LEFDE20-.LASFDE20
++.LASFDE20:
++ .long .Lframe0
++ .long .LFB904
++ .long .LFE904-.LFB904
++ .byte 0x4
++ .long .LCFI16-.LFB904
++ .byte 0xe
++ .uleb128 0x8
++ .byte 0x4
++ .long .LCFI17-.LCFI16
++ .byte 0xe
++ .uleb128 0xc
++ .byte 0x4
++ .long .LCFI18-.LCFI17
++ .byte 0xe
++ .uleb128 0x10
++ .byte 0x4
++ .long .LCFI19-.LCFI18
++ .byte 0xe
++ .uleb128 0x28
++ .byte 0x83
++ .uleb128 0x4
++ .byte 0x86
++ .uleb128 0x3
++ .byte 0x87
++ .uleb128 0x2
++ .align 4
++.LEFDE20:
++.LSFDE22:
++ .long .LEFDE22-.LASFDE22
++.LASFDE22:
++ .long .Lframe0
++ .long .LFB920
++ .long .LFE920-.LFB920
++ .byte 0x4
++ .long .LCFI20-.LFB920
++ .byte 0xe
++ .uleb128 0x8
++ .byte 0x4
++ .long .LCFI21-.LCFI20
++ .byte 0xe
++ .uleb128 0xc
++ .byte 0x83
++ .uleb128 0x3
++ .byte 0x87
++ .uleb128 0x2
++ .byte 0x4
++ .long .LCFI22-.LCFI21
++ .byte 0xe
++ .uleb128 0x14
++ .align 4
++.LEFDE22:
++.LSFDE24:
++ .long .LEFDE24-.LASFDE24
++.LASFDE24:
++ .long .Lframe0
++ .long .LFB921
++ .long .LFE921-.LFB921
++ .byte 0x4
++ .long .LCFI23-.LFB921
++ .byte 0xe
++ .uleb128 0x8
++ .byte 0x4
++ .long .LCFI24-.LCFI23
++ .byte 0xe
++ .uleb128 0x48
++ .byte 0x83
++ .uleb128 0x2
++ .align 4
++.LEFDE24:
++.LSFDE26:
++ .long .LEFDE26-.LASFDE26
++.LASFDE26:
++ .long .Lframe0
++ .long .LFB915
++ .long .LFE915-.LFB915
++ .align 4
++.LEFDE26:
++.LSFDE28:
++ .long .LEFDE28-.LASFDE28
++.LASFDE28:
++ .long .Lframe0
++ .long .LFB908
++ .long .LFE908-.LFB908
++ .align 4
++.LEFDE28:
++.LSFDE30:
++ .long .LEFDE30-.LASFDE30
++.LASFDE30:
++ .long .Lframe0
++ .long .LFB907
++ .long .LFE907-.LFB907
++ .byte 0x4
++ .long .LCFI25-.LFB907
++ .byte 0xe
++ .uleb128 0x8
++ .byte 0x4
++ .long .LCFI26-.LCFI25
++ .byte 0xe
++ .uleb128 0xc
++ .byte 0x4
++ .long .LCFI27-.LCFI26
++ .byte 0xe
++ .uleb128 0x10
++ .byte 0x4
++ .long .LCFI28-.LCFI27
++ .byte 0xe
++ .uleb128 0x14
++ .byte 0x83
++ .uleb128 0x5
++ .byte 0x86
++ .uleb128 0x4
++ .byte 0x87
++ .uleb128 0x3
++ .byte 0x85
++ .uleb128 0x2
++ .byte 0x4
++ .long .LCFI29-.LCFI28
++ .byte 0xe
++ .uleb128 0x18
++ .align 4
++.LEFDE30:
++.LSFDE32:
++ .long .LEFDE32-.LASFDE32
++.LASFDE32:
++ .long .Lframe0
++ .long .LFB905
++ .long .LFE905-.LFB905
++ .align 4
++.LEFDE32:
++.LSFDE34:
++ .long .LEFDE34-.LASFDE34
++.LASFDE34:
++ .long .Lframe0
++ .long .LFB892
++ .long .LFE892-.LFB892
++ .byte 0x4
++ .long .LCFI30-.LFB892
++ .byte 0xe
++ .uleb128 0x8
++ .byte 0x85
++ .uleb128 0x2
++ .byte 0x4
++ .long .LCFI31-.LCFI30
++ .byte 0xe
++ .uleb128 0xc
++ .byte 0x87
++ .uleb128 0x3
++ .byte 0x4
++ .long .LCFI32-.LCFI31
++ .byte 0xe
++ .uleb128 0x10
++ .byte 0x4
++ .long .LCFI33-.LCFI32
++ .byte 0xe
++ .uleb128 0x14
++ .byte 0x83
++ .uleb128 0x5
++ .byte 0x86
++ .uleb128 0x4
++ .align 4
++.LEFDE34:
++.LSFDE36:
++ .long .LEFDE36-.LASFDE36
++.LASFDE36:
++ .long .Lframe0
++ .long .LFB897
++ .long .LFE897-.LFB897
++ .byte 0x4
++ .long .LCFI34-.LFB897
++ .byte 0xe
++ .uleb128 0x8
++ .byte 0x86
++ .uleb128 0x2
++ .byte 0x4
++ .long .LCFI35-.LCFI34
++ .byte 0xe
++ .uleb128 0xc
++ .byte 0x83
++ .uleb128 0x3
++ .byte 0x4
++ .long .LCFI36-.LCFI35
++ .byte 0xe
++ .uleb128 0x10
++ .align 4
++.LEFDE36:
++.LSFDE38:
++ .long .LEFDE38-.LASFDE38
++.LASFDE38:
++ .long .Lframe0
++ .long .LFB898
++ .long .LFE898-.LFB898
++ .byte 0x4
++ .long .LCFI37-.LFB898
++ .byte 0xe
++ .uleb128 0x8
++ .byte 0x83
++ .uleb128 0x2
++ .align 4
++.LEFDE38:
++.LSFDE40:
++ .long .LEFDE40-.LASFDE40
++.LASFDE40:
++ .long .Lframe0
++ .long .LFB893
++ .long .LFE893-.LFB893
++ .byte 0x4
++ .long .LCFI38-.LFB893
++ .byte 0xe
++ .uleb128 0x8
++ .byte 0x85
++ .uleb128 0x2
++ .byte 0x4
++ .long .LCFI39-.LCFI38
++ .byte 0xe
++ .uleb128 0xc
++ .byte 0x4
++ .long .LCFI40-.LCFI39
++ .byte 0xe
++ .uleb128 0x10
++ .byte 0x4
++ .long .LCFI41-.LCFI40
++ .byte 0xe
++ .uleb128 0x14
++ .byte 0x83
++ .uleb128 0x5
++ .byte 0x86
++ .uleb128 0x4
++ .byte 0x87
++ .uleb128 0x3
++ .byte 0x4
++ .long .LCFI42-.LCFI41
++ .byte 0xe
++ .uleb128 0x1c
++ .align 4
++.LEFDE40:
++.LSFDE42:
++ .long .LEFDE42-.LASFDE42
++.LASFDE42:
++ .long .Lframe0
++ .long .LFB916
++ .long .LFE916-.LFB916
++ .byte 0x4
++ .long .LCFI43-.LFB916
++ .byte 0xe
++ .uleb128 0x8
++ .byte 0x4
++ .long .LCFI44-.LCFI43
++ .byte 0xe
++ .uleb128 0xc
++ .byte 0x83
++ .uleb128 0x3
++ .byte 0x86
++ .uleb128 0x2
++ .byte 0x4
++ .long .LCFI45-.LCFI44
++ .byte 0xe
++ .uleb128 0x2c
++ .align 4
++.LEFDE42:
++.LSFDE44:
++ .long .LEFDE44-.LASFDE44
++.LASFDE44:
++ .long .Lframe0
++ .long .LFB918
++ .long .LFE918-.LFB918
++ .align 4
++.LEFDE44:
++.LSFDE46:
++ .long .LEFDE46-.LASFDE46
++.LASFDE46:
++ .long .Lframe0
++ .long .LFB925
++ .long .LFE925-.LFB925
++ .align 4
++.LEFDE46:
++.LSFDE48:
++ .long .LEFDE48-.LASFDE48
++.LASFDE48:
++ .long .Lframe0
++ .long .LFB917
++ .long .LFE917-.LFB917
++ .align 4
++.LEFDE48:
++.LSFDE50:
++ .long .LEFDE50-.LASFDE50
++.LASFDE50:
++ .long .Lframe0
++ .long .LFB926
++ .long .LFE926-.LFB926
++ .align 4
++.LEFDE50:
++.LSFDE52:
++ .long .LEFDE52-.LASFDE52
++.LASFDE52:
++ .long .Lframe0
++ .long .LFB901
++ .long .LFE901-.LFB901
++ .byte 0x4
++ .long .LCFI46-.LFB901
++ .byte 0xe
++ .uleb128 0x8
++ .byte 0x87
++ .uleb128 0x2
++ .byte 0x4
++ .long .LCFI47-.LCFI46
++ .byte 0xe
++ .uleb128 0xc
++ .byte 0x4
++ .long .LCFI48-.LCFI47
++ .byte 0xe
++ .uleb128 0x10
++ .byte 0x83
++ .uleb128 0x4
++ .byte 0x86
++ .uleb128 0x3
++ .align 4
++.LEFDE52:
++.LSFDE54:
++ .long .LEFDE54-.LASFDE54
++.LASFDE54:
++ .long .Lframe0
++ .long .LFB909
++ .long .LFE909-.LFB909
++ .byte 0x4
++ .long .LCFI49-.LFB909
++ .byte 0xe
++ .uleb128 0x8
++ .byte 0x4
++ .long .LCFI50-.LCFI49
++ .byte 0xe
++ .uleb128 0x30
++ .byte 0x83
++ .uleb128 0x2
++ .align 4
++.LEFDE54:
++.LSFDE56:
++ .long .LEFDE56-.LASFDE56
++.LASFDE56:
++ .long .Lframe0
++ .long .LFB910
++ .long .LFE910-.LFB910
++ .align 4
++.LEFDE56:
++.LSFDE58:
++ .long .LEFDE58-.LASFDE58
++.LASFDE58:
++ .long .Lframe0
++ .long .LFB911
++ .long .LFE911-.LFB911
++ .align 4
++.LEFDE58:
++.LSFDE60:
++ .long .LEFDE60-.LASFDE60
++.LASFDE60:
++ .long .Lframe0
++ .long .LFB912
++ .long .LFE912-.LFB912
++ .align 4
++.LEFDE60:
++.LSFDE62:
++ .long .LEFDE62-.LASFDE62
++.LASFDE62:
++ .long .Lframe0
++ .long .LFB913
++ .long .LFE913-.LFB913
++ .align 4
++.LEFDE62:
++.LSFDE64:
++ .long .LEFDE64-.LASFDE64
++.LASFDE64:
++ .long .Lframe0
++ .long .LFB914
++ .long .LFE914-.LFB914
++ .align 4
++.LEFDE64:
++.LSFDE66:
++ .long .LEFDE66-.LASFDE66
++.LASFDE66:
++ .long .Lframe0
++ .long .LFB919
++ .long .LFE919-.LFB919
++ .align 4
++.LEFDE66:
++.LSFDE68:
++ .long .LEFDE68-.LASFDE68
++.LASFDE68:
++ .long .Lframe0
++ .long .LFB895
++ .long .LFE895-.LFB895
++ .align 4
++.LEFDE68:
++.LSFDE70:
++ .long .LEFDE70-.LASFDE70
++.LASFDE70:
++ .long .Lframe0
++ .long .LFB896
++ .long .LFE896-.LFB896
++ .byte 0x4
++ .long .LCFI51-.LFB896
++ .byte 0xe
++ .uleb128 0x8
++ .byte 0x86
++ .uleb128 0x2
++ .byte 0x4
++ .long .LCFI52-.LCFI51
++ .byte 0xe
++ .uleb128 0xc
++ .byte 0x83
++ .uleb128 0x3
++ .byte 0x4
++ .long .LCFI53-.LCFI52
++ .byte 0xe
++ .uleb128 0x10
++ .align 4
++.LEFDE70:
++.LSFDE72:
++ .long .LEFDE72-.LASFDE72
++.LASFDE72:
++ .long .Lframe0
++ .long .LFB894
++ .long .LFE894-.LFB894
++ .byte 0x4
++ .long .LCFI54-.LFB894
++ .byte 0xe
++ .uleb128 0x8
++ .byte 0x4
++ .long .LCFI55-.LCFI54
++ .byte 0xe
++ .uleb128 0xc
++ .byte 0x86
++ .uleb128 0x3
++ .byte 0x87
++ .uleb128 0x2
++ .byte 0x4
++ .long .LCFI56-.LCFI55
++ .byte 0xe
++ .uleb128 0x10
++ .byte 0x83
++ .uleb128 0x4
++ .align 4
++.LEFDE72:
++ .file 13 "include/linux/spinlock_types.h"
++ .file 14 "include/asm/spinlock_types.h"
++ .file 15 "include/linux/thread_info.h"
++ .file 16 "include/asm/thread_info.h"
++ .file 17 "include/linux/capability.h"
++ .file 18 "include/asm/atomic.h"
++ .file 19 "include/linux/cpumask.h"
++ .file 20 "include/asm/page.h"
++ .file 21 "include/linux/mm.h"
++ .file 22 "include/linux/rbtree.h"
++ .file 23 "include/linux/prio_tree.h"
++ .file 24 "include/linux/mmzone.h"
++ .file 25 "include/linux/mm_types.h"
++ .file 26 "include/linux/fs.h"
++ .file 27 "include/linux/futex.h"
++ .file 28 "include/linux/types.h"
++ .file 29 "include/asm/posix_types.h"
++ .file 30 "include/asm/types.h"
++ .file 31 "include/linux/time.h"
++ .file 32 "include/linux/mutex.h"
++ .file 33 "include/linux/rwsem.h"
++ .file 34 "include/asm/rwsem.h"
++ .file 35 "include/linux/fs_struct.h"
++ .file 36 "include/linux/dcache.h"
++ .file 37 "include/linux/rcupdate.h"
++ .file 38 "include/linux/sysfs.h"
++ .file 39 "include/linux/namei.h"
++ .file 40 "include/asm/alternative.h"
++ .file 41 "include/linux/module.h"
++ .file 42 "include/linux/kobject.h"
++ .file 43 "include/linux/kref.h"
++ .file 44 "include/linux/wait.h"
++ .file 45 "include/asm/uaccess.h"
++ .file 46 "include/asm/module.h"
++ .file 47 "include/asm-generic/bug.h"
++ .file 48 "include/asm/local.h"
++ .file 49 "include/asm-generic/atomic.h"
++ .file 50 "include/linux/elf.h"
++ .file 51 "include/linux/aio.h"
++ .file 52 "include/linux/workqueue.h"
++ .file 53 "include/linux/aio_abi.h"
++ .file 54 "include/linux/uio.h"
++ .file 55 "include/linux/nfs_fs_i.h"
++ .file 56 "include/linux/kernel.h"
++ .file 57 "include/linux/pid.h"
++ .file 58 "include/linux/lockdep.h"
++ .file 59 "include/linux/quota.h"
++ .file 60 "include/linux/dqblk_xfs.h"
++ .file 61 "include/asm/semaphore.h"
++ .file 62 "include/linux/backing-dev.h"
++ .file 63 "include/linux/dqblk_v1.h"
++ .file 64 "include/linux/dqblk_v2.h"
++ .file 65 "include/linux/stat.h"
++ .file 66 "include/linux/radix-tree.h"
++ .file 67 "include/asm/mmu.h"
++ .file 68 "include/linux/completion.h"
++ .file 69 "include/asm-generic/cputime.h"
++ .file 70 "include/linux/signal.h"
++ .file 71 "include/linux/sem.h"
++ .file 72 "include/asm/math_emu.h"
++ .file 73 "include/asm/vm86.h"
++ .file 74 "include/asm/signal.h"
++ .file 75 "include/linux/hrtimer.h"
++ .file 76 "include/linux/ktime.h"
++ .file 77 "include/linux/resource.h"
++ .file 78 "include/asm-generic/signal.h"
++ .file 79 "include/linux/seccomp.h"
++ .file 80 "include/linux/plist.h"
++ .file 81 "include/linux/swap.h"
++ .file 82 "include/asm-generic/siginfo.h"
++ .file 83 "include/linux/task_io_accounting.h"
++ .file 84 "include/linux/slab.h"
++ .file 85 "include/linux/notifier.h"
++ .file 86 "include/linux/interrupt.h"
++ .file 87 "include/linux/arrays.h"
++ .file 88 "include/asm/percpu.h"
++ .file 89 "include/asm/smp.h"
++ .file 90 "include/linux/timex.h"
++ .file 91 "include/linux/jiffies.h"
++ .file 92 "include/linux/pm.h"
++ .file 93 "include/linux/device.h"
++ .file 94 "include/linux/klist.h"
++ .file 95 "include/asm/device.h"
++ .file 96 "include/asm/fixmap.h"
++ .file 97 "include/asm/acpi.h"
++ .file 98 "include/asm/io_apic.h"
++ .file 99 "include/asm/genapic.h"
++ .file 100 "include/asm/mpspec.h"
++ .file 101 "include/asm/mpspec_def.h"
++ .file 102 "include/linux/kernel_stat.h"
++ .file 103 "include/asm/desc.h"
++ .file 104 "include/asm/irq_regs.h"
++ .file 105 "include/asm/ptrace.h"
++ .file 106 "include/linux/irq.h"
++ .file 107 "include/linux/irqreturn.h"
++ .file 108 "include/linux/profile.h"
++ .file 109 "include/linux/ioport.h"
++ .file 110 "include/linux/vmstat.h"
++ .text
++.Letext0:
++ .section .debug_loc,"",@progbits
++.Ldebug_loc0:
++.LLST0:
++ .long .LFB883
++ .long .LCFI0
++ .value 0x2
++ .byte 0x74
++ .sleb128 4
++ .long .LCFI0
++ .long .LCFI1
++ .value 0x2
++ .byte 0x74
++ .sleb128 8
++ .long .LCFI1
++ .long .LCFI2
++ .value 0x2
++ .byte 0x74
++ .sleb128 12
++ .long .LCFI2
++ .long .LCFI3
++ .value 0x2
++ .byte 0x74
++ .sleb128 16
++ .long .LCFI3
++ .long .LFE883
++ .value 0x2
++ .byte 0x74
++ .sleb128 20
++ .long 0x0
++ .long 0x0
++.LLST1:
++ .long .LVL0
++ .long .LVL2
++ .value 0x1
++ .byte 0x50
++ .long .LVL2
++ .long .LVL3
++ .value 0x1
++ .byte 0x51
++ .long .LVL3
++ .long .LVL4
++ .value 0x1
++ .byte 0x53
++ .long .LVL4
++ .long .LVL5
++ .value 0x1
++ .byte 0x50
++ .long .LVL6
++ .long .LVL7
++ .value 0x1
++ .byte 0x50
++ .long .LVL7
++ .long .LVL8
++ .value 0x1
++ .byte 0x52
++ .long 0x0
++ .long 0x0
++.LLST2:
++ .long .LVL0
++ .long .LVL1
++ .value 0x1
++ .byte 0x52
++ .long 0x0
++ .long 0x0
++.LLST4:
++ .long .LVL11
++ .long .LVL12
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST5:
++ .long .LVL11
++ .long .LVL13
++ .value 0x1
++ .byte 0x52
++ .long 0x0
++ .long 0x0
++.LLST7:
++ .long .LVL14
++ .long .LVL16
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST8:
++ .long .LVL15
++ .long .LVL16
++ .value 0x1
++ .byte 0x52
++ .long 0x0
++ .long 0x0
++.LLST10:
++ .long .LVL17
++ .long .LVL19
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST11:
++ .long .LVL18
++ .long .LVL19
++ .value 0x1
++ .byte 0x52
++ .long 0x0
++ .long 0x0
++.LLST12:
++ .long .LFB888
++ .long .LCFI4
++ .value 0x2
++ .byte 0x74
++ .sleb128 4
++ .long .LCFI4
++ .long .LCFI5
++ .value 0x2
++ .byte 0x74
++ .sleb128 8
++ .long .LCFI5
++ .long .LFE888
++ .value 0x2
++ .byte 0x74
++ .sleb128 12
++ .long 0x0
++ .long 0x0
++.LLST13:
++ .long .LVL20
++ .long .LVL21
++ .value 0x1
++ .byte 0x50
++ .long .LVL21
++ .long .LVL35
++ .value 0x1
++ .byte 0x53
++ .long 0x0
++ .long 0x0
++.LLST14:
++ .long .LVL21
++ .long .LVL29
++ .value 0x1
++ .byte 0x50
++ .long .LVL29
++ .long .LVL30
++ .value 0x1
++ .byte 0x50
++ .long .LVL32
++ .long .LVL34
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST15:
++ .long .LVL22
++ .long .LVL23
++ .value 0x1
++ .byte 0x51
++ .long .LVL24
++ .long .LVL25
++ .value 0x1
++ .byte 0x51
++ .long .LVL26
++ .long .LVL27
++ .value 0x1
++ .byte 0x51
++ .long .LVL28
++ .long .LVL31
++ .value 0x1
++ .byte 0x51
++ .long .LVL32
++ .long .LVL33
++ .value 0x1
++ .byte 0x51
++ .long 0x0
++ .long 0x0
++.LLST16:
++ .long .LVL23
++ .long .LVL24
++ .value 0x1
++ .byte 0x51
++ .long .LVL25
++ .long .LVL26
++ .value 0x1
++ .byte 0x51
++ .long .LVL27
++ .long .LVL28
++ .value 0x1
++ .byte 0x51
++ .long .LVL31
++ .long .LVL32
++ .value 0x1
++ .byte 0x51
++ .long .LVL33
++ .long .LFE888
++ .value 0x1
++ .byte 0x51
++ .long 0x0
++ .long 0x0
++.LLST18:
++ .long .LFB890
++ .long .LCFI6
++ .value 0x2
++ .byte 0x74
++ .sleb128 4
++ .long .LCFI6
++ .long .LFE890
++ .value 0x2
++ .byte 0x74
++ .sleb128 8
++ .long 0x0
++ .long 0x0
++.LLST19:
++ .long .LVL38
++ .long .LVL39
++ .value 0x1
++ .byte 0x50
++ .long .LVL39
++ .long .LVL40
++ .value 0x1
++ .byte 0x53
++ .long 0x0
++ .long 0x0
++.LLST20:
++ .long .LFB899
++ .long .LCFI7
++ .value 0x2
++ .byte 0x74
++ .sleb128 4
++ .long .LCFI7
++ .long .LCFI8
++ .value 0x2
++ .byte 0x74
++ .sleb128 8
++ .long .LCFI8
++ .long .LCFI9
++ .value 0x2
++ .byte 0x74
++ .sleb128 12
++ .long .LCFI9
++ .long .LCFI10
++ .value 0x2
++ .byte 0x74
++ .sleb128 16
++ .long .LCFI10
++ .long .LFE899
++ .value 0x2
++ .byte 0x74
++ .sleb128 24
++ .long 0x0
++ .long 0x0
++.LLST21:
++ .long .LVL41
++ .long .LVL42
++ .value 0x1
++ .byte 0x50
++ .long .LVL42
++ .long .LVL55
++ .value 0x1
++ .byte 0x57
++ .long 0x0
++ .long 0x0
++.LLST22:
++ .long .LVL41
++ .long .LVL43
++ .value 0x1
++ .byte 0x52
++ .long 0x0
++ .long 0x0
++.LLST23:
++ .long .LVL41
++ .long .LVL47
++ .value 0x1
++ .byte 0x51
++ .long .LVL47
++ .long .LVL50
++ .value 0x1
++ .byte 0x56
++ .long .LVL50
++ .long .LVL51
++ .value 0x1
++ .byte 0x51
++ .long .LVL51
++ .long .LVL54
++ .value 0x1
++ .byte 0x56
++ .long 0x0
++ .long 0x0
++.LLST24:
++ .long .LVL44
++ .long .LVL47
++ .value 0x1
++ .byte 0x52
++ .long .LVL48
++ .long .LFE899
++ .value 0x1
++ .byte 0x52
++ .long 0x0
++ .long 0x0
++.LLST25:
++ .long .LVL45
++ .long .LVL53
++ .value 0x1
++ .byte 0x53
++ .long 0x0
++ .long 0x0
++.LLST26:
++ .long .LVL42
++ .long .LVL46
++ .value 0x1
++ .byte 0x50
++ .long .LVL49
++ .long .LVL52
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST27:
++ .long .LFB923
++ .long .LCFI11
++ .value 0x2
++ .byte 0x74
++ .sleb128 4
++ .long .LCFI11
++ .long .LCFI12
++ .value 0x2
++ .byte 0x74
++ .sleb128 8
++ .long .LCFI12
++ .long .LCFI13
++ .value 0x2
++ .byte 0x74
++ .sleb128 12
++ .long .LCFI13
++ .long .LCFI14
++ .value 0x2
++ .byte 0x74
++ .sleb128 16
++ .long .LCFI14
++ .long .LCFI15
++ .value 0x2
++ .byte 0x74
++ .sleb128 20
++ .long .LCFI15
++ .long .LFE923
++ .value 0x2
++ .byte 0x74
++ .sleb128 36
++ .long 0x0
++ .long 0x0
++.LLST28:
++ .long .LVL56
++ .long .LVL58
++ .value 0x1
++ .byte 0x50
++ .long .LVL59
++ .long .LVL60
++ .value 0x1
++ .byte 0x50
++ .long .LVL68
++ .long .LVL72
++ .value 0x1
++ .byte 0x50
++ .long .LVL73
++ .long .LVL79
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST29:
++ .long .LVL56
++ .long .LVL61
++ .value 0x1
++ .byte 0x52
++ .long .LVL68
++ .long .LVL71
++ .value 0x1
++ .byte 0x52
++ .long .LVL73
++ .long .LVL75
++ .value 0x1
++ .byte 0x52
++ .long .LVL87
++ .long .LFE923
++ .value 0x1
++ .byte 0x52
++ .long 0x0
++ .long 0x0
++.LLST30:
++ .long .LVL56
++ .long .LVL57
++ .value 0x1
++ .byte 0x51
++ .long .LVL57
++ .long .LVL76
++ .value 0x1
++ .byte 0x56
++ .long .LVL76
++ .long .LVL78
++ .value 0x1
++ .byte 0x51
++ .long .LVL87
++ .long .LVL89
++ .value 0x1
++ .byte 0x56
++ .long .LVL89
++ .long .LFE923
++ .value 0x1
++ .byte 0x51
++ .long 0x0
++ .long 0x0
++.LLST31:
++ .long .LVL74
++ .long .LVL83
++ .value 0x1
++ .byte 0x55
++ .long .LVL83
++ .long .LVL91
++ .value 0x1
++ .byte 0x51
++ .long 0x0
++ .long 0x0
++.LLST32:
++ .long .LVL62
++ .long .LVL63
++ .value 0x1
++ .byte 0x57
++ .long .LVL64
++ .long .LVL68
++ .value 0x1
++ .byte 0x53
++ .long .LVL69
++ .long .LVL70
++ .value 0x1
++ .byte 0x53
++ .long .LVL73
++ .long .LVL84
++ .value 0x1
++ .byte 0x53
++ .long .LVL84
++ .long .LVL87
++ .value 0x1
++ .byte 0x52
++ .long .LVL87
++ .long .LVL88
++ .value 0x1
++ .byte 0x53
++ .long .LVL88
++ .long .LVL90
++ .value 0x1
++ .byte 0x57
++ .long .LVL90
++ .long .LFE923
++ .value 0x1
++ .byte 0x52
++ .long 0x0
++ .long 0x0
++.LLST33:
++ .long .LVL67
++ .long .LVL68
++ .value 0x1
++ .byte 0x51
++ .long .LVL69
++ .long .LVL70
++ .value 0x1
++ .byte 0x51
++ .long .LVL73
++ .long .LVL78
++ .value 0x1
++ .byte 0x51
++ .long 0x0
++ .long 0x0
++.LLST34:
++ .long .LVL66
++ .long .LVL68
++ .value 0x1
++ .byte 0x57
++ .long .LVL69
++ .long .LVL70
++ .value 0x1
++ .byte 0x57
++ .long .LVL73
++ .long .LVL77
++ .value 0x1
++ .byte 0x57
++ .long 0x0
++ .long 0x0
++.LLST35:
++ .long .LVL79
++ .long .LVL80
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST36:
++ .long .LVL80
++ .long .LVL81
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST37:
++ .long .LVL81
++ .long .LVL82
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST38:
++ .long .LVL82
++ .long .LVL85
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST39:
++ .long .LVL85
++ .long .LVL86
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST41:
++ .long .LVL94
++ .long .LVL95
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST42:
++ .long .LVL92
++ .long .LVL93
++ .value 0x1
++ .byte 0x51
++ .long 0x0
++ .long 0x0
++.LLST43:
++ .long .LFB904
++ .long .LCFI16
++ .value 0x2
++ .byte 0x74
++ .sleb128 4
++ .long .LCFI16
++ .long .LCFI17
++ .value 0x2
++ .byte 0x74
++ .sleb128 8
++ .long .LCFI17
++ .long .LCFI18
++ .value 0x2
++ .byte 0x74
++ .sleb128 12
++ .long .LCFI18
++ .long .LCFI19
++ .value 0x2
++ .byte 0x74
++ .sleb128 16
++ .long .LCFI19
++ .long .LFE904
++ .value 0x2
++ .byte 0x74
++ .sleb128 40
++ .long 0x0
++ .long 0x0
++.LLST44:
++ .long .LVL96
++ .long .LVL97
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST45:
++ .long .LVL99
++ .long .LVL114
++ .value 0x1
++ .byte 0x56
++ .long 0x0
++ .long 0x0
++.LLST46:
++ .long .LVL98
++ .long .LVL100
++ .value 0x1
++ .byte 0x52
++ .long 0x0
++ .long 0x0
++.LLST47:
++ .long .LVL102
++ .long .LVL105
++ .value 0x1
++ .byte 0x53
++ .long 0x0
++ .long 0x0
++.LLST48:
++ .long .LVL101
++ .long .LVL103
++ .value 0x1
++ .byte 0x52
++ .long .LVL104
++ .long .LVL108
++ .value 0x1
++ .byte 0x52
++ .long .LVL112
++ .long .LFE904
++ .value 0x1
++ .byte 0x52
++ .long 0x0
++ .long 0x0
++.LLST49:
++ .long .LVL101
++ .long .LVL115
++ .value 0x1
++ .byte 0x57
++ .long 0x0
++ .long 0x0
++.LLST50:
++ .long .LVL106
++ .long .LVL111
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST51:
++ .long .LVL107
++ .long .LVL111
++ .value 0x1
++ .byte 0x51
++ .long 0x0
++ .long 0x0
++.LLST52:
++ .long .LVL108
++ .long .LVL109
++ .value 0x1
++ .byte 0x52
++ .long 0x0
++ .long 0x0
++.LLST53:
++ .long .LVL110
++ .long .LVL113
++ .value 0x1
++ .byte 0x53
++ .long 0x0
++ .long 0x0
++.LLST54:
++ .long .LFB920
++ .long .LCFI20
++ .value 0x2
++ .byte 0x74
++ .sleb128 4
++ .long .LCFI20
++ .long .LCFI21
++ .value 0x2
++ .byte 0x74
++ .sleb128 8
++ .long .LCFI21
++ .long .LCFI22
++ .value 0x2
++ .byte 0x74
++ .sleb128 12
++ .long .LCFI22
++ .long .LFE920
++ .value 0x2
++ .byte 0x74
++ .sleb128 20
++ .long 0x0
++ .long 0x0
++.LLST55:
++ .long .LVL116
++ .long .LVL117
++ .value 0x1
++ .byte 0x50
++ .long .LVL117
++ .long .LVL132
++ .value 0x1
++ .byte 0x53
++ .long 0x0
++ .long 0x0
++.LLST56:
++ .long .LVL123
++ .long .LVL126
++ .value 0x1
++ .byte 0x52
++ .long .LVL126
++ .long .LVL129
++ .value 0x1
++ .byte 0x57
++ .long 0x0
++ .long 0x0
++.LLST57:
++ .long .LVL124
++ .long .LVL130
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST58:
++ .long .LVL119
++ .long .LVL122
++ .value 0x1
++ .byte 0x51
++ .long 0x0
++ .long 0x0
++.LLST59:
++ .long .LVL118
++ .long .LVL121
++ .value 0x1
++ .byte 0x57
++ .long 0x0
++ .long 0x0
++.LLST60:
++ .long .LVL120
++ .long .LVL127
++ .value 0x1
++ .byte 0x57
++ .long .LVL128
++ .long .LVL129
++ .value 0x1
++ .byte 0x57
++ .long 0x0
++ .long 0x0
++.LLST61:
++ .long .LFB921
++ .long .LCFI23
++ .value 0x2
++ .byte 0x74
++ .sleb128 4
++ .long .LCFI23
++ .long .LCFI24
++ .value 0x2
++ .byte 0x74
++ .sleb128 8
++ .long .LCFI24
++ .long .LFE921
++ .value 0x3
++ .byte 0x74
++ .sleb128 72
++ .long 0x0
++ .long 0x0
++.LLST63:
++ .long .LVL134
++ .long .LVL135
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST65:
++ .long .LFB907
++ .long .LCFI25
++ .value 0x2
++ .byte 0x74
++ .sleb128 4
++ .long .LCFI25
++ .long .LCFI26
++ .value 0x2
++ .byte 0x74
++ .sleb128 8
++ .long .LCFI26
++ .long .LCFI27
++ .value 0x2
++ .byte 0x74
++ .sleb128 12
++ .long .LCFI27
++ .long .LCFI28
++ .value 0x2
++ .byte 0x74
++ .sleb128 16
++ .long .LCFI28
++ .long .LCFI29
++ .value 0x2
++ .byte 0x74
++ .sleb128 20
++ .long .LCFI29
++ .long .LFE907
++ .value 0x2
++ .byte 0x74
++ .sleb128 24
++ .long 0x0
++ .long 0x0
++.LLST66:
++ .long .LVL137
++ .long .LVL138
++ .value 0x1
++ .byte 0x50
++ .long .LVL138
++ .long .LVL139
++ .value 0x6
++ .byte 0x50
++ .byte 0x93
++ .uleb128 0x4
++ .byte 0x52
++ .byte 0x93
++ .uleb128 0x4
++ .long .LVL139
++ .long .LVL140
++ .value 0x1
++ .byte 0x53
++ .long .LVL143
++ .long .LVL144
++ .value 0x1
++ .byte 0x53
++ .long 0x0
++ .long 0x0
++.LLST67:
++ .long .LVL141
++ .long .LVL142
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST69:
++ .long .LFB892
++ .long .LCFI30
++ .value 0x2
++ .byte 0x74
++ .sleb128 4
++ .long .LCFI30
++ .long .LCFI31
++ .value 0x2
++ .byte 0x74
++ .sleb128 8
++ .long .LCFI31
++ .long .LCFI32
++ .value 0x2
++ .byte 0x74
++ .sleb128 12
++ .long .LCFI32
++ .long .LCFI33
++ .value 0x2
++ .byte 0x74
++ .sleb128 16
++ .long .LCFI33
++ .long .LFE892
++ .value 0x2
++ .byte 0x74
++ .sleb128 20
++ .long 0x0
++ .long 0x0
++.LLST70:
++ .long .LVL145
++ .long .LVL146
++ .value 0x1
++ .byte 0x50
++ .long .LVL146
++ .long .LVL151
++ .value 0x1
++ .byte 0x57
++ .long 0x0
++ .long 0x0
++.LLST71:
++ .long .LVL145
++ .long .LVL146
++ .value 0x1
++ .byte 0x52
++ .long .LVL146
++ .long .LVL152
++ .value 0x1
++ .byte 0x55
++ .long 0x0
++ .long 0x0
++.LLST72:
++ .long .LVL146
++ .long .LVL150
++ .value 0x1
++ .byte 0x56
++ .long 0x0
++ .long 0x0
++.LLST73:
++ .long .LVL146
++ .long .LVL149
++ .value 0x1
++ .byte 0x53
++ .long 0x0
++ .long 0x0
++.LLST74:
++ .long .LFB897
++ .long .LCFI34
++ .value 0x2
++ .byte 0x74
++ .sleb128 4
++ .long .LCFI34
++ .long .LCFI35
++ .value 0x2
++ .byte 0x74
++ .sleb128 8
++ .long .LCFI35
++ .long .LCFI36
++ .value 0x2
++ .byte 0x74
++ .sleb128 12
++ .long .LCFI36
++ .long .LFE897
++ .value 0x2
++ .byte 0x74
++ .sleb128 16
++ .long 0x0
++ .long 0x0
++.LLST75:
++ .long .LVL153
++ .long .LVL155
++ .value 0x1
++ .byte 0x50
++ .long .LVL155
++ .long .LVL167
++ .value 0x1
++ .byte 0x53
++ .long 0x0
++ .long 0x0
++.LLST76:
++ .long .LVL156
++ .long .LVL157
++ .value 0x1
++ .byte 0x50
++ .long .LVL158
++ .long .LVL166
++ .value 0x1
++ .byte 0x51
++ .long 0x0
++ .long 0x0
++.LLST77:
++ .long .LVL154
++ .long .LVL160
++ .value 0x1
++ .byte 0x56
++ .long .LVL162
++ .long .LVL168
++ .value 0x1
++ .byte 0x56
++ .long 0x0
++ .long 0x0
++.LLST78:
++ .long .LVL159
++ .long .LVL164
++ .value 0x1
++ .byte 0x52
++ .long 0x0
++ .long 0x0
++.LLST79:
++ .long .LVL161
++ .long .LVL165
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST80:
++ .long .LFB898
++ .long .LCFI37
++ .value 0x2
++ .byte 0x74
++ .sleb128 4
++ .long .LCFI37
++ .long .LFE898
++ .value 0x2
++ .byte 0x74
++ .sleb128 8
++ .long 0x0
++ .long 0x0
++.LLST81:
++ .long .LVL169
++ .long .LVL170
++ .value 0x1
++ .byte 0x50
++ .long .LVL170
++ .long .LVL173
++ .value 0x1
++ .byte 0x53
++ .long 0x0
++ .long 0x0
++.LLST82:
++ .long .LVL170
++ .long .LVL171
++ .value 0x1
++ .byte 0x50
++ .long .LVL172
++ .long .LVL173
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST83:
++ .long .LFB893
++ .long .LCFI38
++ .value 0x2
++ .byte 0x74
++ .sleb128 4
++ .long .LCFI38
++ .long .LCFI39
++ .value 0x2
++ .byte 0x74
++ .sleb128 8
++ .long .LCFI39
++ .long .LCFI40
++ .value 0x2
++ .byte 0x74
++ .sleb128 12
++ .long .LCFI40
++ .long .LCFI41
++ .value 0x2
++ .byte 0x74
++ .sleb128 16
++ .long .LCFI41
++ .long .LCFI42
++ .value 0x2
++ .byte 0x74
++ .sleb128 20
++ .long .LCFI42
++ .long .LFE893
++ .value 0x2
++ .byte 0x74
++ .sleb128 28
++ .long 0x0
++ .long 0x0
++.LLST84:
++ .long .LVL174
++ .long .LVL176
++ .value 0x1
++ .byte 0x50
++ .long .LVL176
++ .long .LVL190
++ .value 0x1
++ .byte 0x53
++ .long 0x0
++ .long 0x0
++.LLST85:
++ .long .LVL174
++ .long .LVL175
++ .value 0x1
++ .byte 0x52
++ .long .LVL175
++ .long .LVL191
++ .value 0x1
++ .byte 0x55
++ .long 0x0
++ .long 0x0
++.LLST86:
++ .long .LVL177
++ .long .LVL188
++ .value 0x1
++ .byte 0x56
++ .long 0x0
++ .long 0x0
++.LLST87:
++ .long .LVL183
++ .long .LVL189
++ .value 0x1
++ .byte 0x57
++ .long 0x0
++ .long 0x0
++.LLST88:
++ .long .LVL180
++ .long .LVL181
++ .value 0x1
++ .byte 0x52
++ .long 0x0
++ .long 0x0
++.LLST89:
++ .long .LVL179
++ .long .LVL182
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST90:
++ .long .LVL181
++ .long .LVL184
++ .value 0x1
++ .byte 0x52
++ .long .LVL185
++ .long .LVL186
++ .value 0x1
++ .byte 0x52
++ .long 0x0
++ .long 0x0
++.LLST91:
++ .long .LFB916
++ .long .LCFI43
++ .value 0x2
++ .byte 0x74
++ .sleb128 4
++ .long .LCFI43
++ .long .LCFI44
++ .value 0x2
++ .byte 0x74
++ .sleb128 8
++ .long .LCFI44
++ .long .LCFI45
++ .value 0x2
++ .byte 0x74
++ .sleb128 12
++ .long .LCFI45
++ .long .LFE916
++ .value 0x2
++ .byte 0x74
++ .sleb128 44
++ .long 0x0
++ .long 0x0
++.LLST92:
++ .long .LVL192
++ .long .LVL193
++ .value 0x1
++ .byte 0x50
++ .long .LVL193
++ .long .LVL194
++ .value 0x1
++ .byte 0x53
++ .long .LVL194
++ .long .LVL195
++ .value 0x1
++ .byte 0x50
++ .long .LVL195
++ .long .LVL197
++ .value 0x1
++ .byte 0x53
++ .long .LVL197
++ .long .LVL199
++ .value 0x1
++ .byte 0x50
++ .long .LVL201
++ .long .LVL204
++ .value 0x1
++ .byte 0x53
++ .long 0x0
++ .long 0x0
++.LLST93:
++ .long .LVL198
++ .long .LVL205
++ .value 0x1
++ .byte 0x56
++ .long 0x0
++ .long 0x0
++.LLST94:
++ .long .LVL196
++ .long .LVL197
++ .value 0x1
++ .byte 0x50
++ .long .LVL202
++ .long .LVL203
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST95:
++ .long .LVL199
++ .long .LVL200
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST97:
++ .long .LVL206
++ .long .LVL208
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST98:
++ .long .LVL207
++ .long .LVL208
++ .value 0x1
++ .byte 0x52
++ .long 0x0
++ .long 0x0
++.LLST100:
++ .long .LVL209
++ .long .LVL210
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST101:
++ .long .LVL211
++ .long .LVL212
++ .value 0x1
++ .byte 0x50
++ .long .LVL212
++ .long .LFE925
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST103:
++ .long .LVL213
++ .long .LVL215
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST104:
++ .long .LVL214
++ .long .LVL215
++ .value 0x1
++ .byte 0x52
++ .long 0x0
++ .long 0x0
++.LLST106:
++ .long .LVL216
++ .long .LVL217
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST107:
++ .long .LVL218
++ .long .LVL219
++ .value 0x1
++ .byte 0x52
++ .long .LVL220
++ .long .LVL223
++ .value 0x1
++ .byte 0x52
++ .long 0x0
++ .long 0x0
++.LLST108:
++ .long .LVL221
++ .long .LVL222
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST109:
++ .long .LFB901
++ .long .LCFI46
++ .value 0x2
++ .byte 0x74
++ .sleb128 4
++ .long .LCFI46
++ .long .LCFI47
++ .value 0x2
++ .byte 0x74
++ .sleb128 8
++ .long .LCFI47
++ .long .LCFI48
++ .value 0x2
++ .byte 0x74
++ .sleb128 12
++ .long .LCFI48
++ .long .LFE901
++ .value 0x2
++ .byte 0x74
++ .sleb128 16
++ .long 0x0
++ .long 0x0
++.LLST110:
++ .long .LVL224
++ .long .LVL227
++ .value 0x1
++ .byte 0x50
++ .long .LVL227
++ .long .LVL228
++ .value 0x1
++ .byte 0x57
++ .long .LVL228
++ .long .LVL229
++ .value 0x1
++ .byte 0x50
++ .long .LVL229
++ .long .LVL232
++ .value 0x1
++ .byte 0x57
++ .long 0x0
++ .long 0x0
++.LLST111:
++ .long .LVL226
++ .long .LVL230
++ .value 0x1
++ .byte 0x53
++ .long .LVL230
++ .long .LVL233
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST112:
++ .long .LVL225
++ .long .LVL231
++ .value 0x1
++ .byte 0x56
++ .long 0x0
++ .long 0x0
++.LLST113:
++ .long .LFB909
++ .long .LCFI49
++ .value 0x2
++ .byte 0x74
++ .sleb128 4
++ .long .LCFI49
++ .long .LCFI50
++ .value 0x2
++ .byte 0x74
++ .sleb128 8
++ .long .LCFI50
++ .long .LFE909
++ .value 0x2
++ .byte 0x74
++ .sleb128 48
++ .long 0x0
++ .long 0x0
++.LLST114:
++ .long .LVL235
++ .long .LVL237
++ .value 0x1
++ .byte 0x51
++ .long 0x0
++ .long 0x0
++.LLST115:
++ .long .LVL234
++ .long .LVL236
++ .value 0x1
++ .byte 0x52
++ .long 0x0
++ .long 0x0
++.LLST116:
++ .long .LVL238
++ .long .LVL239
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST118:
++ .long .LVL240
++ .long .LVL241
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST120:
++ .long .LVL242
++ .long .LVL243
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST122:
++ .long .LVL244
++ .long .LVL245
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST124:
++ .long .LVL246
++ .long .LVL247
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST126:
++ .long .LVL248
++ .long .LVL249
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST128:
++ .long .LVL250
++ .long .LVL251
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST130:
++ .long .LVL252
++ .long .LVL253
++ .value 0x1
++ .byte 0x50
++ .long .LVL253
++ .long .LVL254
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST131:
++ .long .LVL252
++ .long .LVL253
++ .value 0x1
++ .byte 0x52
++ .long .LVL253
++ .long .LFE895
++ .value 0x1
++ .byte 0x52
++ .long 0x0
++ .long 0x0
++.LLST132:
++ .long .LFB896
++ .long .LCFI51
++ .value 0x2
++ .byte 0x74
++ .sleb128 4
++ .long .LCFI51
++ .long .LCFI52
++ .value 0x2
++ .byte 0x74
++ .sleb128 8
++ .long .LCFI52
++ .long .LCFI53
++ .value 0x2
++ .byte 0x74
++ .sleb128 12
++ .long .LCFI53
++ .long .LFE896
++ .value 0x2
++ .byte 0x74
++ .sleb128 16
++ .long 0x0
++ .long 0x0
++.LLST133:
++ .long .LVL255
++ .long .LVL257
++ .value 0x1
++ .byte 0x50
++ .long .LVL257
++ .long .LVL267
++ .value 0x1
++ .byte 0x53
++ .long 0x0
++ .long 0x0
++.LLST134:
++ .long .LVL258
++ .long .LVL266
++ .value 0x1
++ .byte 0x51
++ .long 0x0
++ .long 0x0
++.LLST135:
++ .long .LVL256
++ .long .LVL260
++ .value 0x1
++ .byte 0x56
++ .long .LVL262
++ .long .LVL268
++ .value 0x1
++ .byte 0x56
++ .long 0x0
++ .long 0x0
++.LLST136:
++ .long .LVL259
++ .long .LVL264
++ .value 0x1
++ .byte 0x52
++ .long 0x0
++ .long 0x0
++.LLST137:
++ .long .LVL261
++ .long .LVL265
++ .value 0x1
++ .byte 0x50
++ .long 0x0
++ .long 0x0
++.LLST138:
++ .long .LFB894
++ .long .LCFI54
++ .value 0x2
++ .byte 0x74
++ .sleb128 4
++ .long .LCFI54
++ .long .LCFI55
++ .value 0x2
++ .byte 0x74
++ .sleb128 8
++ .long .LCFI55
++ .long .LCFI56
++ .value 0x2
++ .byte 0x74
++ .sleb128 12
++ .long .LCFI56
++ .long .LFE894
++ .value 0x2
++ .byte 0x74
++ .sleb128 16
++ .long 0x0
++ .long 0x0
++.LLST139:
++ .long .LVL269
++ .long .LVL271
++ .value 0x1
++ .byte 0x50
++ .long .LVL271
++ .long .LVL275
++ .value 0x1
++ .byte 0x56
++ .long 0x0
++ .long 0x0
++.LLST140:
++ .long .LVL269
++ .long .LVL270
++ .value 0x1
++ .byte 0x52
++ .long 0x0
++ .long 0x0
++.LLST141:
++ .long .LVL272
++ .long .LVL276
++ .value 0x1
++ .byte 0x57
++ .long 0x0
++ .long 0x0
++.LLST142:
++ .long .LVL273
++ .long .LVL274
++ .value 0x1
++ .byte 0x53
++ .long .LVL274
++ .long .LVL277
++ .value 0x1
++ .byte 0x52
++ .long 0x0
++ .long 0x0
++ .section .debug_info
++ .long 0xaa89
++ .value 0x2
++ .long .Ldebug_abbrev0
++ .byte 0x4
++ .uleb128 0x1
++ .long .Ldebug_line0
++ .long 0x0
++ .long .LASF1718
++ .byte 0x1
++ .long .LASF1719
++ .long .LASF1720
++ .uleb128 0x2
++ .string "int"
++ .byte 0x4
++ .byte 0x5
++ .uleb128 0x3
++ .long .LASF0
++ .byte 0x4
++ .byte 0x7
++ .uleb128 0x3
++ .long .LASF1
++ .byte 0x4
++ .byte 0x7
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x3c
++ .uleb128 0x5
++ .long 0x48
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x7
++ .long .LASF4
++ .byte 0x1d
++ .byte 0xb
++ .long 0x53
++ .uleb128 0x3
++ .long .LASF2
++ .byte 0x2
++ .byte 0x7
++ .uleb128 0x3
++ .long .LASF3
++ .byte 0x4
++ .byte 0x5
++ .uleb128 0x7
++ .long .LASF5
++ .byte 0x1d
++ .byte 0xe
++ .long 0x21
++ .uleb128 0x7
++ .long .LASF6
++ .byte 0x1d
++ .byte 0x12
++ .long 0x77
++ .uleb128 0x3
++ .long .LASF0
++ .byte 0x4
++ .byte 0x7
++ .uleb128 0x7
++ .long .LASF7
++ .byte 0x1d
++ .byte 0x13
++ .long 0x21
++ .uleb128 0x7
++ .long .LASF8
++ .byte 0x1d
++ .byte 0x15
++ .long 0x5a
++ .uleb128 0x7
++ .long .LASF9
++ .byte 0x1d
++ .byte 0x17
++ .long 0x5a
++ .uleb128 0x7
++ .long .LASF10
++ .byte 0x1d
++ .byte 0x18
++ .long 0x21
++ .uleb128 0x7
++ .long .LASF11
++ .byte 0x1d
++ .byte 0x19
++ .long 0x21
++ .uleb128 0x4
++ .byte 0x4
++ .long 0xbb
++ .uleb128 0x3
++ .long .LASF12
++ .byte 0x1
++ .byte 0x6
++ .uleb128 0x7
++ .long .LASF13
++ .byte 0x1d
++ .byte 0x1e
++ .long 0x77
++ .uleb128 0x7
++ .long .LASF14
++ .byte 0x1d
++ .byte 0x1f
++ .long 0x77
++ .uleb128 0x7
++ .long .LASF15
++ .byte 0x1d
++ .byte 0x26
++ .long 0xe3
++ .uleb128 0x3
++ .long .LASF16
++ .byte 0x8
++ .byte 0x5
++ .uleb128 0x7
++ .long .LASF17
++ .byte 0x1e
++ .byte 0x6
++ .long 0x53
++ .uleb128 0x7
++ .long .LASF18
++ .byte 0x1e
++ .byte 0xd
++ .long 0x100
++ .uleb128 0x3
++ .long .LASF19
++ .byte 0x1
++ .byte 0x6
++ .uleb128 0x7
++ .long .LASF20
++ .byte 0x1e
++ .byte 0xe
++ .long 0x112
++ .uleb128 0x3
++ .long .LASF21
++ .byte 0x1
++ .byte 0x8
++ .uleb128 0x7
++ .long .LASF22
++ .byte 0x1e
++ .byte 0x10
++ .long 0x124
++ .uleb128 0x3
++ .long .LASF23
++ .byte 0x2
++ .byte 0x5
++ .uleb128 0x7
++ .long .LASF24
++ .byte 0x1e
++ .byte 0x11
++ .long 0x53
++ .uleb128 0x7
++ .long .LASF25
++ .byte 0x1e
++ .byte 0x13
++ .long 0x21
++ .uleb128 0x7
++ .long .LASF26
++ .byte 0x1e
++ .byte 0x14
++ .long 0x77
++ .uleb128 0x7
++ .long .LASF27
++ .byte 0x1e
++ .byte 0x17
++ .long 0xe3
++ .uleb128 0x7
++ .long .LASF28
++ .byte 0x1e
++ .byte 0x18
++ .long 0x162
++ .uleb128 0x3
++ .long .LASF29
++ .byte 0x8
++ .byte 0x7
++ .uleb128 0x8
++ .string "s8"
++ .byte 0x1e
++ .byte 0x27
++ .long 0x100
++ .uleb128 0x8
++ .string "u32"
++ .byte 0x1e
++ .byte 0x2e
++ .long 0x77
++ .uleb128 0x8
++ .string "s64"
++ .byte 0x1e
++ .byte 0x30
++ .long 0xe3
++ .uleb128 0x8
++ .string "u64"
++ .byte 0x1e
++ .byte 0x31
++ .long 0x162
++ .uleb128 0x7
++ .long .LASF30
++ .byte 0x1c
++ .byte 0x13
++ .long 0x141
++ .uleb128 0x7
++ .long .LASF31
++ .byte 0x1c
++ .byte 0x16
++ .long 0x194
++ .uleb128 0x7
++ .long .LASF32
++ .byte 0x1c
++ .byte 0x18
++ .long 0x48
++ .uleb128 0x7
++ .long .LASF33
++ .byte 0x1c
++ .byte 0x1b
++ .long 0x61
++ .uleb128 0x7
++ .long .LASF34
++ .byte 0x1c
++ .byte 0x1f
++ .long 0x9f
++ .uleb128 0x7
++ .long .LASF35
++ .byte 0x1c
++ .byte 0x20
++ .long 0xaa
++ .uleb128 0x3
++ .long .LASF36
++ .byte 0x1
++ .byte 0x2
++ .uleb128 0x7
++ .long .LASF37
++ .byte 0x1c
++ .byte 0x26
++ .long 0xc2
++ .uleb128 0x7
++ .long .LASF38
++ .byte 0x1c
++ .byte 0x27
++ .long 0xcd
++ .uleb128 0x7
++ .long .LASF39
++ .byte 0x1c
++ .byte 0x3a
++ .long 0xd8
++ .uleb128 0x7
++ .long .LASF40
++ .byte 0x1c
++ .byte 0x43
++ .long 0x6c
++ .uleb128 0x7
++ .long .LASF41
++ .byte 0x1c
++ .byte 0x48
++ .long 0x7e
++ .uleb128 0x7
++ .long .LASF42
++ .byte 0x1c
++ .byte 0x52
++ .long 0x89
++ .uleb128 0x7
++ .long .LASF43
++ .byte 0x1c
++ .byte 0x57
++ .long 0x94
++ .uleb128 0x7
++ .long .LASF44
++ .byte 0x1c
++ .byte 0x8d
++ .long 0x189
++ .uleb128 0x7
++ .long .LASF45
++ .byte 0x1c
++ .byte 0x98
++ .long 0x2f
++ .uleb128 0x7
++ .long .LASF46
++ .byte 0x1c
++ .byte 0xc1
++ .long 0x77
++ .uleb128 0x7
++ .long .LASF47
++ .byte 0x1c
++ .byte 0xc4
++ .long 0x189
++ .uleb128 0x9
++ .long 0x297
++ .byte 0x10
++ .byte 0xf
++ .byte 0x12
++ .uleb128 0xa
++ .long .LASF48
++ .byte 0xf
++ .byte 0x13
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF49
++ .byte 0xf
++ .byte 0x13
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF50
++ .byte 0xf
++ .byte 0x13
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF51
++ .byte 0xf
++ .byte 0x13
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .byte 0x0
++ .uleb128 0x9
++ .long 0x2d8
++ .byte 0x14
++ .byte 0xf
++ .byte 0x16
++ .uleb128 0xa
++ .long .LASF52
++ .byte 0xf
++ .byte 0x17
++ .long 0x2d8
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xb
++ .string "val"
++ .byte 0xf
++ .byte 0x18
++ .long 0x173
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF53
++ .byte 0xf
++ .byte 0x19
++ .long 0x173
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF54
++ .byte 0xf
++ .byte 0x1a
++ .long 0x189
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x173
++ .uleb128 0xc
++ .long 0x2f7
++ .byte 0x14
++ .byte 0xf
++ .byte 0x11
++ .uleb128 0xd
++ .long 0x256
++ .uleb128 0xe
++ .long .LASF55
++ .byte 0xf
++ .byte 0x1b
++ .long 0x297
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x319
++ .long .LASF59
++ .byte 0x18
++ .byte 0xf
++ .byte 0xf
++ .uleb128 0xb
++ .string "fn"
++ .byte 0xf
++ .byte 0x10
++ .long 0x32f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x10
++ .long 0x2de
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .byte 0x0
++ .uleb128 0x11
++ .long 0x329
++ .byte 0x1
++ .long 0x5a
++ .uleb128 0x6
++ .long 0x329
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x2f7
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x319
++ .uleb128 0x9
++ .long 0x34c
++ .byte 0x4
++ .byte 0x14
++ .byte 0x5b
++ .uleb128 0xb
++ .string "pgd"
++ .byte 0x14
++ .byte 0x5b
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .byte 0x0
++ .uleb128 0x7
++ .long .LASF56
++ .byte 0x14
++ .byte 0x5b
++ .long 0x335
++ .uleb128 0x9
++ .long 0x36e
++ .byte 0x4
++ .byte 0x14
++ .byte 0x5c
++ .uleb128 0xa
++ .long .LASF57
++ .byte 0x14
++ .byte 0x5c
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .byte 0x0
++ .uleb128 0x7
++ .long .LASF58
++ .byte 0x14
++ .byte 0x5c
++ .long 0x357
++ .uleb128 0xf
++ .long 0x4fa
++ .long .LASF60
++ .byte 0x54
++ .byte 0x49
++ .byte 0x48
++ .uleb128 0xb
++ .string "ebx"
++ .byte 0x49
++ .byte 0x4c
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xb
++ .string "ecx"
++ .byte 0x49
++ .byte 0x4d
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xb
++ .string "edx"
++ .byte 0x49
++ .byte 0x4e
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xb
++ .string "esi"
++ .byte 0x49
++ .byte 0x4f
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xb
++ .string "edi"
++ .byte 0x49
++ .byte 0x50
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xb
++ .string "ebp"
++ .byte 0x49
++ .byte 0x51
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0xb
++ .string "eax"
++ .byte 0x49
++ .byte 0x52
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0xa
++ .long .LASF61
++ .byte 0x49
++ .byte 0x53
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0xa
++ .long .LASF62
++ .byte 0x49
++ .byte 0x54
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0xa
++ .long .LASF63
++ .byte 0x49
++ .byte 0x55
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x24
++ .uleb128 0xa
++ .long .LASF64
++ .byte 0x49
++ .byte 0x56
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .uleb128 0xa
++ .long .LASF65
++ .byte 0x49
++ .byte 0x57
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x2c
++ .uleb128 0xb
++ .string "eip"
++ .byte 0x49
++ .byte 0x58
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x30
++ .uleb128 0xb
++ .string "cs"
++ .byte 0x49
++ .byte 0x59
++ .long 0x53
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x34
++ .uleb128 0xa
++ .long .LASF66
++ .byte 0x49
++ .byte 0x59
++ .long 0x53
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x36
++ .uleb128 0xa
++ .long .LASF67
++ .byte 0x49
++ .byte 0x5a
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x38
++ .uleb128 0xb
++ .string "esp"
++ .byte 0x49
++ .byte 0x5b
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x3c
++ .uleb128 0xb
++ .string "ss"
++ .byte 0x49
++ .byte 0x5c
++ .long 0x53
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x40
++ .uleb128 0xa
++ .long .LASF68
++ .byte 0x49
++ .byte 0x5c
++ .long 0x53
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x42
++ .uleb128 0xb
++ .string "es"
++ .byte 0x49
++ .byte 0x60
++ .long 0x53
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x44
++ .uleb128 0xa
++ .long .LASF69
++ .byte 0x49
++ .byte 0x60
++ .long 0x53
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x46
++ .uleb128 0xb
++ .string "ds"
++ .byte 0x49
++ .byte 0x61
++ .long 0x53
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x48
++ .uleb128 0xa
++ .long .LASF70
++ .byte 0x49
++ .byte 0x61
++ .long 0x53
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4a
++ .uleb128 0xb
++ .string "fs"
++ .byte 0x49
++ .byte 0x62
++ .long 0x53
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4c
++ .uleb128 0xa
++ .long .LASF71
++ .byte 0x49
++ .byte 0x62
++ .long 0x53
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4e
++ .uleb128 0xb
++ .string "gs"
++ .byte 0x49
++ .byte 0x63
++ .long 0x53
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x50
++ .uleb128 0xa
++ .long .LASF72
++ .byte 0x49
++ .byte 0x63
++ .long 0x53
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x52
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x515
++ .long .LASF73
++ .byte 0x20
++ .byte 0x49
++ .byte 0x66
++ .uleb128 0xa
++ .long .LASF74
++ .byte 0x49
++ .byte 0x67
++ .long 0x515
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x525
++ .long 0x2f
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x7
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x587
++ .long .LASF75
++ .byte 0xa0
++ .byte 0x49
++ .byte 0x6a
++ .uleb128 0xa
++ .long .LASF76
++ .byte 0x49
++ .byte 0x6b
++ .long 0x379
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF53
++ .byte 0x49
++ .byte 0x6c
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x54
++ .uleb128 0xa
++ .long .LASF77
++ .byte 0x49
++ .byte 0x6d
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x58
++ .uleb128 0xa
++ .long .LASF78
++ .byte 0x49
++ .byte 0x6e
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x5c
++ .uleb128 0xa
++ .long .LASF79
++ .byte 0x49
++ .byte 0x6f
++ .long 0x4fa
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x60
++ .uleb128 0xa
++ .long .LASF80
++ .byte 0x49
++ .byte 0x70
++ .long 0x4fa
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x80
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x674
++ .long .LASF81
++ .byte 0x40
++ .byte 0x69
++ .byte 0x9
++ .uleb128 0xb
++ .string "ebx"
++ .byte 0x69
++ .byte 0xa
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xb
++ .string "ecx"
++ .byte 0x69
++ .byte 0xb
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xb
++ .string "edx"
++ .byte 0x69
++ .byte 0xc
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xb
++ .string "esi"
++ .byte 0x69
++ .byte 0xd
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xb
++ .string "edi"
++ .byte 0x69
++ .byte 0xe
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xb
++ .string "ebp"
++ .byte 0x69
++ .byte 0xf
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0xb
++ .string "eax"
++ .byte 0x69
++ .byte 0x10
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0xb
++ .string "xds"
++ .byte 0x69
++ .byte 0x11
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0xb
++ .string "xes"
++ .byte 0x69
++ .byte 0x12
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0xb
++ .string "xfs"
++ .byte 0x69
++ .byte 0x13
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x24
++ .uleb128 0xa
++ .long .LASF65
++ .byte 0x69
++ .byte 0x15
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .uleb128 0xb
++ .string "eip"
++ .byte 0x69
++ .byte 0x16
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x2c
++ .uleb128 0xb
++ .string "xcs"
++ .byte 0x69
++ .byte 0x17
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x30
++ .uleb128 0xa
++ .long .LASF67
++ .byte 0x69
++ .byte 0x18
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x34
++ .uleb128 0xb
++ .string "esp"
++ .byte 0x69
++ .byte 0x19
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x38
++ .uleb128 0xb
++ .string "xss"
++ .byte 0x69
++ .byte 0x1a
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x3c
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x587
++ .uleb128 0xf
++ .long 0x7ad
++ .long .LASF82
++ .byte 0x54
++ .byte 0x48
++ .byte 0xd
++ .uleb128 0xa
++ .long .LASF83
++ .byte 0x48
++ .byte 0xe
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF84
++ .byte 0x48
++ .byte 0xf
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF85
++ .byte 0x48
++ .byte 0x10
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF86
++ .byte 0x48
++ .byte 0x11
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF87
++ .byte 0x48
++ .byte 0x12
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xa
++ .long .LASF88
++ .byte 0x48
++ .byte 0x13
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0xa
++ .long .LASF89
++ .byte 0x48
++ .byte 0x14
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0xa
++ .long .LASF90
++ .byte 0x48
++ .byte 0x15
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0xa
++ .long .LASF91
++ .byte 0x48
++ .byte 0x16
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0xa
++ .long .LASF92
++ .byte 0x48
++ .byte 0x17
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x24
++ .uleb128 0xa
++ .long .LASF93
++ .byte 0x48
++ .byte 0x18
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .uleb128 0xa
++ .long .LASF94
++ .byte 0x48
++ .byte 0x19
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x2c
++ .uleb128 0xa
++ .long .LASF95
++ .byte 0x48
++ .byte 0x1a
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x30
++ .uleb128 0xa
++ .long .LASF96
++ .byte 0x48
++ .byte 0x1b
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x34
++ .uleb128 0xa
++ .long .LASF97
++ .byte 0x48
++ .byte 0x1c
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x38
++ .uleb128 0xa
++ .long .LASF98
++ .byte 0x48
++ .byte 0x1d
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x3c
++ .uleb128 0xa
++ .long .LASF99
++ .byte 0x48
++ .byte 0x1e
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x40
++ .uleb128 0xa
++ .long .LASF100
++ .byte 0x48
++ .byte 0x1f
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x44
++ .uleb128 0xa
++ .long .LASF101
++ .byte 0x48
++ .byte 0x20
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x48
++ .uleb128 0xa
++ .long .LASF102
++ .byte 0x48
++ .byte 0x21
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4c
++ .uleb128 0xa
++ .long .LASF103
++ .byte 0x48
++ .byte 0x22
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x50
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x7f2
++ .long .LASF104
++ .byte 0xc
++ .byte 0x2f
++ .byte 0xa
++ .uleb128 0xa
++ .long .LASF105
++ .byte 0x2f
++ .byte 0xb
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF106
++ .byte 0x2f
++ .byte 0xd
++ .long 0x7f2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF107
++ .byte 0x2f
++ .byte 0xe
++ .long 0x53
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF53
++ .byte 0x2f
++ .byte 0x10
++ .long 0x53
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xa
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7f8
++ .uleb128 0x14
++ .long 0xbb
++ .uleb128 0x15
++ .long 0x8dc
++ .long .LASF108
++ .byte 0x40
++ .byte 0x38
++ .value 0x154
++ .uleb128 0x16
++ .long .LASF109
++ .byte 0x38
++ .value 0x15b
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF110
++ .byte 0x38
++ .value 0x15c
++ .long 0x8dc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x16
++ .long .LASF111
++ .byte 0x38
++ .value 0x15d
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0x16
++ .long .LASF112
++ .byte 0x38
++ .value 0x15e
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0x16
++ .long .LASF113
++ .byte 0x38
++ .value 0x15f
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0x16
++ .long .LASF114
++ .byte 0x38
++ .value 0x160
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0x16
++ .long .LASF115
++ .byte 0x38
++ .value 0x161
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0x16
++ .long .LASF116
++ .byte 0x38
++ .value 0x162
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x24
++ .uleb128 0x16
++ .long .LASF117
++ .byte 0x38
++ .value 0x163
++ .long 0x53
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .uleb128 0x17
++ .string "pad"
++ .byte 0x38
++ .value 0x164
++ .long 0x53
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x2a
++ .uleb128 0x16
++ .long .LASF118
++ .byte 0x38
++ .value 0x165
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x2c
++ .uleb128 0x16
++ .long .LASF119
++ .byte 0x38
++ .value 0x166
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x30
++ .uleb128 0x16
++ .long .LASF120
++ .byte 0x38
++ .value 0x167
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x34
++ .uleb128 0x17
++ .string "_f"
++ .byte 0x38
++ .value 0x168
++ .long 0x8ec
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x38
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x8ec
++ .long 0x2f
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x2
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x8fc
++ .long 0xbb
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x7
++ .byte 0x0
++ .uleb128 0x9
++ .long 0x913
++ .byte 0x4
++ .byte 0x13
++ .byte 0x58
++ .uleb128 0xa
++ .long .LASF121
++ .byte 0x13
++ .byte 0x58
++ .long 0x913
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x923
++ .long 0x2f
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x7
++ .long .LASF122
++ .byte 0x13
++ .byte 0x58
++ .long 0x8fc
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x934
++ .uleb128 0x18
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x940
++ .uleb128 0x19
++ .byte 0x1
++ .uleb128 0xf
++ .long 0x967
++ .long .LASF123
++ .byte 0x8
++ .byte 0x8
++ .byte 0x1d
++ .uleb128 0xb
++ .string "a"
++ .byte 0x8
++ .byte 0x1e
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xb
++ .string "b"
++ .byte 0x8
++ .byte 0x1e
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x977
++ .long 0xbb
++ .uleb128 0x13
++ .long 0x28
++ .byte 0xf
++ .byte 0x0
++ .uleb128 0xf
++ .long 0xa02
++ .long .LASF124
++ .byte 0x70
++ .byte 0x8
++ .byte 0xf0
++ .uleb128 0xb
++ .string "cwd"
++ .byte 0x8
++ .byte 0xf1
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xb
++ .string "swd"
++ .byte 0x8
++ .byte 0xf2
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xb
++ .string "twd"
++ .byte 0x8
++ .byte 0xf3
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xb
++ .string "fip"
++ .byte 0x8
++ .byte 0xf4
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xb
++ .string "fcs"
++ .byte 0x8
++ .byte 0xf5
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xb
++ .string "foo"
++ .byte 0x8
++ .byte 0xf6
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0xb
++ .string "fos"
++ .byte 0x8
++ .byte 0xf7
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0xa
++ .long .LASF125
++ .byte 0x8
++ .byte 0xf8
++ .long 0xa02
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0xa
++ .long .LASF126
++ .byte 0x8
++ .byte 0xf9
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x6c
++ .byte 0x0
++ .uleb128 0x12
++ .long 0xa12
++ .long 0x5a
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x13
++ .byte 0x0
++ .uleb128 0x1a
++ .long 0xae2
++ .long .LASF127
++ .value 0x200
++ .byte 0x8
++ .byte 0xfc
++ .uleb128 0xb
++ .string "cwd"
++ .byte 0x8
++ .byte 0xfd
++ .long 0x53
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xb
++ .string "swd"
++ .byte 0x8
++ .byte 0xfe
++ .long 0x53
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x2
++ .uleb128 0xb
++ .string "twd"
++ .byte 0x8
++ .byte 0xff
++ .long 0x53
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x17
++ .string "fop"
++ .byte 0x8
++ .value 0x100
++ .long 0x53
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x6
++ .uleb128 0x17
++ .string "fip"
++ .byte 0x8
++ .value 0x101
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0x17
++ .string "fcs"
++ .byte 0x8
++ .value 0x102
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0x17
++ .string "foo"
++ .byte 0x8
++ .value 0x103
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0x17
++ .string "fos"
++ .byte 0x8
++ .value 0x104
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0x16
++ .long .LASF128
++ .byte 0x8
++ .value 0x105
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0x16
++ .long .LASF129
++ .byte 0x8
++ .value 0x106
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0x16
++ .long .LASF125
++ .byte 0x8
++ .value 0x107
++ .long 0xae2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0x16
++ .long .LASF130
++ .byte 0x8
++ .value 0x108
++ .long 0xae2
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xa0
++ .uleb128 0x16
++ .long .LASF131
++ .byte 0x8
++ .value 0x109
++ .long 0xaf2
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x120
++ .byte 0x0
++ .uleb128 0x12
++ .long 0xaf2
++ .long 0x5a
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x1f
++ .byte 0x0
++ .uleb128 0x12
++ .long 0xb02
++ .long 0x5a
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x37
++ .byte 0x0
++ .uleb128 0x15
++ .long 0xbff
++ .long .LASF132
++ .byte 0x7c
++ .byte 0x8
++ .value 0x10c
++ .uleb128 0x17
++ .string "cwd"
++ .byte 0x8
++ .value 0x10d
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x17
++ .string "swd"
++ .byte 0x8
++ .value 0x10e
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x17
++ .string "twd"
++ .byte 0x8
++ .value 0x10f
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0x17
++ .string "fip"
++ .byte 0x8
++ .value 0x110
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0x17
++ .string "fcs"
++ .byte 0x8
++ .value 0x111
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0x17
++ .string "foo"
++ .byte 0x8
++ .value 0x112
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0x17
++ .string "fos"
++ .byte 0x8
++ .value 0x113
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0x16
++ .long .LASF125
++ .byte 0x8
++ .value 0x114
++ .long 0xa02
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0x16
++ .long .LASF133
++ .byte 0x8
++ .value 0x115
++ .long 0x112
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x6c
++ .uleb128 0x16
++ .long .LASF134
++ .byte 0x8
++ .value 0x115
++ .long 0x112
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x6d
++ .uleb128 0x16
++ .long .LASF135
++ .byte 0x8
++ .value 0x115
++ .long 0x112
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x6e
++ .uleb128 0x16
++ .long .LASF136
++ .byte 0x8
++ .value 0x115
++ .long 0x112
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x6f
++ .uleb128 0x17
++ .string "rm"
++ .byte 0x8
++ .value 0x115
++ .long 0x112
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x70
++ .uleb128 0x16
++ .long .LASF137
++ .byte 0x8
++ .value 0x115
++ .long 0x112
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x71
++ .uleb128 0x16
++ .long .LASF82
++ .byte 0x8
++ .value 0x116
++ .long 0xbff
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x74
++ .uleb128 0x16
++ .long .LASF138
++ .byte 0x8
++ .value 0x117
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x78
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x67a
++ .uleb128 0x1b
++ .long 0xc38
++ .long .LASF472
++ .value 0x200
++ .byte 0x8
++ .value 0x11a
++ .uleb128 0x1c
++ .long .LASF139
++ .byte 0x8
++ .value 0x11b
++ .long 0x977
++ .uleb128 0x1c
++ .long .LASF140
++ .byte 0x8
++ .value 0x11c
++ .long 0xa12
++ .uleb128 0x1c
++ .long .LASF141
++ .byte 0x8
++ .value 0x11d
++ .long 0xb02
++ .byte 0x0
++ .uleb128 0x1d
++ .long 0xc51
++ .byte 0x4
++ .byte 0x8
++ .value 0x120
++ .uleb128 0x17
++ .string "seg"
++ .byte 0x8
++ .value 0x121
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .byte 0x0
++ .uleb128 0x1e
++ .long .LASF142
++ .byte 0x8
++ .value 0x122
++ .long 0xc38
++ .uleb128 0x1f
++ .long 0xdbe
++ .long .LASF143
++ .value 0x290
++ .byte 0x8
++ .value 0x124
++ .uleb128 0x16
++ .long .LASF144
++ .byte 0x8
++ .value 0x15e
++ .long 0xdbe
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF145
++ .byte 0x8
++ .value 0x15f
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0x16
++ .long .LASF146
++ .byte 0x8
++ .value 0x160
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0x17
++ .string "eip"
++ .byte 0x8
++ .value 0x161
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0x17
++ .string "esp"
++ .byte 0x8
++ .value 0x162
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x24
++ .uleb128 0x17
++ .string "fs"
++ .byte 0x8
++ .value 0x163
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .uleb128 0x17
++ .string "gs"
++ .byte 0x8
++ .value 0x164
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x2c
++ .uleb128 0x16
++ .long .LASF147
++ .byte 0x8
++ .value 0x166
++ .long 0x515
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x30
++ .uleb128 0x17
++ .string "cr2"
++ .byte 0x8
++ .value 0x168
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x50
++ .uleb128 0x16
++ .long .LASF148
++ .byte 0x8
++ .value 0x168
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x54
++ .uleb128 0x16
++ .long .LASF149
++ .byte 0x8
++ .value 0x168
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x58
++ .uleb128 0x16
++ .long .LASF150
++ .byte 0x8
++ .value 0x16a
++ .long 0xc05
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x60
++ .uleb128 0x16
++ .long .LASF151
++ .byte 0x8
++ .value 0x16c
++ .long 0xdce
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x260
++ .uleb128 0x16
++ .long .LASF77
++ .byte 0x8
++ .value 0x16d
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x264
++ .uleb128 0x16
++ .long .LASF152
++ .byte 0x8
++ .value 0x16e
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x268
++ .uleb128 0x16
++ .long .LASF153
++ .byte 0x8
++ .value 0x16e
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x26c
++ .uleb128 0x16
++ .long .LASF154
++ .byte 0x8
++ .value 0x16e
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x270
++ .uleb128 0x16
++ .long .LASF155
++ .byte 0x8
++ .value 0x16f
++ .long 0x77
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x274
++ .uleb128 0x16
++ .long .LASF156
++ .byte 0x8
++ .value 0x16f
++ .long 0x77
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x278
++ .uleb128 0x16
++ .long .LASF157
++ .byte 0x8
++ .value 0x171
++ .long 0xdd4
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x27c
++ .uleb128 0x16
++ .long .LASF158
++ .byte 0x8
++ .value 0x172
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x280
++ .uleb128 0x16
++ .long .LASF159
++ .byte 0x8
++ .value 0x174
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x284
++ .byte 0x0
++ .uleb128 0x12
++ .long 0xdce
++ .long 0x942
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x2
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x525
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x2f
++ .uleb128 0xf
++ .long 0xe81
++ .long .LASF160
++ .byte 0x3c
++ .byte 0x10
++ .byte 0x1b
++ .uleb128 0xa
++ .long .LASF161
++ .byte 0x10
++ .byte 0x1c
++ .long 0x15f9
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF162
++ .byte 0x10
++ .byte 0x1d
++ .long 0x1605
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF53
++ .byte 0x10
++ .byte 0x1e
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF126
++ .byte 0x10
++ .byte 0x1f
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xb
++ .string "cpu"
++ .byte 0x10
++ .byte 0x20
++ .long 0x141
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xa
++ .long .LASF163
++ .byte 0x10
++ .byte 0x21
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0xa
++ .long .LASF164
++ .byte 0x10
++ .byte 0x24
++ .long 0xc51
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0xa
++ .long .LASF165
++ .byte 0x10
++ .byte 0x28
++ .long 0x160b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0xa
++ .long .LASF59
++ .byte 0x10
++ .byte 0x29
++ .long 0x2f7
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0xa
++ .long .LASF166
++ .byte 0x10
++ .byte 0x2b
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x38
++ .uleb128 0xa
++ .long .LASF167
++ .byte 0x10
++ .byte 0x2e
++ .long 0x160d
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x3c
++ .byte 0x0
++ .uleb128 0x1a
++ .long 0x15f9
++ .long .LASF168
++ .value 0x510
++ .byte 0x11
++ .byte 0x13
++ .uleb128 0x16
++ .long .LASF169
++ .byte 0xb
++ .value 0x336
++ .long 0x43e6
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF170
++ .byte 0xb
++ .value 0x337
++ .long 0x160b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x16
++ .long .LASF171
++ .byte 0xb
++ .value 0x338
++ .long 0x16c4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0x16
++ .long .LASF53
++ .byte 0xb
++ .value 0x339
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0x16
++ .long .LASF172
++ .byte 0xb
++ .value 0x33a
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0x16
++ .long .LASF173
++ .byte 0xb
++ .value 0x33c
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0x16
++ .long .LASF174
++ .byte 0xb
++ .value 0x343
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0x16
++ .long .LASF175
++ .byte 0xb
++ .value 0x344
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0x16
++ .long .LASF176
++ .byte 0xb
++ .value 0x344
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0x16
++ .long .LASF177
++ .byte 0xb
++ .value 0x344
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x24
++ .uleb128 0x16
++ .long .LASF178
++ .byte 0xb
++ .value 0x345
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .uleb128 0x16
++ .long .LASF179
++ .byte 0xb
++ .value 0x346
++ .long 0x43f1
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x30
++ .uleb128 0x16
++ .long .LASF180
++ .byte 0xb
++ .value 0x348
++ .long 0x53
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x34
++ .uleb128 0x16
++ .long .LASF181
++ .byte 0xb
++ .value 0x34c
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x38
++ .uleb128 0x16
++ .long .LASF182
++ .byte 0xb
++ .value 0x34d
++ .long 0x162
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x3c
++ .uleb128 0x16
++ .long .LASF183
++ .byte 0xb
++ .value 0x34d
++ .long 0x162
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x44
++ .uleb128 0x16
++ .long .LASF184
++ .byte 0xb
++ .value 0x34f
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4c
++ .uleb128 0x16
++ .long .LASF185
++ .byte 0xb
++ .value 0x34f
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x50
++ .uleb128 0x16
++ .long .LASF186
++ .byte 0xb
++ .value 0x352
++ .long 0x162
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x54
++ .uleb128 0x16
++ .long .LASF187
++ .byte 0xb
++ .value 0x353
++ .long 0x43c0
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x5c
++ .uleb128 0x16
++ .long .LASF188
++ .byte 0xb
++ .value 0x355
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x60
++ .uleb128 0x16
++ .long .LASF189
++ .byte 0xb
++ .value 0x356
++ .long 0x923
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x64
++ .uleb128 0x16
++ .long .LASF190
++ .byte 0xb
++ .value 0x357
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x68
++ .uleb128 0x16
++ .long .LASF191
++ .byte 0xb
++ .value 0x357
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x6c
++ .uleb128 0x16
++ .long .LASF192
++ .byte 0xb
++ .value 0x35d
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x70
++ .uleb128 0x16
++ .long .LASF193
++ .byte 0xb
++ .value 0x362
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x78
++ .uleb128 0x16
++ .long .LASF194
++ .byte 0xb
++ .value 0x363
++ .long 0x17bc
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x80
++ .uleb128 0x17
++ .string "mm"
++ .byte 0xb
++ .value 0x365
++ .long 0x36ad
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x88
++ .uleb128 0x16
++ .long .LASF195
++ .byte 0xb
++ .value 0x365
++ .long 0x36ad
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x8c
++ .uleb128 0x16
++ .long .LASF196
++ .byte 0xb
++ .value 0x368
++ .long 0x43fd
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x90
++ .uleb128 0x16
++ .long .LASF197
++ .byte 0xb
++ .value 0x369
++ .long 0x21
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x94
++ .uleb128 0x16
++ .long .LASF198
++ .byte 0xb
++ .value 0x36a
++ .long 0x21
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x98
++ .uleb128 0x16
++ .long .LASF199
++ .byte 0xb
++ .value 0x36a
++ .long 0x21
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x9c
++ .uleb128 0x16
++ .long .LASF200
++ .byte 0xb
++ .value 0x36b
++ .long 0x21
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xa0
++ .uleb128 0x16
++ .long .LASF201
++ .byte 0xb
++ .value 0x36d
++ .long 0x77
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xa4
++ .uleb128 0x20
++ .long .LASF202
++ .byte 0xb
++ .value 0x36e
++ .long 0x77
++ .byte 0x4
++ .byte 0x1
++ .byte 0x1f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xa8
++ .uleb128 0x17
++ .string "pid"
++ .byte 0xb
++ .value 0x36f
++ .long 0x1b5
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xac
++ .uleb128 0x16
++ .long .LASF203
++ .byte 0xb
++ .value 0x370
++ .long 0x1b5
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xb0
++ .uleb128 0x16
++ .long .LASF204
++ .byte 0xb
++ .value 0x37b
++ .long 0x15f9
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xb4
++ .uleb128 0x16
++ .long .LASF205
++ .byte 0xb
++ .value 0x37c
++ .long 0x15f9
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xb8
++ .uleb128 0x16
++ .long .LASF206
++ .byte 0xb
++ .value 0x381
++ .long 0x17bc
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xbc
++ .uleb128 0x16
++ .long .LASF207
++ .byte 0xb
++ .value 0x382
++ .long 0x17bc
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xc4
++ .uleb128 0x16
++ .long .LASF208
++ .byte 0xb
++ .value 0x383
++ .long 0x15f9
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xcc
++ .uleb128 0x16
++ .long .LASF209
++ .byte 0xb
++ .value 0x386
++ .long 0x4403
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xd0
++ .uleb128 0x16
++ .long .LASF210
++ .byte 0xb
++ .value 0x387
++ .long 0x17bc
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xf4
++ .uleb128 0x16
++ .long .LASF211
++ .byte 0xb
++ .value 0x389
++ .long 0x3ff9
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xfc
++ .uleb128 0x16
++ .long .LASF212
++ .byte 0xb
++ .value 0x38a
++ .long 0x4413
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x100
++ .uleb128 0x16
++ .long .LASF213
++ .byte 0xb
++ .value 0x38b
++ .long 0x4413
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x104
++ .uleb128 0x16
++ .long .LASF214
++ .byte 0xb
++ .value 0x38d
++ .long 0x77
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x108
++ .uleb128 0x16
++ .long .LASF215
++ .byte 0xb
++ .value 0x38e
++ .long 0x19b4
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x10c
++ .uleb128 0x16
++ .long .LASF216
++ .byte 0xb
++ .value 0x38e
++ .long 0x19b4
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x110
++ .uleb128 0x16
++ .long .LASF217
++ .byte 0xb
++ .value 0x38f
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x114
++ .uleb128 0x16
++ .long .LASF218
++ .byte 0xb
++ .value 0x38f
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x118
++ .uleb128 0x16
++ .long .LASF219
++ .byte 0xb
++ .value 0x390
++ .long 0x173b
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x11c
++ .uleb128 0x16
++ .long .LASF220
++ .byte 0xb
++ .value 0x392
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x124
++ .uleb128 0x16
++ .long .LASF221
++ .byte 0xb
++ .value 0x392
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x128
++ .uleb128 0x16
++ .long .LASF222
++ .byte 0xb
++ .value 0x394
++ .long 0x19b4
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x12c
++ .uleb128 0x16
++ .long .LASF223
++ .byte 0xb
++ .value 0x394
++ .long 0x19b4
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x130
++ .uleb128 0x16
++ .long .LASF224
++ .byte 0xb
++ .value 0x395
++ .long 0x162
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x134
++ .uleb128 0x16
++ .long .LASF225
++ .byte 0xb
++ .value 0x396
++ .long 0x4330
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x13c
++ .uleb128 0x17
++ .string "uid"
++ .byte 0xb
++ .value 0x399
++ .long 0x1dd
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x154
++ .uleb128 0x16
++ .long .LASF226
++ .byte 0xb
++ .value 0x399
++ .long 0x1dd
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x158
++ .uleb128 0x16
++ .long .LASF227
++ .byte 0xb
++ .value 0x399
++ .long 0x1dd
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x15c
++ .uleb128 0x16
++ .long .LASF228
++ .byte 0xb
++ .value 0x399
++ .long 0x1dd
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x160
++ .uleb128 0x17
++ .string "gid"
++ .byte 0xb
++ .value 0x39a
++ .long 0x1e8
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x164
++ .uleb128 0x16
++ .long .LASF229
++ .byte 0xb
++ .value 0x39a
++ .long 0x1e8
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x168
++ .uleb128 0x16
++ .long .LASF230
++ .byte 0xb
++ .value 0x39a
++ .long 0x1e8
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x16c
++ .uleb128 0x16
++ .long .LASF231
++ .byte 0xb
++ .value 0x39a
++ .long 0x1e8
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x170
++ .uleb128 0x16
++ .long .LASF232
++ .byte 0xb
++ .value 0x39b
++ .long 0x4419
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x174
++ .uleb128 0x16
++ .long .LASF233
++ .byte 0xb
++ .value 0x39c
++ .long 0x16da
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x178
++ .uleb128 0x16
++ .long .LASF234
++ .byte 0xb
++ .value 0x39c
++ .long 0x16da
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x17c
++ .uleb128 0x16
++ .long .LASF235
++ .byte 0xb
++ .value 0x39c
++ .long 0x16da
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x180
++ .uleb128 0x20
++ .long .LASF236
++ .byte 0xb
++ .value 0x39d
++ .long 0x77
++ .byte 0x4
++ .byte 0x1
++ .byte 0x1f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x184
++ .uleb128 0x16
++ .long .LASF237
++ .byte 0xb
++ .value 0x39e
++ .long 0x2729
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x188
++ .uleb128 0x16
++ .long .LASF238
++ .byte 0xb
++ .value 0x3ac
++ .long 0x112
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x18c
++ .uleb128 0x16
++ .long .LASF239
++ .byte 0xb
++ .value 0x3ad
++ .long 0x21
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x190
++ .uleb128 0x16
++ .long .LASF240
++ .byte 0xb
++ .value 0x3ae
++ .long 0x967
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x194
++ .uleb128 0x16
++ .long .LASF241
++ .byte 0xb
++ .value 0x3b3
++ .long 0x21
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x1a4
++ .uleb128 0x16
++ .long .LASF242
++ .byte 0xb
++ .value 0x3b3
++ .long 0x21
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x1a8
++ .uleb128 0x16
++ .long .LASF243
++ .byte 0xb
++ .value 0x3b6
++ .long 0x2387
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x1ac
++ .uleb128 0x16
++ .long .LASF244
++ .byte 0xb
++ .value 0x3b9
++ .long 0xc5d
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x1b0
++ .uleb128 0x17
++ .string "fs"
++ .byte 0xb
++ .value 0x3bb
++ .long 0x441f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x440
++ .uleb128 0x16
++ .long .LASF245
++ .byte 0xb
++ .value 0x3bd
++ .long 0x442b
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x444
++ .uleb128 0x16
++ .long .LASF246
++ .byte 0xb
++ .value 0x3bf
++ .long 0x4437
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x448
++ .uleb128 0x16
++ .long .LASF247
++ .byte 0xb
++ .value 0x3c1
++ .long 0x443d
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x44c
++ .uleb128 0x16
++ .long .LASF248
++ .byte 0xb
++ .value 0x3c2
++ .long 0x4443
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x450
++ .uleb128 0x16
++ .long .LASF249
++ .byte 0xb
++ .value 0x3c4
++ .long 0x23bf
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x454
++ .uleb128 0x16
++ .long .LASF250
++ .byte 0xb
++ .value 0x3c4
++ .long 0x23bf
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x45c
++ .uleb128 0x16
++ .long .LASF251
++ .byte 0xb
++ .value 0x3c5
++ .long 0x23bf
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x464
++ .uleb128 0x16
++ .long .LASF252
++ .byte 0xb
++ .value 0x3c6
++ .long 0x272f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x46c
++ .uleb128 0x16
++ .long .LASF253
++ .byte 0xb
++ .value 0x3c8
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x47c
++ .uleb128 0x16
++ .long .LASF254
++ .byte 0xb
++ .value 0x3c9
++ .long 0x1fe
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x480
++ .uleb128 0x16
++ .long .LASF255
++ .byte 0xb
++ .value 0x3ca
++ .long 0x4459
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x484
++ .uleb128 0x16
++ .long .LASF256
++ .byte 0xb
++ .value 0x3cb
++ .long 0x160b
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x488
++ .uleb128 0x16
++ .long .LASF257
++ .byte 0xb
++ .value 0x3cc
++ .long 0x445f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x48c
++ .uleb128 0x16
++ .long .LASF258
++ .byte 0xb
++ .value 0x3ce
++ .long 0x160b
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x490
++ .uleb128 0x16
++ .long .LASF259
++ .byte 0xb
++ .value 0x3cf
++ .long 0x446b
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x494
++ .uleb128 0x16
++ .long .LASF260
++ .byte 0xb
++ .value 0x3d0
++ .long 0x308d
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x498
++ .uleb128 0x16
++ .long .LASF261
++ .byte 0xb
++ .value 0x3d3
++ .long 0x173
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x49c
++ .uleb128 0x16
++ .long .LASF262
++ .byte 0xb
++ .value 0x3d4
++ .long 0x173
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x4a0
++ .uleb128 0x16
++ .long .LASF263
++ .byte 0xb
++ .value 0x3d6
++ .long 0x1680
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x4a4
++ .uleb128 0x16
++ .long .LASF264
++ .byte 0xb
++ .value 0x3d9
++ .long 0x1680
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x4a8
++ .uleb128 0x16
++ .long .LASF265
++ .byte 0xb
++ .value 0x3dd
++ .long 0x36b3
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x4ac
++ .uleb128 0x16
++ .long .LASF266
++ .byte 0xb
++ .value 0x3df
++ .long 0x4477
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x4bc
++ .uleb128 0x16
++ .long .LASF267
++ .byte 0xb
++ .value 0x3fe
++ .long 0x160b
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x4c0
++ .uleb128 0x16
++ .long .LASF268
++ .byte 0xb
++ .value 0x401
++ .long 0x4483
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x4c4
++ .uleb128 0x16
++ .long .LASF269
++ .byte 0xb
++ .value 0x401
++ .long 0x4489
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x4c8
++ .uleb128 0x16
++ .long .LASF270
++ .byte 0xb
++ .value 0x404
++ .long 0x44ab
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x4cc
++ .uleb128 0x16
++ .long .LASF271
++ .byte 0xb
++ .value 0x406
++ .long 0x4521
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x4d0
++ .uleb128 0x16
++ .long .LASF272
++ .byte 0xb
++ .value 0x408
++ .long 0x452d
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x4d4
++ .uleb128 0x16
++ .long .LASF273
++ .byte 0xb
++ .value 0x40a
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x4d8
++ .uleb128 0x16
++ .long .LASF274
++ .byte 0xb
++ .value 0x40b
++ .long 0x4533
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x4dc
++ .uleb128 0x16
++ .long .LASF275
++ .byte 0xb
++ .value 0x412
++ .long 0x18c0
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x4e0
++ .uleb128 0x16
++ .long .LASF276
++ .byte 0xb
++ .value 0x417
++ .long 0x3979
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x4e4
++ .uleb128 0x16
++ .long .LASF277
++ .byte 0xb
++ .value 0x427
++ .long 0x4539
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x4e4
++ .uleb128 0x16
++ .long .LASF278
++ .byte 0xb
++ .value 0x42b
++ .long 0x17bc
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x4e8
++ .uleb128 0x16
++ .long .LASF279
++ .byte 0xb
++ .value 0x42c
++ .long 0x4545
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x4f0
++ .uleb128 0x16
++ .long .LASF280
++ .byte 0xb
++ .value 0x42e
++ .long 0x16c4
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x4f4
++ .uleb128 0x17
++ .string "rcu"
++ .byte 0xb
++ .value 0x42f
++ .long 0x2ea8
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x4f8
++ .uleb128 0x16
++ .long .LASF281
++ .byte 0xb
++ .value 0x434
++ .long 0x4551
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x500
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0xe81
++ .uleb128 0x21
++ .long .LASF162
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x15ff
++ .uleb128 0x22
++ .byte 0x4
++ .uleb128 0x12
++ .long 0x161c
++ .long 0x107
++ .uleb128 0x23
++ .long 0x28
++ .byte 0x0
++ .uleb128 0x24
++ .long .LASF282
++ .byte 0x0
++ .byte 0x3a
++ .value 0x116
++ .uleb128 0x9
++ .long 0x163c
++ .byte 0x4
++ .byte 0xe
++ .byte 0x8
++ .uleb128 0xa
++ .long .LASF283
++ .byte 0xe
++ .byte 0x9
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .byte 0x0
++ .uleb128 0x7
++ .long .LASF284
++ .byte 0xe
++ .byte 0xa
++ .long 0x1625
++ .uleb128 0x9
++ .long 0x165e
++ .byte 0x4
++ .byte 0xe
++ .byte 0xe
++ .uleb128 0xa
++ .long .LASF285
++ .byte 0xe
++ .byte 0xf
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .byte 0x0
++ .uleb128 0x7
++ .long .LASF286
++ .byte 0xe
++ .byte 0x10
++ .long 0x1647
++ .uleb128 0x9
++ .long 0x1680
++ .byte 0x4
++ .byte 0xd
++ .byte 0x14
++ .uleb128 0xa
++ .long .LASF287
++ .byte 0xd
++ .byte 0x15
++ .long 0x163c
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .byte 0x0
++ .uleb128 0x7
++ .long .LASF288
++ .byte 0xd
++ .byte 0x20
++ .long 0x1669
++ .uleb128 0x9
++ .long 0x16a2
++ .byte 0x4
++ .byte 0xd
++ .byte 0x24
++ .uleb128 0xa
++ .long .LASF287
++ .byte 0xd
++ .byte 0x25
++ .long 0x165e
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .byte 0x0
++ .uleb128 0x7
++ .long .LASF289
++ .byte 0xd
++ .byte 0x30
++ .long 0x168b
++ .uleb128 0x9
++ .long 0x16c4
++ .byte 0x4
++ .byte 0x12
++ .byte 0x12
++ .uleb128 0xa
++ .long .LASF290
++ .byte 0x12
++ .byte 0x12
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .byte 0x0
++ .uleb128 0x7
++ .long .LASF291
++ .byte 0x12
++ .byte 0x12
++ .long 0x16ad
++ .uleb128 0x7
++ .long .LASF292
++ .byte 0x31
++ .byte 0x8d
++ .long 0x16c4
++ .uleb128 0x7
++ .long .LASF293
++ .byte 0x11
++ .byte 0x3c
++ .long 0x141
++ .uleb128 0x9
++ .long 0x170a
++ .byte 0x8
++ .byte 0x7
++ .byte 0x20
++ .uleb128 0xa
++ .long .LASF294
++ .byte 0x7
++ .byte 0x21
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF285
++ .byte 0x7
++ .byte 0x22
++ .long 0x1680
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .byte 0x0
++ .uleb128 0x7
++ .long .LASF295
++ .byte 0x7
++ .byte 0x23
++ .long 0x16e5
++ .uleb128 0xf
++ .long 0x1730
++ .long .LASF296
++ .byte 0x4
++ .byte 0x7
++ .byte 0x73
++ .uleb128 0xa
++ .long .LASF294
++ .byte 0x7
++ .byte 0x74
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .byte 0x0
++ .uleb128 0x7
++ .long .LASF297
++ .byte 0x7
++ .byte 0x75
++ .long 0x1715
++ .uleb128 0xf
++ .long 0x1764
++ .long .LASF298
++ .byte 0x8
++ .byte 0x1f
++ .byte 0xc
++ .uleb128 0xa
++ .long .LASF299
++ .byte 0x1f
++ .byte 0xd
++ .long 0x214
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF300
++ .byte 0x1f
++ .byte 0xe
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x179b
++ .long .LASF301
++ .byte 0xc
++ .byte 0x16
++ .byte 0x65
++ .uleb128 0xa
++ .long .LASF302
++ .byte 0x16
++ .byte 0x66
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF303
++ .byte 0x16
++ .byte 0x69
++ .long 0x179b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF304
++ .byte 0x16
++ .byte 0x6a
++ .long 0x179b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x1764
++ .uleb128 0xf
++ .long 0x17bc
++ .long .LASF305
++ .byte 0x4
++ .byte 0x16
++ .byte 0x6f
++ .uleb128 0xa
++ .long .LASF301
++ .byte 0x16
++ .byte 0x70
++ .long 0x179b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x17e5
++ .long .LASF306
++ .byte 0x8
++ .byte 0x2
++ .byte 0x15
++ .uleb128 0xa
++ .long .LASF307
++ .byte 0x2
++ .byte 0x16
++ .long 0x17e5
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF308
++ .byte 0x2
++ .byte 0x16
++ .long 0x17e5
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x17bc
++ .uleb128 0x15
++ .long 0x1808
++ .long .LASF309
++ .byte 0x4
++ .byte 0x2
++ .value 0x2a3
++ .uleb128 0x16
++ .long .LASF310
++ .byte 0x2
++ .value 0x2a4
++ .long 0x1834
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .byte 0x0
++ .uleb128 0x15
++ .long 0x1834
++ .long .LASF311
++ .byte 0x8
++ .byte 0x2
++ .value 0x2a4
++ .uleb128 0x16
++ .long .LASF307
++ .byte 0x2
++ .value 0x2a8
++ .long 0x1834
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF312
++ .byte 0x2
++ .value 0x2a8
++ .long 0x183a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x1808
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x1834
++ .uleb128 0x7
++ .long .LASF313
++ .byte 0x2c
++ .byte 0x1c
++ .long 0x184b
++ .uleb128 0xf
++ .long 0x1890
++ .long .LASF314
++ .byte 0x14
++ .byte 0x2c
++ .byte 0x1c
++ .uleb128 0xa
++ .long .LASF53
++ .byte 0x2c
++ .byte 0x21
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF315
++ .byte 0x2c
++ .byte 0x23
++ .long 0x160b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF316
++ .byte 0x2c
++ .byte 0x24
++ .long 0x1890
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF317
++ .byte 0x2c
++ .byte 0x25
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .byte 0x0
++ .uleb128 0x7
++ .long .LASF318
++ .byte 0x2c
++ .byte 0x1d
++ .long 0x189b
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x18a1
++ .uleb128 0x11
++ .long 0x18c0
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x18c0
++ .uleb128 0x6
++ .long 0x77
++ .uleb128 0x6
++ .long 0x21
++ .uleb128 0x6
++ .long 0x160b
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x1840
++ .uleb128 0xf
++ .long 0x18ef
++ .long .LASF319
++ .byte 0xc
++ .byte 0x2c
++ .byte 0x32
++ .uleb128 0xa
++ .long .LASF285
++ .byte 0x2c
++ .byte 0x33
++ .long 0x1680
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF317
++ .byte 0x2c
++ .byte 0x34
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .byte 0x0
++ .uleb128 0x7
++ .long .LASF320
++ .byte 0x2c
++ .byte 0x36
++ .long 0x18c6
++ .uleb128 0xf
++ .long 0x1931
++ .long .LASF321
++ .byte 0x10
++ .byte 0x21
++ .byte 0x13
++ .uleb128 0xa
++ .long .LASF322
++ .byte 0x22
++ .byte 0x38
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF323
++ .byte 0x22
++ .byte 0x3f
++ .long 0x1680
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF324
++ .byte 0x22
++ .byte 0x40
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x1968
++ .long .LASF325
++ .byte 0x14
++ .byte 0x3d
++ .byte 0x2c
++ .uleb128 0xa
++ .long .LASF322
++ .byte 0x3d
++ .byte 0x2d
++ .long 0x16c4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF326
++ .byte 0x3d
++ .byte 0x2e
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF327
++ .byte 0x3d
++ .byte 0x2f
++ .long 0x18ef
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .byte 0x0
++ .uleb128 0x9
++ .long 0x19a9
++ .byte 0x20
++ .byte 0x43
++ .byte 0xb
++ .uleb128 0xa
++ .long .LASF328
++ .byte 0x43
++ .byte 0xc
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xb
++ .string "sem"
++ .byte 0x43
++ .byte 0xd
++ .long 0x1931
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xb
++ .string "ldt"
++ .byte 0x43
++ .byte 0xe
++ .long 0x160b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0xa
++ .long .LASF329
++ .byte 0x43
++ .byte 0xf
++ .long 0x160b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .byte 0x0
++ .uleb128 0x7
++ .long .LASF330
++ .byte 0x43
++ .byte 0x10
++ .long 0x1968
++ .uleb128 0x7
++ .long .LASF331
++ .byte 0x45
++ .byte 0x7
++ .long 0x2f
++ .uleb128 0x7
++ .long .LASF332
++ .byte 0x45
++ .byte 0x17
++ .long 0x189
++ .uleb128 0x12
++ .long 0x19da
++ .long 0xbb
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x3
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x1a81
++ .long .LASF333
++ .byte 0x2c
++ .byte 0x65
++ .byte 0x22
++ .uleb128 0xa
++ .long .LASF334
++ .byte 0x65
++ .byte 0x23
++ .long 0x19ca
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF335
++ .byte 0x65
++ .byte 0x25
++ .long 0x53
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF336
++ .byte 0x65
++ .byte 0x26
++ .long 0xbb
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x6
++ .uleb128 0xa
++ .long .LASF337
++ .byte 0x65
++ .byte 0x27
++ .long 0xbb
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x7
++ .uleb128 0xa
++ .long .LASF338
++ .byte 0x65
++ .byte 0x28
++ .long 0x8ec
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF339
++ .byte 0x65
++ .byte 0x29
++ .long 0x1a81
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xa
++ .long .LASF340
++ .byte 0x65
++ .byte 0x2a
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0xa
++ .long .LASF341
++ .byte 0x65
++ .byte 0x2b
++ .long 0x53
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0xa
++ .long .LASF342
++ .byte 0x65
++ .byte 0x2c
++ .long 0x53
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x22
++ .uleb128 0xa
++ .long .LASF343
++ .byte 0x65
++ .byte 0x2d
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x24
++ .uleb128 0xa
++ .long .LASF344
++ .byte 0x65
++ .byte 0x2e
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x1a91
++ .long 0xbb
++ .uleb128 0x13
++ .long 0x28
++ .byte 0xb
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x1b00
++ .long .LASF345
++ .byte 0x14
++ .byte 0x65
++ .byte 0x3b
++ .uleb128 0xa
++ .long .LASF346
++ .byte 0x65
++ .byte 0x3c
++ .long 0x112
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF347
++ .byte 0x65
++ .byte 0x3d
++ .long 0x112
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1
++ .uleb128 0xa
++ .long .LASF348
++ .byte 0x65
++ .byte 0x3e
++ .long 0x112
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x2
++ .uleb128 0xa
++ .long .LASF349
++ .byte 0x65
++ .byte 0x3f
++ .long 0x112
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x3
++ .uleb128 0xa
++ .long .LASF350
++ .byte 0x65
++ .byte 0x42
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF351
++ .byte 0x65
++ .byte 0x46
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF352
++ .byte 0x65
++ .byte 0x47
++ .long 0x1b00
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x1b10
++ .long 0x2f
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x1
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x1b47
++ .long .LASF353
++ .byte 0x8
++ .byte 0x65
++ .byte 0x4b
++ .uleb128 0xa
++ .long .LASF346
++ .byte 0x65
++ .byte 0x4c
++ .long 0x112
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF354
++ .byte 0x65
++ .byte 0x4d
++ .long 0x112
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1
++ .uleb128 0xa
++ .long .LASF355
++ .byte 0x65
++ .byte 0x4e
++ .long 0x1b47
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x2
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x1b57
++ .long 0x112
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x5
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x1bc6
++ .long .LASF356
++ .byte 0x8
++ .byte 0x65
++ .byte 0x9d
++ .uleb128 0xa
++ .long .LASF346
++ .byte 0x65
++ .byte 0x9e
++ .long 0x112
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF357
++ .byte 0x65
++ .byte 0x9f
++ .long 0x112
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1
++ .uleb128 0xa
++ .long .LASF358
++ .byte 0x65
++ .byte 0xa0
++ .long 0x112
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x2
++ .uleb128 0xa
++ .long .LASF359
++ .byte 0x65
++ .byte 0xa1
++ .long 0x112
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x3
++ .uleb128 0xa
++ .long .LASF360
++ .byte 0x65
++ .byte 0xa2
++ .long 0x112
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF361
++ .byte 0x65
++ .byte 0xa3
++ .long 0x112
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x5
++ .uleb128 0xa
++ .long .LASF362
++ .byte 0x65
++ .byte 0xa4
++ .long 0x53
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x6
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x1be1
++ .long .LASF363
++ .byte 0x20
++ .byte 0x64
++ .byte 0x27
++ .uleb128 0xa
++ .long .LASF364
++ .byte 0x64
++ .byte 0x28
++ .long 0x515
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .byte 0x0
++ .uleb128 0x7
++ .long .LASF365
++ .byte 0x64
++ .byte 0x2b
++ .long 0x1bc6
++ .uleb128 0xf
++ .long 0x1c07
++ .long .LASF366
++ .byte 0x4
++ .byte 0x5c
++ .byte 0xca
++ .uleb128 0xa
++ .long .LASF367
++ .byte 0x5c
++ .byte 0xcb
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .byte 0x0
++ .uleb128 0x7
++ .long .LASF368
++ .byte 0x5c
++ .byte 0xcc
++ .long 0x1bec
++ .uleb128 0x15
++ .long 0x1c8f
++ .long .LASF369
++ .byte 0x1c
++ .byte 0x5c
++ .value 0x109
++ .uleb128 0x16
++ .long .LASF370
++ .byte 0x5c
++ .value 0x10a
++ .long 0x1c07
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x20
++ .long .LASF371
++ .byte 0x5c
++ .value 0x10b
++ .long 0x77
++ .byte 0x4
++ .byte 0x1
++ .byte 0x1f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x20
++ .long .LASF372
++ .byte 0x5c
++ .value 0x10d
++ .long 0x77
++ .byte 0x4
++ .byte 0x1
++ .byte 0x1e
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x16
++ .long .LASF373
++ .byte 0x5c
++ .value 0x10e
++ .long 0x1c07
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0x16
++ .long .LASF374
++ .byte 0x5c
++ .value 0x10f
++ .long 0x160b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0x16
++ .long .LASF375
++ .byte 0x5c
++ .value 0x110
++ .long 0x1e7d
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0x16
++ .long .LASF376
++ .byte 0x5c
++ .value 0x111
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .byte 0x0
++ .uleb128 0x1a
++ .long 0x1e7d
++ .long .LASF377
++ .value 0x16c
++ .byte 0x5c
++ .byte 0xc8
++ .uleb128 0x16
++ .long .LASF378
++ .byte 0x5d
++ .value 0x19b
++ .long 0x53c4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF379
++ .byte 0x5d
++ .value 0x19c
++ .long 0x541b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0x16
++ .long .LASF380
++ .byte 0x5d
++ .value 0x19d
++ .long 0x541b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x34
++ .uleb128 0x16
++ .long .LASF381
++ .byte 0x5d
++ .value 0x19e
++ .long 0x541b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x54
++ .uleb128 0x16
++ .long .LASF205
++ .byte 0x5d
++ .value 0x19f
++ .long 0x1e7d
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x74
++ .uleb128 0x16
++ .long .LASF382
++ .byte 0x5d
++ .value 0x1a1
++ .long 0x4b3a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x78
++ .uleb128 0x16
++ .long .LASF383
++ .byte 0x5d
++ .value 0x1a2
++ .long 0x4c33
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xb8
++ .uleb128 0x16
++ .long .LASF384
++ .byte 0x5d
++ .value 0x1a3
++ .long 0x5c6b
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xcc
++ .uleb128 0x20
++ .long .LASF385
++ .byte 0x5d
++ .value 0x1a4
++ .long 0x77
++ .byte 0x4
++ .byte 0x1
++ .byte 0x1f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xd0
++ .uleb128 0x20
++ .long .LASF386
++ .byte 0x5d
++ .value 0x1a5
++ .long 0x77
++ .byte 0x4
++ .byte 0x1
++ .byte 0x1e
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xd0
++ .uleb128 0x16
++ .long .LASF387
++ .byte 0x5d
++ .value 0x1a6
++ .long 0x566b
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xd4
++ .uleb128 0x16
++ .long .LASF388
++ .byte 0x5d
++ .value 0x1a7
++ .long 0x56a5
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xe8
++ .uleb128 0x17
++ .string "sem"
++ .byte 0x5d
++ .value 0x1a9
++ .long 0x1931
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xec
++ .uleb128 0x17
++ .string "bus"
++ .byte 0x5d
++ .value 0x1ad
++ .long 0x54d3
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x100
++ .uleb128 0x16
++ .long .LASF389
++ .byte 0x5d
++ .value 0x1ae
++ .long 0x56fd
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x104
++ .uleb128 0x16
++ .long .LASF390
++ .byte 0x5d
++ .value 0x1b0
++ .long 0x160b
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x108
++ .uleb128 0x16
++ .long .LASF391
++ .byte 0x5d
++ .value 0x1b1
++ .long 0x160b
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x10c
++ .uleb128 0x16
++ .long .LASF392
++ .byte 0x5d
++ .value 0x1b3
++ .long 0x1c12
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x110
++ .uleb128 0x16
++ .long .LASF393
++ .byte 0x5d
++ .value 0x1b8
++ .long 0x5c71
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x12c
++ .uleb128 0x16
++ .long .LASF394
++ .byte 0x5d
++ .value 0x1b9
++ .long 0x189
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x130
++ .uleb128 0x16
++ .long .LASF395
++ .byte 0x5d
++ .value 0x1bf
++ .long 0x17bc
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x138
++ .uleb128 0x16
++ .long .LASF396
++ .byte 0x5d
++ .value 0x1c1
++ .long 0x5c7d
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x140
++ .uleb128 0x16
++ .long .LASF397
++ .byte 0x5d
++ .value 0x1c4
++ .long 0x546c
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x144
++ .uleb128 0x16
++ .long .LASF398
++ .byte 0x5d
++ .value 0x1c6
++ .long 0x1680
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x148
++ .uleb128 0x16
++ .long .LASF399
++ .byte 0x5d
++ .value 0x1c7
++ .long 0x17bc
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x14c
++ .uleb128 0x16
++ .long .LASF400
++ .byte 0x5d
++ .value 0x1ca
++ .long 0x17bc
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x154
++ .uleb128 0x16
++ .long .LASF401
++ .byte 0x5d
++ .value 0x1cb
++ .long 0x5b30
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x15c
++ .uleb128 0x16
++ .long .LASF402
++ .byte 0x5d
++ .value 0x1cc
++ .long 0x19f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x160
++ .uleb128 0x16
++ .long .LASF403
++ .byte 0x5d
++ .value 0x1cd
++ .long 0x5bb2
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x164
++ .uleb128 0x16
++ .long .LASF404
++ .byte 0x5d
++ .value 0x1cf
++ .long 0x5820
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x168
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x1c8f
++ .uleb128 0xf
++ .long 0x1ef2
++ .long .LASF405
++ .byte 0x1c
++ .byte 0x59
++ .byte 0x34
++ .uleb128 0xa
++ .long .LASF406
++ .byte 0x59
++ .byte 0x35
++ .long 0x93a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF407
++ .byte 0x59
++ .byte 0x36
++ .long 0x1efe
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF408
++ .byte 0x59
++ .byte 0x37
++ .long 0x1f14
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF409
++ .byte 0x59
++ .byte 0x38
++ .long 0x1efe
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF410
++ .byte 0x59
++ .byte 0x3a
++ .long 0x93a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xa
++ .long .LASF411
++ .byte 0x59
++ .byte 0x3b
++ .long 0x36
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0xa
++ .long .LASF412
++ .byte 0x59
++ .byte 0x3e
++ .long 0x1f4b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .byte 0x0
++ .uleb128 0x5
++ .long 0x1efe
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x77
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x1ef2
++ .uleb128 0x11
++ .long 0x1f14
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x77
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x1f04
++ .uleb128 0x11
++ .long 0x1f39
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x923
++ .uleb128 0x6
++ .long 0x1f39
++ .uleb128 0x6
++ .long 0x160b
++ .uleb128 0x6
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x1f3f
++ .uleb128 0x5
++ .long 0x1f4b
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x160b
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x1f1a
++ .uleb128 0xf
++ .long 0x214b
++ .long .LASF413
++ .byte 0x8c
++ .byte 0x63
++ .byte 0x16
++ .uleb128 0xa
++ .long .LASF414
++ .byte 0x63
++ .byte 0x17
++ .long 0xb5
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF415
++ .byte 0x63
++ .byte 0x18
++ .long 0x92e
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF416
++ .byte 0x63
++ .byte 0x1a
++ .long 0x92e
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF417
++ .byte 0x63
++ .byte 0x1b
++ .long 0x2151
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF418
++ .byte 0x63
++ .byte 0x1c
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xa
++ .long .LASF419
++ .byte 0x63
++ .byte 0x1d
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0xa
++ .long .LASF420
++ .byte 0x63
++ .byte 0x1e
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0xa
++ .long .LASF421
++ .byte 0x63
++ .byte 0x1f
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0xa
++ .long .LASF422
++ .byte 0x63
++ .byte 0x20
++ .long 0x216c
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0xa
++ .long .LASF423
++ .byte 0x63
++ .byte 0x21
++ .long 0x2182
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x24
++ .uleb128 0xa
++ .long .LASF424
++ .byte 0x63
++ .byte 0x22
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .uleb128 0xa
++ .long .LASF425
++ .byte 0x63
++ .byte 0x23
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x2c
++ .uleb128 0xa
++ .long .LASF426
++ .byte 0x63
++ .byte 0x24
++ .long 0x93a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x30
++ .uleb128 0xa
++ .long .LASF427
++ .byte 0x63
++ .byte 0x25
++ .long 0x2198
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x34
++ .uleb128 0xa
++ .long .LASF428
++ .byte 0x63
++ .byte 0x27
++ .long 0x93a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x38
++ .uleb128 0xa
++ .long .LASF429
++ .byte 0x63
++ .byte 0x28
++ .long 0x21b3
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x3c
++ .uleb128 0xa
++ .long .LASF430
++ .byte 0x63
++ .byte 0x29
++ .long 0x21c9
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x40
++ .uleb128 0xa
++ .long .LASF431
++ .byte 0x63
++ .byte 0x2a
++ .long 0x21c9
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x44
++ .uleb128 0xa
++ .long .LASF432
++ .byte 0x63
++ .byte 0x2b
++ .long 0x21c9
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x48
++ .uleb128 0xa
++ .long .LASF433
++ .byte 0x63
++ .byte 0x2c
++ .long 0x21df
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4c
++ .uleb128 0xa
++ .long .LASF434
++ .byte 0x63
++ .byte 0x2e
++ .long 0x2206
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x50
++ .uleb128 0xa
++ .long .LASF435
++ .byte 0x63
++ .byte 0x2f
++ .long 0x93a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x54
++ .uleb128 0xa
++ .long .LASF436
++ .byte 0x63
++ .byte 0x30
++ .long 0x21c9
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x58
++ .uleb128 0xa
++ .long .LASF437
++ .byte 0x63
++ .byte 0x31
++ .long 0x93a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x5c
++ .uleb128 0xa
++ .long .LASF438
++ .byte 0x63
++ .byte 0x32
++ .long 0x2221
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x60
++ .uleb128 0xa
++ .long .LASF439
++ .byte 0x63
++ .byte 0x36
++ .long 0x2243
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x64
++ .uleb128 0xa
++ .long .LASF440
++ .byte 0x63
++ .byte 0x38
++ .long 0x225a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x68
++ .uleb128 0xa
++ .long .LASF441
++ .byte 0x63
++ .byte 0x3e
++ .long 0x2280
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x6c
++ .uleb128 0xa
++ .long .LASF442
++ .byte 0x63
++ .byte 0x3f
++ .long 0x229b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x70
++ .uleb128 0xa
++ .long .LASF443
++ .byte 0x63
++ .byte 0x41
++ .long 0x22b1
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x74
++ .uleb128 0xa
++ .long .LASF444
++ .byte 0x63
++ .byte 0x42
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x78
++ .uleb128 0xa
++ .long .LASF445
++ .byte 0x63
++ .byte 0x43
++ .long 0x22c7
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x7c
++ .uleb128 0xa
++ .long .LASF446
++ .byte 0x63
++ .byte 0x47
++ .long 0x22de
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x80
++ .uleb128 0xa
++ .long .LASF447
++ .byte 0x63
++ .byte 0x48
++ .long 0x36
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x84
++ .uleb128 0xa
++ .long .LASF448
++ .byte 0x63
++ .byte 0x49
++ .long 0x36
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x88
++ .byte 0x0
++ .uleb128 0x18
++ .byte 0x1
++ .long 0x923
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x214b
++ .uleb128 0x11
++ .long 0x216c
++ .byte 0x1
++ .long 0x2f
++ .uleb128 0x6
++ .long 0x1be1
++ .uleb128 0x6
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x2157
++ .uleb128 0x11
++ .long 0x2182
++ .byte 0x1
++ .long 0x2f
++ .uleb128 0x6
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x2172
++ .uleb128 0x11
++ .long 0x2198
++ .byte 0x1
++ .long 0x1be1
++ .uleb128 0x6
++ .long 0x1be1
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x2188
++ .uleb128 0x11
++ .long 0x21b3
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x21
++ .uleb128 0x6
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x219e
++ .uleb128 0x11
++ .long 0x21c9
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x21b9
++ .uleb128 0x11
++ .long 0x21df
++ .byte 0x1
++ .long 0x1be1
++ .uleb128 0x6
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x21cf
++ .uleb128 0x11
++ .long 0x21fa
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x21fa
++ .uleb128 0x6
++ .long 0x2200
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x1a91
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x1b57
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x21e5
++ .uleb128 0x11
++ .long 0x2221
++ .byte 0x1
++ .long 0x173
++ .uleb128 0x6
++ .long 0x173
++ .uleb128 0x6
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x220c
++ .uleb128 0x5
++ .long 0x223d
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x223d
++ .uleb128 0x6
++ .long 0xb5
++ .uleb128 0x6
++ .long 0x2200
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x1b10
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x2227
++ .uleb128 0x5
++ .long 0x225a
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x223d
++ .uleb128 0x6
++ .long 0x2200
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x2249
++ .uleb128 0x11
++ .long 0x227a
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x227a
++ .uleb128 0x6
++ .long 0xb5
++ .uleb128 0x6
++ .long 0xb5
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x19da
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x2260
++ .uleb128 0x11
++ .long 0x229b
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0xb5
++ .uleb128 0x6
++ .long 0xb5
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x2286
++ .uleb128 0x11
++ .long 0x22b1
++ .byte 0x1
++ .long 0x77
++ .uleb128 0x6
++ .long 0x2f
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x22a1
++ .uleb128 0x11
++ .long 0x22c7
++ .byte 0x1
++ .long 0x77
++ .uleb128 0x6
++ .long 0x923
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x22b7
++ .uleb128 0x5
++ .long 0x22de
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x923
++ .uleb128 0x6
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x22cd
++ .uleb128 0xf
++ .long 0x22ff
++ .long .LASF449
++ .byte 0x4
++ .byte 0x2b
++ .byte 0x17
++ .uleb128 0xa
++ .long .LASF450
++ .byte 0x2b
++ .byte 0x18
++ .long 0x16c4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x2344
++ .long .LASF451
++ .byte 0x10
++ .byte 0x47
++ .byte 0x1e
++ .uleb128 0xa
++ .long .LASF452
++ .byte 0x47
++ .byte 0x7a
++ .long 0x2344
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF453
++ .byte 0x47
++ .byte 0x7b
++ .long 0x2344
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF454
++ .byte 0x47
++ .byte 0x7c
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF455
++ .byte 0x47
++ .byte 0x7d
++ .long 0x234a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x22ff
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x124
++ .uleb128 0xf
++ .long 0x2387
++ .long .LASF456
++ .byte 0xc
++ .byte 0x47
++ .byte 0x83
++ .uleb128 0xa
++ .long .LASF457
++ .byte 0x47
++ .byte 0x84
++ .long 0x16c4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF285
++ .byte 0x47
++ .byte 0x85
++ .long 0x1680
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF458
++ .byte 0x47
++ .byte 0x86
++ .long 0x2344
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x23a2
++ .long .LASF459
++ .byte 0x4
++ .byte 0x47
++ .byte 0x89
++ .uleb128 0xa
++ .long .LASF460
++ .byte 0x47
++ .byte 0x8a
++ .long 0x23a2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x2350
++ .uleb128 0x9
++ .long 0x23bf
++ .byte 0x8
++ .byte 0x4a
++ .byte 0x18
++ .uleb128 0xb
++ .string "sig"
++ .byte 0x4a
++ .byte 0x19
++ .long 0x1b00
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .byte 0x0
++ .uleb128 0x7
++ .long .LASF461
++ .byte 0x4a
++ .byte 0x1a
++ .long 0x23a8
++ .uleb128 0x7
++ .long .LASF462
++ .byte 0x4e
++ .byte 0x11
++ .long 0x3c
++ .uleb128 0x7
++ .long .LASF463
++ .byte 0x4e
++ .byte 0x12
++ .long 0x23e0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x23ca
++ .uleb128 0x7
++ .long .LASF464
++ .byte 0x4e
++ .byte 0x14
++ .long 0x940
++ .uleb128 0x7
++ .long .LASF465
++ .byte 0x4e
++ .byte 0x15
++ .long 0x23fc
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x23e6
++ .uleb128 0xf
++ .long 0x2447
++ .long .LASF466
++ .byte 0x14
++ .byte 0x4a
++ .byte 0x7b
++ .uleb128 0xa
++ .long .LASF467
++ .byte 0x4a
++ .byte 0x7c
++ .long 0x23d5
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF468
++ .byte 0x4a
++ .byte 0x7d
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF469
++ .byte 0x4a
++ .byte 0x7e
++ .long 0x23f1
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF470
++ .byte 0x4a
++ .byte 0x7f
++ .long 0x23bf
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x2461
++ .long .LASF471
++ .byte 0x14
++ .byte 0x4a
++ .byte 0x82
++ .uleb128 0xb
++ .string "sa"
++ .byte 0x4a
++ .byte 0x83
++ .long 0x2402
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .byte 0x0
++ .uleb128 0x25
++ .long 0x2484
++ .long .LASF473
++ .byte 0x4
++ .byte 0x52
++ .byte 0x7
++ .uleb128 0xe
++ .long .LASF474
++ .byte 0x52
++ .byte 0x8
++ .long 0x21
++ .uleb128 0xe
++ .long .LASF475
++ .byte 0x52
++ .byte 0x9
++ .long 0x160b
++ .byte 0x0
++ .uleb128 0x7
++ .long .LASF476
++ .byte 0x52
++ .byte 0xa
++ .long 0x2461
++ .uleb128 0x9
++ .long 0x24b4
++ .byte 0x8
++ .byte 0x52
++ .byte 0x31
++ .uleb128 0xa
++ .long .LASF477
++ .byte 0x52
++ .byte 0x32
++ .long 0x1b5
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF478
++ .byte 0x52
++ .byte 0x33
++ .long 0x1dd
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .byte 0x0
++ .uleb128 0x9
++ .long 0x2503
++ .byte 0x10
++ .byte 0x52
++ .byte 0x37
++ .uleb128 0xa
++ .long .LASF479
++ .byte 0x52
++ .byte 0x38
++ .long 0x1c0
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF480
++ .byte 0x52
++ .byte 0x39
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF481
++ .byte 0x52
++ .byte 0x3a
++ .long 0x2503
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF482
++ .byte 0x52
++ .byte 0x3b
++ .long 0x2484
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF483
++ .byte 0x52
++ .byte 0x3c
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x2512
++ .long 0xbb
++ .uleb128 0x23
++ .long 0x28
++ .byte 0x0
++ .uleb128 0x9
++ .long 0x2545
++ .byte 0xc
++ .byte 0x52
++ .byte 0x40
++ .uleb128 0xa
++ .long .LASF477
++ .byte 0x52
++ .byte 0x41
++ .long 0x1b5
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF478
++ .byte 0x52
++ .byte 0x42
++ .long 0x1dd
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF482
++ .byte 0x52
++ .byte 0x43
++ .long 0x2484
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .byte 0x0
++ .uleb128 0x9
++ .long 0x2594
++ .byte 0x14
++ .byte 0x52
++ .byte 0x47
++ .uleb128 0xa
++ .long .LASF477
++ .byte 0x52
++ .byte 0x48
++ .long 0x1b5
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF478
++ .byte 0x52
++ .byte 0x49
++ .long 0x1dd
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF484
++ .byte 0x52
++ .byte 0x4a
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF485
++ .byte 0x52
++ .byte 0x4b
++ .long 0x21f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF486
++ .byte 0x52
++ .byte 0x4c
++ .long 0x21f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .byte 0x0
++ .uleb128 0x9
++ .long 0x25ab
++ .byte 0x4
++ .byte 0x52
++ .byte 0x50
++ .uleb128 0xa
++ .long .LASF487
++ .byte 0x52
++ .byte 0x51
++ .long 0x160b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .byte 0x0
++ .uleb128 0x9
++ .long 0x25d0
++ .byte 0x8
++ .byte 0x52
++ .byte 0x58
++ .uleb128 0xa
++ .long .LASF488
++ .byte 0x52
++ .byte 0x59
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xb
++ .string "_fd"
++ .byte 0x52
++ .byte 0x5a
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .byte 0x0
++ .uleb128 0xc
++ .long 0x2626
++ .byte 0x74
++ .byte 0x52
++ .byte 0x2d
++ .uleb128 0xe
++ .long .LASF481
++ .byte 0x52
++ .byte 0x2e
++ .long 0x2626
++ .uleb128 0xe
++ .long .LASF489
++ .byte 0x52
++ .byte 0x34
++ .long 0x248f
++ .uleb128 0xe
++ .long .LASF490
++ .byte 0x52
++ .byte 0x3d
++ .long 0x24b4
++ .uleb128 0x26
++ .string "_rt"
++ .byte 0x52
++ .byte 0x44
++ .long 0x2512
++ .uleb128 0xe
++ .long .LASF491
++ .byte 0x52
++ .byte 0x4d
++ .long 0x2545
++ .uleb128 0xe
++ .long .LASF492
++ .byte 0x52
++ .byte 0x55
++ .long 0x2594
++ .uleb128 0xe
++ .long .LASF493
++ .byte 0x52
++ .byte 0x5b
++ .long 0x25ab
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x2636
++ .long 0x21
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x1c
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x267b
++ .long .LASF494
++ .byte 0x80
++ .byte 0x4a
++ .byte 0x9
++ .uleb128 0xa
++ .long .LASF495
++ .byte 0x52
++ .byte 0x29
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF496
++ .byte 0x52
++ .byte 0x2a
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF497
++ .byte 0x52
++ .byte 0x2b
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF498
++ .byte 0x52
++ .byte 0x5c
++ .long 0x25d0
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .byte 0x0
++ .uleb128 0x7
++ .long .LASF499
++ .byte 0x52
++ .byte 0x5d
++ .long 0x2636
++ .uleb128 0xf
++ .long 0x2729
++ .long .LASF500
++ .byte 0x2c
++ .byte 0x46
++ .byte 0x13
++ .uleb128 0x16
++ .long .LASF501
++ .byte 0xb
++ .value 0x229
++ .long 0x16c4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF502
++ .byte 0xb
++ .value 0x22a
++ .long 0x16c4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x16
++ .long .LASF245
++ .byte 0xb
++ .value 0x22b
++ .long 0x16c4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0x16
++ .long .LASF503
++ .byte 0xb
++ .value 0x22c
++ .long 0x16c4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0x16
++ .long .LASF504
++ .byte 0xb
++ .value 0x22e
++ .long 0x16c4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0x16
++ .long .LASF505
++ .byte 0xb
++ .value 0x22f
++ .long 0x16c4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0x16
++ .long .LASF506
++ .byte 0xb
++ .value 0x232
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0x16
++ .long .LASF507
++ .byte 0xb
++ .value 0x233
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0x16
++ .long .LASF508
++ .byte 0xb
++ .value 0x23b
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0x17
++ .string "uid"
++ .byte 0xb
++ .value 0x23c
++ .long 0x1dd
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x2686
++ .uleb128 0xf
++ .long 0x2758
++ .long .LASF503
++ .byte 0x10
++ .byte 0x46
++ .byte 0x19
++ .uleb128 0xa
++ .long .LASF509
++ .byte 0x46
++ .byte 0x1a
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF247
++ .byte 0x46
++ .byte 0x1b
++ .long 0x23bf
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x27e3
++ .long .LASF510
++ .byte 0x24
++ .byte 0x23
++ .byte 0x7
++ .uleb128 0xa
++ .long .LASF322
++ .byte 0x23
++ .byte 0x8
++ .long 0x16c4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF285
++ .byte 0x23
++ .byte 0x9
++ .long 0x16a2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF511
++ .byte 0x23
++ .byte 0xa
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF512
++ .byte 0x23
++ .byte 0xb
++ .long 0x28ec
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xb
++ .string "pwd"
++ .byte 0x23
++ .byte 0xb
++ .long 0x28ec
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xa
++ .long .LASF513
++ .byte 0x23
++ .byte 0xb
++ .long 0x28ec
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0xa
++ .long .LASF514
++ .byte 0x23
++ .byte 0xc
++ .long 0x28f8
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0xa
++ .long .LASF515
++ .byte 0x23
++ .byte 0xc
++ .long 0x28f8
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0xa
++ .long .LASF516
++ .byte 0x23
++ .byte 0xc
++ .long 0x28f8
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x28ec
++ .long .LASF517
++ .byte 0x84
++ .byte 0x23
++ .byte 0x4
++ .uleb128 0xa
++ .long .LASF518
++ .byte 0x24
++ .byte 0x53
++ .long 0x16c4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF519
++ .byte 0x24
++ .byte 0x54
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF520
++ .byte 0x24
++ .byte 0x55
++ .long 0x1680
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF521
++ .byte 0x24
++ .byte 0x56
++ .long 0x3381
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF522
++ .byte 0x24
++ .byte 0x5c
++ .long 0x1808
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xa
++ .long .LASF523
++ .byte 0x24
++ .byte 0x5d
++ .long 0x28ec
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0xa
++ .long .LASF524
++ .byte 0x24
++ .byte 0x5e
++ .long 0x5db4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0xa
++ .long .LASF525
++ .byte 0x24
++ .byte 0x60
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .uleb128 0xb
++ .string "d_u"
++ .byte 0x24
++ .byte 0x67
++ .long 0x5df6
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x30
++ .uleb128 0xa
++ .long .LASF526
++ .byte 0x24
++ .byte 0x68
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x38
++ .uleb128 0xa
++ .long .LASF527
++ .byte 0x24
++ .byte 0x69
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x40
++ .uleb128 0xa
++ .long .LASF528
++ .byte 0x24
++ .byte 0x6a
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x48
++ .uleb128 0xa
++ .long .LASF529
++ .byte 0x24
++ .byte 0x6b
++ .long 0x5e84
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4c
++ .uleb128 0xa
++ .long .LASF530
++ .byte 0x24
++ .byte 0x6c
++ .long 0x60d1
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x50
++ .uleb128 0xa
++ .long .LASF531
++ .byte 0x24
++ .byte 0x6d
++ .long 0x160b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x54
++ .uleb128 0xa
++ .long .LASF532
++ .byte 0x24
++ .byte 0x6f
++ .long 0x60dd
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x58
++ .uleb128 0xa
++ .long .LASF533
++ .byte 0x24
++ .byte 0x71
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x5c
++ .uleb128 0xa
++ .long .LASF534
++ .byte 0x24
++ .byte 0x72
++ .long 0x60e3
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x60
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x27e3
++ .uleb128 0x21
++ .long .LASF535
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x28f2
++ .uleb128 0xf
++ .long 0x2927
++ .long .LASF536
++ .byte 0x10
++ .byte 0x38
++ .byte 0x46
++ .uleb128 0xa
++ .long .LASF537
++ .byte 0x44
++ .byte 0xe
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF327
++ .byte 0x44
++ .byte 0xf
++ .long 0x18ef
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x2950
++ .long .LASF538
++ .byte 0xc
++ .byte 0x18
++ .byte 0x1b
++ .uleb128 0xa
++ .long .LASF539
++ .byte 0x18
++ .byte 0x1c
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF540
++ .byte 0x18
++ .byte 0x1d
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x2969
++ .long .LASF541
++ .byte 0x0
++ .byte 0x18
++ .byte 0x29
++ .uleb128 0xb
++ .string "x"
++ .byte 0x18
++ .byte 0x2a
++ .long 0x2969
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x2978
++ .long 0xbb
++ .uleb128 0x23
++ .long 0x28
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x29bd
++ .long .LASF542
++ .byte 0x14
++ .byte 0x18
++ .byte 0x4d
++ .uleb128 0xa
++ .long .LASF322
++ .byte 0x18
++ .byte 0x4e
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF543
++ .byte 0x18
++ .byte 0x4f
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF544
++ .byte 0x18
++ .byte 0x50
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF509
++ .byte 0x18
++ .byte 0x51
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x29f4
++ .long .LASF545
++ .byte 0x80
++ .byte 0x18
++ .byte 0x54
++ .uleb128 0xb
++ .string "pcp"
++ .byte 0x18
++ .byte 0x55
++ .long 0x29f4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF546
++ .byte 0x18
++ .byte 0x5a
++ .long 0x169
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .uleb128 0xa
++ .long .LASF547
++ .byte 0x18
++ .byte 0x5b
++ .long 0x2a04
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x29
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x2a04
++ .long 0x2978
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x1
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x2a14
++ .long 0x169
++ .uleb128 0x13
++ .long 0x28
++ .byte 0xd
++ .byte 0x0
++ .uleb128 0x1a
++ .long 0x2bbb
++ .long .LASF548
++ .value 0x1280
++ .byte 0x18
++ .byte 0xb6
++ .uleb128 0xa
++ .long .LASF549
++ .byte 0x18
++ .byte 0xb8
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF550
++ .byte 0x18
++ .byte 0xb8
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF551
++ .byte 0x18
++ .byte 0xb8
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF552
++ .byte 0x18
++ .byte 0xc1
++ .long 0x8dc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF553
++ .byte 0x18
++ .byte 0xcc
++ .long 0x2bbb
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x80
++ .uleb128 0xa
++ .long .LASF285
++ .byte 0x18
++ .byte 0xd1
++ .long 0x1680
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x1080
++ .uleb128 0xa
++ .long .LASF538
++ .byte 0x18
++ .byte 0xd6
++ .long 0x2bcb
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x1084
++ .uleb128 0xa
++ .long .LASF554
++ .byte 0x18
++ .byte 0xd9
++ .long 0x2950
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x1180
++ .uleb128 0xa
++ .long .LASF555
++ .byte 0x18
++ .byte 0xdc
++ .long 0x1680
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x1180
++ .uleb128 0xa
++ .long .LASF556
++ .byte 0x18
++ .byte 0xdd
++ .long 0x17bc
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x1184
++ .uleb128 0xa
++ .long .LASF557
++ .byte 0x18
++ .byte 0xde
++ .long 0x17bc
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x118c
++ .uleb128 0xa
++ .long .LASF558
++ .byte 0x18
++ .byte 0xdf
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x1194
++ .uleb128 0xa
++ .long .LASF559
++ .byte 0x18
++ .byte 0xe0
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x1198
++ .uleb128 0xa
++ .long .LASF560
++ .byte 0x18
++ .byte 0xe1
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x119c
++ .uleb128 0xa
++ .long .LASF561
++ .byte 0x18
++ .byte 0xe2
++ .long 0x21
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x11a0
++ .uleb128 0xa
++ .long .LASF562
++ .byte 0x18
++ .byte 0xe5
++ .long 0x16c4
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x11a4
++ .uleb128 0xa
++ .long .LASF563
++ .byte 0x18
++ .byte 0xe8
++ .long 0x2bdb
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x11a8
++ .uleb128 0xa
++ .long .LASF564
++ .byte 0x18
++ .byte 0xf7
++ .long 0x21
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x11e0
++ .uleb128 0xa
++ .long .LASF565
++ .byte 0x18
++ .byte 0xfa
++ .long 0x2950
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x1200
++ .uleb128 0x16
++ .long .LASF566
++ .byte 0x18
++ .value 0x115
++ .long 0x2beb
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x1200
++ .uleb128 0x16
++ .long .LASF567
++ .byte 0x18
++ .value 0x116
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x1204
++ .uleb128 0x16
++ .long .LASF568
++ .byte 0x18
++ .value 0x117
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x1208
++ .uleb128 0x16
++ .long .LASF569
++ .byte 0x18
++ .value 0x11c
++ .long 0x2cbe
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x120c
++ .uleb128 0x16
++ .long .LASF570
++ .byte 0x18
++ .value 0x11e
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x1210
++ .uleb128 0x16
++ .long .LASF571
++ .byte 0x18
++ .value 0x12a
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x1214
++ .uleb128 0x16
++ .long .LASF572
++ .byte 0x18
++ .value 0x12b
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x1218
++ .uleb128 0x16
++ .long .LASF414
++ .byte 0x18
++ .value 0x130
++ .long 0x7f2
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x121c
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x2bcb
++ .long 0x29bd
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x1f
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x2bdb
++ .long 0x2927
++ .uleb128 0x13
++ .long 0x28
++ .byte 0xa
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x2beb
++ .long 0x16cf
++ .uleb128 0x13
++ .long 0x28
++ .byte 0xd
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x18ef
++ .uleb128 0x1a
++ .long 0x2cbe
++ .long .LASF573
++ .value 0x3800
++ .byte 0x18
++ .byte 0x20
++ .uleb128 0x16
++ .long .LASF574
++ .byte 0x18
++ .value 0x1ae
++ .long 0x2d12
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF575
++ .byte 0x18
++ .value 0x1af
++ .long 0x2d22
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x3780
++ .uleb128 0x16
++ .long .LASF576
++ .byte 0x18
++ .value 0x1b0
++ .long 0x21
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x37bc
++ .uleb128 0x16
++ .long .LASF577
++ .byte 0x18
++ .value 0x1b2
++ .long 0x2d82
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x37c0
++ .uleb128 0x16
++ .long .LASF578
++ .byte 0x18
++ .value 0x1b4
++ .long 0x2d8e
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x37c4
++ .uleb128 0x16
++ .long .LASF579
++ .byte 0x18
++ .value 0x1bf
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x37c8
++ .uleb128 0x16
++ .long .LASF580
++ .byte 0x18
++ .value 0x1c0
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x37cc
++ .uleb128 0x16
++ .long .LASF581
++ .byte 0x18
++ .value 0x1c1
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x37d0
++ .uleb128 0x16
++ .long .LASF582
++ .byte 0x18
++ .value 0x1c3
++ .long 0x21
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x37d4
++ .uleb128 0x16
++ .long .LASF583
++ .byte 0x18
++ .value 0x1c4
++ .long 0x18ef
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x37d8
++ .uleb128 0x16
++ .long .LASF584
++ .byte 0x18
++ .value 0x1c5
++ .long 0x15f9
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x37e4
++ .uleb128 0x16
++ .long .LASF585
++ .byte 0x18
++ .value 0x1c6
++ .long 0x21
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x37e8
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x2bf1
++ .uleb128 0x15
++ .long 0x2cf0
++ .long .LASF586
++ .byte 0x14
++ .byte 0x18
++ .value 0x18c
++ .uleb128 0x16
++ .long .LASF587
++ .byte 0x18
++ .value 0x18d
++ .long 0x2cf6
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF588
++ .byte 0x18
++ .value 0x18e
++ .long 0x2cfc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .byte 0x0
++ .uleb128 0x21
++ .long .LASF589
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x2cf0
++ .uleb128 0x12
++ .long 0x2d0c
++ .long 0x2d0c
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x3
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x2a14
++ .uleb128 0x12
++ .long 0x2d22
++ .long 0x2a14
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x2
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x2d32
++ .long 0x2cc4
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x2
++ .byte 0x0
++ .uleb128 0x15
++ .long 0x2d82
++ .long .LASF590
++ .byte 0x20
++ .byte 0x18
++ .value 0x19e
++ .uleb128 0xa
++ .long .LASF53
++ .byte 0x19
++ .byte 0x13
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF591
++ .byte 0x19
++ .byte 0x15
++ .long 0x16c4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x10
++ .long 0x8487
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0x10
++ .long 0x8507
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0x10
++ .long 0x852a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0xb
++ .string "lru"
++ .byte 0x19
++ .byte 0x40
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x2d32
++ .uleb128 0x21
++ .long .LASF592
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x2d88
++ .uleb128 0xf
++ .long 0x2dd9
++ .long .LASF593
++ .byte 0x14
++ .byte 0x20
++ .byte 0x2f
++ .uleb128 0xa
++ .long .LASF322
++ .byte 0x20
++ .byte 0x31
++ .long 0x16c4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF323
++ .byte 0x20
++ .byte 0x32
++ .long 0x1680
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF324
++ .byte 0x20
++ .byte 0x33
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF594
++ .byte 0x20
++ .byte 0x3a
++ .long 0x2dd9
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0xdda
++ .uleb128 0xf
++ .long 0x2e16
++ .long .LASF595
++ .byte 0xc
++ .byte 0x55
++ .byte 0x32
++ .uleb128 0xa
++ .long .LASF596
++ .byte 0x55
++ .byte 0x33
++ .long 0x2e36
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF307
++ .byte 0x55
++ .byte 0x34
++ .long 0x2e30
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF597
++ .byte 0x55
++ .byte 0x35
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .byte 0x0
++ .uleb128 0x11
++ .long 0x2e30
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x2e30
++ .uleb128 0x6
++ .long 0x2f
++ .uleb128 0x6
++ .long 0x160b
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x2ddf
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x2e16
++ .uleb128 0xf
++ .long 0x2e65
++ .long .LASF598
++ .byte 0x14
++ .byte 0x55
++ .byte 0x3d
++ .uleb128 0xa
++ .long .LASF599
++ .byte 0x55
++ .byte 0x3e
++ .long 0x18fa
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF600
++ .byte 0x55
++ .byte 0x3f
++ .long 0x2e30
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .byte 0x0
++ .uleb128 0x21
++ .long .LASF601
++ .byte 0x1
++ .uleb128 0xf
++ .long 0x2ea2
++ .long .LASF602
++ .byte 0xc
++ .byte 0x3
++ .byte 0x13
++ .uleb128 0xa
++ .long .LASF603
++ .byte 0x3
++ .byte 0x14
++ .long 0x1fe
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF604
++ .byte 0x3
++ .byte 0x15
++ .long 0x2ea2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF605
++ .byte 0x3
++ .byte 0x17
++ .long 0x2ea2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x2e65
++ .uleb128 0xf
++ .long 0x2ed1
++ .long .LASF606
++ .byte 0x8
++ .byte 0x25
++ .byte 0x32
++ .uleb128 0xa
++ .long .LASF307
++ .byte 0x25
++ .byte 0x33
++ .long 0x2ed1
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF316
++ .byte 0x25
++ .byte 0x34
++ .long 0x2ee3
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x2ea8
++ .uleb128 0x5
++ .long 0x2ee3
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x2ed1
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x2ed7
++ .uleb128 0xf
++ .long 0x2fba
++ .long .LASF607
++ .byte 0x3c
++ .byte 0x25
++ .byte 0x5d
++ .uleb128 0xa
++ .long .LASF608
++ .byte 0x25
++ .byte 0x5f
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF609
++ .byte 0x25
++ .byte 0x60
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF610
++ .byte 0x25
++ .byte 0x61
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF544
++ .byte 0x25
++ .byte 0x64
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF611
++ .byte 0x25
++ .byte 0x65
++ .long 0x2ed1
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xa
++ .long .LASF612
++ .byte 0x25
++ .byte 0x66
++ .long 0x2fba
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0xa
++ .long .LASF613
++ .byte 0x25
++ .byte 0x67
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0xa
++ .long .LASF614
++ .byte 0x25
++ .byte 0x68
++ .long 0x2ed1
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0xa
++ .long .LASF615
++ .byte 0x25
++ .byte 0x69
++ .long 0x2fba
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0xa
++ .long .LASF616
++ .byte 0x25
++ .byte 0x6a
++ .long 0x2ed1
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x24
++ .uleb128 0xa
++ .long .LASF617
++ .byte 0x25
++ .byte 0x6b
++ .long 0x2fba
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .uleb128 0xa
++ .long .LASF618
++ .byte 0x25
++ .byte 0x6c
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x2c
++ .uleb128 0xb
++ .string "cpu"
++ .byte 0x25
++ .byte 0x6d
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x30
++ .uleb128 0xa
++ .long .LASF619
++ .byte 0x25
++ .byte 0x6e
++ .long 0x2ea8
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x34
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x2ed1
++ .uleb128 0x27
++ .long 0x2fe5
++ .long .LASF739
++ .byte 0x4
++ .byte 0x39
++ .byte 0x7
++ .uleb128 0x28
++ .long .LASF620
++ .sleb128 0
++ .uleb128 0x28
++ .long .LASF621
++ .sleb128 1
++ .uleb128 0x28
++ .long .LASF622
++ .sleb128 2
++ .uleb128 0x28
++ .long .LASF623
++ .sleb128 3
++ .byte 0x0
++ .uleb128 0x29
++ .long 0x3037
++ .string "pid"
++ .byte 0x24
++ .byte 0x38
++ .byte 0x95
++ .uleb128 0xa
++ .long .LASF322
++ .byte 0x39
++ .byte 0x2d
++ .long 0x16c4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xb
++ .string "nr"
++ .byte 0x39
++ .byte 0x2f
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF624
++ .byte 0x39
++ .byte 0x30
++ .long 0x1808
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF192
++ .byte 0x39
++ .byte 0x32
++ .long 0x3037
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xb
++ .string "rcu"
++ .byte 0x39
++ .byte 0x33
++ .long 0x2ea8
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x3047
++ .long 0x17eb
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x2
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x3070
++ .long .LASF625
++ .byte 0xc
++ .byte 0x39
++ .byte 0x39
++ .uleb128 0xa
++ .long .LASF400
++ .byte 0x39
++ .byte 0x3a
++ .long 0x1808
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xb
++ .string "pid"
++ .byte 0x39
++ .byte 0x3b
++ .long 0x3070
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x2fe5
++ .uleb128 0x9
++ .long 0x308d
++ .byte 0x4
++ .byte 0x4f
++ .byte 0xc
++ .uleb128 0xa
++ .long .LASF626
++ .byte 0x4f
++ .byte 0xc
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .byte 0x0
++ .uleb128 0x7
++ .long .LASF627
++ .byte 0x4f
++ .byte 0xc
++ .long 0x3076
++ .uleb128 0xf
++ .long 0x30b3
++ .long .LASF277
++ .byte 0x4
++ .byte 0x1b
++ .byte 0x2f
++ .uleb128 0xa
++ .long .LASF307
++ .byte 0x1b
++ .byte 0x30
++ .long 0x30b3
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x3098
++ .uleb128 0xf
++ .long 0x30f0
++ .long .LASF628
++ .byte 0xc
++ .byte 0x1b
++ .byte 0x3b
++ .uleb128 0xa
++ .long .LASF509
++ .byte 0x1b
++ .byte 0x3f
++ .long 0x3098
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF629
++ .byte 0x1b
++ .byte 0x47
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF630
++ .byte 0x1b
++ .byte 0x53
++ .long 0x30b3
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .byte 0x0
++ .uleb128 0x1a
++ .long 0x3381
++ .long .LASF631
++ .value 0x148
++ .byte 0x1b
++ .byte 0x8a
++ .uleb128 0x16
++ .long .LASF632
++ .byte 0x1a
++ .value 0x213
++ .long 0x1808
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF633
++ .byte 0x1a
++ .value 0x214
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0x16
++ .long .LASF634
++ .byte 0x1a
++ .value 0x215
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0x16
++ .long .LASF635
++ .byte 0x1a
++ .value 0x216
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0x16
++ .long .LASF636
++ .byte 0x1a
++ .value 0x217
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0x16
++ .long .LASF637
++ .byte 0x1a
++ .value 0x218
++ .long 0x16c4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x24
++ .uleb128 0x16
++ .long .LASF638
++ .byte 0x1a
++ .value 0x219
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .uleb128 0x16
++ .long .LASF639
++ .byte 0x1a
++ .value 0x21a
++ .long 0x1dd
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x2c
++ .uleb128 0x16
++ .long .LASF640
++ .byte 0x1a
++ .value 0x21b
++ .long 0x1e8
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x30
++ .uleb128 0x16
++ .long .LASF641
++ .byte 0x1a
++ .value 0x21c
++ .long 0x19f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x34
++ .uleb128 0x16
++ .long .LASF642
++ .byte 0x1a
++ .value 0x21d
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x38
++ .uleb128 0x16
++ .long .LASF643
++ .byte 0x1a
++ .value 0x21e
++ .long 0x1f3
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x3c
++ .uleb128 0x16
++ .long .LASF644
++ .byte 0x1a
++ .value 0x220
++ .long 0x1730
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x44
++ .uleb128 0x16
++ .long .LASF645
++ .byte 0x1a
++ .value 0x222
++ .long 0x173b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x48
++ .uleb128 0x16
++ .long .LASF646
++ .byte 0x1a
++ .value 0x223
++ .long 0x173b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x50
++ .uleb128 0x16
++ .long .LASF647
++ .byte 0x1a
++ .value 0x224
++ .long 0x173b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x58
++ .uleb128 0x16
++ .long .LASF648
++ .byte 0x1a
++ .value 0x225
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x60
++ .uleb128 0x16
++ .long .LASF649
++ .byte 0x1a
++ .value 0x226
++ .long 0x235
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x64
++ .uleb128 0x16
++ .long .LASF650
++ .byte 0x1a
++ .value 0x227
++ .long 0x53
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x68
++ .uleb128 0x16
++ .long .LASF651
++ .byte 0x1a
++ .value 0x228
++ .long 0xea
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x6a
++ .uleb128 0x16
++ .long .LASF652
++ .byte 0x1a
++ .value 0x229
++ .long 0x1680
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x6c
++ .uleb128 0x16
++ .long .LASF653
++ .byte 0x1a
++ .value 0x22a
++ .long 0x2d94
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x70
++ .uleb128 0x16
++ .long .LASF654
++ .byte 0x1a
++ .value 0x22b
++ .long 0x18fa
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x84
++ .uleb128 0x16
++ .long .LASF655
++ .byte 0x1a
++ .value 0x22c
++ .long 0x738a
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x94
++ .uleb128 0x16
++ .long .LASF656
++ .byte 0x1a
++ .value 0x22d
++ .long 0x7538
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x98
++ .uleb128 0x16
++ .long .LASF657
++ .byte 0x1a
++ .value 0x22e
++ .long 0x60d1
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x9c
++ .uleb128 0x16
++ .long .LASF658
++ .byte 0x1a
++ .value 0x22f
++ .long 0x7641
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xa0
++ .uleb128 0x16
++ .long .LASF659
++ .byte 0x1a
++ .value 0x230
++ .long 0x6e96
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xa4
++ .uleb128 0x16
++ .long .LASF660
++ .byte 0x1a
++ .value 0x231
++ .long 0x6e9c
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xa8
++ .uleb128 0x16
++ .long .LASF661
++ .byte 0x1a
++ .value 0x235
++ .long 0x17bc
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xfc
++ .uleb128 0x10
++ .long 0x7207
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x104
++ .uleb128 0x16
++ .long .LASF662
++ .byte 0x1a
++ .value 0x23b
++ .long 0x21
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x108
++ .uleb128 0x16
++ .long .LASF663
++ .byte 0x1a
++ .value 0x23d
++ .long 0x141
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x10c
++ .uleb128 0x16
++ .long .LASF664
++ .byte 0x1a
++ .value 0x240
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x110
++ .uleb128 0x16
++ .long .LASF665
++ .byte 0x1a
++ .value 0x241
++ .long 0x764d
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x114
++ .uleb128 0x16
++ .long .LASF504
++ .byte 0x1a
++ .value 0x245
++ .long 0x17bc
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x118
++ .uleb128 0x16
++ .long .LASF666
++ .byte 0x1a
++ .value 0x246
++ .long 0x2d94
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x120
++ .uleb128 0x16
++ .long .LASF667
++ .byte 0x1a
++ .value 0x249
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x134
++ .uleb128 0x16
++ .long .LASF668
++ .byte 0x1a
++ .value 0x24a
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x138
++ .uleb128 0x16
++ .long .LASF669
++ .byte 0x1a
++ .value 0x24c
++ .long 0x77
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x13c
++ .uleb128 0x16
++ .long .LASF670
++ .byte 0x1a
++ .value 0x24e
++ .long 0x16c4
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x140
++ .uleb128 0x16
++ .long .LASF671
++ .byte 0x1a
++ .value 0x252
++ .long 0x160b
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x144
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x30f0
++ .uleb128 0x1f
++ .long 0x36ad
++ .long .LASF672
++ .value 0x1b0
++ .byte 0x8
++ .value 0x19d
++ .uleb128 0x16
++ .long .LASF673
++ .byte 0xb
++ .value 0x144
++ .long 0x3f9c
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF674
++ .byte 0xb
++ .value 0x145
++ .long 0x17a1
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x16
++ .long .LASF675
++ .byte 0xb
++ .value 0x146
++ .long 0x3f9c
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0x16
++ .long .LASF676
++ .byte 0xb
++ .value 0x149
++ .long 0x3fc6
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0x16
++ .long .LASF677
++ .byte 0xb
++ .value 0x14a
++ .long 0x3fdd
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0x16
++ .long .LASF678
++ .byte 0xb
++ .value 0x14b
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0x16
++ .long .LASF679
++ .byte 0xb
++ .value 0x14c
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0x16
++ .long .LASF680
++ .byte 0xb
++ .value 0x14d
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0x16
++ .long .LASF681
++ .byte 0xb
++ .value 0x14e
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0x17
++ .string "pgd"
++ .byte 0xb
++ .value 0x14f
++ .long 0x3fe3
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x24
++ .uleb128 0x16
++ .long .LASF682
++ .byte 0xb
++ .value 0x150
++ .long 0x16c4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .uleb128 0x16
++ .long .LASF683
++ .byte 0xb
++ .value 0x151
++ .long 0x16c4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x2c
++ .uleb128 0x16
++ .long .LASF684
++ .byte 0xb
++ .value 0x152
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x30
++ .uleb128 0x16
++ .long .LASF685
++ .byte 0xb
++ .value 0x153
++ .long 0x18fa
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x34
++ .uleb128 0x16
++ .long .LASF686
++ .byte 0xb
++ .value 0x154
++ .long 0x1680
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x44
++ .uleb128 0x16
++ .long .LASF687
++ .byte 0xb
++ .value 0x156
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x48
++ .uleb128 0x16
++ .long .LASF688
++ .byte 0xb
++ .value 0x15e
++ .long 0x3eb1
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x50
++ .uleb128 0x16
++ .long .LASF689
++ .byte 0xb
++ .value 0x15f
++ .long 0x3eb1
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x54
++ .uleb128 0x16
++ .long .LASF690
++ .byte 0xb
++ .value 0x161
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x58
++ .uleb128 0x16
++ .long .LASF691
++ .byte 0xb
++ .value 0x162
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x5c
++ .uleb128 0x16
++ .long .LASF692
++ .byte 0xb
++ .value 0x164
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x60
++ .uleb128 0x16
++ .long .LASF693
++ .byte 0xb
++ .value 0x164
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x64
++ .uleb128 0x16
++ .long .LASF694
++ .byte 0xb
++ .value 0x164
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x68
++ .uleb128 0x16
++ .long .LASF695
++ .byte 0xb
++ .value 0x164
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x6c
++ .uleb128 0x16
++ .long .LASF696
++ .byte 0xb
++ .value 0x165
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x70
++ .uleb128 0x16
++ .long .LASF697
++ .byte 0xb
++ .value 0x165
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x74
++ .uleb128 0x16
++ .long .LASF698
++ .byte 0xb
++ .value 0x165
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x78
++ .uleb128 0x16
++ .long .LASF699
++ .byte 0xb
++ .value 0x165
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x7c
++ .uleb128 0x16
++ .long .LASF700
++ .byte 0xb
++ .value 0x166
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x80
++ .uleb128 0x16
++ .long .LASF701
++ .byte 0xb
++ .value 0x166
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x84
++ .uleb128 0x16
++ .long .LASF702
++ .byte 0xb
++ .value 0x166
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x88
++ .uleb128 0x16
++ .long .LASF703
++ .byte 0xb
++ .value 0x166
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x8c
++ .uleb128 0x16
++ .long .LASF704
++ .byte 0xb
++ .value 0x167
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x90
++ .uleb128 0x17
++ .string "brk"
++ .byte 0xb
++ .value 0x167
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x94
++ .uleb128 0x16
++ .long .LASF705
++ .byte 0xb
++ .value 0x167
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x98
++ .uleb128 0x16
++ .long .LASF706
++ .byte 0xb
++ .value 0x168
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x9c
++ .uleb128 0x16
++ .long .LASF707
++ .byte 0xb
++ .value 0x168
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xa0
++ .uleb128 0x16
++ .long .LASF708
++ .byte 0xb
++ .value 0x168
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xa4
++ .uleb128 0x16
++ .long .LASF709
++ .byte 0xb
++ .value 0x168
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xa8
++ .uleb128 0x16
++ .long .LASF710
++ .byte 0xb
++ .value 0x16a
++ .long 0x3fe9
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xac
++ .uleb128 0x16
++ .long .LASF711
++ .byte 0xb
++ .value 0x16c
++ .long 0x923
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x15c
++ .uleb128 0x16
++ .long .LASF712
++ .byte 0xb
++ .value 0x16f
++ .long 0x19a9
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x160
++ .uleb128 0x16
++ .long .LASF713
++ .byte 0xb
++ .value 0x178
++ .long 0x77
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x180
++ .uleb128 0x16
++ .long .LASF714
++ .byte 0xb
++ .value 0x179
++ .long 0x77
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x184
++ .uleb128 0x16
++ .long .LASF715
++ .byte 0xb
++ .value 0x17a
++ .long 0x77
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x188
++ .uleb128 0x20
++ .long .LASF716
++ .byte 0xb
++ .value 0x17c
++ .long 0x112
++ .byte 0x1
++ .byte 0x2
++ .byte 0x6
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x18c
++ .uleb128 0x16
++ .long .LASF717
++ .byte 0xb
++ .value 0x17f
++ .long 0x21
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x190
++ .uleb128 0x16
++ .long .LASF718
++ .byte 0xb
++ .value 0x180
++ .long 0x3ff9
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x194
++ .uleb128 0x16
++ .long .LASF719
++ .byte 0xb
++ .value 0x180
++ .long 0x28fe
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x198
++ .uleb128 0x16
++ .long .LASF720
++ .byte 0xb
++ .value 0x183
++ .long 0x16a2
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x1a8
++ .uleb128 0x16
++ .long .LASF721
++ .byte 0xb
++ .value 0x184
++ .long 0x3dc4
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x1ac
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x3387
++ .uleb128 0xf
++ .long 0x36dc
++ .long .LASF722
++ .byte 0x10
++ .byte 0x50
++ .byte 0x50
++ .uleb128 0xa
++ .long .LASF723
++ .byte 0x50
++ .byte 0x51
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF724
++ .byte 0x50
++ .byte 0x52
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x3705
++ .long .LASF725
++ .byte 0x8
++ .byte 0x4d
++ .byte 0x2a
++ .uleb128 0xa
++ .long .LASF726
++ .byte 0x4d
++ .byte 0x2b
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF727
++ .byte 0x4d
++ .byte 0x2c
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .byte 0x0
++ .uleb128 0x25
++ .long 0x371d
++ .long .LASF728
++ .byte 0x8
++ .byte 0x1b
++ .byte 0x6
++ .uleb128 0xe
++ .long .LASF729
++ .byte 0x4c
++ .byte 0x2f
++ .long 0x17e
++ .byte 0x0
++ .uleb128 0x7
++ .long .LASF730
++ .byte 0x4c
++ .byte 0x3b
++ .long 0x3705
++ .uleb128 0xf
++ .long 0x377b
++ .long .LASF731
++ .byte 0x18
++ .byte 0xa
++ .byte 0xb
++ .uleb128 0xa
++ .long .LASF376
++ .byte 0xa
++ .byte 0xc
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF732
++ .byte 0xa
++ .byte 0xd
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF733
++ .byte 0xa
++ .byte 0xf
++ .long 0x3787
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF734
++ .byte 0xa
++ .byte 0x10
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xa
++ .long .LASF735
++ .byte 0xa
++ .byte 0x12
++ .long 0x380f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .byte 0x0
++ .uleb128 0x5
++ .long 0x3787
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x2f
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x377b
++ .uleb128 0x1a
++ .long 0x380f
++ .long .LASF736
++ .value 0x1080
++ .byte 0xa
++ .byte 0x9
++ .uleb128 0xa
++ .long .LASF285
++ .byte 0x1
++ .byte 0x46
++ .long 0x1680
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF737
++ .byte 0x1
++ .byte 0x47
++ .long 0x8791
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF738
++ .byte 0x1
++ .byte 0x48
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xb
++ .string "tv1"
++ .byte 0x1
++ .byte 0x49
++ .long 0x8786
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xb
++ .string "tv2"
++ .byte 0x1
++ .byte 0x4a
++ .long 0x874f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x80c
++ .uleb128 0xb
++ .string "tv3"
++ .byte 0x1
++ .byte 0x4b
++ .long 0x874f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xa0c
++ .uleb128 0xb
++ .string "tv4"
++ .byte 0x1
++ .byte 0x4c
++ .long 0x874f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xc0c
++ .uleb128 0xb
++ .string "tv5"
++ .byte 0x1
++ .byte 0x4d
++ .long 0x874f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xe0c
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x378d
++ .uleb128 0x27
++ .long 0x382e
++ .long .LASF740
++ .byte 0x4
++ .byte 0xa
++ .byte 0xaa
++ .uleb128 0x28
++ .long .LASF741
++ .sleb128 0
++ .uleb128 0x28
++ .long .LASF742
++ .sleb128 1
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x3881
++ .long .LASF743
++ .byte 0x20
++ .byte 0xa
++ .byte 0xa9
++ .uleb128 0xa
++ .long .LASF400
++ .byte 0x4b
++ .byte 0x71
++ .long 0x1764
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF732
++ .byte 0x4b
++ .byte 0x72
++ .long 0x371d
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF733
++ .byte 0x4b
++ .byte 0x73
++ .long 0x3897
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0xa
++ .long .LASF735
++ .byte 0x4b
++ .byte 0x74
++ .long 0x391a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0xa
++ .long .LASF169
++ .byte 0x4b
++ .byte 0x75
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .byte 0x0
++ .uleb128 0x11
++ .long 0x3891
++ .byte 0x1
++ .long 0x3815
++ .uleb128 0x6
++ .long 0x3891
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x382e
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x3881
++ .uleb128 0xf
++ .long 0x391a
++ .long .LASF744
++ .byte 0x28
++ .byte 0x4b
++ .byte 0x18
++ .uleb128 0xa
++ .long .LASF745
++ .byte 0x4b
++ .byte 0x9d
++ .long 0x3957
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF746
++ .byte 0x4b
++ .byte 0x9e
++ .long 0x1cb
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF747
++ .byte 0x4b
++ .byte 0x9f
++ .long 0x17a1
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF310
++ .byte 0x4b
++ .byte 0xa0
++ .long 0x179b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF748
++ .byte 0x4b
++ .byte 0xa1
++ .long 0x371d
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xa
++ .long .LASF749
++ .byte 0x4b
++ .byte 0xa2
++ .long 0x3963
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0xa
++ .long .LASF750
++ .byte 0x4b
++ .byte 0xa3
++ .long 0x3963
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0xa
++ .long .LASF751
++ .byte 0x4b
++ .byte 0xa4
++ .long 0x371d
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x389d
++ .uleb128 0xf
++ .long 0x3957
++ .long .LASF752
++ .byte 0x54
++ .byte 0x4b
++ .byte 0x19
++ .uleb128 0xa
++ .long .LASF285
++ .byte 0x4b
++ .byte 0xc2
++ .long 0x1680
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF753
++ .byte 0x4b
++ .byte 0xc3
++ .long 0x161c
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF754
++ .byte 0x4b
++ .byte 0xc4
++ .long 0x3969
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x3920
++ .uleb128 0x18
++ .byte 0x1
++ .long 0x371d
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x395d
++ .uleb128 0x12
++ .long 0x3979
++ .long 0x389d
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x1
++ .byte 0x0
++ .uleb128 0x2a
++ .long .LASF755
++ .byte 0x0
++ .byte 0x53
++ .byte 0x23
++ .uleb128 0x7
++ .long .LASF756
++ .byte 0x34
++ .byte 0x10
++ .long 0x398c
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x3992
++ .uleb128 0x5
++ .long 0x399e
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x399e
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x39a4
++ .uleb128 0xf
++ .long 0x39db
++ .long .LASF757
++ .byte 0x10
++ .byte 0x34
++ .byte 0xf
++ .uleb128 0xa
++ .long .LASF734
++ .byte 0x34
++ .byte 0x19
++ .long 0x16cf
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF376
++ .byte 0x34
++ .byte 0x1d
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF316
++ .byte 0x34
++ .byte 0x1e
++ .long 0x3981
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x3a04
++ .long .LASF758
++ .byte 0x28
++ .byte 0x34
++ .byte 0x23
++ .uleb128 0xa
++ .long .LASF759
++ .byte 0x34
++ .byte 0x24
++ .long 0x39a4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF760
++ .byte 0x34
++ .byte 0x25
++ .long 0x3728
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x3a49
++ .long .LASF761
++ .byte 0x20
++ .byte 0x35
++ .byte 0x39
++ .uleb128 0xa
++ .long .LASF734
++ .byte 0x35
++ .byte 0x3a
++ .long 0x157
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xb
++ .string "obj"
++ .byte 0x35
++ .byte 0x3b
++ .long 0x157
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xb
++ .string "res"
++ .byte 0x35
++ .byte 0x3c
++ .long 0x14c
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xa
++ .long .LASF762
++ .byte 0x35
++ .byte 0x3d
++ .long 0x14c
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x3a72
++ .long .LASF763
++ .byte 0x8
++ .byte 0x36
++ .byte 0x15
++ .uleb128 0xa
++ .long .LASF764
++ .byte 0x36
++ .byte 0x16
++ .long 0x160b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF765
++ .byte 0x36
++ .byte 0x17
++ .long 0x6c
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .byte 0x0
++ .uleb128 0xc
++ .long 0x3a91
++ .byte 0x4
++ .byte 0x33
++ .byte 0x63
++ .uleb128 0xe
++ .long .LASF237
++ .byte 0x33
++ .byte 0x64
++ .long 0x160b
++ .uleb128 0x26
++ .string "tsk"
++ .byte 0x33
++ .byte 0x65
++ .long 0x15f9
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x3bfd
++ .long .LASF766
++ .byte 0x88
++ .byte 0x33
++ .byte 0x57
++ .uleb128 0xa
++ .long .LASF767
++ .byte 0x33
++ .byte 0x58
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF768
++ .byte 0x33
++ .byte 0x59
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF769
++ .byte 0x33
++ .byte 0x5a
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF770
++ .byte 0x33
++ .byte 0x5b
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xa
++ .long .LASF771
++ .byte 0x33
++ .byte 0x5d
++ .long 0x3cfd
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0xa
++ .long .LASF772
++ .byte 0x33
++ .byte 0x5e
++ .long 0x3dc4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0xa
++ .long .LASF773
++ .byte 0x33
++ .byte 0x5f
++ .long 0x3deb
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0xa
++ .long .LASF774
++ .byte 0x33
++ .byte 0x60
++ .long 0x3e01
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0xa
++ .long .LASF775
++ .byte 0x33
++ .byte 0x61
++ .long 0x3e13
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x24
++ .uleb128 0xa
++ .long .LASF776
++ .byte 0x33
++ .byte 0x66
++ .long 0x3a72
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .uleb128 0xa
++ .long .LASF777
++ .byte 0x33
++ .byte 0x68
++ .long 0x157
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x2c
++ .uleb128 0xa
++ .long .LASF778
++ .byte 0x33
++ .byte 0x69
++ .long 0x1840
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x34
++ .uleb128 0xa
++ .long .LASF779
++ .byte 0x33
++ .byte 0x6a
++ .long 0x1f3
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x48
++ .uleb128 0xa
++ .long .LASF780
++ .byte 0x33
++ .byte 0x6c
++ .long 0x16c4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x50
++ .uleb128 0xa
++ .long .LASF315
++ .byte 0x33
++ .byte 0x6d
++ .long 0x160b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x54
++ .uleb128 0xa
++ .long .LASF781
++ .byte 0x33
++ .byte 0x6f
++ .long 0x53
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x58
++ .uleb128 0xa
++ .long .LASF782
++ .byte 0x33
++ .byte 0x70
++ .long 0x1fe
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x5c
++ .uleb128 0xa
++ .long .LASF783
++ .byte 0x33
++ .byte 0x71
++ .long 0xb5
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x60
++ .uleb128 0xa
++ .long .LASF784
++ .byte 0x33
++ .byte 0x72
++ .long 0x1fe
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x64
++ .uleb128 0xa
++ .long .LASF785
++ .byte 0x33
++ .byte 0x73
++ .long 0x3a49
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x68
++ .uleb128 0xa
++ .long .LASF786
++ .byte 0x33
++ .byte 0x74
++ .long 0x3e19
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x70
++ .uleb128 0xa
++ .long .LASF787
++ .byte 0x33
++ .byte 0x75
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x74
++ .uleb128 0xa
++ .long .LASF788
++ .byte 0x33
++ .byte 0x76
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x78
++ .uleb128 0xa
++ .long .LASF789
++ .byte 0x33
++ .byte 0x78
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x7c
++ .uleb128 0xa
++ .long .LASF790
++ .byte 0x33
++ .byte 0x7f
++ .long 0x3cfd
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x84
++ .byte 0x0
++ .uleb128 0x15
++ .long 0x3cfd
++ .long .LASF106
++ .byte 0x8c
++ .byte 0x18
++ .value 0x22c
++ .uleb128 0x17
++ .string "f_u"
++ .byte 0x1a
++ .value 0x2d0
++ .long 0x776e
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF791
++ .byte 0x1a
++ .value 0x2d1
++ .long 0x628c
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0x16
++ .long .LASF792
++ .byte 0x1a
++ .value 0x2d4
++ .long 0x7538
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0x16
++ .long .LASF793
++ .byte 0x1a
++ .value 0x2d5
++ .long 0x16c4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0x16
++ .long .LASF794
++ .byte 0x1a
++ .value 0x2d6
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0x16
++ .long .LASF795
++ .byte 0x1a
++ .value 0x2d7
++ .long 0x1aa
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0x16
++ .long .LASF796
++ .byte 0x1a
++ .value 0x2d8
++ .long 0x1f3
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0x16
++ .long .LASF797
++ .byte 0x1a
++ .value 0x2d9
++ .long 0x7653
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .uleb128 0x16
++ .long .LASF798
++ .byte 0x1a
++ .value 0x2da
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x40
++ .uleb128 0x16
++ .long .LASF799
++ .byte 0x1a
++ .value 0x2da
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x44
++ .uleb128 0x16
++ .long .LASF800
++ .byte 0x1a
++ .value 0x2db
++ .long 0x76bb
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x48
++ .uleb128 0x16
++ .long .LASF801
++ .byte 0x1a
++ .value 0x2dd
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x74
++ .uleb128 0x16
++ .long .LASF802
++ .byte 0x1a
++ .value 0x2e2
++ .long 0x160b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x78
++ .uleb128 0x16
++ .long .LASF803
++ .byte 0x1a
++ .value 0x2e6
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x7c
++ .uleb128 0x16
++ .long .LASF804
++ .byte 0x1a
++ .value 0x2e7
++ .long 0x1680
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x84
++ .uleb128 0x16
++ .long .LASF805
++ .byte 0x1a
++ .value 0x2e9
++ .long 0x6e96
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x88
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x3bfd
++ .uleb128 0xf
++ .long 0x3dc4
++ .long .LASF806
++ .byte 0xa0
++ .byte 0x33
++ .byte 0xf
++ .uleb128 0xa
++ .long .LASF807
++ .byte 0x33
++ .byte 0xb6
++ .long 0x16c4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF808
++ .byte 0x33
++ .byte 0xb7
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xb
++ .string "mm"
++ .byte 0x33
++ .byte 0xb8
++ .long 0x36ad
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF809
++ .byte 0x33
++ .byte 0xbb
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF307
++ .byte 0x33
++ .byte 0xbc
++ .long 0x3dc4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xa
++ .long .LASF327
++ .byte 0x33
++ .byte 0xbe
++ .long 0x18ef
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0xa
++ .long .LASF810
++ .byte 0x33
++ .byte 0xc0
++ .long 0x1680
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0xa
++ .long .LASF811
++ .byte 0x33
++ .byte 0xc2
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x24
++ .uleb128 0xa
++ .long .LASF812
++ .byte 0x33
++ .byte 0xc3
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .uleb128 0xa
++ .long .LASF178
++ .byte 0x33
++ .byte 0xc4
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x30
++ .uleb128 0xa
++ .long .LASF813
++ .byte 0x33
++ .byte 0xc7
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x38
++ .uleb128 0xa
++ .long .LASF814
++ .byte 0x33
++ .byte 0xc9
++ .long 0x3e1f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x3c
++ .uleb128 0xb
++ .string "wq"
++ .byte 0x33
++ .byte 0xcb
++ .long 0x39db
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x78
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x3d03
++ .uleb128 0x11
++ .long 0x3ddf
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x3ddf
++ .uleb128 0x6
++ .long 0x3de5
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x3a91
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x3a04
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x3dca
++ .uleb128 0x11
++ .long 0x3e01
++ .byte 0x1
++ .long 0x209
++ .uleb128 0x6
++ .long 0x3ddf
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x3df1
++ .uleb128 0x5
++ .long 0x3e13
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x3ddf
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x3e07
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x3a49
++ .uleb128 0xf
++ .long 0x3e9b
++ .long .LASF815
++ .byte 0x3c
++ .byte 0x33
++ .byte 0xa8
++ .uleb128 0xa
++ .long .LASF678
++ .byte 0x33
++ .byte 0xa9
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF816
++ .byte 0x33
++ .byte 0xaa
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF817
++ .byte 0x33
++ .byte 0xac
++ .long 0x3e9b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF818
++ .byte 0x33
++ .byte 0xad
++ .long 0x1680
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF819
++ .byte 0x33
++ .byte 0xae
++ .long 0x5a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xb
++ .string "nr"
++ .byte 0x33
++ .byte 0xb0
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0xa
++ .long .LASF820
++ .byte 0x33
++ .byte 0xb0
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0xa
++ .long .LASF821
++ .byte 0x33
++ .byte 0xb2
++ .long 0x3ea1
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x2d82
++ .uleb128 0x12
++ .long 0x3eb1
++ .long 0x2d82
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x7
++ .byte 0x0
++ .uleb128 0x1e
++ .long .LASF822
++ .byte 0xb
++ .value 0x127
++ .long 0x16cf
++ .uleb128 0xf
++ .long 0x3f9c
++ .long .LASF823
++ .byte 0x54
++ .byte 0x14
++ .byte 0x9e
++ .uleb128 0xa
++ .long .LASF824
++ .byte 0x15
++ .byte 0x3d
++ .long 0x36ad
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF825
++ .byte 0x15
++ .byte 0x3e
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF826
++ .byte 0x15
++ .byte 0x3f
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF827
++ .byte 0x15
++ .byte 0x43
++ .long 0x3f9c
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF828
++ .byte 0x15
++ .byte 0x45
++ .long 0x36e
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xa
++ .long .LASF829
++ .byte 0x15
++ .byte 0x46
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0xa
++ .long .LASF830
++ .byte 0x15
++ .byte 0x48
++ .long 0x1764
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0xa
++ .long .LASF831
++ .byte 0x15
++ .byte 0x58
++ .long 0x857c
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x24
++ .uleb128 0xa
++ .long .LASF832
++ .byte 0x15
++ .byte 0x60
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x34
++ .uleb128 0xa
++ .long .LASF833
++ .byte 0x15
++ .byte 0x61
++ .long 0x85a1
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x3c
++ .uleb128 0xa
++ .long .LASF834
++ .byte 0x15
++ .byte 0x64
++ .long 0x8608
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x40
++ .uleb128 0xa
++ .long .LASF835
++ .byte 0x15
++ .byte 0x67
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x44
++ .uleb128 0xa
++ .long .LASF836
++ .byte 0x15
++ .byte 0x69
++ .long 0x3cfd
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x48
++ .uleb128 0xa
++ .long .LASF837
++ .byte 0x15
++ .byte 0x6a
++ .long 0x160b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4c
++ .uleb128 0xa
++ .long .LASF838
++ .byte 0x15
++ .byte 0x6b
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x50
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x3ebd
++ .uleb128 0x11
++ .long 0x3fc6
++ .byte 0x1
++ .long 0x2f
++ .uleb128 0x6
++ .long 0x3cfd
++ .uleb128 0x6
++ .long 0x2f
++ .uleb128 0x6
++ .long 0x2f
++ .uleb128 0x6
++ .long 0x2f
++ .uleb128 0x6
++ .long 0x2f
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x3fa2
++ .uleb128 0x5
++ .long 0x3fdd
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x36ad
++ .uleb128 0x6
++ .long 0x2f
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x3fcc
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x34c
++ .uleb128 0x12
++ .long 0x3ff9
++ .long 0x2f
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x2b
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x28fe
++ .uleb128 0x1f
++ .long 0x404c
++ .long .LASF839
++ .value 0x510
++ .byte 0xb
++ .value 0x187
++ .uleb128 0x16
++ .long .LASF322
++ .byte 0xb
++ .value 0x188
++ .long 0x16c4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF840
++ .byte 0xb
++ .value 0x189
++ .long 0x404c
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x16
++ .long .LASF841
++ .byte 0xb
++ .value 0x18a
++ .long 0x1680
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x504
++ .uleb128 0x16
++ .long .LASF842
++ .byte 0xb
++ .value 0x18b
++ .long 0x17bc
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x508
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x405c
++ .long 0x2447
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x3f
++ .byte 0x0
++ .uleb128 0x2b
++ .long 0x407e
++ .byte 0x4
++ .byte 0xb
++ .value 0x1c7
++ .uleb128 0x1c
++ .long .LASF843
++ .byte 0xb
++ .value 0x1c8
++ .long 0x1b5
++ .uleb128 0x1c
++ .long .LASF844
++ .byte 0xb
++ .value 0x1c9
++ .long 0x1b5
++ .byte 0x0
++ .uleb128 0x1f
++ .long 0x4314
++ .long .LASF845
++ .value 0x16c
++ .byte 0xb
++ .value 0x19d
++ .uleb128 0x16
++ .long .LASF322
++ .byte 0xb
++ .value 0x19e
++ .long 0x16c4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF846
++ .byte 0xb
++ .value 0x19f
++ .long 0x16c4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x16
++ .long .LASF847
++ .byte 0xb
++ .value 0x1a1
++ .long 0x18ef
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0x16
++ .long .LASF848
++ .byte 0xb
++ .value 0x1a4
++ .long 0x15f9
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0x16
++ .long .LASF849
++ .byte 0xb
++ .value 0x1a7
++ .long 0x272f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0x16
++ .long .LASF850
++ .byte 0xb
++ .value 0x1aa
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .uleb128 0x16
++ .long .LASF851
++ .byte 0xb
++ .value 0x1b0
++ .long 0x15f9
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x2c
++ .uleb128 0x16
++ .long .LASF852
++ .byte 0xb
++ .value 0x1b1
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x30
++ .uleb128 0x16
++ .long .LASF853
++ .byte 0xb
++ .value 0x1b4
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x34
++ .uleb128 0x16
++ .long .LASF53
++ .byte 0xb
++ .value 0x1b5
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x38
++ .uleb128 0x16
++ .long .LASF854
++ .byte 0xb
++ .value 0x1b8
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x3c
++ .uleb128 0x16
++ .long .LASF855
++ .byte 0xb
++ .value 0x1bb
++ .long 0x382e
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x44
++ .uleb128 0x17
++ .string "tsk"
++ .byte 0xb
++ .value 0x1bc
++ .long 0x15f9
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x64
++ .uleb128 0x16
++ .long .LASF856
++ .byte 0xb
++ .value 0x1bd
++ .long 0x371d
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x68
++ .uleb128 0x16
++ .long .LASF222
++ .byte 0xb
++ .value 0x1c0
++ .long 0x19b4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x70
++ .uleb128 0x16
++ .long .LASF223
++ .byte 0xb
++ .value 0x1c0
++ .long 0x19b4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x74
++ .uleb128 0x16
++ .long .LASF857
++ .byte 0xb
++ .value 0x1c1
++ .long 0x19b4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x78
++ .uleb128 0x16
++ .long .LASF858
++ .byte 0xb
++ .value 0x1c1
++ .long 0x19b4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x7c
++ .uleb128 0x16
++ .long .LASF859
++ .byte 0xb
++ .value 0x1c4
++ .long 0x1b5
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x80
++ .uleb128 0x16
++ .long .LASF860
++ .byte 0xb
++ .value 0x1c5
++ .long 0x3070
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x84
++ .uleb128 0x10
++ .long 0x405c
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x88
++ .uleb128 0x16
++ .long .LASF861
++ .byte 0xb
++ .value 0x1cd
++ .long 0x21
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x8c
++ .uleb128 0x17
++ .string "tty"
++ .byte 0xb
++ .value 0x1cf
++ .long 0x431a
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x90
++ .uleb128 0x16
++ .long .LASF215
++ .byte 0xb
++ .value 0x1d7
++ .long 0x19b4
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x94
++ .uleb128 0x16
++ .long .LASF216
++ .byte 0xb
++ .value 0x1d7
++ .long 0x19b4
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x98
++ .uleb128 0x16
++ .long .LASF862
++ .byte 0xb
++ .value 0x1d7
++ .long 0x19b4
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x9c
++ .uleb128 0x16
++ .long .LASF863
++ .byte 0xb
++ .value 0x1d7
++ .long 0x19b4
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xa0
++ .uleb128 0x16
++ .long .LASF217
++ .byte 0xb
++ .value 0x1d8
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xa4
++ .uleb128 0x16
++ .long .LASF218
++ .byte 0xb
++ .value 0x1d8
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xa8
++ .uleb128 0x16
++ .long .LASF864
++ .byte 0xb
++ .value 0x1d8
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xac
++ .uleb128 0x16
++ .long .LASF865
++ .byte 0xb
++ .value 0x1d8
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xb0
++ .uleb128 0x16
++ .long .LASF220
++ .byte 0xb
++ .value 0x1d9
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xb4
++ .uleb128 0x16
++ .long .LASF221
++ .byte 0xb
++ .value 0x1d9
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xb8
++ .uleb128 0x16
++ .long .LASF866
++ .byte 0xb
++ .value 0x1d9
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xbc
++ .uleb128 0x16
++ .long .LASF867
++ .byte 0xb
++ .value 0x1d9
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xc0
++ .uleb128 0x16
++ .long .LASF868
++ .byte 0xb
++ .value 0x1da
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xc4
++ .uleb128 0x16
++ .long .LASF869
++ .byte 0xb
++ .value 0x1da
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xc8
++ .uleb128 0x16
++ .long .LASF870
++ .byte 0xb
++ .value 0x1da
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xcc
++ .uleb128 0x16
++ .long .LASF871
++ .byte 0xb
++ .value 0x1da
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xd0
++ .uleb128 0x16
++ .long .LASF186
++ .byte 0xb
++ .value 0x1e2
++ .long 0x162
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xd4
++ .uleb128 0x16
++ .long .LASF872
++ .byte 0xb
++ .value 0x1ed
++ .long 0x4320
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xdc
++ .uleb128 0x16
++ .long .LASF225
++ .byte 0xb
++ .value 0x1ef
++ .long 0x4330
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x154
++ .byte 0x0
++ .uleb128 0x21
++ .long .LASF873
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x4314
++ .uleb128 0x12
++ .long 0x4330
++ .long 0x36dc
++ .uleb128 0x13
++ .long 0x28
++ .byte 0xe
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x4340
++ .long 0x17bc
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x2
++ .byte 0x0
++ .uleb128 0x15
++ .long 0x439b
++ .long .LASF232
++ .byte 0x8c
++ .byte 0xb
++ .value 0x302
++ .uleb128 0x16
++ .long .LASF874
++ .byte 0xb
++ .value 0x303
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF171
++ .byte 0xb
++ .value 0x304
++ .long 0x16c4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x16
++ .long .LASF875
++ .byte 0xb
++ .value 0x305
++ .long 0x439b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0x16
++ .long .LASF876
++ .byte 0xb
++ .value 0x306
++ .long 0x21
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x88
++ .uleb128 0x16
++ .long .LASF877
++ .byte 0xb
++ .value 0x307
++ .long 0x43ab
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x8c
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x43ab
++ .long 0x1e8
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x1f
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x43ba
++ .long 0x43ba
++ .uleb128 0x23
++ .long 0x28
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x1e8
++ .uleb128 0x2c
++ .long 0x43e6
++ .long .LASF187
++ .byte 0x4
++ .byte 0xb
++ .value 0x32c
++ .uleb128 0x28
++ .long .LASF878
++ .sleb128 0
++ .uleb128 0x28
++ .long .LASF879
++ .sleb128 1
++ .uleb128 0x28
++ .long .LASF880
++ .sleb128 2
++ .uleb128 0x28
++ .long .LASF881
++ .sleb128 3
++ .byte 0x0
++ .uleb128 0x2d
++ .long 0x5a
++ .uleb128 0x21
++ .long .LASF882
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x43eb
++ .uleb128 0x21
++ .long .LASF883
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x43f7
++ .uleb128 0x12
++ .long 0x4413
++ .long 0x3047
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x2
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x21
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x4340
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x2758
++ .uleb128 0x21
++ .long .LASF884
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x4425
++ .uleb128 0x21
++ .long .LASF246
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x4431
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x407e
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x3fff
++ .uleb128 0x11
++ .long 0x4459
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x160b
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x4449
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x23bf
++ .uleb128 0x21
++ .long .LASF259
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x4465
++ .uleb128 0x21
++ .long .LASF885
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x4471
++ .uleb128 0x2e
++ .string "bio"
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x447d
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x4483
++ .uleb128 0x15
++ .long 0x44ab
++ .long .LASF270
++ .byte 0x4
++ .byte 0xb
++ .value 0x245
++ .uleb128 0xa
++ .long .LASF886
++ .byte 0x51
++ .byte 0x56
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x448f
++ .uleb128 0x15
++ .long 0x4521
++ .long .LASF271
++ .byte 0x1c
++ .byte 0xb
++ .value 0x244
++ .uleb128 0xa
++ .long .LASF887
++ .byte 0x3e
++ .byte 0x1c
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF169
++ .byte 0x3e
++ .byte 0x1d
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF888
++ .byte 0x3e
++ .byte 0x1e
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF889
++ .byte 0x3e
++ .byte 0x1f
++ .long 0x8445
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF890
++ .byte 0x3e
++ .byte 0x20
++ .long 0x160b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xa
++ .long .LASF891
++ .byte 0x3e
++ .byte 0x21
++ .long 0x845c
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0xa
++ .long .LASF892
++ .byte 0x3e
++ .byte 0x22
++ .long 0x160b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x44b1
++ .uleb128 0x21
++ .long .LASF272
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x4527
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x267b
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x30b9
++ .uleb128 0x21
++ .long .LASF893
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x453f
++ .uleb128 0x21
++ .long .LASF894
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x454b
++ .uleb128 0xf
++ .long 0x45d4
++ .long .LASF895
++ .byte 0x40
++ .byte 0x66
++ .byte 0x11
++ .uleb128 0xa
++ .long .LASF237
++ .byte 0x66
++ .byte 0x12
++ .long 0x19bf
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF896
++ .byte 0x66
++ .byte 0x13
++ .long 0x19bf
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF897
++ .byte 0x66
++ .byte 0x14
++ .long 0x19bf
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xa
++ .long .LASF898
++ .byte 0x66
++ .byte 0x15
++ .long 0x19bf
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0xb
++ .string "irq"
++ .byte 0x66
++ .byte 0x16
++ .long 0x19bf
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0xa
++ .long .LASF899
++ .byte 0x66
++ .byte 0x17
++ .long 0x19bf
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .uleb128 0xa
++ .long .LASF900
++ .byte 0x66
++ .byte 0x18
++ .long 0x19bf
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x30
++ .uleb128 0xa
++ .long .LASF901
++ .byte 0x66
++ .byte 0x19
++ .long 0x19bf
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x38
++ .byte 0x0
++ .uleb128 0x1a
++ .long 0x45fe
++ .long .LASF902
++ .value 0x3c0
++ .byte 0x66
++ .byte 0x1c
++ .uleb128 0xa
++ .long .LASF903
++ .byte 0x66
++ .byte 0x1d
++ .long 0x4557
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF904
++ .byte 0x66
++ .byte 0x1e
++ .long 0x45fe
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x40
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x460e
++ .long 0x77
++ .uleb128 0x13
++ .long 0x28
++ .byte 0xdf
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x46d1
++ .long .LASF905
++ .byte 0x4c
++ .byte 0x41
++ .byte 0x3e
++ .uleb128 0xb
++ .string "ino"
++ .byte 0x41
++ .byte 0x3f
++ .long 0x189
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xb
++ .string "dev"
++ .byte 0x41
++ .byte 0x40
++ .long 0x19f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF626
++ .byte 0x41
++ .byte 0x41
++ .long 0xea
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF906
++ .byte 0x41
++ .byte 0x42
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xb
++ .string "uid"
++ .byte 0x41
++ .byte 0x43
++ .long 0x1dd
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0xb
++ .string "gid"
++ .byte 0x41
++ .byte 0x44
++ .long 0x1e8
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0xa
++ .long .LASF907
++ .byte 0x41
++ .byte 0x45
++ .long 0x19f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0xa
++ .long .LASF328
++ .byte 0x41
++ .byte 0x46
++ .long 0x1f3
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0xa
++ .long .LASF908
++ .byte 0x41
++ .byte 0x47
++ .long 0x173b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .uleb128 0xa
++ .long .LASF909
++ .byte 0x41
++ .byte 0x48
++ .long 0x173b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x30
++ .uleb128 0xa
++ .long .LASF910
++ .byte 0x41
++ .byte 0x49
++ .long 0x173b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x38
++ .uleb128 0xa
++ .long .LASF911
++ .byte 0x41
++ .byte 0x4a
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x40
++ .uleb128 0xa
++ .long .LASF877
++ .byte 0x41
++ .byte 0x4b
++ .long 0x162
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x44
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x46e1
++ .long 0xbb
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x1f
++ .byte 0x0
++ .uleb128 0x1a
++ .long 0x46fd
++ .long .LASF912
++ .value 0x1000
++ .byte 0x67
++ .byte 0x16
++ .uleb128 0xb
++ .string "gdt"
++ .byte 0x67
++ .byte 0x17
++ .long 0x46fd
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x470d
++ .long 0x942
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x1f
++ .byte 0x0
++ .uleb128 0x7
++ .long .LASF913
++ .byte 0x32
++ .byte 0x13
++ .long 0x141
++ .uleb128 0x7
++ .long .LASF914
++ .byte 0x32
++ .byte 0x14
++ .long 0x12b
++ .uleb128 0x7
++ .long .LASF915
++ .byte 0x32
++ .byte 0x17
++ .long 0x141
++ .uleb128 0xf
++ .long 0x478f
++ .long .LASF916
++ .byte 0x10
++ .byte 0x32
++ .byte 0xab
++ .uleb128 0xa
++ .long .LASF917
++ .byte 0x32
++ .byte 0xac
++ .long 0x4723
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF918
++ .byte 0x32
++ .byte 0xad
++ .long 0x470d
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF919
++ .byte 0x32
++ .byte 0xae
++ .long 0x4723
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF920
++ .byte 0x32
++ .byte 0xaf
++ .long 0x112
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF921
++ .byte 0x32
++ .byte 0xb0
++ .long 0x112
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xd
++ .uleb128 0xa
++ .long .LASF922
++ .byte 0x32
++ .byte 0xb1
++ .long 0x4718
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xe
++ .byte 0x0
++ .uleb128 0x7
++ .long .LASF923
++ .byte 0x32
++ .byte 0xb2
++ .long 0x472e
++ .uleb128 0xf
++ .long 0x47d1
++ .long .LASF924
++ .byte 0xc
++ .byte 0x26
++ .byte 0x17
++ .uleb128 0xa
++ .long .LASF414
++ .byte 0x26
++ .byte 0x18
++ .long 0x7f2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF594
++ .byte 0x26
++ .byte 0x19
++ .long 0x4af9
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF626
++ .byte 0x26
++ .byte 0x1a
++ .long 0x1aa
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .byte 0x0
++ .uleb128 0x1a
++ .long 0x4af9
++ .long .LASF925
++ .value 0x1200
++ .byte 0x28
++ .byte 0x14
++ .uleb128 0xa
++ .long .LASF169
++ .byte 0x29
++ .byte 0xfb
++ .long 0x4f37
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF509
++ .byte 0x29
++ .byte 0xfe
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x16
++ .long .LASF414
++ .byte 0x29
++ .value 0x101
++ .long 0x4dea
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0x16
++ .long .LASF926
++ .byte 0x29
++ .value 0x104
++ .long 0x4ee5
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x48
++ .uleb128 0x16
++ .long .LASF927
++ .byte 0x29
++ .value 0x105
++ .long 0x4fd9
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x90
++ .uleb128 0x16
++ .long .LASF928
++ .byte 0x29
++ .value 0x106
++ .long 0x4e75
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x94
++ .uleb128 0x16
++ .long .LASF929
++ .byte 0x29
++ .value 0x107
++ .long 0x7f2
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x98
++ .uleb128 0x16
++ .long .LASF930
++ .byte 0x29
++ .value 0x108
++ .long 0x7f2
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x9c
++ .uleb128 0x16
++ .long .LASF931
++ .byte 0x29
++ .value 0x109
++ .long 0x4b34
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xa0
++ .uleb128 0x16
++ .long .LASF932
++ .byte 0x29
++ .value 0x10c
++ .long 0x4fdf
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xa4
++ .uleb128 0x16
++ .long .LASF933
++ .byte 0x29
++ .value 0x10d
++ .long 0x77
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xa8
++ .uleb128 0x16
++ .long .LASF934
++ .byte 0x29
++ .value 0x10e
++ .long 0x4fea
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xac
++ .uleb128 0x16
++ .long .LASF935
++ .byte 0x29
++ .value 0x111
++ .long 0x4fdf
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xb0
++ .uleb128 0x16
++ .long .LASF936
++ .byte 0x29
++ .value 0x112
++ .long 0x77
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xb4
++ .uleb128 0x16
++ .long .LASF937
++ .byte 0x29
++ .value 0x113
++ .long 0x4fea
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xb8
++ .uleb128 0x16
++ .long .LASF938
++ .byte 0x29
++ .value 0x116
++ .long 0x4fdf
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xbc
++ .uleb128 0x16
++ .long .LASF939
++ .byte 0x29
++ .value 0x117
++ .long 0x77
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xc0
++ .uleb128 0x16
++ .long .LASF940
++ .byte 0x29
++ .value 0x118
++ .long 0x4fea
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xc4
++ .uleb128 0x16
++ .long .LASF941
++ .byte 0x29
++ .value 0x11a
++ .long 0x4fdf
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xc8
++ .uleb128 0x16
++ .long .LASF942
++ .byte 0x29
++ .value 0x11b
++ .long 0x77
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xcc
++ .uleb128 0x16
++ .long .LASF943
++ .byte 0x29
++ .value 0x11c
++ .long 0x4fea
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xd0
++ .uleb128 0x16
++ .long .LASF944
++ .byte 0x29
++ .value 0x11f
++ .long 0x4fdf
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xd4
++ .uleb128 0x16
++ .long .LASF945
++ .byte 0x29
++ .value 0x120
++ .long 0x77
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xd8
++ .uleb128 0x16
++ .long .LASF946
++ .byte 0x29
++ .value 0x121
++ .long 0x4fea
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xdc
++ .uleb128 0x16
++ .long .LASF947
++ .byte 0x29
++ .value 0x124
++ .long 0x77
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xe0
++ .uleb128 0x16
++ .long .LASF948
++ .byte 0x29
++ .value 0x125
++ .long 0x501e
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xe4
++ .uleb128 0x16
++ .long .LASF949
++ .byte 0x29
++ .value 0x128
++ .long 0x92e
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xe8
++ .uleb128 0x16
++ .long .LASF950
++ .byte 0x29
++ .value 0x12b
++ .long 0x160b
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xec
++ .uleb128 0x16
++ .long .LASF951
++ .byte 0x29
++ .value 0x12e
++ .long 0x160b
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xf0
++ .uleb128 0x16
++ .long .LASF952
++ .byte 0x29
++ .value 0x131
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xf4
++ .uleb128 0x16
++ .long .LASF953
++ .byte 0x29
++ .value 0x131
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xf8
++ .uleb128 0x16
++ .long .LASF954
++ .byte 0x29
++ .value 0x134
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xfc
++ .uleb128 0x16
++ .long .LASF955
++ .byte 0x29
++ .value 0x134
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x100
++ .uleb128 0x16
++ .long .LASF956
++ .byte 0x29
++ .value 0x137
++ .long 0x160b
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x104
++ .uleb128 0x16
++ .long .LASF957
++ .byte 0x29
++ .value 0x13a
++ .long 0x4db9
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x108
++ .uleb128 0x16
++ .long .LASF958
++ .byte 0x29
++ .value 0x13d
++ .long 0x21
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x108
++ .uleb128 0x16
++ .long .LASF959
++ .byte 0x29
++ .value 0x13f
++ .long 0x77
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x10c
++ .uleb128 0x16
++ .long .LASF960
++ .byte 0x29
++ .value 0x143
++ .long 0x17bc
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x110
++ .uleb128 0x16
++ .long .LASF961
++ .byte 0x29
++ .value 0x144
++ .long 0x5029
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x118
++ .uleb128 0x16
++ .long .LASF962
++ .byte 0x29
++ .value 0x145
++ .long 0x77
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x11c
++ .uleb128 0x17
++ .string "ref"
++ .byte 0x29
++ .value 0x14a
++ .long 0x502f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x180
++ .uleb128 0x16
++ .long .LASF963
++ .byte 0x29
++ .value 0x14d
++ .long 0x17bc
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x1180
++ .uleb128 0x16
++ .long .LASF964
++ .byte 0x29
++ .value 0x150
++ .long 0x15f9
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x1188
++ .uleb128 0x16
++ .long .LASF965
++ .byte 0x29
++ .value 0x153
++ .long 0x93a
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x118c
++ .uleb128 0x16
++ .long .LASF966
++ .byte 0x29
++ .value 0x158
++ .long 0x503f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x1190
++ .uleb128 0x16
++ .long .LASF967
++ .byte 0x29
++ .value 0x159
++ .long 0x2f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x1194
++ .uleb128 0x16
++ .long .LASF968
++ .byte 0x29
++ .value 0x15a
++ .long 0xb5
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x1198
++ .uleb128 0x16
++ .long .LASF969
++ .byte 0x29
++ .value 0x15d
++ .long 0x5045
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x119c
++ .uleb128 0x16
++ .long .LASF970
++ .byte 0x29
++ .value 0x161
++ .long 0x160b
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x11a0
++ .uleb128 0x16
++ .long .LASF971
++ .byte 0x29
++ .value 0x165
++ .long 0xb5
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x11a4
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x47d1
++ .uleb128 0xf
++ .long 0x4b28
++ .long .LASF972
++ .byte 0x8
++ .byte 0x26
++ .byte 0x1d
++ .uleb128 0xa
++ .long .LASF414
++ .byte 0x26
++ .byte 0x1e
++ .long 0x7f2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF973
++ .byte 0x26
++ .byte 0x1f
++ .long 0x4b28
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x4b2e
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x479a
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x4b3a
++ .uleb128 0xf
++ .long 0x4bc5
++ .long .LASF974
++ .byte 0x40
++ .byte 0x26
++ .byte 0x12
++ .uleb128 0xa
++ .long .LASF975
++ .byte 0x2a
++ .byte 0x33
++ .long 0x7f2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF414
++ .byte 0x2a
++ .byte 0x34
++ .long 0x4c33
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF449
++ .byte 0x2a
++ .byte 0x35
++ .long 0x22e4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0xa
++ .long .LASF376
++ .byte 0x2a
++ .byte 0x36
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0xa
++ .long .LASF205
++ .byte 0x2a
++ .byte 0x37
++ .long 0x4b34
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x24
++ .uleb128 0xa
++ .long .LASF976
++ .byte 0x2a
++ .byte 0x38
++ .long 0x4c96
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .uleb128 0xa
++ .long .LASF977
++ .byte 0x2a
++ .byte 0x39
++ .long 0x4cd3
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x2c
++ .uleb128 0xa
++ .long .LASF517
++ .byte 0x2a
++ .byte 0x3a
++ .long 0x28ec
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x30
++ .uleb128 0xa
++ .long .LASF978
++ .byte 0x2a
++ .byte 0x3b
++ .long 0x18ef
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x34
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x4bee
++ .long .LASF979
++ .byte 0x8
++ .byte 0x26
++ .byte 0x44
++ .uleb128 0xa
++ .long .LASF980
++ .byte 0x26
++ .byte 0x45
++ .long 0x4c08
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF981
++ .byte 0x26
++ .byte 0x46
++ .long 0x4c2d
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .byte 0x0
++ .uleb128 0x11
++ .long 0x4c08
++ .byte 0x1
++ .long 0x209
++ .uleb128 0x6
++ .long 0x4b34
++ .uleb128 0x6
++ .long 0x4b2e
++ .uleb128 0x6
++ .long 0xb5
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x4bee
++ .uleb128 0x11
++ .long 0x4c2d
++ .byte 0x1
++ .long 0x209
++ .uleb128 0x6
++ .long 0x4b34
++ .uleb128 0x6
++ .long 0x4b2e
++ .uleb128 0x6
++ .long 0x7f2
++ .uleb128 0x6
++ .long 0x1fe
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x4c0e
++ .uleb128 0x12
++ .long 0x4c43
++ .long 0xbb
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x13
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x4c96
++ .long .LASF976
++ .byte 0x54
++ .byte 0x2a
++ .byte 0x38
++ .uleb128 0xa
++ .long .LASF977
++ .byte 0x2a
++ .byte 0x7f
++ .long 0x4cd3
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF509
++ .byte 0x2a
++ .byte 0x80
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF982
++ .byte 0x2a
++ .byte 0x81
++ .long 0x1680
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF382
++ .byte 0x2a
++ .byte 0x82
++ .long 0x4b3a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xa
++ .long .LASF983
++ .byte 0x2a
++ .byte 0x83
++ .long 0x4d93
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x50
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x4c43
++ .uleb128 0xf
++ .long 0x4cd3
++ .long .LASF984
++ .byte 0xc
++ .byte 0x2a
++ .byte 0x39
++ .uleb128 0xa
++ .long .LASF404
++ .byte 0x2a
++ .byte 0x60
++ .long 0x4ce5
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF979
++ .byte 0x2a
++ .byte 0x61
++ .long 0x4ceb
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF985
++ .byte 0x2a
++ .byte 0x62
++ .long 0x4b28
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x4c9c
++ .uleb128 0x5
++ .long 0x4ce5
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x4b34
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x4cd9
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x4bc5
++ .uleb128 0xf
++ .long 0x4d28
++ .long .LASF986
++ .byte 0xc
++ .byte 0x2a
++ .byte 0x77
++ .uleb128 0xa
++ .long .LASF987
++ .byte 0x2a
++ .byte 0x78
++ .long 0x4d3d
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF414
++ .byte 0x2a
++ .byte 0x79
++ .long 0x4d58
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF988
++ .byte 0x2a
++ .byte 0x7b
++ .long 0x4d8d
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .byte 0x0
++ .uleb128 0x11
++ .long 0x4d3d
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x4c96
++ .uleb128 0x6
++ .long 0x4b34
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x4d28
++ .uleb128 0x11
++ .long 0x4d58
++ .byte 0x1
++ .long 0x7f2
++ .uleb128 0x6
++ .long 0x4c96
++ .uleb128 0x6
++ .long 0x4b34
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x4d43
++ .uleb128 0x11
++ .long 0x4d87
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x4c96
++ .uleb128 0x6
++ .long 0x4b34
++ .uleb128 0x6
++ .long 0x4d87
++ .uleb128 0x6
++ .long 0x21
++ .uleb128 0x6
++ .long 0xb5
++ .uleb128 0x6
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0xb5
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x4d5e
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x4cf1
++ .uleb128 0x9
++ .long 0x4dae
++ .byte 0x4
++ .byte 0x30
++ .byte 0x9
++ .uleb128 0xb
++ .string "a"
++ .byte 0x30
++ .byte 0xa
++ .long 0x16cf
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .byte 0x0
++ .uleb128 0x7
++ .long .LASF989
++ .byte 0x30
++ .byte 0xb
++ .long 0x4d99
++ .uleb128 0x2a
++ .long .LASF990
++ .byte 0x0
++ .byte 0x2e
++ .byte 0x6
++ .uleb128 0xf
++ .long 0x4dea
++ .long .LASF991
++ .byte 0x8
++ .byte 0x29
++ .byte 0x22
++ .uleb128 0xa
++ .long .LASF992
++ .byte 0x29
++ .byte 0x23
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF414
++ .byte 0x29
++ .byte 0x24
++ .long 0x7f2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x4dfa
++ .long 0xbb
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x3b
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x4e5b
++ .long .LASF993
++ .byte 0x20
++ .byte 0x29
++ .byte 0x2f
++ .uleb128 0xa
++ .long .LASF994
++ .byte 0x29
++ .byte 0x30
++ .long 0x479a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF980
++ .byte 0x29
++ .byte 0x31
++ .long 0x4e7b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF981
++ .byte 0x29
++ .byte 0x33
++ .long 0x4ea0
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xa
++ .long .LASF995
++ .byte 0x29
++ .byte 0x34
++ .long 0x4eb7
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0xa
++ .long .LASF996
++ .byte 0x29
++ .byte 0x35
++ .long 0x4ecd
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0xa
++ .long .LASF997
++ .byte 0x29
++ .byte 0x36
++ .long 0x4edf
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .byte 0x0
++ .uleb128 0x11
++ .long 0x4e75
++ .byte 0x1
++ .long 0x209
++ .uleb128 0x6
++ .long 0x4e75
++ .uleb128 0x6
++ .long 0x4af9
++ .uleb128 0x6
++ .long 0xb5
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x4dfa
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x4e5b
++ .uleb128 0x11
++ .long 0x4ea0
++ .byte 0x1
++ .long 0x209
++ .uleb128 0x6
++ .long 0x4e75
++ .uleb128 0x6
++ .long 0x4af9
++ .uleb128 0x6
++ .long 0x7f2
++ .uleb128 0x6
++ .long 0x1fe
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x4e81
++ .uleb128 0x5
++ .long 0x4eb7
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x4af9
++ .uleb128 0x6
++ .long 0x7f2
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x4ea6
++ .uleb128 0x11
++ .long 0x4ecd
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x4af9
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x4ebd
++ .uleb128 0x5
++ .long 0x4edf
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x4af9
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x4ed3
++ .uleb128 0xf
++ .long 0x4f1c
++ .long .LASF998
++ .byte 0x48
++ .byte 0x29
++ .byte 0x3a
++ .uleb128 0xa
++ .long .LASF382
++ .byte 0x29
++ .byte 0x3b
++ .long 0x4b3a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xb
++ .string "mod"
++ .byte 0x29
++ .byte 0x3c
++ .long 0x4af9
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x40
++ .uleb128 0xa
++ .long .LASF999
++ .byte 0x29
++ .byte 0x3d
++ .long 0x4b34
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x44
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x4f37
++ .long .LASF1000
++ .byte 0x80
++ .byte 0x29
++ .byte 0xdd
++ .uleb128 0xa
++ .long .LASF322
++ .byte 0x29
++ .byte 0xde
++ .long 0x4dae
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .byte 0x0
++ .uleb128 0x27
++ .long 0x4f56
++ .long .LASF1001
++ .byte 0x4
++ .byte 0x29
++ .byte 0xe2
++ .uleb128 0x28
++ .long .LASF1002
++ .sleb128 0
++ .uleb128 0x28
++ .long .LASF1003
++ .sleb128 1
++ .uleb128 0x28
++ .long .LASF1004
++ .sleb128 2
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x4f8d
++ .long .LASF1005
++ .byte 0x28
++ .byte 0x29
++ .byte 0xea
++ .uleb128 0xa
++ .long .LASF1006
++ .byte 0x29
++ .byte 0xeb
++ .long 0x4dfa
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF414
++ .byte 0x29
++ .byte 0xec
++ .long 0xb5
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0xa
++ .long .LASF1007
++ .byte 0x29
++ .byte 0xed
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x24
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x4fc4
++ .long .LASF1008
++ .byte 0xc
++ .byte 0x29
++ .byte 0xf1
++ .uleb128 0xb
++ .string "grp"
++ .byte 0x29
++ .byte 0xf2
++ .long 0x4aff
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF1009
++ .byte 0x29
++ .byte 0xf3
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF973
++ .byte 0x29
++ .byte 0xf4
++ .long 0x4fc4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x4fd3
++ .long 0x4f56
++ .uleb128 0x23
++ .long 0x28
++ .byte 0x0
++ .uleb128 0x21
++ .long .LASF1010
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x4fd3
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x4fe5
++ .uleb128 0x14
++ .long 0x4dc1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x4ff0
++ .uleb128 0x14
++ .long 0x2f
++ .uleb128 0xf
++ .long 0x501e
++ .long .LASF1011
++ .byte 0x8
++ .byte 0x29
++ .byte 0x45
++ .uleb128 0xa
++ .long .LASF1012
++ .byte 0x2d
++ .byte 0x64
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF1013
++ .byte 0x2d
++ .byte 0x64
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5024
++ .uleb128 0x14
++ .long 0x4ff5
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7ad
++ .uleb128 0x12
++ .long 0x503f
++ .long 0x4f1c
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x1f
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x478f
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x4f8d
++ .uleb128 0x7
++ .long .LASF1014
++ .byte 0x6b
++ .byte 0x13
++ .long 0x21
++ .uleb128 0x7
++ .long .LASF1015
++ .byte 0x6a
++ .byte 0x1d
++ .long 0x5061
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5067
++ .uleb128 0x5
++ .long 0x5078
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x77
++ .uleb128 0x6
++ .long 0x5078
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x507e
++ .uleb128 0xf
++ .long 0x5179
++ .long .LASF1016
++ .byte 0x80
++ .byte 0x6a
++ .byte 0x1b
++ .uleb128 0xa
++ .long .LASF1017
++ .byte 0x6a
++ .byte 0x98
++ .long 0x5056
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF1018
++ .byte 0x6a
++ .byte 0x99
++ .long 0x52ae
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF1019
++ .byte 0x6a
++ .byte 0x9a
++ .long 0x52ba
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF1020
++ .byte 0x6a
++ .byte 0x9b
++ .long 0x160b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF1021
++ .byte 0x6a
++ .byte 0x9c
++ .long 0x160b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xa
++ .long .LASF840
++ .byte 0x6a
++ .byte 0x9d
++ .long 0x533d
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0xa
++ .long .LASF126
++ .byte 0x6a
++ .byte 0x9e
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0xa
++ .long .LASF1022
++ .byte 0x6a
++ .byte 0xa0
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0xa
++ .long .LASF1023
++ .byte 0x6a
++ .byte 0xa1
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0xa
++ .long .LASF1024
++ .byte 0x6a
++ .byte 0xa2
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x24
++ .uleb128 0xa
++ .long .LASF1025
++ .byte 0x6a
++ .byte 0xa3
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .uleb128 0xa
++ .long .LASF285
++ .byte 0x6a
++ .byte 0xa4
++ .long 0x1680
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x2c
++ .uleb128 0xa
++ .long .LASF1026
++ .byte 0x6a
++ .byte 0xa6
++ .long 0x923
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x30
++ .uleb128 0xb
++ .string "cpu"
++ .byte 0x6a
++ .byte 0xa7
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x34
++ .uleb128 0xa
++ .long .LASF1027
++ .byte 0x6a
++ .byte 0xaa
++ .long 0x923
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x38
++ .uleb128 0xb
++ .string "dir"
++ .byte 0x6a
++ .byte 0xad
++ .long 0x5349
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x3c
++ .uleb128 0xa
++ .long .LASF414
++ .byte 0x6a
++ .byte 0xaf
++ .long 0x7f2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x40
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x5266
++ .long .LASF1028
++ .byte 0x40
++ .byte 0x6a
++ .byte 0x62
++ .uleb128 0xa
++ .long .LASF414
++ .byte 0x6a
++ .byte 0x63
++ .long 0x7f2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF1029
++ .byte 0x6a
++ .byte 0x64
++ .long 0x5276
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF1030
++ .byte 0x6a
++ .byte 0x65
++ .long 0x1efe
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF1031
++ .byte 0x6a
++ .byte 0x66
++ .long 0x1efe
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF1032
++ .byte 0x6a
++ .byte 0x67
++ .long 0x1efe
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xb
++ .string "ack"
++ .byte 0x6a
++ .byte 0x69
++ .long 0x1efe
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0xa
++ .long .LASF364
++ .byte 0x6a
++ .byte 0x6a
++ .long 0x1efe
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0xa
++ .long .LASF1033
++ .byte 0x6a
++ .byte 0x6b
++ .long 0x1efe
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0xa
++ .long .LASF1034
++ .byte 0x6a
++ .byte 0x6c
++ .long 0x1efe
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0xb
++ .string "eoi"
++ .byte 0x6a
++ .byte 0x6d
++ .long 0x1efe
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x24
++ .uleb128 0xb
++ .string "end"
++ .byte 0x6a
++ .byte 0x6f
++ .long 0x1efe
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .uleb128 0xa
++ .long .LASF1035
++ .byte 0x6a
++ .byte 0x70
++ .long 0x528d
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x2c
++ .uleb128 0xa
++ .long .LASF1036
++ .byte 0x6a
++ .byte 0x71
++ .long 0x1f14
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x30
++ .uleb128 0xa
++ .long .LASF1037
++ .byte 0x6a
++ .byte 0x72
++ .long 0x52a8
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x34
++ .uleb128 0xa
++ .long .LASF1038
++ .byte 0x6a
++ .byte 0x73
++ .long 0x52a8
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x38
++ .uleb128 0xa
++ .long .LASF1039
++ .byte 0x6a
++ .byte 0x7d
++ .long 0x7f2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x3c
++ .byte 0x0
++ .uleb128 0x11
++ .long 0x5276
++ .byte 0x1
++ .long 0x77
++ .uleb128 0x6
++ .long 0x77
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5266
++ .uleb128 0x5
++ .long 0x528d
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x77
++ .uleb128 0x6
++ .long 0x923
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x527c
++ .uleb128 0x11
++ .long 0x52a8
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x77
++ .uleb128 0x6
++ .long 0x77
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5293
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5179
++ .uleb128 0x21
++ .long .LASF1019
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x52b4
++ .uleb128 0xf
++ .long 0x533d
++ .long .LASF1040
++ .byte 0x20
++ .byte 0x6a
++ .byte 0x9d
++ .uleb128 0xa
++ .long .LASF1041
++ .byte 0x56
++ .byte 0x55
++ .long 0x5c83
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF53
++ .byte 0x56
++ .byte 0x56
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF364
++ .byte 0x56
++ .byte 0x57
++ .long 0x923
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF414
++ .byte 0x56
++ .byte 0x58
++ .long 0x7f2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF1042
++ .byte 0x56
++ .byte 0x59
++ .long 0x160b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xa
++ .long .LASF307
++ .byte 0x56
++ .byte 0x5a
++ .long 0x533d
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0xb
++ .string "irq"
++ .byte 0x56
++ .byte 0x5b
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0xb
++ .string "dir"
++ .byte 0x56
++ .byte 0x5c
++ .long 0x5349
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x52c0
++ .uleb128 0x21
++ .long .LASF1043
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5343
++ .uleb128 0xf
++ .long 0x53be
++ .long .LASF1044
++ .byte 0x24
++ .byte 0x6d
++ .byte 0x11
++ .uleb128 0xa
++ .long .LASF1045
++ .byte 0x6d
++ .byte 0x12
++ .long 0x24b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xb
++ .string "end"
++ .byte 0x6d
++ .byte 0x13
++ .long 0x24b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF414
++ .byte 0x6d
++ .byte 0x14
++ .long 0x7f2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xa
++ .long .LASF53
++ .byte 0x6d
++ .byte 0x15
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0xa
++ .long .LASF205
++ .byte 0x6d
++ .byte 0x16
++ .long 0x53be
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0xa
++ .long .LASF207
++ .byte 0x6d
++ .byte 0x16
++ .long 0x53be
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0xa
++ .long .LASF1046
++ .byte 0x6d
++ .byte 0x16
++ .long 0x53be
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x534f
++ .uleb128 0xf
++ .long 0x5409
++ .long .LASF1047
++ .byte 0x14
++ .byte 0x5e
++ .byte 0x15
++ .uleb128 0xa
++ .long .LASF1048
++ .byte 0x5e
++ .byte 0x16
++ .long 0x1680
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF1049
++ .byte 0x5e
++ .byte 0x17
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xb
++ .string "get"
++ .byte 0x5e
++ .byte 0x18
++ .long 0x5460
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xb
++ .string "put"
++ .byte 0x5e
++ .byte 0x19
++ .long 0x5460
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .byte 0x0
++ .uleb128 0x5
++ .long 0x5415
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x5415
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x541b
++ .uleb128 0xf
++ .long 0x5460
++ .long .LASF1050
++ .byte 0x20
++ .byte 0x5e
++ .byte 0x14
++ .uleb128 0xa
++ .long .LASF1051
++ .byte 0x5e
++ .byte 0x21
++ .long 0x5466
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF1052
++ .byte 0x5e
++ .byte 0x22
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF1053
++ .byte 0x5e
++ .byte 0x23
++ .long 0x22e4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF1054
++ .byte 0x5e
++ .byte 0x24
++ .long 0x28fe
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5409
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x53c4
++ .uleb128 0xf
++ .long 0x5487
++ .long .LASF1055
++ .byte 0x4
++ .byte 0x5f
++ .byte 0x9
++ .uleb128 0xa
++ .long .LASF1056
++ .byte 0x5f
++ .byte 0xb
++ .long 0x160b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x54be
++ .long .LASF1057
++ .byte 0x14
++ .byte 0x5d
++ .byte 0x27
++ .uleb128 0xa
++ .long .LASF994
++ .byte 0x5d
++ .byte 0x28
++ .long 0x479a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF980
++ .byte 0x5d
++ .byte 0x29
++ .long 0x563f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF981
++ .byte 0x5d
++ .byte 0x2a
++ .long 0x565f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .byte 0x0
++ .uleb128 0x11
++ .long 0x54d3
++ .byte 0x1
++ .long 0x209
++ .uleb128 0x6
++ .long 0x54d3
++ .uleb128 0x6
++ .long 0xb5
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x54d9
++ .uleb128 0x1a
++ .long 0x563f
++ .long .LASF1058
++ .value 0x19c
++ .byte 0x5d
++ .byte 0x25
++ .uleb128 0xa
++ .long .LASF414
++ .byte 0x5d
++ .byte 0x35
++ .long 0x7f2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF594
++ .byte 0x5d
++ .byte 0x36
++ .long 0x4af9
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF1059
++ .byte 0x5d
++ .byte 0x38
++ .long 0x4c43
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF1060
++ .byte 0x5d
++ .byte 0x39
++ .long 0x4c43
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x5c
++ .uleb128 0xa
++ .long .LASF1061
++ .byte 0x5d
++ .byte 0x3a
++ .long 0x4c43
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xb0
++ .uleb128 0xa
++ .long .LASF1062
++ .byte 0x5d
++ .byte 0x3b
++ .long 0x53c4
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x104
++ .uleb128 0xa
++ .long .LASF1063
++ .byte 0x5d
++ .byte 0x3c
++ .long 0x53c4
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x118
++ .uleb128 0xa
++ .long .LASF1064
++ .byte 0x5d
++ .byte 0x3e
++ .long 0x2e3c
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x12c
++ .uleb128 0xa
++ .long .LASF1065
++ .byte 0x5d
++ .byte 0x40
++ .long 0x5665
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x140
++ .uleb128 0xa
++ .long .LASF1066
++ .byte 0x5d
++ .byte 0x41
++ .long 0x56a5
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x144
++ .uleb128 0xa
++ .long .LASF1067
++ .byte 0x5d
++ .byte 0x42
++ .long 0x56e2
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x148
++ .uleb128 0xa
++ .long .LASF1068
++ .byte 0x5d
++ .byte 0x43
++ .long 0x5487
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x14c
++ .uleb128 0xa
++ .long .LASF1069
++ .byte 0x5d
++ .byte 0x44
++ .long 0x5487
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x160
++ .uleb128 0xa
++ .long .LASF1070
++ .byte 0x5d
++ .byte 0x46
++ .long 0x57ce
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x174
++ .uleb128 0xa
++ .long .LASF988
++ .byte 0x5d
++ .byte 0x48
++ .long 0x57f8
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x178
++ .uleb128 0xa
++ .long .LASF415
++ .byte 0x5d
++ .byte 0x49
++ .long 0x580e
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x17c
++ .uleb128 0xa
++ .long .LASF1071
++ .byte 0x5d
++ .byte 0x4a
++ .long 0x580e
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x180
++ .uleb128 0xa
++ .long .LASF1030
++ .byte 0x5d
++ .byte 0x4b
++ .long 0x5820
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x184
++ .uleb128 0xa
++ .long .LASF1072
++ .byte 0x5d
++ .byte 0x4d
++ .long 0x583b
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x188
++ .uleb128 0xa
++ .long .LASF1073
++ .byte 0x5d
++ .byte 0x4e
++ .long 0x583b
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x18c
++ .uleb128 0xa
++ .long .LASF1074
++ .byte 0x5d
++ .byte 0x4f
++ .long 0x580e
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x190
++ .uleb128 0xa
++ .long .LASF1075
++ .byte 0x5d
++ .byte 0x50
++ .long 0x580e
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x194
++ .uleb128 0x2f
++ .long .LASF1076
++ .byte 0x5d
++ .byte 0x52
++ .long 0x77
++ .byte 0x4
++ .byte 0x1
++ .byte 0x1f
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x198
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x54be
++ .uleb128 0x11
++ .long 0x565f
++ .byte 0x1
++ .long 0x209
++ .uleb128 0x6
++ .long 0x54d3
++ .uleb128 0x6
++ .long 0x7f2
++ .uleb128 0x6
++ .long 0x1fe
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5645
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5487
++ .uleb128 0xf
++ .long 0x56a5
++ .long .LASF1077
++ .byte 0x14
++ .byte 0x5d
++ .byte 0x41
++ .uleb128 0x16
++ .long .LASF994
++ .byte 0x5d
++ .value 0x160
++ .long 0x479a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF980
++ .byte 0x5d
++ .value 0x162
++ .long 0x5c40
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0x16
++ .long .LASF981
++ .byte 0x5d
++ .value 0x164
++ .long 0x5c65
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x566b
++ .uleb128 0xf
++ .long 0x56e2
++ .long .LASF1078
++ .byte 0x14
++ .byte 0x5d
++ .byte 0x42
++ .uleb128 0xa
++ .long .LASF994
++ .byte 0x5d
++ .byte 0x9b
++ .long 0x479a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF980
++ .byte 0x5d
++ .byte 0x9c
++ .long 0x585c
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF981
++ .byte 0x5d
++ .byte 0x9d
++ .long 0x587c
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x56ab
++ .uleb128 0x11
++ .long 0x56fd
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x1e7d
++ .uleb128 0x6
++ .long 0x56fd
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5703
++ .uleb128 0x15
++ .long 0x57ce
++ .long .LASF1079
++ .byte 0x9c
++ .byte 0x29
++ .value 0x23e
++ .uleb128 0xa
++ .long .LASF414
++ .byte 0x5d
++ .byte 0x7d
++ .long 0x7f2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xb
++ .string "bus"
++ .byte 0x5d
++ .byte 0x7e
++ .long 0x54d3
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF382
++ .byte 0x5d
++ .byte 0x80
++ .long 0x4b3a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF1062
++ .byte 0x5d
++ .byte 0x81
++ .long 0x53c4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x48
++ .uleb128 0xa
++ .long .LASF381
++ .byte 0x5d
++ .byte 0x82
++ .long 0x541b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x5c
++ .uleb128 0xa
++ .long .LASF594
++ .byte 0x5d
++ .byte 0x84
++ .long 0x4af9
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x7c
++ .uleb128 0xa
++ .long .LASF1080
++ .byte 0x5d
++ .byte 0x85
++ .long 0x7f2
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x80
++ .uleb128 0xa
++ .long .LASF926
++ .byte 0x5d
++ .byte 0x86
++ .long 0x5841
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x84
++ .uleb128 0xa
++ .long .LASF415
++ .byte 0x5d
++ .byte 0x88
++ .long 0x580e
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x88
++ .uleb128 0xa
++ .long .LASF1071
++ .byte 0x5d
++ .byte 0x89
++ .long 0x580e
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x8c
++ .uleb128 0xa
++ .long .LASF1030
++ .byte 0x5d
++ .byte 0x8a
++ .long 0x5820
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x90
++ .uleb128 0xa
++ .long .LASF1072
++ .byte 0x5d
++ .byte 0x8b
++ .long 0x583b
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x94
++ .uleb128 0xa
++ .long .LASF1075
++ .byte 0x5d
++ .byte 0x8c
++ .long 0x580e
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x98
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x56e8
++ .uleb128 0x11
++ .long 0x57f8
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x1e7d
++ .uleb128 0x6
++ .long 0x4d87
++ .uleb128 0x6
++ .long 0x21
++ .uleb128 0x6
++ .long 0xb5
++ .uleb128 0x6
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x57d4
++ .uleb128 0x11
++ .long 0x580e
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x1e7d
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x57fe
++ .uleb128 0x5
++ .long 0x5820
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x1e7d
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5814
++ .uleb128 0x11
++ .long 0x583b
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x1e7d
++ .uleb128 0x6
++ .long 0x1c07
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5826
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x4ee5
++ .uleb128 0x11
++ .long 0x585c
++ .byte 0x1
++ .long 0x209
++ .uleb128 0x6
++ .long 0x56fd
++ .uleb128 0x6
++ .long 0xb5
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5847
++ .uleb128 0x11
++ .long 0x587c
++ .byte 0x1
++ .long 0x209
++ .uleb128 0x6
++ .long 0x56fd
++ .uleb128 0x6
++ .long 0x7f2
++ .uleb128 0x6
++ .long 0x1fe
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5862
++ .uleb128 0x1a
++ .long 0x5997
++ .long .LASF401
++ .value 0x104
++ .byte 0x5d
++ .byte 0x23
++ .uleb128 0xa
++ .long .LASF414
++ .byte 0x5d
++ .byte 0xb2
++ .long 0x7f2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF594
++ .byte 0x5d
++ .byte 0xb3
++ .long 0x4af9
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF1059
++ .byte 0x5d
++ .byte 0xb5
++ .long 0x4c43
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF206
++ .byte 0x5d
++ .byte 0xb6
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x5c
++ .uleb128 0xa
++ .long .LASF1061
++ .byte 0x5d
++ .byte 0xb7
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x64
++ .uleb128 0xa
++ .long .LASF1081
++ .byte 0x5d
++ .byte 0xb8
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x6c
++ .uleb128 0xa
++ .long .LASF1082
++ .byte 0x5d
++ .byte 0xb9
++ .long 0x4c43
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x74
++ .uleb128 0xb
++ .string "sem"
++ .byte 0x5d
++ .byte 0xba
++ .long 0x1931
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xc8
++ .uleb128 0xa
++ .long .LASF1083
++ .byte 0x5d
++ .byte 0xbc
++ .long 0x59ce
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xdc
++ .uleb128 0xa
++ .long .LASF1084
++ .byte 0x5d
++ .byte 0xbd
++ .long 0x5a0b
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xe0
++ .uleb128 0xa
++ .long .LASF1066
++ .byte 0x5d
++ .byte 0xbe
++ .long 0x56a5
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xe4
++ .uleb128 0xa
++ .long .LASF988
++ .byte 0x5d
++ .byte 0xc1
++ .long 0x5b0c
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xe8
++ .uleb128 0xa
++ .long .LASF1085
++ .byte 0x5d
++ .byte 0xc3
++ .long 0x57f8
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xec
++ .uleb128 0xa
++ .long .LASF404
++ .byte 0x5d
++ .byte 0xc5
++ .long 0x5b1e
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xf0
++ .uleb128 0xa
++ .long .LASF1086
++ .byte 0x5d
++ .byte 0xc6
++ .long 0x5b36
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xf4
++ .uleb128 0xa
++ .long .LASF1087
++ .byte 0x5d
++ .byte 0xc7
++ .long 0x5820
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xf8
++ .uleb128 0xa
++ .long .LASF1072
++ .byte 0x5d
++ .byte 0xc9
++ .long 0x583b
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xfc
++ .uleb128 0xa
++ .long .LASF1075
++ .byte 0x5d
++ .byte 0xca
++ .long 0x580e
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x100
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x59ce
++ .long .LASF1088
++ .byte 0x14
++ .byte 0x5d
++ .byte 0xbc
++ .uleb128 0xa
++ .long .LASF994
++ .byte 0x5d
++ .byte 0xd2
++ .long 0x479a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF980
++ .byte 0x5d
++ .byte 0xd3
++ .long 0x5b51
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF981
++ .byte 0x5d
++ .byte 0xd4
++ .long 0x5b71
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5997
++ .uleb128 0xf
++ .long 0x5a0b
++ .long .LASF1089
++ .byte 0x14
++ .byte 0x5d
++ .byte 0xbd
++ .uleb128 0xa
++ .long .LASF994
++ .byte 0x5d
++ .byte 0xdf
++ .long 0x479a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF980
++ .byte 0x5d
++ .byte 0xe0
++ .long 0x5b8c
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF981
++ .byte 0x5d
++ .byte 0xe1
++ .long 0x5bac
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x59d4
++ .uleb128 0x11
++ .long 0x5a35
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x5a35
++ .uleb128 0x6
++ .long 0x4d87
++ .uleb128 0x6
++ .long 0x21
++ .uleb128 0x6
++ .long 0xb5
++ .uleb128 0x6
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5a3b
++ .uleb128 0xf
++ .long 0x5b0c
++ .long .LASF1090
++ .byte 0x94
++ .byte 0x5d
++ .byte 0x24
++ .uleb128 0x16
++ .long .LASF400
++ .byte 0x5d
++ .value 0x105
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF382
++ .byte 0x5d
++ .value 0x107
++ .long 0x4b3a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0x16
++ .long .LASF401
++ .byte 0x5d
++ .value 0x108
++ .long 0x5b30
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x48
++ .uleb128 0x16
++ .long .LASF402
++ .byte 0x5d
++ .value 0x109
++ .long 0x19f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4c
++ .uleb128 0x16
++ .long .LASF388
++ .byte 0x5d
++ .value 0x10a
++ .long 0x5a0b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x50
++ .uleb128 0x16
++ .long .LASF387
++ .byte 0x5d
++ .value 0x10b
++ .long 0x59d4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x54
++ .uleb128 0x17
++ .string "dev"
++ .byte 0x5d
++ .value 0x10c
++ .long 0x1e7d
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x68
++ .uleb128 0x16
++ .long .LASF1091
++ .byte 0x5d
++ .value 0x10d
++ .long 0x160b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x6c
++ .uleb128 0x16
++ .long .LASF205
++ .byte 0x5d
++ .value 0x10e
++ .long 0x5a35
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x70
++ .uleb128 0x16
++ .long .LASF403
++ .byte 0x5d
++ .value 0x10f
++ .long 0x5bb2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x74
++ .uleb128 0x16
++ .long .LASF404
++ .byte 0x5d
++ .value 0x111
++ .long 0x5b1e
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x78
++ .uleb128 0x16
++ .long .LASF988
++ .byte 0x5d
++ .value 0x113
++ .long 0x5b0c
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x7c
++ .uleb128 0x16
++ .long .LASF1092
++ .byte 0x5d
++ .value 0x114
++ .long 0x4c33
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x80
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5a11
++ .uleb128 0x5
++ .long 0x5b1e
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x5a35
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5b12
++ .uleb128 0x5
++ .long 0x5b30
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x5b30
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5882
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5b24
++ .uleb128 0x11
++ .long 0x5b51
++ .byte 0x1
++ .long 0x209
++ .uleb128 0x6
++ .long 0x5b30
++ .uleb128 0x6
++ .long 0xb5
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5b3c
++ .uleb128 0x11
++ .long 0x5b71
++ .byte 0x1
++ .long 0x209
++ .uleb128 0x6
++ .long 0x5b30
++ .uleb128 0x6
++ .long 0x7f2
++ .uleb128 0x6
++ .long 0x1fe
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5b57
++ .uleb128 0x11
++ .long 0x5b8c
++ .byte 0x1
++ .long 0x209
++ .uleb128 0x6
++ .long 0x5a35
++ .uleb128 0x6
++ .long 0xb5
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5b77
++ .uleb128 0x11
++ .long 0x5bac
++ .byte 0x1
++ .long 0x209
++ .uleb128 0x6
++ .long 0x5a35
++ .uleb128 0x6
++ .long 0x7f2
++ .uleb128 0x6
++ .long 0x1fe
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5b92
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5bb8
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x4aff
++ .uleb128 0x15
++ .long 0x5c26
++ .long .LASF1093
++ .byte 0x18
++ .byte 0x5d
++ .value 0x154
++ .uleb128 0x16
++ .long .LASF414
++ .byte 0x5d
++ .value 0x155
++ .long 0x7f2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF403
++ .byte 0x5d
++ .value 0x156
++ .long 0x5bb2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x16
++ .long .LASF988
++ .byte 0x5d
++ .value 0x158
++ .long 0x57f8
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0x16
++ .long .LASF404
++ .byte 0x5d
++ .value 0x159
++ .long 0x5820
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0x16
++ .long .LASF1072
++ .byte 0x5d
++ .value 0x15a
++ .long 0x583b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0x16
++ .long .LASF1075
++ .byte 0x5d
++ .value 0x15b
++ .long 0x580e
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .byte 0x0
++ .uleb128 0x11
++ .long 0x5c40
++ .byte 0x1
++ .long 0x209
++ .uleb128 0x6
++ .long 0x1e7d
++ .uleb128 0x6
++ .long 0x56a5
++ .uleb128 0x6
++ .long 0xb5
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5c26
++ .uleb128 0x11
++ .long 0x5c65
++ .byte 0x1
++ .long 0x209
++ .uleb128 0x6
++ .long 0x1e7d
++ .uleb128 0x6
++ .long 0x56a5
++ .uleb128 0x6
++ .long 0x7f2
++ .uleb128 0x6
++ .long 0x1fe
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5c46
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5bbe
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x189
++ .uleb128 0x21
++ .long .LASF1094
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5c77
++ .uleb128 0x7
++ .long .LASF1095
++ .byte 0x56
++ .byte 0x52
++ .long 0x5c8e
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5c94
++ .uleb128 0x11
++ .long 0x5ca9
++ .byte 0x1
++ .long 0x504b
++ .uleb128 0x6
++ .long 0x21
++ .uleb128 0x6
++ .long 0x160b
++ .byte 0x0
++ .uleb128 0x15
++ .long 0x5cd5
++ .long .LASF1096
++ .byte 0x8
++ .byte 0x56
++ .value 0x117
++ .uleb128 0x16
++ .long .LASF840
++ .byte 0x56
++ .value 0x118
++ .long 0x5ce7
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF734
++ .byte 0x56
++ .value 0x119
++ .long 0x160b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .byte 0x0
++ .uleb128 0x5
++ .long 0x5ce1
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x5ce1
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5ca9
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5cd5
++ .uleb128 0xf
++ .long 0x5d24
++ .long .LASF1097
++ .byte 0xc
++ .byte 0x17
++ .byte 0xe
++ .uleb128 0xa
++ .long .LASF1098
++ .byte 0x17
++ .byte 0xf
++ .long 0x5d77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF1099
++ .byte 0x17
++ .byte 0x10
++ .long 0x5d77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF205
++ .byte 0x17
++ .byte 0x11
++ .long 0x5d77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x5d77
++ .long .LASF1100
++ .byte 0x14
++ .byte 0x17
++ .byte 0xf
++ .uleb128 0xa
++ .long .LASF1098
++ .byte 0x17
++ .byte 0x15
++ .long 0x5d77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF1099
++ .byte 0x17
++ .byte 0x16
++ .long 0x5d77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF205
++ .byte 0x17
++ .byte 0x17
++ .long 0x5d77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF1045
++ .byte 0x17
++ .byte 0x18
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF1101
++ .byte 0x17
++ .byte 0x19
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5d24
++ .uleb128 0xf
++ .long 0x5db4
++ .long .LASF1102
++ .byte 0x8
++ .byte 0x17
++ .byte 0x1c
++ .uleb128 0xa
++ .long .LASF1100
++ .byte 0x17
++ .byte 0x1d
++ .long 0x5d77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF1103
++ .byte 0x17
++ .byte 0x1e
++ .long 0x53
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xb
++ .string "raw"
++ .byte 0x17
++ .byte 0x1f
++ .long 0x53
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x6
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x5deb
++ .long .LASF1104
++ .byte 0xc
++ .byte 0x24
++ .byte 0x21
++ .uleb128 0xa
++ .long .LASF1105
++ .byte 0x24
++ .byte 0x22
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xb
++ .string "len"
++ .byte 0x24
++ .byte 0x23
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF414
++ .byte 0x24
++ .byte 0x24
++ .long 0x5deb
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5df1
++ .uleb128 0x14
++ .long 0x112
++ .uleb128 0xc
++ .long 0x5e15
++ .byte 0x8
++ .byte 0x24
++ .byte 0x64
++ .uleb128 0xe
++ .long .LASF1106
++ .byte 0x24
++ .byte 0x65
++ .long 0x17bc
++ .uleb128 0xe
++ .long .LASF1107
++ .byte 0x24
++ .byte 0x66
++ .long 0x2ea8
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x5e84
++ .long .LASF1108
++ .byte 0x1c
++ .byte 0x24
++ .byte 0x6b
++ .uleb128 0xa
++ .long .LASF1109
++ .byte 0x24
++ .byte 0x82
++ .long 0x618b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF522
++ .byte 0x24
++ .byte 0x83
++ .long 0x61ac
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF1110
++ .byte 0x24
++ .byte 0x84
++ .long 0x61cc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF1111
++ .byte 0x24
++ .byte 0x85
++ .long 0x61e2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF1112
++ .byte 0x24
++ .byte 0x86
++ .long 0x61f4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xa
++ .long .LASF1113
++ .byte 0x24
++ .byte 0x87
++ .long 0x620b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0xa
++ .long .LASF1114
++ .byte 0x24
++ .byte 0x88
++ .long 0x622b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5e15
++ .uleb128 0x1a
++ .long 0x60d1
++ .long .LASF1115
++ .value 0x18c
++ .byte 0x24
++ .byte 0x6c
++ .uleb128 0x16
++ .long .LASF1116
++ .byte 0x1a
++ .value 0x38d
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF1117
++ .byte 0x1a
++ .value 0x38e
++ .long 0x19f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0x16
++ .long .LASF1118
++ .byte 0x1a
++ .value 0x38f
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0x16
++ .long .LASF1119
++ .byte 0x1a
++ .value 0x390
++ .long 0x112
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0x16
++ .long .LASF1120
++ .byte 0x1a
++ .value 0x391
++ .long 0x112
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x11
++ .uleb128 0x16
++ .long .LASF1121
++ .byte 0x1a
++ .value 0x392
++ .long 0x162
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0x16
++ .long .LASF1122
++ .byte 0x1a
++ .value 0x393
++ .long 0x7a6e
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0x16
++ .long .LASF1123
++ .byte 0x1a
++ .value 0x394
++ .long 0x7b9f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0x16
++ .long .LASF1124
++ .byte 0x1a
++ .value 0x395
++ .long 0x7baa
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x24
++ .uleb128 0x16
++ .long .LASF1125
++ .byte 0x1a
++ .value 0x396
++ .long 0x7bb0
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .uleb128 0x16
++ .long .LASF1126
++ .byte 0x1a
++ .value 0x397
++ .long 0x7c1e
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x2c
++ .uleb128 0x16
++ .long .LASF1127
++ .byte 0x1a
++ .value 0x398
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x30
++ .uleb128 0x16
++ .long .LASF1128
++ .byte 0x1a
++ .value 0x399
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x34
++ .uleb128 0x16
++ .long .LASF1129
++ .byte 0x1a
++ .value 0x39a
++ .long 0x28ec
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x38
++ .uleb128 0x16
++ .long .LASF1130
++ .byte 0x1a
++ .value 0x39b
++ .long 0x18fa
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x3c
++ .uleb128 0x16
++ .long .LASF1131
++ .byte 0x1a
++ .value 0x39c
++ .long 0x2d94
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4c
++ .uleb128 0x16
++ .long .LASF1132
++ .byte 0x1a
++ .value 0x39d
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x60
++ .uleb128 0x16
++ .long .LASF1133
++ .byte 0x1a
++ .value 0x39e
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x64
++ .uleb128 0x16
++ .long .LASF1134
++ .byte 0x1a
++ .value 0x39f
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x68
++ .uleb128 0x16
++ .long .LASF1135
++ .byte 0x1a
++ .value 0x3a0
++ .long 0x16c4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x6c
++ .uleb128 0x16
++ .long .LASF1136
++ .byte 0x1a
++ .value 0x3a4
++ .long 0x7c2a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x70
++ .uleb128 0x16
++ .long .LASF1137
++ .byte 0x1a
++ .value 0x3a6
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x74
++ .uleb128 0x16
++ .long .LASF1138
++ .byte 0x1a
++ .value 0x3a7
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x7c
++ .uleb128 0x16
++ .long .LASF1139
++ .byte 0x1a
++ .value 0x3a8
++ .long 0x17bc
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x84
++ .uleb128 0x16
++ .long .LASF1140
++ .byte 0x1a
++ .value 0x3a9
++ .long 0x17eb
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x8c
++ .uleb128 0x16
++ .long .LASF1141
++ .byte 0x1a
++ .value 0x3aa
++ .long 0x17bc
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x90
++ .uleb128 0x16
++ .long .LASF1142
++ .byte 0x1a
++ .value 0x3ac
++ .long 0x71e9
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x98
++ .uleb128 0x16
++ .long .LASF1143
++ .byte 0x1a
++ .value 0x3ad
++ .long 0x7c3c
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x9c
++ .uleb128 0x16
++ .long .LASF1144
++ .byte 0x1a
++ .value 0x3ae
++ .long 0x17bc
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xa0
++ .uleb128 0x16
++ .long .LASF1145
++ .byte 0x1a
++ .value 0x3af
++ .long 0x6c96
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0xa8
++ .uleb128 0x16
++ .long .LASF1146
++ .byte 0x1a
++ .value 0x3b1
++ .long 0x21
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x13c
++ .uleb128 0x16
++ .long .LASF1147
++ .byte 0x1a
++ .value 0x3b2
++ .long 0x18ef
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x140
++ .uleb128 0x16
++ .long .LASF1148
++ .byte 0x1a
++ .value 0x3b4
++ .long 0x46d1
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x14c
++ .uleb128 0x16
++ .long .LASF1149
++ .byte 0x1a
++ .value 0x3b6
++ .long 0x160b
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x16c
++ .uleb128 0x16
++ .long .LASF1150
++ .byte 0x1a
++ .value 0x3bc
++ .long 0x2d94
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x170
++ .uleb128 0x16
++ .long .LASF1151
++ .byte 0x1a
++ .value 0x3c0
++ .long 0x173
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x184
++ .uleb128 0x16
++ .long .LASF1152
++ .byte 0x1a
++ .value 0x3c6
++ .long 0xb5
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x188
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5e8a
++ .uleb128 0x21
++ .long .LASF1153
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x60d7
++ .uleb128 0x12
++ .long 0x60f3
++ .long 0x112
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x23
++ .byte 0x0
++ .uleb128 0x11
++ .long 0x6108
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x28ec
++ .uleb128 0x6
++ .long 0x6108
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x610e
++ .uleb128 0xf
++ .long 0x618b
++ .long .LASF1154
++ .byte 0x50
++ .byte 0x26
++ .byte 0x14
++ .uleb128 0xa
++ .long .LASF517
++ .byte 0x27
++ .byte 0x12
++ .long 0x28ec
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xb
++ .string "mnt"
++ .byte 0x27
++ .byte 0x13
++ .long 0x28f8
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF1101
++ .byte 0x27
++ .byte 0x14
++ .long 0x5db4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF53
++ .byte 0x27
++ .byte 0x15
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0xa
++ .long .LASF1155
++ .byte 0x27
++ .byte 0x16
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0xa
++ .long .LASF1022
++ .byte 0x27
++ .byte 0x17
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0xa
++ .long .LASF1156
++ .byte 0x27
++ .byte 0x18
++ .long 0x627c
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0xa
++ .long .LASF1157
++ .byte 0x27
++ .byte 0x1d
++ .long 0x6268
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x44
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x60f3
++ .uleb128 0x11
++ .long 0x61a6
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x28ec
++ .uleb128 0x6
++ .long 0x61a6
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x5db4
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x6191
++ .uleb128 0x11
++ .long 0x61cc
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x28ec
++ .uleb128 0x6
++ .long 0x61a6
++ .uleb128 0x6
++ .long 0x61a6
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x61b2
++ .uleb128 0x11
++ .long 0x61e2
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x28ec
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x61d2
++ .uleb128 0x5
++ .long 0x61f4
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x28ec
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x61e8
++ .uleb128 0x5
++ .long 0x620b
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x28ec
++ .uleb128 0x6
++ .long 0x3381
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x61fa
++ .uleb128 0x11
++ .long 0x622b
++ .byte 0x1
++ .long 0xb5
++ .uleb128 0x6
++ .long 0x28ec
++ .uleb128 0x6
++ .long 0xb5
++ .uleb128 0x6
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x6211
++ .uleb128 0xf
++ .long 0x6268
++ .long .LASF1158
++ .byte 0xc
++ .byte 0x27
++ .byte 0x9
++ .uleb128 0xa
++ .long .LASF53
++ .byte 0x27
++ .byte 0xa
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF1159
++ .byte 0x27
++ .byte 0xb
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF106
++ .byte 0x27
++ .byte 0xc
++ .long 0x3cfd
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .byte 0x0
++ .uleb128 0xc
++ .long 0x627c
++ .byte 0xc
++ .byte 0x27
++ .byte 0x1b
++ .uleb128 0xe
++ .long .LASF1160
++ .byte 0x27
++ .byte 0x1c
++ .long 0x6231
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x628c
++ .long 0xb5
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x8
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x62b5
++ .long .LASF1161
++ .byte 0x8
++ .byte 0x27
++ .byte 0x20
++ .uleb128 0xb
++ .string "mnt"
++ .byte 0x27
++ .byte 0x21
++ .long 0x28f8
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF517
++ .byte 0x27
++ .byte 0x22
++ .long 0x28ec
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x62ec
++ .long .LASF1162
++ .byte 0xc
++ .byte 0x42
++ .byte 0x3a
++ .uleb128 0xa
++ .long .LASF1163
++ .byte 0x42
++ .byte 0x3b
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF1164
++ .byte 0x42
++ .byte 0x3c
++ .long 0x240
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF1165
++ .byte 0x42
++ .byte 0x3d
++ .long 0x62f2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .byte 0x0
++ .uleb128 0x21
++ .long .LASF1166
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x62ec
++ .uleb128 0x15
++ .long 0x638d
++ .long .LASF1167
++ .byte 0x34
++ .byte 0x1a
++ .value 0x154
++ .uleb128 0x16
++ .long .LASF1168
++ .byte 0x1a
++ .value 0x155
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF1169
++ .byte 0x1a
++ .value 0x156
++ .long 0xea
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x16
++ .long .LASF1170
++ .byte 0x1a
++ .value 0x157
++ .long 0x1dd
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0x16
++ .long .LASF1171
++ .byte 0x1a
++ .value 0x158
++ .long 0x1e8
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0x16
++ .long .LASF1172
++ .byte 0x1a
++ .value 0x159
++ .long 0x1f3
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0x16
++ .long .LASF1173
++ .byte 0x1a
++ .value 0x15a
++ .long 0x173b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0x16
++ .long .LASF1174
++ .byte 0x1a
++ .value 0x15b
++ .long 0x173b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0x16
++ .long .LASF1175
++ .byte 0x1a
++ .value 0x15c
++ .long 0x173b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .uleb128 0x16
++ .long .LASF1176
++ .byte 0x1a
++ .value 0x163
++ .long 0x3cfd
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x30
++ .byte 0x0
++ .uleb128 0x7
++ .long .LASF1177
++ .byte 0x3b
++ .byte 0x2c
++ .long 0xc2
++ .uleb128 0x7
++ .long .LASF1178
++ .byte 0x3b
++ .byte 0x2d
++ .long 0x157
++ .uleb128 0xf
++ .long 0x642e
++ .long .LASF1179
++ .byte 0x44
++ .byte 0x3b
++ .byte 0x67
++ .uleb128 0xa
++ .long .LASF1180
++ .byte 0x3b
++ .byte 0x68
++ .long 0x157
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF1181
++ .byte 0x3b
++ .byte 0x69
++ .long 0x157
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF1182
++ .byte 0x3b
++ .byte 0x6a
++ .long 0x157
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xa
++ .long .LASF1183
++ .byte 0x3b
++ .byte 0x6b
++ .long 0x157
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0xa
++ .long .LASF1184
++ .byte 0x3b
++ .byte 0x6c
++ .long 0x157
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0xa
++ .long .LASF1185
++ .byte 0x3b
++ .byte 0x6d
++ .long 0x157
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .uleb128 0xa
++ .long .LASF1186
++ .byte 0x3b
++ .byte 0x6e
++ .long 0x157
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x30
++ .uleb128 0xa
++ .long .LASF1187
++ .byte 0x3b
++ .byte 0x6f
++ .long 0x157
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x38
++ .uleb128 0xa
++ .long .LASF1188
++ .byte 0x3b
++ .byte 0x70
++ .long 0x141
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x40
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x6473
++ .long .LASF1189
++ .byte 0x18
++ .byte 0x3b
++ .byte 0x7c
++ .uleb128 0xa
++ .long .LASF1190
++ .byte 0x3b
++ .byte 0x7d
++ .long 0x157
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF1191
++ .byte 0x3b
++ .byte 0x7e
++ .long 0x157
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF1192
++ .byte 0x3b
++ .byte 0x7f
++ .long 0x141
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xa
++ .long .LASF1193
++ .byte 0x3b
++ .byte 0x80
++ .long 0x141
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x65b4
++ .long .LASF1194
++ .byte 0x70
++ .byte 0x3c
++ .byte 0x32
++ .uleb128 0xa
++ .long .LASF1195
++ .byte 0x3c
++ .byte 0x33
++ .long 0xf5
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF519
++ .byte 0x3c
++ .byte 0x34
++ .long 0xf5
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1
++ .uleb128 0xa
++ .long .LASF1196
++ .byte 0x3c
++ .byte 0x35
++ .long 0x12b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x2
++ .uleb128 0xa
++ .long .LASF1197
++ .byte 0x3c
++ .byte 0x36
++ .long 0x141
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF1198
++ .byte 0x3c
++ .byte 0x37
++ .long 0x157
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF1199
++ .byte 0x3c
++ .byte 0x38
++ .long 0x157
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xa
++ .long .LASF1200
++ .byte 0x3c
++ .byte 0x39
++ .long 0x157
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0xa
++ .long .LASF1201
++ .byte 0x3c
++ .byte 0x3a
++ .long 0x157
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0xa
++ .long .LASF1202
++ .byte 0x3c
++ .byte 0x3b
++ .long 0x157
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .uleb128 0xa
++ .long .LASF1203
++ .byte 0x3c
++ .byte 0x3c
++ .long 0x157
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x30
++ .uleb128 0xa
++ .long .LASF1204
++ .byte 0x3c
++ .byte 0x3d
++ .long 0x136
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x38
++ .uleb128 0xa
++ .long .LASF1205
++ .byte 0x3c
++ .byte 0x3f
++ .long 0x136
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x3c
++ .uleb128 0xa
++ .long .LASF1206
++ .byte 0x3c
++ .byte 0x40
++ .long 0x12b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x40
++ .uleb128 0xa
++ .long .LASF1207
++ .byte 0x3c
++ .byte 0x41
++ .long 0x12b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x42
++ .uleb128 0xa
++ .long .LASF1208
++ .byte 0x3c
++ .byte 0x42
++ .long 0x136
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x44
++ .uleb128 0xa
++ .long .LASF1209
++ .byte 0x3c
++ .byte 0x43
++ .long 0x157
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x48
++ .uleb128 0xa
++ .long .LASF1210
++ .byte 0x3c
++ .byte 0x44
++ .long 0x157
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x50
++ .uleb128 0xa
++ .long .LASF1211
++ .byte 0x3c
++ .byte 0x45
++ .long 0x157
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x58
++ .uleb128 0xa
++ .long .LASF1212
++ .byte 0x3c
++ .byte 0x46
++ .long 0x136
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x60
++ .uleb128 0xa
++ .long .LASF1213
++ .byte 0x3c
++ .byte 0x47
++ .long 0x12b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x64
++ .uleb128 0xa
++ .long .LASF1214
++ .byte 0x3c
++ .byte 0x48
++ .long 0x119
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x66
++ .uleb128 0xa
++ .long .LASF1215
++ .byte 0x3c
++ .byte 0x49
++ .long 0x8ec
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x68
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x65eb
++ .long .LASF1216
++ .byte 0x14
++ .byte 0x3c
++ .byte 0x89
++ .uleb128 0xa
++ .long .LASF1217
++ .byte 0x3c
++ .byte 0x8a
++ .long 0x157
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF1218
++ .byte 0x3c
++ .byte 0x8b
++ .long 0x157
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF1219
++ .byte 0x3c
++ .byte 0x8c
++ .long 0x141
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .byte 0x0
++ .uleb128 0x7
++ .long .LASF1220
++ .byte 0x3c
++ .byte 0x8d
++ .long 0x65b4
++ .uleb128 0xf
++ .long 0x669d
++ .long .LASF1221
++ .byte 0x44
++ .byte 0x3c
++ .byte 0x8f
++ .uleb128 0xa
++ .long .LASF1222
++ .byte 0x3c
++ .byte 0x90
++ .long 0xf5
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF1223
++ .byte 0x3c
++ .byte 0x91
++ .long 0x12b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x2
++ .uleb128 0xa
++ .long .LASF1224
++ .byte 0x3c
++ .byte 0x92
++ .long 0xf5
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF1225
++ .byte 0x3c
++ .byte 0x93
++ .long 0x65eb
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF1226
++ .byte 0x3c
++ .byte 0x94
++ .long 0x65eb
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0xa
++ .long .LASF1227
++ .byte 0x3c
++ .byte 0x95
++ .long 0x141
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x30
++ .uleb128 0xa
++ .long .LASF1228
++ .byte 0x3c
++ .byte 0x96
++ .long 0x136
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x34
++ .uleb128 0xa
++ .long .LASF1229
++ .byte 0x3c
++ .byte 0x97
++ .long 0x136
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x38
++ .uleb128 0xa
++ .long .LASF1230
++ .byte 0x3c
++ .byte 0x98
++ .long 0x136
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x3c
++ .uleb128 0xa
++ .long .LASF1231
++ .byte 0x3c
++ .byte 0x99
++ .long 0x12b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x40
++ .uleb128 0xa
++ .long .LASF1232
++ .byte 0x3c
++ .byte 0x9a
++ .long 0x12b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x42
++ .byte 0x0
++ .uleb128 0x2a
++ .long .LASF1233
++ .byte 0x0
++ .byte 0x3f
++ .byte 0x15
++ .uleb128 0xf
++ .long 0x66dc
++ .long .LASF1234
++ .byte 0xc
++ .byte 0x40
++ .byte 0x14
++ .uleb128 0xa
++ .long .LASF1235
++ .byte 0x40
++ .byte 0x15
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF1236
++ .byte 0x40
++ .byte 0x16
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF1237
++ .byte 0x40
++ .byte 0x17
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x6759
++ .long .LASF1238
++ .byte 0x24
++ .byte 0x3b
++ .byte 0x98
++ .uleb128 0xa
++ .long .LASF1180
++ .byte 0x3b
++ .byte 0x99
++ .long 0x141
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF1181
++ .byte 0x3b
++ .byte 0x9a
++ .long 0x141
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF1182
++ .byte 0x3b
++ .byte 0x9b
++ .long 0x6398
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF1183
++ .byte 0x3b
++ .byte 0x9c
++ .long 0x141
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xa
++ .long .LASF1184
++ .byte 0x3b
++ .byte 0x9d
++ .long 0x141
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0xa
++ .long .LASF1185
++ .byte 0x3b
++ .byte 0x9e
++ .long 0x141
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0xa
++ .long .LASF1186
++ .byte 0x3b
++ .byte 0x9f
++ .long 0x214
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0xa
++ .long .LASF1187
++ .byte 0x3b
++ .byte 0xa0
++ .long 0x214
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .byte 0x0
++ .uleb128 0xc
++ .long 0x6778
++ .byte 0xc
++ .byte 0x3b
++ .byte 0xae
++ .uleb128 0xe
++ .long .LASF1239
++ .byte 0x3b
++ .byte 0xaf
++ .long 0x669d
++ .uleb128 0xe
++ .long .LASF1240
++ .byte 0x3b
++ .byte 0xb0
++ .long 0x66a5
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x67d7
++ .long .LASF1241
++ .byte 0x24
++ .byte 0x3b
++ .byte 0xa8
++ .uleb128 0xa
++ .long .LASF1242
++ .byte 0x3b
++ .byte 0xa9
++ .long 0x6820
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF1243
++ .byte 0x3b
++ .byte 0xaa
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF1192
++ .byte 0x3b
++ .byte 0xab
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF1190
++ .byte 0x3b
++ .byte 0xac
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xa
++ .long .LASF1191
++ .byte 0x3b
++ .byte 0xad
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0xb
++ .string "u"
++ .byte 0x3b
++ .byte 0xb1
++ .long 0x6759
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x6820
++ .long .LASF1244
++ .byte 0x10
++ .byte 0x3b
++ .byte 0xa6
++ .uleb128 0x16
++ .long .LASF1245
++ .byte 0x3b
++ .value 0x116
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF1246
++ .byte 0x3b
++ .value 0x117
++ .long 0x6c90
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x16
++ .long .LASF1247
++ .byte 0x3b
++ .value 0x118
++ .long 0x4af9
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0x16
++ .long .LASF1248
++ .byte 0x3b
++ .value 0x119
++ .long 0x6820
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x67d7
++ .uleb128 0xf
++ .long 0x68e9
++ .long .LASF1249
++ .byte 0x80
++ .byte 0x3b
++ .byte 0xd6
++ .uleb128 0xa
++ .long .LASF1250
++ .byte 0x3b
++ .byte 0xd7
++ .long 0x1808
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF1251
++ .byte 0x3b
++ .byte 0xd8
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF1252
++ .byte 0x3b
++ .byte 0xd9
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xa
++ .long .LASF1253
++ .byte 0x3b
++ .byte 0xda
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0xa
++ .long .LASF1254
++ .byte 0x3b
++ .byte 0xdb
++ .long 0x2d94
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0xa
++ .long .LASF1255
++ .byte 0x3b
++ .byte 0xdc
++ .long 0x16c4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x34
++ .uleb128 0xa
++ .long .LASF1256
++ .byte 0x3b
++ .byte 0xdd
++ .long 0x18ef
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x38
++ .uleb128 0xa
++ .long .LASF1257
++ .byte 0x3b
++ .byte 0xde
++ .long 0x60d1
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x44
++ .uleb128 0xa
++ .long .LASF1258
++ .byte 0x3b
++ .byte 0xdf
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x48
++ .uleb128 0xa
++ .long .LASF1259
++ .byte 0x3b
++ .byte 0xe0
++ .long 0x1f3
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4c
++ .uleb128 0xa
++ .long .LASF1260
++ .byte 0x3b
++ .byte 0xe1
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x54
++ .uleb128 0xa
++ .long .LASF1261
++ .byte 0x3b
++ .byte 0xe2
++ .long 0x124
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x58
++ .uleb128 0xa
++ .long .LASF1262
++ .byte 0x3b
++ .byte 0xe3
++ .long 0x66dc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x5c
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x6958
++ .long .LASF1263
++ .byte 0x1c
++ .byte 0x3b
++ .byte 0xec
++ .uleb128 0xa
++ .long .LASF1264
++ .byte 0x3b
++ .byte 0xed
++ .long 0x696d
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF1265
++ .byte 0x3b
++ .byte 0xee
++ .long 0x696d
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF1266
++ .byte 0x3b
++ .byte 0xef
++ .long 0x696d
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF1267
++ .byte 0x3b
++ .byte 0xf0
++ .long 0x696d
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF1268
++ .byte 0x3b
++ .byte 0xf1
++ .long 0x6989
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xa
++ .long .LASF1269
++ .byte 0x3b
++ .byte 0xf2
++ .long 0x6989
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0xa
++ .long .LASF1270
++ .byte 0x3b
++ .byte 0xf3
++ .long 0x6989
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .byte 0x0
++ .uleb128 0x11
++ .long 0x696d
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x60d1
++ .uleb128 0x6
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x6958
++ .uleb128 0x11
++ .long 0x6983
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x6983
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x6826
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x6973
++ .uleb128 0xf
++ .long 0x6a48
++ .long .LASF1271
++ .byte 0x30
++ .byte 0x3b
++ .byte 0xf7
++ .uleb128 0xa
++ .long .LASF1272
++ .byte 0x3b
++ .byte 0xf8
++ .long 0x6a5d
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF1273
++ .byte 0x3b
++ .byte 0xf9
++ .long 0x6a73
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF1274
++ .byte 0x3b
++ .byte 0xfa
++ .long 0x6a93
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF1275
++ .byte 0x3b
++ .byte 0xfb
++ .long 0x6ab9
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF1276
++ .byte 0x3b
++ .byte 0xfc
++ .long 0x6ad4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xa
++ .long .LASF1277
++ .byte 0x3b
++ .byte 0xfd
++ .long 0x6ab9
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0xa
++ .long .LASF1278
++ .byte 0x3b
++ .byte 0xfe
++ .long 0x6af5
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0xa
++ .long .LASF1279
++ .byte 0x3b
++ .byte 0xff
++ .long 0x6989
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0x16
++ .long .LASF1280
++ .byte 0x3b
++ .value 0x100
++ .long 0x6989
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0x16
++ .long .LASF1281
++ .byte 0x3b
++ .value 0x101
++ .long 0x6989
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x24
++ .uleb128 0x16
++ .long .LASF1282
++ .byte 0x3b
++ .value 0x102
++ .long 0x6989
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .uleb128 0x16
++ .long .LASF1283
++ .byte 0x3b
++ .value 0x103
++ .long 0x696d
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x2c
++ .byte 0x0
++ .uleb128 0x11
++ .long 0x6a5d
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x3381
++ .uleb128 0x6
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x6a48
++ .uleb128 0x11
++ .long 0x6a73
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x3381
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x6a63
++ .uleb128 0x11
++ .long 0x6a93
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x3381
++ .uleb128 0x6
++ .long 0x6398
++ .uleb128 0x6
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x6a79
++ .uleb128 0x11
++ .long 0x6aae
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x6aae
++ .uleb128 0x6
++ .long 0x2f
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x6ab4
++ .uleb128 0x14
++ .long 0x30f0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x6a99
++ .uleb128 0x11
++ .long 0x6ad4
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x3381
++ .uleb128 0x6
++ .long 0x6398
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x6abf
++ .uleb128 0x11
++ .long 0x6aef
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x3381
++ .uleb128 0x6
++ .long 0x6aef
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x62f8
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x6ada
++ .uleb128 0x15
++ .long 0x6bae
++ .long .LASF1284
++ .byte 0x2c
++ .byte 0x3b
++ .value 0x107
++ .uleb128 0x16
++ .long .LASF1285
++ .byte 0x3b
++ .value 0x108
++ .long 0x6bcd
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF1286
++ .byte 0x3b
++ .value 0x109
++ .long 0x696d
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x16
++ .long .LASF1287
++ .byte 0x3b
++ .value 0x10a
++ .long 0x696d
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0x16
++ .long .LASF1288
++ .byte 0x3b
++ .value 0x10b
++ .long 0x6bf3
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0x16
++ .long .LASF1289
++ .byte 0x3b
++ .value 0x10c
++ .long 0x6bf3
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0x16
++ .long .LASF1290
++ .byte 0x3b
++ .value 0x10d
++ .long 0x6c1e
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0x16
++ .long .LASF1291
++ .byte 0x3b
++ .value 0x10e
++ .long 0x6c1e
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0x16
++ .long .LASF1292
++ .byte 0x3b
++ .value 0x10f
++ .long 0x6c3f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0x16
++ .long .LASF1293
++ .byte 0x3b
++ .value 0x110
++ .long 0x6c5f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0x16
++ .long .LASF1294
++ .byte 0x3b
++ .value 0x111
++ .long 0x6c8a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x24
++ .uleb128 0x16
++ .long .LASF1295
++ .byte 0x3b
++ .value 0x112
++ .long 0x6c8a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .byte 0x0
++ .uleb128 0x11
++ .long 0x6bcd
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x60d1
++ .uleb128 0x6
++ .long 0x21
++ .uleb128 0x6
++ .long 0x21
++ .uleb128 0x6
++ .long 0xb5
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x6bae
++ .uleb128 0x11
++ .long 0x6bed
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x60d1
++ .uleb128 0x6
++ .long 0x21
++ .uleb128 0x6
++ .long 0x6bed
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x642e
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x6bd3
++ .uleb128 0x11
++ .long 0x6c18
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x60d1
++ .uleb128 0x6
++ .long 0x21
++ .uleb128 0x6
++ .long 0x638d
++ .uleb128 0x6
++ .long 0x6c18
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x63a3
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x6bf9
++ .uleb128 0x11
++ .long 0x6c39
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x60d1
++ .uleb128 0x6
++ .long 0x6c39
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x65f6
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x6c24
++ .uleb128 0x11
++ .long 0x6c5f
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x60d1
++ .uleb128 0x6
++ .long 0x77
++ .uleb128 0x6
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x6c45
++ .uleb128 0x11
++ .long 0x6c84
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x60d1
++ .uleb128 0x6
++ .long 0x21
++ .uleb128 0x6
++ .long 0x638d
++ .uleb128 0x6
++ .long 0x6c84
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x6473
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x6c65
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x68e9
++ .uleb128 0x15
++ .long 0x6d0e
++ .long .LASF1296
++ .byte 0x94
++ .byte 0x3b
++ .value 0x11f
++ .uleb128 0x16
++ .long .LASF53
++ .byte 0x3b
++ .value 0x120
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF1297
++ .byte 0x3b
++ .value 0x121
++ .long 0x2d94
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x16
++ .long .LASF1298
++ .byte 0x3b
++ .value 0x122
++ .long 0x2d94
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0x16
++ .long .LASF1299
++ .byte 0x3b
++ .value 0x123
++ .long 0x18fa
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x2c
++ .uleb128 0x16
++ .long .LASF245
++ .byte 0x3b
++ .value 0x124
++ .long 0x6d0e
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x3c
++ .uleb128 0x16
++ .long .LASF82
++ .byte 0x3b
++ .value 0x125
++ .long 0x6d1e
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x44
++ .uleb128 0x17
++ .string "ops"
++ .byte 0x3b
++ .value 0x126
++ .long 0x6d2e
++ .byte 0x3
++ .byte 0x23
++ .uleb128 0x8c
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x6d1e
++ .long 0x3381
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x1
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x6d2e
++ .long 0x6778
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x1
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x6d3e
++ .long 0x6c90
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x1
++ .byte 0x0
++ .uleb128 0x15
++ .long 0x6e2d
++ .long .LASF1300
++ .byte 0x3c
++ .byte 0x1a
++ .value 0x191
++ .uleb128 0x16
++ .long .LASF1301
++ .byte 0x1a
++ .value 0x192
++ .long 0x6e4e
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF1302
++ .byte 0x1a
++ .value 0x193
++ .long 0x6e69
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x16
++ .long .LASF1303
++ .byte 0x1a
++ .value 0x194
++ .long 0x6e7b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0x16
++ .long .LASF1304
++ .byte 0x1a
++ .value 0x197
++ .long 0x6f9a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0x16
++ .long .LASF1305
++ .byte 0x1a
++ .value 0x19a
++ .long 0x6fb0
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0x16
++ .long .LASF1306
++ .byte 0x1a
++ .value 0x19d
++ .long 0x6fd5
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0x16
++ .long .LASF1307
++ .byte 0x1a
++ .value 0x1a3
++ .long 0x6ffa
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0x16
++ .long .LASF1308
++ .byte 0x1a
++ .value 0x1a4
++ .long 0x6ffa
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0x16
++ .long .LASF1309
++ .byte 0x1a
++ .value 0x1a6
++ .long 0x7015
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0x16
++ .long .LASF1310
++ .byte 0x1a
++ .value 0x1a7
++ .long 0x702c
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x24
++ .uleb128 0x16
++ .long .LASF1311
++ .byte 0x1a
++ .value 0x1a8
++ .long 0x7047
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .uleb128 0x16
++ .long .LASF1312
++ .byte 0x1a
++ .value 0x1aa
++ .long 0x707c
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x2c
++ .uleb128 0x16
++ .long .LASF1313
++ .byte 0x1a
++ .value 0x1ac
++ .long 0x709c
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x30
++ .uleb128 0x16
++ .long .LASF1314
++ .byte 0x1a
++ .value 0x1af
++ .long 0x70bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x34
++ .uleb128 0x16
++ .long .LASF1315
++ .byte 0x1a
++ .value 0x1b0
++ .long 0x6fb0
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x38
++ .byte 0x0
++ .uleb128 0x11
++ .long 0x6e42
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x2d82
++ .uleb128 0x6
++ .long 0x6e42
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x6e48
++ .uleb128 0x21
++ .long .LASF1316
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x6e2d
++ .uleb128 0x11
++ .long 0x6e69
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x3cfd
++ .uleb128 0x6
++ .long 0x2d82
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x6e54
++ .uleb128 0x5
++ .long 0x6e7b
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x2d82
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x6e6f
++ .uleb128 0x11
++ .long 0x6e96
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x6e96
++ .uleb128 0x6
++ .long 0x6e42
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x6e9c
++ .uleb128 0x15
++ .long 0x6f9a
++ .long .LASF1317
++ .byte 0x54
++ .byte 0x1a
++ .value 0x18e
++ .uleb128 0x16
++ .long .LASF1318
++ .byte 0x1a
++ .value 0x1b5
++ .long 0x3381
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF1319
++ .byte 0x1a
++ .value 0x1b6
++ .long 0x62b5
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x16
++ .long .LASF1320
++ .byte 0x1a
++ .value 0x1b7
++ .long 0x16a2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0x16
++ .long .LASF1321
++ .byte 0x1a
++ .value 0x1b8
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0x16
++ .long .LASF1322
++ .byte 0x1a
++ .value 0x1b9
++ .long 0x5d7d
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0x16
++ .long .LASF1323
++ .byte 0x1a
++ .value 0x1ba
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0x16
++ .long .LASF1324
++ .byte 0x1a
++ .value 0x1bb
++ .long 0x1680
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .uleb128 0x16
++ .long .LASF1325
++ .byte 0x1a
++ .value 0x1bc
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x2c
++ .uleb128 0x16
++ .long .LASF1326
++ .byte 0x1a
++ .value 0x1bd
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x30
++ .uleb128 0x16
++ .long .LASF1327
++ .byte 0x1a
++ .value 0x1be
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x34
++ .uleb128 0x16
++ .long .LASF1328
++ .byte 0x1a
++ .value 0x1bf
++ .long 0x70c2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x38
++ .uleb128 0x16
++ .long .LASF53
++ .byte 0x1a
++ .value 0x1c0
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x3c
++ .uleb128 0x16
++ .long .LASF271
++ .byte 0x1a
++ .value 0x1c1
++ .long 0x4521
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x40
++ .uleb128 0x16
++ .long .LASF1329
++ .byte 0x1a
++ .value 0x1c2
++ .long 0x1680
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x44
++ .uleb128 0x16
++ .long .LASF1330
++ .byte 0x1a
++ .value 0x1c3
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x48
++ .uleb128 0x16
++ .long .LASF1331
++ .byte 0x1a
++ .value 0x1c4
++ .long 0x6e96
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x50
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x6e81
++ .uleb128 0x11
++ .long 0x6fb0
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x2d82
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x6fa0
++ .uleb128 0x11
++ .long 0x6fd5
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x3cfd
++ .uleb128 0x6
++ .long 0x6e96
++ .uleb128 0x6
++ .long 0x17e5
++ .uleb128 0x6
++ .long 0x77
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x6fb6
++ .uleb128 0x11
++ .long 0x6ffa
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x3cfd
++ .uleb128 0x6
++ .long 0x2d82
++ .uleb128 0x6
++ .long 0x77
++ .uleb128 0x6
++ .long 0x77
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x6fdb
++ .uleb128 0x11
++ .long 0x7015
++ .byte 0x1
++ .long 0x22a
++ .uleb128 0x6
++ .long 0x6e96
++ .uleb128 0x6
++ .long 0x22a
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7000
++ .uleb128 0x5
++ .long 0x702c
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x2d82
++ .uleb128 0x6
++ .long 0x2f
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x701b
++ .uleb128 0x11
++ .long 0x7047
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x2d82
++ .uleb128 0x6
++ .long 0x240
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7032
++ .uleb128 0x11
++ .long 0x7071
++ .byte 0x1
++ .long 0x209
++ .uleb128 0x6
++ .long 0x21
++ .uleb128 0x6
++ .long 0x3ddf
++ .uleb128 0x6
++ .long 0x7071
++ .uleb128 0x6
++ .long 0x1f3
++ .uleb128 0x6
++ .long 0x2f
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7077
++ .uleb128 0x14
++ .long 0x3a49
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x704d
++ .uleb128 0x11
++ .long 0x709c
++ .byte 0x1
++ .long 0x2d82
++ .uleb128 0x6
++ .long 0x6e96
++ .uleb128 0x6
++ .long 0x22a
++ .uleb128 0x6
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7082
++ .uleb128 0x11
++ .long 0x70bc
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x6e96
++ .uleb128 0x6
++ .long 0x2d82
++ .uleb128 0x6
++ .long 0x2d82
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x70a2
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x70c8
++ .uleb128 0x14
++ .long 0x6d3e
++ .uleb128 0x15
++ .long 0x71e9
++ .long .LASF1332
++ .byte 0x74
++ .byte 0x1a
++ .value 0x1cc
++ .uleb128 0x16
++ .long .LASF1333
++ .byte 0x1a
++ .value 0x1cd
++ .long 0x19f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF1334
++ .byte 0x1a
++ .value 0x1ce
++ .long 0x3381
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x16
++ .long .LASF1335
++ .byte 0x1a
++ .value 0x1cf
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0x16
++ .long .LASF1336
++ .byte 0x1a
++ .value 0x1d0
++ .long 0x2d94
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0x16
++ .long .LASF1337
++ .byte 0x1a
++ .value 0x1d1
++ .long 0x1931
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0x16
++ .long .LASF1338
++ .byte 0x1a
++ .value 0x1d2
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x34
++ .uleb128 0x16
++ .long .LASF1339
++ .byte 0x1a
++ .value 0x1d3
++ .long 0x160b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x3c
++ .uleb128 0x16
++ .long .LASF1340
++ .byte 0x1a
++ .value 0x1d4
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x40
++ .uleb128 0x16
++ .long .LASF1341
++ .byte 0x1a
++ .value 0x1d6
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x44
++ .uleb128 0x16
++ .long .LASF1342
++ .byte 0x1a
++ .value 0x1d8
++ .long 0x71e9
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4c
++ .uleb128 0x16
++ .long .LASF1343
++ .byte 0x1a
++ .value 0x1d9
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x50
++ .uleb128 0x16
++ .long .LASF1344
++ .byte 0x1a
++ .value 0x1da
++ .long 0x71f5
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x54
++ .uleb128 0x16
++ .long .LASF1345
++ .byte 0x1a
++ .value 0x1dc
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x58
++ .uleb128 0x16
++ .long .LASF1346
++ .byte 0x1a
++ .value 0x1dd
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x5c
++ .uleb128 0x16
++ .long .LASF1347
++ .byte 0x1a
++ .value 0x1de
++ .long 0x7201
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x60
++ .uleb128 0x16
++ .long .LASF1348
++ .byte 0x1a
++ .value 0x1df
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x64
++ .uleb128 0x16
++ .long .LASF1349
++ .byte 0x1a
++ .value 0x1e0
++ .long 0x4521
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x6c
++ .uleb128 0x16
++ .long .LASF1350
++ .byte 0x1a
++ .value 0x1e7
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x70
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x70cd
++ .uleb128 0x21
++ .long .LASF1351
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x71ef
++ .uleb128 0x21
++ .long .LASF1352
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x71fb
++ .uleb128 0x2b
++ .long 0x7235
++ .byte 0x4
++ .byte 0x1a
++ .value 0x236
++ .uleb128 0x1c
++ .long .LASF1353
++ .byte 0x1a
++ .value 0x237
++ .long 0x4551
++ .uleb128 0x1c
++ .long .LASF1354
++ .byte 0x1a
++ .value 0x238
++ .long 0x71e9
++ .uleb128 0x1c
++ .long .LASF1355
++ .byte 0x1a
++ .value 0x239
++ .long 0x723b
++ .byte 0x0
++ .uleb128 0x21
++ .long .LASF1356
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7235
++ .uleb128 0x15
++ .long 0x738a
++ .long .LASF1357
++ .byte 0x54
++ .byte 0x1a
++ .value 0x22c
++ .uleb128 0x16
++ .long .LASF1358
++ .byte 0x1a
++ .value 0x45f
++ .long 0x7ffc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF1359
++ .byte 0x1a
++ .value 0x460
++ .long 0x801c
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x16
++ .long .LASF1360
++ .byte 0x1a
++ .value 0x461
++ .long 0x803c
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0x16
++ .long .LASF1361
++ .byte 0x1a
++ .value 0x462
++ .long 0x8057
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0x16
++ .long .LASF1362
++ .byte 0x1a
++ .value 0x463
++ .long 0x8077
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0x16
++ .long .LASF1363
++ .byte 0x1a
++ .value 0x464
++ .long 0x8097
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0x16
++ .long .LASF1364
++ .byte 0x1a
++ .value 0x465
++ .long 0x8057
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0x16
++ .long .LASF1365
++ .byte 0x1a
++ .value 0x466
++ .long 0x80bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0x16
++ .long .LASF1366
++ .byte 0x1a
++ .value 0x468
++ .long 0x80e1
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0x16
++ .long .LASF1367
++ .byte 0x1a
++ .value 0x469
++ .long 0x8101
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x24
++ .uleb128 0x16
++ .long .LASF1368
++ .byte 0x1a
++ .value 0x46a
++ .long 0x811c
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .uleb128 0x16
++ .long .LASF1369
++ .byte 0x1a
++ .value 0x46b
++ .long 0x8138
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x2c
++ .uleb128 0x16
++ .long .LASF1370
++ .byte 0x1a
++ .value 0x46c
++ .long 0x814a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x30
++ .uleb128 0x16
++ .long .LASF1371
++ .byte 0x1a
++ .value 0x46d
++ .long 0x816a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x34
++ .uleb128 0x16
++ .long .LASF1372
++ .byte 0x1a
++ .value 0x46e
++ .long 0x8185
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x38
++ .uleb128 0x16
++ .long .LASF1373
++ .byte 0x1a
++ .value 0x46f
++ .long 0x81ab
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x3c
++ .uleb128 0x16
++ .long .LASF1374
++ .byte 0x1a
++ .value 0x470
++ .long 0x81dc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x40
++ .uleb128 0x16
++ .long .LASF1375
++ .byte 0x1a
++ .value 0x471
++ .long 0x8201
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x44
++ .uleb128 0x16
++ .long .LASF1376
++ .byte 0x1a
++ .value 0x472
++ .long 0x8221
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x48
++ .uleb128 0x16
++ .long .LASF1377
++ .byte 0x1a
++ .value 0x473
++ .long 0x823c
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4c
++ .uleb128 0x16
++ .long .LASF1378
++ .byte 0x1a
++ .value 0x474
++ .long 0x8258
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x50
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7390
++ .uleb128 0x14
++ .long 0x7241
++ .uleb128 0x15
++ .long 0x7538
++ .long .LASF1379
++ .byte 0x6c
++ .byte 0x1a
++ .value 0x22d
++ .uleb128 0x16
++ .long .LASF594
++ .byte 0x1a
++ .value 0x441
++ .long 0x4af9
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF1380
++ .byte 0x1a
++ .value 0x442
++ .long 0x7da2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x16
++ .long .LASF1381
++ .byte 0x1a
++ .value 0x443
++ .long 0x7dcd
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0x16
++ .long .LASF1382
++ .byte 0x1a
++ .value 0x444
++ .long 0x7df2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0x16
++ .long .LASF1383
++ .byte 0x1a
++ .value 0x445
++ .long 0x7e17
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0x16
++ .long .LASF1384
++ .byte 0x1a
++ .value 0x446
++ .long 0x7e17
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0x16
++ .long .LASF1385
++ .byte 0x1a
++ .value 0x447
++ .long 0x7e37
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0x16
++ .long .LASF978
++ .byte 0x1a
++ .value 0x448
++ .long 0x7e5e
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0x16
++ .long .LASF1386
++ .byte 0x1a
++ .value 0x449
++ .long 0x7cb7
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0x16
++ .long .LASF1387
++ .byte 0x1a
++ .value 0x44a
++ .long 0x7cd7
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x24
++ .uleb128 0x16
++ .long .LASF1388
++ .byte 0x1a
++ .value 0x44b
++ .long 0x7cd7
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .uleb128 0x16
++ .long .LASF673
++ .byte 0x1a
++ .value 0x44c
++ .long 0x7e79
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x2c
++ .uleb128 0x16
++ .long .LASF1160
++ .byte 0x1a
++ .value 0x44d
++ .long 0x7c92
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x30
++ .uleb128 0x16
++ .long .LASF1389
++ .byte 0x1a
++ .value 0x44e
++ .long 0x7e94
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x34
++ .uleb128 0x16
++ .long .LASF404
++ .byte 0x1a
++ .value 0x44f
++ .long 0x7c92
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x38
++ .uleb128 0x16
++ .long .LASF1390
++ .byte 0x1a
++ .value 0x450
++ .long 0x7eb4
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x3c
++ .uleb128 0x16
++ .long .LASF1391
++ .byte 0x1a
++ .value 0x451
++ .long 0x7ecf
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x40
++ .uleb128 0x16
++ .long .LASF1392
++ .byte 0x1a
++ .value 0x452
++ .long 0x7eef
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x44
++ .uleb128 0x16
++ .long .LASF285
++ .byte 0x1a
++ .value 0x453
++ .long 0x7f0f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x48
++ .uleb128 0x16
++ .long .LASF1393
++ .byte 0x1a
++ .value 0x454
++ .long 0x7f39
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4c
++ .uleb128 0x16
++ .long .LASF1394
++ .byte 0x1a
++ .value 0x455
++ .long 0x7f68
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x50
++ .uleb128 0x16
++ .long .LASF676
++ .byte 0x1a
++ .value 0x456
++ .long 0x3fc6
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x54
++ .uleb128 0x16
++ .long .LASF1395
++ .byte 0x1a
++ .value 0x457
++ .long 0x21c9
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x58
++ .uleb128 0x16
++ .long .LASF1396
++ .byte 0x1a
++ .value 0x458
++ .long 0x7f83
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x5c
++ .uleb128 0x16
++ .long .LASF1397
++ .byte 0x1a
++ .value 0x459
++ .long 0x7f0f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x60
++ .uleb128 0x16
++ .long .LASF1398
++ .byte 0x1a
++ .value 0x45a
++ .long 0x7fad
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x64
++ .uleb128 0x16
++ .long .LASF1399
++ .byte 0x1a
++ .value 0x45b
++ .long 0x7fd7
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x68
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x753e
++ .uleb128 0x14
++ .long 0x7395
++ .uleb128 0x15
++ .long 0x7641
++ .long .LASF1400
++ .byte 0x60
++ .byte 0x1a
++ .value 0x22f
++ .uleb128 0x16
++ .long .LASF1401
++ .byte 0x1a
++ .value 0x323
++ .long 0x7641
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF1402
++ .byte 0x1a
++ .value 0x324
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x16
++ .long .LASF1403
++ .byte 0x1a
++ .value 0x325
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0x16
++ .long .LASF1404
++ .byte 0x1a
++ .value 0x326
++ .long 0x7790
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0x16
++ .long .LASF1405
++ .byte 0x1a
++ .value 0x327
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0x16
++ .long .LASF1406
++ .byte 0x1a
++ .value 0x328
++ .long 0x18ef
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0x16
++ .long .LASF1407
++ .byte 0x1a
++ .value 0x329
++ .long 0x3cfd
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .uleb128 0x16
++ .long .LASF1408
++ .byte 0x1a
++ .value 0x32a
++ .long 0x112
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x2c
++ .uleb128 0x16
++ .long .LASF1409
++ .byte 0x1a
++ .value 0x32b
++ .long 0x112
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x2d
++ .uleb128 0x16
++ .long .LASF1410
++ .byte 0x1a
++ .value 0x32c
++ .long 0x1f3
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x30
++ .uleb128 0x16
++ .long .LASF1411
++ .byte 0x1a
++ .value 0x32d
++ .long 0x1f3
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x38
++ .uleb128 0x16
++ .long .LASF1412
++ .byte 0x1a
++ .value 0x32f
++ .long 0x79c7
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x40
++ .uleb128 0x16
++ .long .LASF1413
++ .byte 0x1a
++ .value 0x330
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x44
++ .uleb128 0x16
++ .long .LASF1414
++ .byte 0x1a
++ .value 0x332
++ .long 0x79cd
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x48
++ .uleb128 0x16
++ .long .LASF1415
++ .byte 0x1a
++ .value 0x333
++ .long 0x79d3
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4c
++ .uleb128 0x16
++ .long .LASF1416
++ .byte 0x1a
++ .value 0x337
++ .long 0x795b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x50
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7543
++ .uleb128 0x21
++ .long .LASF1417
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7647
++ .uleb128 0x15
++ .long 0x76bb
++ .long .LASF1418
++ .byte 0x18
++ .byte 0x1a
++ .value 0x2ad
++ .uleb128 0x16
++ .long .LASF285
++ .byte 0x1a
++ .value 0x2ae
++ .long 0x16a2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x17
++ .string "pid"
++ .byte 0x1a
++ .value 0x2af
++ .long 0x3070
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x16
++ .long .LASF739
++ .byte 0x1a
++ .value 0x2b0
++ .long 0x2fc0
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0x17
++ .string "uid"
++ .byte 0x1a
++ .value 0x2b1
++ .long 0x1dd
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0x16
++ .long .LASF226
++ .byte 0x1a
++ .value 0x2b1
++ .long 0x1dd
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0x16
++ .long .LASF1419
++ .byte 0x1a
++ .value 0x2b2
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .byte 0x0
++ .uleb128 0x15
++ .long 0x776e
++ .long .LASF1420
++ .byte 0x2c
++ .byte 0x1a
++ .value 0x2b8
++ .uleb128 0x16
++ .long .LASF1045
++ .byte 0x1a
++ .value 0x2b9
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF328
++ .byte 0x1a
++ .value 0x2ba
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x16
++ .long .LASF53
++ .byte 0x1a
++ .value 0x2bb
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0x16
++ .long .LASF1421
++ .byte 0x1a
++ .value 0x2bc
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0x16
++ .long .LASF1422
++ .byte 0x1a
++ .value 0x2bd
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0x16
++ .long .LASF1423
++ .byte 0x1a
++ .value 0x2be
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0x16
++ .long .LASF1424
++ .byte 0x1a
++ .value 0x2bf
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0x16
++ .long .LASF887
++ .byte 0x1a
++ .value 0x2c0
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0x16
++ .long .LASF1425
++ .byte 0x1a
++ .value 0x2c1
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0x16
++ .long .LASF1426
++ .byte 0x1a
++ .value 0x2c2
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x24
++ .uleb128 0x16
++ .long .LASF1427
++ .byte 0x1a
++ .value 0x2c3
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .byte 0x0
++ .uleb128 0x2b
++ .long 0x7790
++ .byte 0x8
++ .byte 0x1a
++ .value 0x2cd
++ .uleb128 0x1c
++ .long .LASF1428
++ .byte 0x1a
++ .value 0x2ce
++ .long 0x17bc
++ .uleb128 0x1c
++ .long .LASF1429
++ .byte 0x1a
++ .value 0x2cf
++ .long 0x2ea8
++ .byte 0x0
++ .uleb128 0x1e
++ .long .LASF1430
++ .byte 0x1a
++ .value 0x30b
++ .long 0x442b
++ .uleb128 0x15
++ .long 0x77e6
++ .long .LASF1431
++ .byte 0x10
++ .byte 0x1a
++ .value 0x30d
++ .uleb128 0x16
++ .long .LASF1432
++ .byte 0x1a
++ .value 0x30e
++ .long 0x77f2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF1433
++ .byte 0x1a
++ .value 0x30f
++ .long 0x77f2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x16
++ .long .LASF1434
++ .byte 0x1a
++ .value 0x310
++ .long 0x7809
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0x16
++ .long .LASF1435
++ .byte 0x1a
++ .value 0x311
++ .long 0x77f2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .byte 0x0
++ .uleb128 0x5
++ .long 0x77f2
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x7641
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x77e6
++ .uleb128 0x5
++ .long 0x7809
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x7641
++ .uleb128 0x6
++ .long 0x7641
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x77f8
++ .uleb128 0x15
++ .long 0x7895
++ .long .LASF1436
++ .byte 0x20
++ .byte 0x1a
++ .value 0x314
++ .uleb128 0x16
++ .long .LASF1437
++ .byte 0x1a
++ .value 0x315
++ .long 0x78aa
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF1438
++ .byte 0x1a
++ .value 0x316
++ .long 0x77f2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x16
++ .long .LASF1439
++ .byte 0x1a
++ .value 0x317
++ .long 0x78ca
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0x16
++ .long .LASF1434
++ .byte 0x1a
++ .value 0x318
++ .long 0x7809
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0x16
++ .long .LASF1435
++ .byte 0x1a
++ .value 0x319
++ .long 0x77f2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0x16
++ .long .LASF1440
++ .byte 0x1a
++ .value 0x31a
++ .long 0x77f2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0x16
++ .long .LASF1441
++ .byte 0x1a
++ .value 0x31b
++ .long 0x78aa
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0x16
++ .long .LASF1442
++ .byte 0x1a
++ .value 0x31c
++ .long 0x78eb
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .byte 0x0
++ .uleb128 0x11
++ .long 0x78aa
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x7641
++ .uleb128 0x6
++ .long 0x7641
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7895
++ .uleb128 0x11
++ .long 0x78ca
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x7641
++ .uleb128 0x6
++ .long 0x7641
++ .uleb128 0x6
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x78b0
++ .uleb128 0x11
++ .long 0x78e5
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x78e5
++ .uleb128 0x6
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7641
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x78d0
++ .uleb128 0xf
++ .long 0x7928
++ .long .LASF1443
++ .byte 0x10
++ .byte 0x37
++ .byte 0xd
++ .uleb128 0xa
++ .long .LASF169
++ .byte 0x37
++ .byte 0xe
++ .long 0x173
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF594
++ .byte 0x37
++ .byte 0xf
++ .long 0x792e
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF509
++ .byte 0x37
++ .byte 0x10
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .byte 0x0
++ .uleb128 0x21
++ .long .LASF1444
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7928
++ .uleb128 0xf
++ .long 0x794f
++ .long .LASF1445
++ .byte 0x4
++ .byte 0x37
++ .byte 0x14
++ .uleb128 0xa
++ .long .LASF594
++ .byte 0x37
++ .byte 0x15
++ .long 0x7955
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .byte 0x0
++ .uleb128 0x21
++ .long .LASF1446
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x794f
++ .uleb128 0x2b
++ .long 0x797d
++ .byte 0x10
++ .byte 0x1a
++ .value 0x334
++ .uleb128 0x1c
++ .long .LASF1447
++ .byte 0x1a
++ .value 0x335
++ .long 0x78f1
++ .uleb128 0x1c
++ .long .LASF1448
++ .byte 0x1a
++ .value 0x336
++ .long 0x7934
++ .byte 0x0
++ .uleb128 0x15
++ .long 0x79c7
++ .long .LASF1449
++ .byte 0x10
++ .byte 0x1a
++ .value 0x32f
++ .uleb128 0x16
++ .long .LASF1450
++ .byte 0x1a
++ .value 0x36a
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF1451
++ .byte 0x1a
++ .value 0x36b
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x16
++ .long .LASF1452
++ .byte 0x1a
++ .value 0x36c
++ .long 0x79c7
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0x16
++ .long .LASF1453
++ .byte 0x1a
++ .value 0x36d
++ .long 0x3cfd
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x797d
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x779c
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x780f
++ .uleb128 0x15
++ .long 0x7a6e
++ .long .LASF1454
++ .byte 0x20
++ .byte 0x1a
++ .value 0x393
++ .uleb128 0x16
++ .long .LASF414
++ .byte 0x1a
++ .value 0x56f
++ .long 0x7f2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF1455
++ .byte 0x1a
++ .value 0x570
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x16
++ .long .LASF1456
++ .byte 0x1a
++ .value 0x572
++ .long 0x841f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0x16
++ .long .LASF1457
++ .byte 0x1a
++ .value 0x573
++ .long 0x8280
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0x16
++ .long .LASF594
++ .byte 0x1a
++ .value 0x574
++ .long 0x4af9
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0x16
++ .long .LASF307
++ .byte 0x1a
++ .value 0x575
++ .long 0x7a6e
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0x16
++ .long .LASF1458
++ .byte 0x1a
++ .value 0x576
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0x16
++ .long .LASF1459
++ .byte 0x1a
++ .value 0x577
++ .long 0x161c
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0x16
++ .long .LASF1460
++ .byte 0x1a
++ .value 0x578
++ .long 0x161c
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x79d9
++ .uleb128 0x15
++ .long 0x7b9f
++ .long .LASF1461
++ .byte 0x4c
++ .byte 0x1a
++ .value 0x394
++ .uleb128 0x16
++ .long .LASF1275
++ .byte 0x1a
++ .value 0x48a
++ .long 0x826e
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF1462
++ .byte 0x1a
++ .value 0x48b
++ .long 0x814a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x16
++ .long .LASF1463
++ .byte 0x1a
++ .value 0x48d
++ .long 0x814a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0x16
++ .long .LASF1464
++ .byte 0x1a
++ .value 0x48f
++ .long 0x814a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0x16
++ .long .LASF1465
++ .byte 0x1a
++ .value 0x490
++ .long 0x6a5d
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0x16
++ .long .LASF1466
++ .byte 0x1a
++ .value 0x491
++ .long 0x814a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .uleb128 0x16
++ .long .LASF1467
++ .byte 0x1a
++ .value 0x492
++ .long 0x814a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x18
++ .uleb128 0x16
++ .long .LASF1468
++ .byte 0x1a
++ .value 0x493
++ .long 0x814a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x1c
++ .uleb128 0x16
++ .long .LASF1469
++ .byte 0x1a
++ .value 0x494
++ .long 0x8280
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x20
++ .uleb128 0x16
++ .long .LASF1470
++ .byte 0x1a
++ .value 0x495
++ .long 0x8280
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x24
++ .uleb128 0x16
++ .long .LASF1471
++ .byte 0x1a
++ .value 0x496
++ .long 0x696d
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x28
++ .uleb128 0x16
++ .long .LASF1472
++ .byte 0x1a
++ .value 0x497
++ .long 0x8280
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x2c
++ .uleb128 0x16
++ .long .LASF1473
++ .byte 0x1a
++ .value 0x498
++ .long 0x8280
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x30
++ .uleb128 0x16
++ .long .LASF1474
++ .byte 0x1a
++ .value 0x499
++ .long 0x82a7
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x34
++ .uleb128 0x16
++ .long .LASF1475
++ .byte 0x1a
++ .value 0x49a
++ .long 0x82c7
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x38
++ .uleb128 0x16
++ .long .LASF1476
++ .byte 0x1a
++ .value 0x49b
++ .long 0x814a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x3c
++ .uleb128 0x16
++ .long .LASF1477
++ .byte 0x1a
++ .value 0x49c
++ .long 0x82de
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x40
++ .uleb128 0x16
++ .long .LASF1478
++ .byte 0x1a
++ .value 0x49e
++ .long 0x8305
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x44
++ .uleb128 0x16
++ .long .LASF1479
++ .byte 0x1a
++ .value 0x49f
++ .long 0x8305
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x48
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7ba5
++ .uleb128 0x14
++ .long 0x7a74
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x698f
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x6afb
++ .uleb128 0x15
++ .long 0x7c1e
++ .long .LASF1480
++ .byte 0x18
++ .byte 0x1a
++ .value 0x397
++ .uleb128 0x16
++ .long .LASF1481
++ .byte 0x1a
++ .value 0x556
++ .long 0x8355
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF1482
++ .byte 0x1a
++ .value 0x558
++ .long 0x837a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x16
++ .long .LASF1483
++ .byte 0x1a
++ .value 0x55c
++ .long 0x839a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0x16
++ .long .LASF1484
++ .byte 0x1a
++ .value 0x55d
++ .long 0x83b0
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0x16
++ .long .LASF1485
++ .byte 0x1a
++ .value 0x55e
++ .long 0x83cb
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0x16
++ .long .LASF1486
++ .byte 0x1a
++ .value 0x564
++ .long 0x83f5
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7bb6
++ .uleb128 0x21
++ .long .LASF1487
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7c30
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7c24
++ .uleb128 0x21
++ .long .LASF1488
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7c36
++ .uleb128 0x1e
++ .long .LASF1489
++ .byte 0x1a
++ .value 0x411
++ .long 0x7c4e
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7c54
++ .uleb128 0x11
++ .long 0x7c7d
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x160b
++ .uleb128 0x6
++ .long 0x7f2
++ .uleb128 0x6
++ .long 0x21
++ .uleb128 0x6
++ .long 0x1f3
++ .uleb128 0x6
++ .long 0x189
++ .uleb128 0x6
++ .long 0x77
++ .byte 0x0
++ .uleb128 0x11
++ .long 0x7c92
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x3381
++ .uleb128 0x6
++ .long 0x3cfd
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7c7d
++ .uleb128 0x11
++ .long 0x7cb7
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x3381
++ .uleb128 0x6
++ .long 0x3cfd
++ .uleb128 0x6
++ .long 0x77
++ .uleb128 0x6
++ .long 0x2f
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7c98
++ .uleb128 0x11
++ .long 0x7cd7
++ .byte 0x1
++ .long 0x5a
++ .uleb128 0x6
++ .long 0x3cfd
++ .uleb128 0x6
++ .long 0x77
++ .uleb128 0x6
++ .long 0x2f
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7cbd
++ .uleb128 0x2b
++ .long 0x7cff
++ .byte 0x4
++ .byte 0x1a
++ .value 0x42c
++ .uleb128 0x30
++ .string "buf"
++ .byte 0x1a
++ .value 0x42d
++ .long 0xb5
++ .uleb128 0x1c
++ .long .LASF734
++ .byte 0x1a
++ .value 0x42e
++ .long 0x160b
++ .byte 0x0
++ .uleb128 0x1d
++ .long 0x7d45
++ .byte 0x10
++ .byte 0x1a
++ .value 0x429
++ .uleb128 0x16
++ .long .LASF1490
++ .byte 0x1a
++ .value 0x42a
++ .long 0x1fe
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF322
++ .byte 0x1a
++ .value 0x42b
++ .long 0x1fe
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x17
++ .string "arg"
++ .byte 0x1a
++ .value 0x42f
++ .long 0x7cdd
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0x16
++ .long .LASF1491
++ .byte 0x1a
++ .value 0x430
++ .long 0x21
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .byte 0x0
++ .uleb128 0x1e
++ .long .LASF1492
++ .byte 0x1a
++ .value 0x431
++ .long 0x7cff
++ .uleb128 0x1e
++ .long .LASF1493
++ .byte 0x1a
++ .value 0x433
++ .long 0x7d5d
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7d63
++ .uleb128 0x11
++ .long 0x7d82
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x7d82
++ .uleb128 0x6
++ .long 0x2d82
++ .uleb128 0x6
++ .long 0x2f
++ .uleb128 0x6
++ .long 0x2f
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7d45
++ .uleb128 0x11
++ .long 0x7da2
++ .byte 0x1
++ .long 0x1f3
++ .uleb128 0x6
++ .long 0x3cfd
++ .uleb128 0x6
++ .long 0x1f3
++ .uleb128 0x6
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7d88
++ .uleb128 0x11
++ .long 0x7dc7
++ .byte 0x1
++ .long 0x209
++ .uleb128 0x6
++ .long 0x3cfd
++ .uleb128 0x6
++ .long 0xb5
++ .uleb128 0x6
++ .long 0x1fe
++ .uleb128 0x6
++ .long 0x7dc7
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x1f3
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7da8
++ .uleb128 0x11
++ .long 0x7df2
++ .byte 0x1
++ .long 0x209
++ .uleb128 0x6
++ .long 0x3cfd
++ .uleb128 0x6
++ .long 0x7f2
++ .uleb128 0x6
++ .long 0x1fe
++ .uleb128 0x6
++ .long 0x7dc7
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7dd3
++ .uleb128 0x11
++ .long 0x7e17
++ .byte 0x1
++ .long 0x209
++ .uleb128 0x6
++ .long 0x3ddf
++ .uleb128 0x6
++ .long 0x7071
++ .uleb128 0x6
++ .long 0x2f
++ .uleb128 0x6
++ .long 0x1f3
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7df8
++ .uleb128 0x11
++ .long 0x7e37
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x3cfd
++ .uleb128 0x6
++ .long 0x160b
++ .uleb128 0x6
++ .long 0x7c42
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7e1d
++ .uleb128 0x11
++ .long 0x7e52
++ .byte 0x1
++ .long 0x77
++ .uleb128 0x6
++ .long 0x3cfd
++ .uleb128 0x6
++ .long 0x7e52
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7e58
++ .uleb128 0x21
++ .long .LASF1494
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7e3d
++ .uleb128 0x11
++ .long 0x7e79
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x3cfd
++ .uleb128 0x6
++ .long 0x3f9c
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7e64
++ .uleb128 0x11
++ .long 0x7e94
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x3cfd
++ .uleb128 0x6
++ .long 0x7790
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7e7f
++ .uleb128 0x11
++ .long 0x7eb4
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x3cfd
++ .uleb128 0x6
++ .long 0x28ec
++ .uleb128 0x6
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7e9a
++ .uleb128 0x11
++ .long 0x7ecf
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x3ddf
++ .uleb128 0x6
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7eba
++ .uleb128 0x11
++ .long 0x7eef
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x21
++ .uleb128 0x6
++ .long 0x3cfd
++ .uleb128 0x6
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7ed5
++ .uleb128 0x11
++ .long 0x7f0f
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x3cfd
++ .uleb128 0x6
++ .long 0x21
++ .uleb128 0x6
++ .long 0x7641
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7ef5
++ .uleb128 0x11
++ .long 0x7f39
++ .byte 0x1
++ .long 0x209
++ .uleb128 0x6
++ .long 0x3cfd
++ .uleb128 0x6
++ .long 0x7dc7
++ .uleb128 0x6
++ .long 0x1fe
++ .uleb128 0x6
++ .long 0x7d51
++ .uleb128 0x6
++ .long 0x160b
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7f15
++ .uleb128 0x11
++ .long 0x7f68
++ .byte 0x1
++ .long 0x209
++ .uleb128 0x6
++ .long 0x3cfd
++ .uleb128 0x6
++ .long 0x2d82
++ .uleb128 0x6
++ .long 0x21
++ .uleb128 0x6
++ .long 0x1fe
++ .uleb128 0x6
++ .long 0x7dc7
++ .uleb128 0x6
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7f3f
++ .uleb128 0x11
++ .long 0x7f83
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x3cfd
++ .uleb128 0x6
++ .long 0x2f
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7f6e
++ .uleb128 0x11
++ .long 0x7fad
++ .byte 0x1
++ .long 0x209
++ .uleb128 0x6
++ .long 0x4551
++ .uleb128 0x6
++ .long 0x3cfd
++ .uleb128 0x6
++ .long 0x7dc7
++ .uleb128 0x6
++ .long 0x1fe
++ .uleb128 0x6
++ .long 0x77
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7f89
++ .uleb128 0x11
++ .long 0x7fd7
++ .byte 0x1
++ .long 0x209
++ .uleb128 0x6
++ .long 0x3cfd
++ .uleb128 0x6
++ .long 0x7dc7
++ .uleb128 0x6
++ .long 0x4551
++ .uleb128 0x6
++ .long 0x1fe
++ .uleb128 0x6
++ .long 0x77
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7fb3
++ .uleb128 0x11
++ .long 0x7ffc
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x3381
++ .uleb128 0x6
++ .long 0x28ec
++ .uleb128 0x6
++ .long 0x21
++ .uleb128 0x6
++ .long 0x6108
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7fdd
++ .uleb128 0x11
++ .long 0x801c
++ .byte 0x1
++ .long 0x28ec
++ .uleb128 0x6
++ .long 0x3381
++ .uleb128 0x6
++ .long 0x28ec
++ .uleb128 0x6
++ .long 0x6108
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x8002
++ .uleb128 0x11
++ .long 0x803c
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x28ec
++ .uleb128 0x6
++ .long 0x3381
++ .uleb128 0x6
++ .long 0x28ec
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x8022
++ .uleb128 0x11
++ .long 0x8057
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x3381
++ .uleb128 0x6
++ .long 0x28ec
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x8042
++ .uleb128 0x11
++ .long 0x8077
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x3381
++ .uleb128 0x6
++ .long 0x28ec
++ .uleb128 0x6
++ .long 0x7f2
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x805d
++ .uleb128 0x11
++ .long 0x8097
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x3381
++ .uleb128 0x6
++ .long 0x28ec
++ .uleb128 0x6
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x807d
++ .uleb128 0x11
++ .long 0x80bc
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x3381
++ .uleb128 0x6
++ .long 0x28ec
++ .uleb128 0x6
++ .long 0x21
++ .uleb128 0x6
++ .long 0x19f
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x809d
++ .uleb128 0x11
++ .long 0x80e1
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x3381
++ .uleb128 0x6
++ .long 0x28ec
++ .uleb128 0x6
++ .long 0x3381
++ .uleb128 0x6
++ .long 0x28ec
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x80c2
++ .uleb128 0x11
++ .long 0x8101
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x28ec
++ .uleb128 0x6
++ .long 0xb5
++ .uleb128 0x6
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x80e7
++ .uleb128 0x11
++ .long 0x811c
++ .byte 0x1
++ .long 0x160b
++ .uleb128 0x6
++ .long 0x28ec
++ .uleb128 0x6
++ .long 0x6108
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x8107
++ .uleb128 0x5
++ .long 0x8138
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x28ec
++ .uleb128 0x6
++ .long 0x6108
++ .uleb128 0x6
++ .long 0x160b
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x8122
++ .uleb128 0x5
++ .long 0x814a
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x3381
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x813e
++ .uleb128 0x11
++ .long 0x816a
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x3381
++ .uleb128 0x6
++ .long 0x21
++ .uleb128 0x6
++ .long 0x6108
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x8150
++ .uleb128 0x11
++ .long 0x8185
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x28ec
++ .uleb128 0x6
++ .long 0x6aef
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x8170
++ .uleb128 0x11
++ .long 0x81a5
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x28f8
++ .uleb128 0x6
++ .long 0x28ec
++ .uleb128 0x6
++ .long 0x81a5
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x460e
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x818b
++ .uleb128 0x11
++ .long 0x81d5
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x28ec
++ .uleb128 0x6
++ .long 0x7f2
++ .uleb128 0x6
++ .long 0x81d5
++ .uleb128 0x6
++ .long 0x1fe
++ .uleb128 0x6
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x81db
++ .uleb128 0x31
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x81b1
++ .uleb128 0x11
++ .long 0x8201
++ .byte 0x1
++ .long 0x209
++ .uleb128 0x6
++ .long 0x28ec
++ .uleb128 0x6
++ .long 0x7f2
++ .uleb128 0x6
++ .long 0x160b
++ .uleb128 0x6
++ .long 0x1fe
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x81e2
++ .uleb128 0x11
++ .long 0x8221
++ .byte 0x1
++ .long 0x209
++ .uleb128 0x6
++ .long 0x28ec
++ .uleb128 0x6
++ .long 0xb5
++ .uleb128 0x6
++ .long 0x1fe
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x8207
++ .uleb128 0x11
++ .long 0x823c
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x28ec
++ .uleb128 0x6
++ .long 0x7f2
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x8227
++ .uleb128 0x5
++ .long 0x8258
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x3381
++ .uleb128 0x6
++ .long 0x1f3
++ .uleb128 0x6
++ .long 0x1f3
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x8242
++ .uleb128 0x11
++ .long 0x826e
++ .byte 0x1
++ .long 0x3381
++ .uleb128 0x6
++ .long 0x60d1
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x825e
++ .uleb128 0x5
++ .long 0x8280
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x60d1
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x8274
++ .uleb128 0x11
++ .long 0x829b
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x28ec
++ .uleb128 0x6
++ .long 0x829b
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x82a1
++ .uleb128 0x21
++ .long .LASF1495
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x8286
++ .uleb128 0x11
++ .long 0x82c7
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x60d1
++ .uleb128 0x6
++ .long 0x4413
++ .uleb128 0x6
++ .long 0xb5
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x82ad
++ .uleb128 0x5
++ .long 0x82de
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x28f8
++ .uleb128 0x6
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x82cd
++ .uleb128 0x11
++ .long 0x82f9
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x82f9
++ .uleb128 0x6
++ .long 0x28f8
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x82ff
++ .uleb128 0x21
++ .long .LASF1496
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x82e4
++ .uleb128 0x11
++ .long 0x8334
++ .byte 0x1
++ .long 0x28ec
++ .uleb128 0x6
++ .long 0x60d1
++ .uleb128 0x6
++ .long 0x8334
++ .uleb128 0x6
++ .long 0x21
++ .uleb128 0x6
++ .long 0x21
++ .uleb128 0x6
++ .long 0x833a
++ .uleb128 0x6
++ .long 0x160b
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x141
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x8340
++ .uleb128 0x11
++ .long 0x8355
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x160b
++ .uleb128 0x6
++ .long 0x28ec
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x830b
++ .uleb128 0x11
++ .long 0x837a
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x28ec
++ .uleb128 0x6
++ .long 0x8334
++ .uleb128 0x6
++ .long 0x4413
++ .uleb128 0x6
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x835b
++ .uleb128 0x11
++ .long 0x839a
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x28ec
++ .uleb128 0x6
++ .long 0xb5
++ .uleb128 0x6
++ .long 0x28ec
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x8380
++ .uleb128 0x11
++ .long 0x83b0
++ .byte 0x1
++ .long 0x28ec
++ .uleb128 0x6
++ .long 0x28ec
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x83a0
++ .uleb128 0x11
++ .long 0x83cb
++ .byte 0x1
++ .long 0x28ec
++ .uleb128 0x6
++ .long 0x60d1
++ .uleb128 0x6
++ .long 0x160b
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x83b6
++ .uleb128 0x11
++ .long 0x83f5
++ .byte 0x1
++ .long 0x28ec
++ .uleb128 0x6
++ .long 0x60d1
++ .uleb128 0x6
++ .long 0x160b
++ .uleb128 0x6
++ .long 0x160b
++ .uleb128 0x6
++ .long 0x833a
++ .uleb128 0x6
++ .long 0x160b
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x83d1
++ .uleb128 0x11
++ .long 0x841f
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x7a6e
++ .uleb128 0x6
++ .long 0x21
++ .uleb128 0x6
++ .long 0x7f2
++ .uleb128 0x6
++ .long 0x160b
++ .uleb128 0x6
++ .long 0x28f8
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x83fb
++ .uleb128 0x7
++ .long .LASF889
++ .byte 0x3e
++ .byte 0x19
++ .long 0x8430
++ .uleb128 0x11
++ .long 0x8445
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x160b
++ .uleb128 0x6
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x8425
++ .uleb128 0x5
++ .long 0x845c
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x4521
++ .uleb128 0x6
++ .long 0x2d82
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x844b
++ .uleb128 0x9
++ .long 0x8487
++ .byte 0x4
++ .byte 0x19
++ .byte 0x1b
++ .uleb128 0xa
++ .long .LASF1497
++ .byte 0x19
++ .byte 0x1c
++ .long 0x53
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF1498
++ .byte 0x19
++ .byte 0x1d
++ .long 0x53
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x2
++ .byte 0x0
++ .uleb128 0xc
++ .long 0x84a0
++ .byte 0x4
++ .byte 0x19
++ .byte 0x16
++ .uleb128 0xe
++ .long .LASF1499
++ .byte 0x19
++ .byte 0x17
++ .long 0x16c4
++ .uleb128 0xd
++ .long 0x8462
++ .byte 0x0
++ .uleb128 0x9
++ .long 0x84c5
++ .byte 0x8
++ .byte 0x19
++ .byte 0x21
++ .uleb128 0xa
++ .long .LASF315
++ .byte 0x19
++ .byte 0x22
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF1500
++ .byte 0x19
++ .byte 0x29
++ .long 0x6e96
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .byte 0x0
++ .uleb128 0x9
++ .long 0x84ea
++ .byte 0x8
++ .byte 0x19
++ .byte 0x34
++ .uleb128 0xa
++ .long .LASF1501
++ .byte 0x19
++ .byte 0x35
++ .long 0x84ea
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF1502
++ .byte 0x19
++ .byte 0x36
++ .long 0x2ea2
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x160b
++ .uleb128 0x9
++ .long 0x8507
++ .byte 0x4
++ .byte 0x19
++ .byte 0x38
++ .uleb128 0xa
++ .long .LASF1503
++ .byte 0x19
++ .byte 0x39
++ .long 0x2d82
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .byte 0x0
++ .uleb128 0xc
++ .long 0x852a
++ .byte 0x8
++ .byte 0x19
++ .byte 0x20
++ .uleb128 0xd
++ .long 0x84a0
++ .uleb128 0x26
++ .string "ptl"
++ .byte 0x19
++ .byte 0x32
++ .long 0x1680
++ .uleb128 0xd
++ .long 0x84c5
++ .uleb128 0xd
++ .long 0x84f0
++ .byte 0x0
++ .uleb128 0xc
++ .long 0x8549
++ .byte 0x4
++ .byte 0x19
++ .byte 0x3c
++ .uleb128 0xe
++ .long .LASF746
++ .byte 0x19
++ .byte 0x3d
++ .long 0x2f
++ .uleb128 0xe
++ .long .LASF1504
++ .byte 0x19
++ .byte 0x3e
++ .long 0x160b
++ .byte 0x0
++ .uleb128 0x9
++ .long 0x857c
++ .byte 0x10
++ .byte 0x15
++ .byte 0x51
++ .uleb128 0xa
++ .long .LASF509
++ .byte 0x15
++ .byte 0x52
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF205
++ .byte 0x15
++ .byte 0x53
++ .long 0x160b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF600
++ .byte 0x15
++ .byte 0x54
++ .long 0x3f9c
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .byte 0x0
++ .uleb128 0xc
++ .long 0x859b
++ .byte 0x10
++ .byte 0x15
++ .byte 0x50
++ .uleb128 0xe
++ .long .LASF1505
++ .byte 0x15
++ .byte 0x55
++ .long 0x8549
++ .uleb128 0xe
++ .long .LASF1100
++ .byte 0x15
++ .byte 0x57
++ .long 0x5ced
++ .byte 0x0
++ .uleb128 0x21
++ .long .LASF833
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x859b
++ .uleb128 0xf
++ .long 0x8608
++ .long .LASF1506
++ .byte 0x18
++ .byte 0x15
++ .byte 0x64
++ .uleb128 0xa
++ .long .LASF1160
++ .byte 0x15
++ .byte 0xca
++ .long 0x861a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF1507
++ .byte 0x15
++ .byte 0xcb
++ .long 0x861a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0xa
++ .long .LASF1508
++ .byte 0x15
++ .byte 0xcc
++ .long 0x863a
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF1509
++ .byte 0x15
++ .byte 0xcd
++ .long 0x8655
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF1510
++ .byte 0x15
++ .byte 0xce
++ .long 0x8684
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xa
++ .long .LASF1511
++ .byte 0x15
++ .byte 0xd2
++ .long 0x869f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x85a7
++ .uleb128 0x5
++ .long 0x861a
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x3f9c
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x860e
++ .uleb128 0x11
++ .long 0x863a
++ .byte 0x1
++ .long 0x2d82
++ .uleb128 0x6
++ .long 0x3f9c
++ .uleb128 0x6
++ .long 0x2f
++ .uleb128 0x6
++ .long 0x4413
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x8620
++ .uleb128 0x11
++ .long 0x8655
++ .byte 0x1
++ .long 0x2f
++ .uleb128 0x6
++ .long 0x3f9c
++ .uleb128 0x6
++ .long 0x2f
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x8640
++ .uleb128 0x11
++ .long 0x8684
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x3f9c
++ .uleb128 0x6
++ .long 0x2f
++ .uleb128 0x6
++ .long 0x2f
++ .uleb128 0x6
++ .long 0x36e
++ .uleb128 0x6
++ .long 0x2f
++ .uleb128 0x6
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x865b
++ .uleb128 0x11
++ .long 0x869f
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x3f9c
++ .uleb128 0x6
++ .long 0x2d82
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x868a
++ .uleb128 0xf
++ .long 0x86c0
++ .long .LASF1512
++ .byte 0x7c
++ .byte 0x6e
++ .byte 0x36
++ .uleb128 0xa
++ .long .LASF367
++ .byte 0x6e
++ .byte 0x37
++ .long 0x86c0
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x86d0
++ .long 0x2f
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x1e
++ .byte 0x0
++ .uleb128 0xf
++ .long 0x8723
++ .long .LASF367
++ .byte 0x18
++ .byte 0x57
++ .byte 0x1d
++ .uleb128 0xa
++ .long .LASF1360
++ .byte 0x57
++ .byte 0x1e
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0xa
++ .long .LASF1513
++ .byte 0x57
++ .byte 0x1f
++ .long 0x160b
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0xa
++ .long .LASF322
++ .byte 0x57
++ .byte 0x20
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .uleb128 0xa
++ .long .LASF1514
++ .byte 0x57
++ .byte 0x21
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x10
++ .uleb128 0xa
++ .long .LASF161
++ .byte 0x57
++ .byte 0x22
++ .long 0x15f9
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x14
++ .byte 0x0
++ .uleb128 0x1a
++ .long 0x873f
++ .long .LASF1515
++ .value 0x200
++ .byte 0x1
++ .byte 0x3d
++ .uleb128 0xb
++ .string "vec"
++ .byte 0x1
++ .byte 0x3e
++ .long 0x873f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x874f
++ .long 0x17bc
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x3f
++ .byte 0x0
++ .uleb128 0x7
++ .long .LASF1516
++ .byte 0x1
++ .byte 0x3f
++ .long 0x8723
++ .uleb128 0x1a
++ .long 0x8776
++ .long .LASF1517
++ .value 0x800
++ .byte 0x1
++ .byte 0x41
++ .uleb128 0xb
++ .string "vec"
++ .byte 0x1
++ .byte 0x42
++ .long 0x8776
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .byte 0x0
++ .uleb128 0x12
++ .long 0x8786
++ .long 0x17bc
++ .uleb128 0x13
++ .long 0x28
++ .byte 0xff
++ .byte 0x0
++ .uleb128 0x7
++ .long .LASF1518
++ .byte 0x1
++ .byte 0x43
++ .long 0x875a
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x3728
++ .uleb128 0x7
++ .long .LASF1519
++ .byte 0x1
++ .byte 0x50
++ .long 0x378d
++ .uleb128 0x15
++ .long 0x87eb
++ .long .LASF1520
++ .byte 0x10
++ .byte 0x1
++ .value 0x3a6
++ .uleb128 0x17
++ .string "pc"
++ .byte 0x1
++ .value 0x3a7
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x0
++ .uleb128 0x16
++ .long .LASF1521
++ .byte 0x1
++ .value 0x3a8
++ .long 0x2f
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x4
++ .uleb128 0x16
++ .long .LASF322
++ .byte 0x1
++ .value 0x3a9
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0x8
++ .uleb128 0x16
++ .long .LASF1522
++ .byte 0x1
++ .value 0x3aa
++ .long 0x77
++ .byte 0x2
++ .byte 0x23
++ .uleb128 0xc
++ .byte 0x0
++ .uleb128 0x32
++ .long .LASF1548
++ .byte 0x6
++ .byte 0x23
++ .byte 0x1
++ .byte 0x3
++ .uleb128 0x33
++ .long 0x8823
++ .long .LASF1523
++ .byte 0x2
++ .byte 0x2e
++ .byte 0x1
++ .byte 0x3
++ .uleb128 0x34
++ .string "new"
++ .byte 0x2
++ .byte 0x2b
++ .long 0x17e5
++ .uleb128 0x35
++ .long .LASF308
++ .byte 0x2
++ .byte 0x2c
++ .long 0x17e5
++ .uleb128 0x35
++ .long .LASF307
++ .byte 0x2
++ .byte 0x2d
++ .long 0x17e5
++ .byte 0x0
++ .uleb128 0x33
++ .long 0x8847
++ .long .LASF1524
++ .byte 0x2
++ .byte 0xd9
++ .byte 0x1
++ .byte 0x3
++ .uleb128 0x34
++ .string "old"
++ .byte 0x2
++ .byte 0xd7
++ .long 0x17e5
++ .uleb128 0x34
++ .string "new"
++ .byte 0x2
++ .byte 0xd8
++ .long 0x17e5
++ .byte 0x0
++ .uleb128 0x33
++ .long 0x8860
++ .long .LASF1525
++ .byte 0x2
++ .byte 0x1f
++ .byte 0x1
++ .byte 0x3
++ .uleb128 0x35
++ .long .LASF509
++ .byte 0x2
++ .byte 0x1e
++ .long 0x17e5
++ .byte 0x0
++ .uleb128 0x36
++ .long 0x887d
++ .long .LASF1526
++ .byte 0x1
++ .byte 0x5f
++ .byte 0x1
++ .long 0x77
++ .byte 0x3
++ .uleb128 0x35
++ .long .LASF735
++ .byte 0x1
++ .byte 0x5e
++ .long 0x887d
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x8797
++ .uleb128 0x33
++ .long 0x88a8
++ .long .LASF1527
++ .byte 0x2
++ .byte 0x55
++ .byte 0x1
++ .byte 0x3
++ .uleb128 0x34
++ .string "new"
++ .byte 0x2
++ .byte 0x54
++ .long 0x17e5
++ .uleb128 0x35
++ .long .LASF600
++ .byte 0x2
++ .byte 0x54
++ .long 0x17e5
++ .uleb128 0x37
++ .byte 0x0
++ .uleb128 0x33
++ .long 0x88cc
++ .long .LASF1528
++ .byte 0x2
++ .byte 0x9c
++ .byte 0x1
++ .byte 0x3
++ .uleb128 0x35
++ .long .LASF308
++ .byte 0x2
++ .byte 0x9b
++ .long 0x17e5
++ .uleb128 0x35
++ .long .LASF307
++ .byte 0x2
++ .byte 0x9b
++ .long 0x17e5
++ .byte 0x0
++ .uleb128 0x33
++ .long 0x88f2
++ .long .LASF1529
++ .byte 0x2
++ .byte 0xe2
++ .byte 0x1
++ .byte 0x3
++ .uleb128 0x34
++ .string "old"
++ .byte 0x2
++ .byte 0xe0
++ .long 0x17e5
++ .uleb128 0x34
++ .string "new"
++ .byte 0x2
++ .byte 0xe1
++ .long 0x17e5
++ .uleb128 0x37
++ .uleb128 0x37
++ .byte 0x0
++ .uleb128 0x36
++ .long 0x890f
++ .long .LASF1530
++ .byte 0x1
++ .byte 0x64
++ .byte 0x1
++ .long 0x887d
++ .byte 0x3
++ .uleb128 0x35
++ .long .LASF735
++ .byte 0x1
++ .byte 0x63
++ .long 0x887d
++ .byte 0x0
++ .uleb128 0x36
++ .long 0x8936
++ .long .LASF1531
++ .byte 0xc
++ .byte 0xf5
++ .byte 0x1
++ .long 0x21
++ .byte 0x3
++ .uleb128 0x34
++ .string "nr"
++ .byte 0xc
++ .byte 0xf4
++ .long 0x21
++ .uleb128 0x35
++ .long .LASF1532
++ .byte 0xc
++ .byte 0xf4
++ .long 0x8936
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x893c
++ .uleb128 0x14
++ .long 0x8941
++ .uleb128 0x2d
++ .long 0x2f
++ .uleb128 0x36
++ .long 0x8978
++ .long .LASF1533
++ .byte 0xc
++ .byte 0xfa
++ .byte 0x1
++ .long 0x21
++ .byte 0x3
++ .uleb128 0x34
++ .string "nr"
++ .byte 0xc
++ .byte 0xf9
++ .long 0x21
++ .uleb128 0x35
++ .long .LASF1532
++ .byte 0xc
++ .byte 0xf9
++ .long 0x8936
++ .uleb128 0x38
++ .long .LASF1538
++ .byte 0xc
++ .byte 0xfb
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x36
++ .long 0x89a9
++ .long .LASF1534
++ .byte 0xf
++ .byte 0x40
++ .byte 0x1
++ .long 0x21
++ .byte 0x3
++ .uleb128 0x34
++ .string "ti"
++ .byte 0xf
++ .byte 0x3f
++ .long 0x2dd9
++ .uleb128 0x35
++ .long .LASF1535
++ .byte 0xf
++ .byte 0x3f
++ .long 0x21
++ .uleb128 0x37
++ .uleb128 0x39
++ .uleb128 0x39
++ .uleb128 0x3a
++ .long 0x896c
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x3b
++ .long 0x89e0
++ .long .LASF1536
++ .byte 0xb
++ .value 0x620
++ .byte 0x1
++ .long 0x21
++ .byte 0x3
++ .uleb128 0x3c
++ .string "tsk"
++ .byte 0xb
++ .value 0x61f
++ .long 0x15f9
++ .uleb128 0x3d
++ .long .LASF1535
++ .byte 0xb
++ .value 0x61f
++ .long 0x21
++ .uleb128 0x39
++ .uleb128 0x37
++ .uleb128 0x39
++ .uleb128 0x39
++ .uleb128 0x3a
++ .long 0x896c
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x36
++ .long 0x8a1a
++ .long .LASF1537
++ .byte 0x3
++ .byte 0x1d
++ .byte 0x1
++ .long 0x160b
++ .byte 0x3
++ .uleb128 0x35
++ .long .LASF328
++ .byte 0x3
++ .byte 0x1c
++ .long 0x1fe
++ .uleb128 0x35
++ .long .LASF53
++ .byte 0x3
++ .byte 0x1c
++ .long 0x240
++ .uleb128 0x3e
++ .long .LASF1596
++ .byte 0x3
++ .byte 0x2b
++ .uleb128 0x39
++ .uleb128 0x3f
++ .string "i"
++ .byte 0x3
++ .byte 0x1f
++ .long 0x21
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x36
++ .long 0x8a5d
++ .long .LASF1539
++ .byte 0x54
++ .byte 0xc3
++ .byte 0x1
++ .long 0x160b
++ .byte 0x3
++ .uleb128 0x35
++ .long .LASF328
++ .byte 0x54
++ .byte 0xc2
++ .long 0x1fe
++ .uleb128 0x35
++ .long .LASF53
++ .byte 0x54
++ .byte 0xc2
++ .long 0x240
++ .uleb128 0x35
++ .long .LASF400
++ .byte 0x54
++ .byte 0xc2
++ .long 0x21
++ .uleb128 0x39
++ .uleb128 0x39
++ .uleb128 0x40
++ .long 0x8a07
++ .uleb128 0x39
++ .uleb128 0x3a
++ .long 0x8a0f
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x3b
++ .long 0x8aaa
++ .long .LASF1540
++ .byte 0x4
++ .value 0x1a3
++ .byte 0x1
++ .long 0x160b
++ .byte 0x3
++ .uleb128 0x3c
++ .string "s"
++ .byte 0x4
++ .value 0x1a2
++ .long 0x160b
++ .uleb128 0x3d
++ .long .LASF1541
++ .byte 0x4
++ .value 0x1a2
++ .long 0x2f
++ .uleb128 0x3d
++ .long .LASF322
++ .byte 0x4
++ .value 0x1a2
++ .long 0x1fe
++ .uleb128 0x39
++ .uleb128 0x41
++ .string "d0"
++ .byte 0x4
++ .value 0x1bd
++ .long 0x21
++ .uleb128 0x41
++ .string "d1"
++ .byte 0x4
++ .value 0x1bd
++ .long 0x21
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x33
++ .long 0x8ac3
++ .long .LASF1542
++ .byte 0xa
++ .byte 0x7b
++ .byte 0x1
++ .byte 0x3
++ .uleb128 0x35
++ .long .LASF760
++ .byte 0xa
++ .byte 0x7a
++ .long 0x8791
++ .byte 0x0
++ .uleb128 0x33
++ .long 0x8ae7
++ .long .LASF1543
++ .byte 0x1
++ .byte 0xfb
++ .byte 0x1
++ .byte 0x3
++ .uleb128 0x35
++ .long .LASF735
++ .byte 0x1
++ .byte 0xf9
++ .long 0x887d
++ .uleb128 0x35
++ .long .LASF760
++ .byte 0x1
++ .byte 0xfa
++ .long 0x8791
++ .byte 0x0
++ .uleb128 0x42
++ .long 0x8b1b
++ .long .LASF1544
++ .byte 0x1
++ .value 0x153
++ .byte 0x1
++ .byte 0x3
++ .uleb128 0x3d
++ .long .LASF760
++ .byte 0x1
++ .value 0x151
++ .long 0x8791
++ .uleb128 0x3d
++ .long .LASF1545
++ .byte 0x1
++ .value 0x152
++ .long 0x21
++ .uleb128 0x43
++ .long .LASF376
++ .byte 0x1
++ .value 0x154
++ .long 0x17e5
++ .uleb128 0x37
++ .byte 0x0
++ .uleb128 0x33
++ .long 0x8b34
++ .long .LASF1546
++ .byte 0x5
++ .byte 0x6b
++ .byte 0x1
++ .byte 0x3
++ .uleb128 0x35
++ .long .LASF285
++ .byte 0x5
++ .byte 0x6a
++ .long 0x8b34
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x163c
++ .uleb128 0x33
++ .long 0x8b49
++ .long .LASF1547
++ .byte 0x6
++ .byte 0x47
++ .byte 0x1
++ .byte 0x3
++ .uleb128 0x37
++ .byte 0x0
++ .uleb128 0x44
++ .long .LASF1549
++ .byte 0x10
++ .byte 0x5c
++ .byte 0x1
++ .long 0x2dd9
++ .byte 0x3
++ .uleb128 0x3b
++ .long 0x8b75
++ .long .LASF1550
++ .byte 0x2
++ .value 0x12b
++ .byte 0x1
++ .long 0x21
++ .byte 0x3
++ .uleb128 0x3d
++ .long .LASF600
++ .byte 0x2
++ .value 0x12a
++ .long 0x8b75
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x8b7b
++ .uleb128 0x14
++ .long 0x17bc
++ .uleb128 0x36
++ .long 0x8ba7
++ .long .LASF1551
++ .byte 0x7
++ .byte 0x57
++ .byte 0x1
++ .long 0x77
++ .byte 0x3
++ .uleb128 0x34
++ .string "sl"
++ .byte 0x7
++ .byte 0x56
++ .long 0x8ba7
++ .uleb128 0x3f
++ .string "ret"
++ .byte 0x7
++ .byte 0x58
++ .long 0x77
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x8bad
++ .uleb128 0x14
++ .long 0x170a
++ .uleb128 0x36
++ .long 0x8bd8
++ .long .LASF1552
++ .byte 0x7
++ .byte 0x66
++ .byte 0x1
++ .long 0x21
++ .byte 0x3
++ .uleb128 0x34
++ .string "sl"
++ .byte 0x7
++ .byte 0x65
++ .long 0x8ba7
++ .uleb128 0x34
++ .string "iv"
++ .byte 0x7
++ .byte 0x65
++ .long 0x77
++ .byte 0x0
++ .uleb128 0x42
++ .long 0x8c11
++ .long .LASF1553
++ .byte 0x1
++ .value 0x354
++ .byte 0x1
++ .byte 0x3
++ .uleb128 0x3d
++ .long .LASF1554
++ .byte 0x1
++ .value 0x353
++ .long 0x2f
++ .uleb128 0x43
++ .long .LASF1555
++ .byte 0x1
++ .value 0x355
++ .long 0x2f
++ .uleb128 0x45
++ .long .LASF322
++ .byte 0x1
++ .value 0x356
++ .long 0x21
++ .byte 0x5
++ .byte 0x3
++ .long count.18791
++ .byte 0x0
++ .uleb128 0x46
++ .long .LASF1556
++ .byte 0x8
++ .value 0x1f0
++ .byte 0x1
++ .byte 0x3
++ .uleb128 0x36
++ .long 0x8c38
++ .long .LASF1557
++ .byte 0xa
++ .byte 0x3e
++ .byte 0x1
++ .long 0x21
++ .byte 0x3
++ .uleb128 0x35
++ .long .LASF760
++ .byte 0xa
++ .byte 0x3d
++ .long 0x8c38
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x8c3e
++ .uleb128 0x14
++ .long 0x3728
++ .uleb128 0x33
++ .long 0x8c5c
++ .long .LASF1558
++ .byte 0xa
++ .byte 0x7f
++ .byte 0x1
++ .byte 0x3
++ .uleb128 0x35
++ .long .LASF760
++ .byte 0xa
++ .byte 0x7e
++ .long 0x8791
++ .byte 0x0
++ .uleb128 0x33
++ .long 0x8c81
++ .long .LASF1559
++ .byte 0x1
++ .byte 0x70
++ .byte 0x1
++ .byte 0x3
++ .uleb128 0x35
++ .long .LASF760
++ .byte 0x1
++ .byte 0x6f
++ .long 0x8791
++ .uleb128 0x35
++ .long .LASF1560
++ .byte 0x1
++ .byte 0x6f
++ .long 0x887d
++ .uleb128 0x37
++ .byte 0x0
++ .uleb128 0x36
++ .long 0x8ca0
++ .long .LASF1561
++ .byte 0x9
++ .byte 0xb
++ .byte 0x1
++ .long 0x15f9
++ .byte 0x3
++ .uleb128 0x39
++ .uleb128 0x38
++ .long .LASF1562
++ .byte 0x9
++ .byte 0xc
++ .long 0x15f9
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x33
++ .long 0x8ccf
++ .long .LASF1563
++ .byte 0xa
++ .byte 0x2d
++ .byte 0x1
++ .byte 0x3
++ .uleb128 0x35
++ .long .LASF760
++ .byte 0xa
++ .byte 0x2a
++ .long 0x8791
++ .uleb128 0x35
++ .long .LASF733
++ .byte 0xa
++ .byte 0x2b
++ .long 0x3787
++ .uleb128 0x35
++ .long .LASF734
++ .byte 0xa
++ .byte 0x2c
++ .long 0x2f
++ .byte 0x0
++ .uleb128 0x47
++ .long 0x8d1f
++ .byte 0x1
++ .long .LASF1565
++ .byte 0x1
++ .byte 0x8a
++ .byte 0x1
++ .long 0x2f
++ .long .LFB883
++ .long .LFE883
++ .long .LLST0
++ .uleb128 0x48
++ .string "j"
++ .byte 0x1
++ .byte 0x89
++ .long 0x2f
++ .long .LLST1
++ .uleb128 0x48
++ .string "cpu"
++ .byte 0x1
++ .byte 0x89
++ .long 0x21
++ .long .LLST2
++ .uleb128 0x3f
++ .string "rem"
++ .byte 0x1
++ .byte 0x8b
++ .long 0x21
++ .uleb128 0x38
++ .long .LASF1564
++ .byte 0x1
++ .byte 0x8c
++ .long 0x2f
++ .byte 0x0
++ .uleb128 0x49
++ .long 0x8d58
++ .byte 0x1
++ .long .LASF1566
++ .byte 0x1
++ .byte 0xc3
++ .byte 0x1
++ .long 0x2f
++ .long .LFB884
++ .long .LFE884
++ .byte 0x2
++ .byte 0x74
++ .sleb128 4
++ .uleb128 0x48
++ .string "j"
++ .byte 0x1
++ .byte 0xc2
++ .long 0x2f
++ .long .LLST4
++ .uleb128 0x48
++ .string "cpu"
++ .byte 0x1
++ .byte 0xc2
++ .long 0x21
++ .long .LLST5
++ .byte 0x0
++ .uleb128 0x49
++ .long 0x8d9b
++ .byte 0x1
++ .long .LASF1567
++ .byte 0x1
++ .byte 0xde
++ .byte 0x1
++ .long 0x2f
++ .long .LFB885
++ .long .LFE885
++ .byte 0x2
++ .byte 0x74
++ .sleb128 4
++ .uleb128 0x48
++ .string "j"
++ .byte 0x1
++ .byte 0xdd
++ .long 0x2f
++ .long .LLST7
++ .uleb128 0x4a
++ .long .LBB179
++ .long .LBE179
++ .uleb128 0x4b
++ .long .LASF1562
++ .byte 0x1
++ .byte 0xdf
++ .long 0x21
++ .long .LLST8
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x49
++ .long 0x8dde
++ .byte 0x1
++ .long .LASF1568
++ .byte 0x1
++ .byte 0xf3
++ .byte 0x1
++ .long 0x2f
++ .long .LFB886
++ .long .LFE886
++ .byte 0x2
++ .byte 0x74
++ .sleb128 4
++ .uleb128 0x48
++ .string "j"
++ .byte 0x1
++ .byte 0xf2
++ .long 0x2f
++ .long .LLST10
++ .uleb128 0x4a
++ .long .LBB180
++ .long .LBE180
++ .uleb128 0x4b
++ .long .LASF1562
++ .byte 0x1
++ .byte 0xf4
++ .long 0x21
++ .long .LLST11
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x4c
++ .long 0x8eda
++ .long .LASF1569
++ .byte 0x1
++ .value 0x102
++ .byte 0x1
++ .long .LFB888
++ .long .LFE888
++ .long .LLST12
++ .uleb128 0x4d
++ .long .LASF735
++ .byte 0x1
++ .value 0x101
++ .long 0x887d
++ .long .LLST13
++ .uleb128 0x4e
++ .long .LASF760
++ .byte 0x1
++ .value 0x101
++ .long 0x8791
++ .byte 0x1
++ .byte 0x52
++ .uleb128 0x4f
++ .long .LASF732
++ .byte 0x1
++ .value 0x103
++ .long 0x2f
++ .long .LLST14
++ .uleb128 0x50
++ .string "idx"
++ .byte 0x1
++ .value 0x104
++ .long 0x2f
++ .long .LLST15
++ .uleb128 0x50
++ .string "vec"
++ .byte 0x1
++ .value 0x105
++ .long 0x17e5
++ .long .LLST16
++ .uleb128 0x51
++ .long 0x8e5d
++ .long .LBB181
++ .long .LBE181
++ .uleb128 0x41
++ .string "i"
++ .byte 0x1
++ .value 0x10b
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x51
++ .long 0x8e75
++ .long .LBB182
++ .long .LBE182
++ .uleb128 0x41
++ .string "i"
++ .byte 0x1
++ .value 0x10e
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x51
++ .long 0x8e8d
++ .long .LBB183
++ .long .LBE183
++ .uleb128 0x41
++ .string "i"
++ .byte 0x1
++ .value 0x111
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x51
++ .long 0x8ea5
++ .long .LBB184
++ .long .LBE184
++ .uleb128 0x41
++ .string "i"
++ .byte 0x1
++ .value 0x11a
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x52
++ .long 0x8883
++ .long .Ldebug_ranges0+0x0
++ .byte 0x1
++ .value 0x128
++ .uleb128 0x53
++ .long 0x889b
++ .uleb128 0x53
++ .long 0x8890
++ .uleb128 0x54
++ .long 0x87f4
++ .long .Ldebug_ranges0+0x18
++ .byte 0x2
++ .byte 0x56
++ .uleb128 0x53
++ .long 0x8817
++ .uleb128 0x55
++ .long 0x880c
++ .byte 0x1
++ .byte 0x50
++ .uleb128 0x53
++ .long 0x8801
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x56
++ .long 0x8f30
++ .byte 0x1
++ .long .LASF1570
++ .byte 0x1
++ .value 0x13f
++ .byte 0x1
++ .long .LFB889
++ .long .LFE889
++ .byte 0x2
++ .byte 0x74
++ .sleb128 4
++ .uleb128 0x4e
++ .long .LASF760
++ .byte 0x1
++ .value 0x13e
++ .long 0x8791
++ .byte 0x1
++ .byte 0x50
++ .uleb128 0x4a
++ .long .LBB193
++ .long .LBE193
++ .uleb128 0x43
++ .long .LASF1571
++ .byte 0x1
++ .value 0x141
++ .long 0x2f
++ .uleb128 0x4a
++ .long .LBB194
++ .long .LBE194
++ .uleb128 0x45
++ .long .LASF1562
++ .byte 0x1
++ .value 0x141
++ .long 0x2f
++ .byte 0x1
++ .byte 0x51
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x33
++ .long 0x8f49
++ .long .LASF1572
++ .byte 0x1
++ .byte 0x69
++ .byte 0x1
++ .byte 0x3
++ .uleb128 0x35
++ .long .LASF760
++ .byte 0x1
++ .byte 0x68
++ .long 0x8791
++ .byte 0x0
++ .uleb128 0x57
++ .long 0x8f8a
++ .byte 0x1
++ .long .LASF1573
++ .byte 0x1
++ .value 0x14b
++ .byte 0x1
++ .long .LFB890
++ .long .LFE890
++ .long .LLST18
++ .uleb128 0x4d
++ .long .LASF760
++ .byte 0x1
++ .value 0x14a
++ .long 0x8791
++ .long .LLST19
++ .uleb128 0x58
++ .long 0x8f30
++ .long .LBB197
++ .long .LBE197
++ .byte 0x1
++ .value 0x14d
++ .uleb128 0x53
++ .long 0x8f3d
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x59
++ .long 0x909a
++ .long .LASF1574
++ .byte 0x1
++ .value 0x245
++ .byte 0x1
++ .long 0x21
++ .long .LFB899
++ .long .LFE899
++ .long .LLST20
++ .uleb128 0x4d
++ .long .LASF735
++ .byte 0x1
++ .value 0x244
++ .long 0x887d
++ .long .LLST21
++ .uleb128 0x5a
++ .string "tv"
++ .byte 0x1
++ .value 0x244
++ .long 0x909a
++ .long .LLST22
++ .uleb128 0x4d
++ .long .LASF746
++ .byte 0x1
++ .value 0x244
++ .long 0x21
++ .long .LLST23
++ .uleb128 0x4f
++ .long .LASF760
++ .byte 0x1
++ .value 0x247
++ .long 0x8791
++ .long .LLST24
++ .uleb128 0x50
++ .string "tmp"
++ .byte 0x1
++ .value 0x247
++ .long 0x8791
++ .long .LLST25
++ .uleb128 0x45
++ .long .LASF1575
++ .byte 0x1
++ .value 0x248
++ .long 0x17bc
++ .byte 0x2
++ .byte 0x91
++ .sleb128 -24
++ .uleb128 0x5b
++ .long 0x904f
++ .long 0x88cc
++ .long .Ldebug_ranges0+0x30
++ .byte 0x1
++ .value 0x24a
++ .uleb128 0x53
++ .long 0x88e4
++ .uleb128 0x53
++ .long 0x88d9
++ .uleb128 0x5c
++ .long 0x9039
++ .long 0x8823
++ .long .Ldebug_ranges0+0x48
++ .byte 0x2
++ .byte 0xe3
++ .uleb128 0x53
++ .long 0x883b
++ .uleb128 0x53
++ .long 0x8830
++ .byte 0x0
++ .uleb128 0x54
++ .long 0x8847
++ .long .Ldebug_ranges0+0x60
++ .byte 0x2
++ .byte 0xe4
++ .uleb128 0x5d
++ .long 0x8854
++ .long .LLST26
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x51
++ .long 0x9069
++ .long .LBB207
++ .long .LBE207
++ .uleb128 0x43
++ .long .LASF1576
++ .byte 0x1
++ .value 0x250
++ .long 0x8b75
++ .byte 0x0
++ .uleb128 0x51
++ .long 0x9083
++ .long .LBB212
++ .long .LBE212
++ .uleb128 0x43
++ .long .LASF1576
++ .byte 0x1
++ .value 0x250
++ .long 0x8b75
++ .byte 0x0
++ .uleb128 0x4a
++ .long .LBB213
++ .long .LBE213
++ .uleb128 0x43
++ .long .LASF1576
++ .byte 0x1
++ .value 0x250
++ .long 0x8b75
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x874f
++ .uleb128 0x3b
++ .long 0x916f
++ .long .LASF1577
++ .byte 0x1
++ .value 0x4d1
++ .byte 0x1
++ .long 0x21
++ .byte 0x1
++ .uleb128 0x3c
++ .string "cpu"
++ .byte 0x1
++ .value 0x4d0
++ .long 0x21
++ .uleb128 0x41
++ .string "j"
++ .byte 0x1
++ .value 0x4d2
++ .long 0x21
++ .uleb128 0x43
++ .long .LASF735
++ .byte 0x1
++ .value 0x4d3
++ .long 0x887d
++ .uleb128 0x5e
++ .long 0x9123
++ .uleb128 0x5e
++ .long 0x90eb
++ .uleb128 0x43
++ .long .LASF1578
++ .byte 0x1
++ .value 0x4e4
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x5e
++ .long 0x90fd
++ .uleb128 0x43
++ .long .LASF1571
++ .byte 0x1
++ .value 0x4e9
++ .long 0x2f
++ .byte 0x0
++ .uleb128 0x5e
++ .long 0x9113
++ .uleb128 0x39
++ .uleb128 0x39
++ .uleb128 0x40
++ .long 0x8a07
++ .uleb128 0x39
++ .uleb128 0x3a
++ .long 0x8a0f
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x37
++ .uleb128 0x39
++ .uleb128 0x39
++ .uleb128 0x3a
++ .long 0x8a92
++ .uleb128 0x3a
++ .long 0x8a9d
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x5e
++ .long 0x9135
++ .uleb128 0x43
++ .long .LASF1571
++ .byte 0x1
++ .value 0x4f6
++ .long 0x2f
++ .byte 0x0
++ .uleb128 0x37
++ .uleb128 0x37
++ .uleb128 0x37
++ .uleb128 0x37
++ .uleb128 0x37
++ .uleb128 0x45
++ .long .LASF1579
++ .byte 0x1
++ .value 0x4d7
++ .long 0xbb
++ .byte 0x5
++ .byte 0x3
++ .long boot_done.19029
++ .uleb128 0x5f
++ .long .LASF1580
++ .long 0xa316
++ .byte 0x1
++ .byte 0x5
++ .byte 0x3
++ .long __func__.19031
++ .uleb128 0x45
++ .long .LASF1581
++ .byte 0x1
++ .value 0x4d4
++ .long 0x46d1
++ .byte 0x5
++ .byte 0x3
++ .long tvec_base_done.19028
++ .byte 0x0
++ .uleb128 0x59
++ .long 0x933c
++ .long .LASF1582
++ .byte 0x1
++ .value 0x538
++ .byte 0x1
++ .long 0x21
++ .long .LFB923
++ .long .LFE923
++ .long .LLST27
++ .uleb128 0x4d
++ .long .LASF1583
++ .byte 0x1
++ .value 0x536
++ .long 0x2e30
++ .long .LLST28
++ .uleb128 0x4d
++ .long .LASF840
++ .byte 0x1
++ .value 0x537
++ .long 0x2f
++ .long .LLST29
++ .uleb128 0x4d
++ .long .LASF1584
++ .byte 0x1
++ .value 0x537
++ .long 0x160b
++ .long .LLST30
++ .uleb128 0x41
++ .string "cpu"
++ .byte 0x1
++ .value 0x539
++ .long 0x5a
++ .uleb128 0x58
++ .long 0x90a0
++ .long .LBB240
++ .long .LBE240
++ .byte 0x1
++ .value 0x53d
++ .uleb128 0x53
++ .long 0x90b2
++ .uleb128 0x60
++ .long 0x9286
++ .long .Ldebug_ranges0+0x78
++ .uleb128 0x61
++ .long 0x90be
++ .long .LLST31
++ .uleb128 0x61
++ .long 0x90c8
++ .long .LLST32
++ .uleb128 0x62
++ .long 0x9236
++ .long 0x8a1a
++ .long .LBB243
++ .long .LBE243
++ .byte 0x1
++ .value 0x4dd
++ .uleb128 0x53
++ .long 0x8a41
++ .uleb128 0x53
++ .long 0x8a36
++ .uleb128 0x53
++ .long 0x8a2b
++ .uleb128 0x63
++ .long 0x89e0
++ .long .LBB245
++ .long .LBE245
++ .byte 0x54
++ .byte 0xc4
++ .uleb128 0x53
++ .long 0x89fc
++ .uleb128 0x53
++ .long 0x89f1
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x51
++ .long 0x9249
++ .long .LBB246
++ .long .LBE246
++ .uleb128 0x3a
++ .long 0x90de
++ .byte 0x0
++ .uleb128 0x58
++ .long 0x8a5d
++ .long .LBB247
++ .long .LBE247
++ .byte 0x1
++ .value 0x4e8
++ .uleb128 0x53
++ .long 0x8a85
++ .uleb128 0x53
++ .long 0x8a79
++ .uleb128 0x53
++ .long 0x8a6f
++ .uleb128 0x4a
++ .long .LBB249
++ .long .LBE249
++ .uleb128 0x61
++ .long 0x8a92
++ .long .LLST33
++ .uleb128 0x61
++ .long 0x8a9d
++ .long .LLST34
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x51
++ .long 0x9299
++ .long .LBB252
++ .long .LBE252
++ .uleb128 0x3a
++ .long 0x90f0
++ .byte 0x0
++ .uleb128 0x51
++ .long 0x92ac
++ .long .LBB256
++ .long .LBE256
++ .uleb128 0x3a
++ .long 0x9128
++ .byte 0x0
++ .uleb128 0x5b
++ .long 0x92c6
++ .long 0x8847
++ .long .Ldebug_ranges0+0xa8
++ .byte 0x1
++ .value 0x4fd
++ .uleb128 0x5d
++ .long 0x8854
++ .long .LLST35
++ .byte 0x0
++ .uleb128 0x62
++ .long 0x92e4
++ .long 0x8847
++ .long .LBB262
++ .long .LBE262
++ .byte 0x1
++ .value 0x4fe
++ .uleb128 0x5d
++ .long 0x8854
++ .long .LLST36
++ .byte 0x0
++ .uleb128 0x62
++ .long 0x9302
++ .long 0x8847
++ .long .LBB264
++ .long .LBE264
++ .byte 0x1
++ .value 0x4ff
++ .uleb128 0x5d
++ .long 0x8854
++ .long .LLST37
++ .byte 0x0
++ .uleb128 0x62
++ .long 0x9320
++ .long 0x8847
++ .long .LBB266
++ .long .LBE266
++ .byte 0x1
++ .value 0x500
++ .uleb128 0x5d
++ .long 0x8854
++ .long .LLST38
++ .byte 0x0
++ .uleb128 0x58
++ .long 0x8847
++ .long .LBB268
++ .long .LBE268
++ .byte 0x1
++ .value 0x503
++ .uleb128 0x5d
++ .long 0x8854
++ .long .LLST39
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x32
++ .long .LASF1585
++ .byte 0xa
++ .byte 0x77
++ .byte 0x1
++ .byte 0x3
++ .uleb128 0x56
++ .long 0x9389
++ .byte 0x1
++ .long .LASF1586
++ .byte 0x1
++ .value 0x552
++ .byte 0x1
++ .long .LFB924
++ .long .LFE924
++ .byte 0x2
++ .byte 0x74
++ .sleb128 4
++ .uleb128 0x50
++ .string "err"
++ .byte 0x1
++ .value 0x553
++ .long 0x21
++ .long .LLST41
++ .uleb128 0x4a
++ .long .LBB273
++ .long .LBE273
++ .uleb128 0x4f
++ .long .LASF1562
++ .byte 0x1
++ .value 0x554
++ .long 0x21
++ .long .LLST42
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x42
++ .long 0x947c
++ .long .LASF1587
++ .byte 0x1
++ .value 0x262
++ .byte 0x1
++ .byte 0x3
++ .uleb128 0x3d
++ .long .LASF735
++ .byte 0x1
++ .value 0x261
++ .long 0x887d
++ .uleb128 0x43
++ .long .LASF760
++ .byte 0x1
++ .value 0x263
++ .long 0x8791
++ .uleb128 0x5e
++ .long 0x943a
++ .uleb128 0x43
++ .long .LASF1588
++ .byte 0x1
++ .value 0x267
++ .long 0x17bc
++ .uleb128 0x43
++ .long .LASF600
++ .byte 0x1
++ .value 0x268
++ .long 0x17e5
++ .uleb128 0x43
++ .long .LASF746
++ .byte 0x1
++ .value 0x269
++ .long 0x21
++ .uleb128 0x5e
++ .long 0x9430
++ .uleb128 0x41
++ .string "fn"
++ .byte 0x1
++ .value 0x276
++ .long 0x3787
++ .uleb128 0x43
++ .long .LASF734
++ .byte 0x1
++ .value 0x277
++ .long 0x2f
++ .uleb128 0x5e
++ .long 0x9406
++ .uleb128 0x43
++ .long .LASF1576
++ .byte 0x1
++ .value 0x279
++ .long 0x8b75
++ .byte 0x0
++ .uleb128 0x5e
++ .long 0x941b
++ .uleb128 0x43
++ .long .LASF163
++ .byte 0x1
++ .value 0x283
++ .long 0x21
++ .uleb128 0x37
++ .uleb128 0x37
++ .uleb128 0x37
++ .byte 0x0
++ .uleb128 0x37
++ .uleb128 0x37
++ .uleb128 0x5e
++ .long 0x942b
++ .uleb128 0x39
++ .uleb128 0x3a
++ .long 0x8b0d
++ .uleb128 0x37
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x37
++ .uleb128 0x39
++ .uleb128 0x37
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x5e
++ .long 0x9438
++ .uleb128 0x37
++ .uleb128 0x37
++ .byte 0x0
++ .uleb128 0x37
++ .byte 0x0
++ .uleb128 0x5e
++ .long 0x9458
++ .uleb128 0x43
++ .long .LASF1589
++ .byte 0x1
++ .value 0x266
++ .long 0x2f
++ .uleb128 0x43
++ .long .LASF1590
++ .byte 0x1
++ .value 0x266
++ .long 0x8941
++ .byte 0x0
++ .uleb128 0x5e
++ .long 0x9476
++ .uleb128 0x43
++ .long .LASF1589
++ .byte 0x1
++ .value 0x266
++ .long 0x2f
++ .uleb128 0x43
++ .long .LASF1590
++ .byte 0x1
++ .value 0x266
++ .long 0x2f
++ .byte 0x0
++ .uleb128 0x37
++ .uleb128 0x37
++ .uleb128 0x39
++ .uleb128 0x37
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x4c
++ .long 0x96cb
++ .long .LASF1591
++ .byte 0x1
++ .value 0x368
++ .byte 0x1
++ .long .LFB904
++ .long .LFE904
++ .long .LLST43
++ .uleb128 0x5a
++ .string "h"
++ .byte 0x1
++ .value 0x367
++ .long 0x5ce1
++ .long .LLST44
++ .uleb128 0x4f
++ .long .LASF735
++ .byte 0x1
++ .value 0x369
++ .long 0x887d
++ .long .LLST45
++ .uleb128 0x60
++ .long 0x94c9
++ .long .Ldebug_ranges0+0xc0
++ .uleb128 0x43
++ .long .LASF1571
++ .byte 0x1
++ .value 0x369
++ .long 0x2f
++ .byte 0x0
++ .uleb128 0x51
++ .long 0x94e7
++ .long .LBB324
++ .long .LBE324
++ .uleb128 0x4f
++ .long .LASF1562
++ .byte 0x1
++ .value 0x369
++ .long 0x2f
++ .long .LLST46
++ .byte 0x0
++ .uleb128 0x58
++ .long 0x9389
++ .long .LBB325
++ .long .LBE325
++ .byte 0x1
++ .value 0x36e
++ .uleb128 0x53
++ .long 0x9397
++ .uleb128 0x60
++ .long 0x9527
++ .long .Ldebug_ranges0+0xd8
++ .uleb128 0x3a
++ .long 0x93a3
++ .uleb128 0x64
++ .long .Ldebug_ranges0+0x100
++ .uleb128 0x65
++ .long 0x93b4
++ .byte 0x2
++ .byte 0x91
++ .sleb128 -24
++ .uleb128 0x3a
++ .long 0x93c0
++ .uleb128 0x61
++ .long 0x93cc
++ .long .LLST47
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x5b
++ .long 0x9558
++ .long 0x88cc
++ .long .Ldebug_ranges0+0x128
++ .byte 0x1
++ .value 0x274
++ .uleb128 0x53
++ .long 0x88e4
++ .uleb128 0x53
++ .long 0x88d9
++ .uleb128 0x54
++ .long 0x8823
++ .long .Ldebug_ranges0+0x140
++ .byte 0x2
++ .byte 0xe3
++ .uleb128 0x53
++ .long 0x883b
++ .uleb128 0x53
++ .long 0x8830
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x66
++ .long 0x9575
++ .long 0x8847
++ .long .LBB342
++ .long .LBE342
++ .byte 0x2
++ .byte 0xe4
++ .uleb128 0x5d
++ .long 0x8854
++ .long .LLST48
++ .byte 0x0
++ .uleb128 0x51
++ .long 0x9656
++ .long .LBB344
++ .long .LBE344
++ .uleb128 0x61
++ .long 0x93dd
++ .long .LLST49
++ .uleb128 0x61
++ .long 0x93e8
++ .long .LLST50
++ .uleb128 0x62
++ .long 0x95b3
++ .long 0x8ac3
++ .long .LBB345
++ .long .LBE345
++ .byte 0x1
++ .value 0x27f
++ .uleb128 0x53
++ .long 0x8adb
++ .uleb128 0x53
++ .long 0x8ad0
++ .byte 0x0
++ .uleb128 0x62
++ .long 0x9604
++ .long 0x8ae7
++ .long .LBB347
++ .long .LBE347
++ .byte 0x1
++ .value 0x280
++ .uleb128 0x53
++ .long 0x8b01
++ .uleb128 0x53
++ .long 0x8af5
++ .uleb128 0x4a
++ .long .LBB348
++ .long .LBE348
++ .uleb128 0x3a
++ .long 0x8b0d
++ .uleb128 0x58
++ .long 0x88a8
++ .long .LBB349
++ .long .LBE349
++ .byte 0x1
++ .value 0x156
++ .uleb128 0x5d
++ .long 0x88c0
++ .long .LLST51
++ .uleb128 0x5d
++ .long 0x88b5
++ .long .LLST52
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x62
++ .long 0x961e
++ .long 0x8b1b
++ .long .LBB351
++ .long .LBE351
++ .byte 0x1
++ .value 0x281
++ .uleb128 0x53
++ .long 0x8b28
++ .byte 0x0
++ .uleb128 0x62
++ .long 0x9642
++ .long 0x8b3a
++ .long .LBB353
++ .long .LBE353
++ .byte 0x1
++ .value 0x281
++ .uleb128 0x67
++ .long 0x87eb
++ .long .LBB355
++ .long .LBE355
++ .byte 0x6
++ .byte 0x48
++ .byte 0x0
++ .uleb128 0x4a
++ .long .LBB357
++ .long .LBE357
++ .uleb128 0x61
++ .long 0x940b
++ .long .LLST53
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x62
++ .long 0x9670
++ .long 0x8b56
++ .long .LBB358
++ .long .LBE358
++ .byte 0x1
++ .value 0x275
++ .uleb128 0x53
++ .long 0x8b68
++ .byte 0x0
++ .uleb128 0x62
++ .long 0x968f
++ .long 0x8ac3
++ .long .LBB360
++ .long .LBE360
++ .byte 0x1
++ .value 0x291
++ .uleb128 0x53
++ .long 0x8adb
++ .uleb128 0x53
++ .long 0x8ad0
++ .byte 0x0
++ .uleb128 0x62
++ .long 0x96a9
++ .long 0x8b1b
++ .long .LBB362
++ .long .LBE362
++ .byte 0x1
++ .value 0x292
++ .uleb128 0x53
++ .long 0x8b28
++ .byte 0x0
++ .uleb128 0x58
++ .long 0x8b3a
++ .long .LBB364
++ .long .LBE364
++ .byte 0x1
++ .value 0x292
++ .uleb128 0x67
++ .long 0x87eb
++ .long .LBB366
++ .long .LBE366
++ .byte 0x6
++ .byte 0x48
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x68
++ .long 0x97e4
++ .byte 0x1
++ .long .LASF1592
++ .byte 0x1
++ .value 0x46c
++ .byte 0x1
++ .long 0x21
++ .long .LFB920
++ .long .LFE920
++ .long .LLST54
++ .uleb128 0x4d
++ .long .LASF82
++ .byte 0x1
++ .value 0x46b
++ .long 0x97e4
++ .long .LLST55
++ .uleb128 0x4f
++ .long .LASF1593
++ .byte 0x1
++ .value 0x46d
++ .long 0x2f
++ .long .LLST56
++ .uleb128 0x43
++ .long .LASF1594
++ .byte 0x1
++ .value 0x46d
++ .long 0x2f
++ .uleb128 0x4f
++ .long .LASF120
++ .byte 0x1
++ .value 0x46e
++ .long 0x77
++ .long .LLST57
++ .uleb128 0x45
++ .long .LASF1595
++ .byte 0x1
++ .value 0x46e
++ .long 0x77
++ .byte 0x1
++ .byte 0x51
++ .uleb128 0x41
++ .string "seq"
++ .byte 0x1
++ .value 0x46f
++ .long 0x2f
++ .uleb128 0x69
++ .string "out"
++ .byte 0x1
++ .value 0x4b9
++ .long .L91
++ .uleb128 0x5b
++ .long 0x9783
++ .long 0x8a5d
++ .long .Ldebug_ranges0+0x158
++ .byte 0x1
++ .value 0x471
++ .uleb128 0x53
++ .long 0x8a85
++ .uleb128 0x53
++ .long 0x8a79
++ .uleb128 0x53
++ .long 0x8a6f
++ .uleb128 0x64
++ .long .Ldebug_ranges0+0x170
++ .uleb128 0x61
++ .long 0x8a92
++ .long .LLST58
++ .uleb128 0x61
++ .long 0x8a9d
++ .long .LLST59
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x51
++ .long 0x97c8
++ .long .LBB374
++ .long .LBE374
++ .uleb128 0x6a
++ .string "tp"
++ .byte 0x1
++ .value 0x474
++ .long 0x173b
++ .byte 0x2
++ .byte 0x91
++ .sleb128 -20
++ .uleb128 0x58
++ .long 0x8b80
++ .long .LBB375
++ .long .LBE375
++ .byte 0x1
++ .value 0x475
++ .uleb128 0x53
++ .long 0x8b91
++ .uleb128 0x4a
++ .long .LBB376
++ .long .LBE376
++ .uleb128 0x61
++ .long 0x8b9b
++ .long .LLST60
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x58
++ .long 0x8bb2
++ .long .LBB377
++ .long .LBE377
++ .byte 0x1
++ .value 0x48c
++ .uleb128 0x53
++ .long 0x8bcd
++ .uleb128 0x53
++ .long 0x8bc3
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x7fd
++ .uleb128 0x68
++ .long 0x9828
++ .byte 0x1
++ .long .LASF1597
++ .byte 0x1
++ .value 0x4be
++ .byte 0x1
++ .long 0x5a
++ .long .LFB921
++ .long .LFE921
++ .long .LLST61
++ .uleb128 0x4e
++ .long .LASF82
++ .byte 0x1
++ .value 0x4bd
++ .long 0x97e4
++ .byte 0x2
++ .byte 0x91
++ .sleb128 0
++ .uleb128 0x6a
++ .string "val"
++ .byte 0x1
++ .value 0x4bf
++ .long 0x7fd
++ .byte 0x3
++ .byte 0x91
++ .sleb128 -72
++ .byte 0x0
++ .uleb128 0x6b
++ .long 0x9851
++ .long .LASF1598
++ .byte 0x1
++ .value 0x401
++ .byte 0x1
++ .long .LFB915
++ .long .LFE915
++ .byte 0x2
++ .byte 0x74
++ .sleb128 4
++ .uleb128 0x4d
++ .long .LASF1599
++ .byte 0x1
++ .value 0x400
++ .long 0x2f
++ .long .LLST63
++ .byte 0x0
++ .uleb128 0x6c
++ .long 0x987e
++ .byte 0x1
++ .long .LASF1600
++ .byte 0x1
++ .value 0x397
++ .byte 0x1
++ .long 0x2f
++ .long .LFB908
++ .long .LFE908
++ .byte 0x2
++ .byte 0x74
++ .sleb128 4
++ .uleb128 0x4e
++ .long .LASF1601
++ .byte 0x1
++ .value 0x396
++ .long 0x77
++ .byte 0x2
++ .byte 0x91
++ .sleb128 0
++ .byte 0x0
++ .uleb128 0x42
++ .long 0x98a2
++ .long .LASF1602
++ .byte 0x1
++ .value 0x37f
++ .byte 0x1
++ .byte 0x3
++ .uleb128 0x3d
++ .long .LASF1554
++ .byte 0x1
++ .value 0x37e
++ .long 0x2f
++ .uleb128 0x39
++ .uleb128 0x39
++ .uleb128 0x3a
++ .long 0x8bf2
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x6d
++ .long .LASF1603
++ .byte 0x1
++ .value 0x33f
++ .byte 0x1
++ .long 0x2f
++ .byte 0x1
++ .uleb128 0x57
++ .long 0x9926
++ .byte 0x1
++ .long .LASF1604
++ .byte 0x1
++ .value 0x38b
++ .byte 0x1
++ .long .LFB907
++ .long .LFE907
++ .long .LLST65
++ .uleb128 0x4d
++ .long .LASF1554
++ .byte 0x1
++ .value 0x38a
++ .long 0x2f
++ .long .LLST66
++ .uleb128 0x58
++ .long 0x987e
++ .long .LBB385
++ .long .LBE385
++ .byte 0x1
++ .value 0x38d
++ .uleb128 0x53
++ .long 0x988c
++ .uleb128 0x58
++ .long 0x8bd8
++ .long .LBB387
++ .long .LBE387
++ .byte 0x1
++ .value 0x381
++ .uleb128 0x53
++ .long 0x8be6
++ .uleb128 0x4a
++ .long .LBB388
++ .long .LBE388
++ .uleb128 0x61
++ .long 0x8bf2
++ .long .LLST67
++ .uleb128 0x6e
++ .long 0x98a2
++ .long .Ldebug_ranges0+0x188
++ .byte 0x1
++ .value 0x35a
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x6f
++ .byte 0x1
++ .long .LASF1721
++ .byte 0x1
++ .value 0x375
++ .byte 0x1
++ .long .LFB905
++ .long .LFE905
++ .byte 0x2
++ .byte 0x74
++ .sleb128 4
++ .uleb128 0x59
++ .long 0x99b3
++ .long .LASF1605
++ .byte 0x1
++ .value 0x16b
++ .byte 0x1
++ .long 0x887d
++ .long .LFB892
++ .long .LFE892
++ .long .LLST69
++ .uleb128 0x4d
++ .long .LASF760
++ .byte 0x1
++ .value 0x168
++ .long 0x8791
++ .long .LLST70
++ .uleb128 0x4d
++ .long .LASF53
++ .byte 0x1
++ .value 0x169
++ .long 0xdd4
++ .long .LLST71
++ .uleb128 0x4f
++ .long .LASF735
++ .byte 0x1
++ .value 0x16c
++ .long 0x887d
++ .long .LLST72
++ .uleb128 0x4a
++ .long .LBB392
++ .long .LBE392
++ .uleb128 0x4f
++ .long .LASF1606
++ .byte 0x1
++ .value 0x16f
++ .long 0x887d
++ .long .LLST73
++ .uleb128 0x70
++ .long 0x8c11
++ .long .LBB393
++ .long .LBE393
++ .byte 0x1
++ .value 0x178
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x68
++ .long 0x9a6a
++ .byte 0x1
++ .long .LASF1607
++ .byte 0x1
++ .value 0x20f
++ .byte 0x1
++ .long 0x21
++ .long .LFB897
++ .long .LFE897
++ .long .LLST74
++ .uleb128 0x4d
++ .long .LASF760
++ .byte 0x1
++ .value 0x20e
++ .long 0x8791
++ .long .LLST75
++ .uleb128 0x4f
++ .long .LASF735
++ .byte 0x1
++ .value 0x210
++ .long 0x887d
++ .long .LLST76
++ .uleb128 0x45
++ .long .LASF53
++ .byte 0x1
++ .value 0x211
++ .long 0x2f
++ .byte 0x2
++ .byte 0x91
++ .sleb128 -16
++ .uleb128 0x50
++ .string "ret"
++ .byte 0x1
++ .value 0x212
++ .long 0x21
++ .long .LLST77
++ .uleb128 0x69
++ .string "out"
++ .byte 0x1
++ .value 0x21e
++ .long .L133
++ .uleb128 0x58
++ .long 0x8ae7
++ .long .LBB395
++ .long .LBE395
++ .byte 0x1
++ .value 0x21b
++ .uleb128 0x53
++ .long 0x8b01
++ .uleb128 0x53
++ .long 0x8af5
++ .uleb128 0x4a
++ .long .LBB396
++ .long .LBE396
++ .uleb128 0x3a
++ .long 0x8b0d
++ .uleb128 0x58
++ .long 0x88a8
++ .long .LBB397
++ .long .LBE397
++ .byte 0x1
++ .value 0x156
++ .uleb128 0x5d
++ .long 0x88c0
++ .long .LLST78
++ .uleb128 0x5d
++ .long 0x88b5
++ .long .LLST79
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x68
++ .long 0x9ac3
++ .byte 0x1
++ .long .LASF1608
++ .byte 0x1
++ .value 0x238
++ .byte 0x1
++ .long 0x21
++ .long .LFB898
++ .long .LFE898
++ .long .LLST80
++ .uleb128 0x4d
++ .long .LASF760
++ .byte 0x1
++ .value 0x237
++ .long 0x8791
++ .long .LLST81
++ .uleb128 0x4a
++ .long .LBB399
++ .long .LBE399
++ .uleb128 0x50
++ .string "ret"
++ .byte 0x1
++ .value 0x23a
++ .long 0x21
++ .long .LLST82
++ .uleb128 0x70
++ .long 0x8c11
++ .long .LBB400
++ .long .LBE400
++ .byte 0x1
++ .value 0x23d
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x68
++ .long 0x9c15
++ .byte 0x1
++ .long .LASF1609
++ .byte 0x1
++ .value 0x17d
++ .byte 0x1
++ .long 0x21
++ .long .LFB893
++ .long .LFE893
++ .long .LLST83
++ .uleb128 0x4d
++ .long .LASF760
++ .byte 0x1
++ .value 0x17c
++ .long 0x8791
++ .long .LLST84
++ .uleb128 0x4d
++ .long .LASF732
++ .byte 0x1
++ .value 0x17c
++ .long 0x2f
++ .long .LLST85
++ .uleb128 0x4f
++ .long .LASF735
++ .byte 0x1
++ .value 0x17e
++ .long 0x887d
++ .long .LLST86
++ .uleb128 0x4f
++ .long .LASF1560
++ .byte 0x1
++ .value 0x17e
++ .long 0x887d
++ .long .LLST87
++ .uleb128 0x45
++ .long .LASF53
++ .byte 0x1
++ .value 0x17f
++ .long 0x2f
++ .byte 0x2
++ .byte 0x91
++ .sleb128 -24
++ .uleb128 0x6a
++ .string "ret"
++ .byte 0x1
++ .value 0x180
++ .long 0x21
++ .byte 0x2
++ .byte 0x91
++ .sleb128 -28
++ .uleb128 0x62
++ .long 0x9b90
++ .long 0x8ae7
++ .long .LBB402
++ .long .LBE402
++ .byte 0x1
++ .value 0x188
++ .uleb128 0x53
++ .long 0x8b01
++ .uleb128 0x53
++ .long 0x8af5
++ .uleb128 0x4a
++ .long .LBB403
++ .long .LBE403
++ .uleb128 0x3a
++ .long 0x8b0d
++ .uleb128 0x58
++ .long 0x88a8
++ .long .LBB404
++ .long .LBE404
++ .byte 0x1
++ .value 0x156
++ .uleb128 0x5d
++ .long 0x88c0
++ .long .LLST88
++ .uleb128 0x5d
++ .long 0x88b5
++ .long .LLST89
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x51
++ .long 0x9bc4
++ .long .LBB406
++ .long .LBE406
++ .uleb128 0x43
++ .long .LASF1571
++ .byte 0x1
++ .value 0x18c
++ .long 0x2f
++ .uleb128 0x4a
++ .long .LBB407
++ .long .LBE407
++ .uleb128 0x4f
++ .long .LASF1562
++ .byte 0x1
++ .value 0x18c
++ .long 0x2f
++ .long .LLST90
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x62
++ .long 0x9be3
++ .long 0x8c5c
++ .long .LBB408
++ .long .LBE408
++ .byte 0x1
++ .value 0x198
++ .uleb128 0x53
++ .long 0x8c74
++ .uleb128 0x53
++ .long 0x8c69
++ .byte 0x0
++ .uleb128 0x62
++ .long 0x9bfd
++ .long 0x8b1b
++ .long .LBB410
++ .long .LBE410
++ .byte 0x1
++ .value 0x199
++ .uleb128 0x53
++ .long 0x8b28
++ .byte 0x0
++ .uleb128 0x52
++ .long 0x8c5c
++ .long .Ldebug_ranges0+0x1a0
++ .byte 0x1
++ .value 0x19c
++ .uleb128 0x53
++ .long 0x8c74
++ .uleb128 0x53
++ .long 0x8c69
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x68
++ .long 0x9cdb
++ .byte 0x1
++ .long .LASF1610
++ .byte 0x1
++ .value 0x420
++ .byte 0x1
++ .long 0x5a
++ .long .LFB916
++ .long .LFE916
++ .long .LLST91
++ .uleb128 0x4d
++ .long .LASF1611
++ .byte 0x1
++ .value 0x41f
++ .long 0x5a
++ .long .LLST92
++ .uleb128 0x45
++ .long .LASF760
++ .byte 0x1
++ .value 0x421
++ .long 0x3728
++ .byte 0x2
++ .byte 0x91
++ .sleb128 -36
++ .uleb128 0x4f
++ .long .LASF1612
++ .byte 0x1
++ .value 0x422
++ .long 0x2f
++ .long .LLST93
++ .uleb128 0x69
++ .string "out"
++ .byte 0x1
++ .value 0x44a
++ .long .L158
++ .uleb128 0x62
++ .long 0x9c96
++ .long 0x8c81
++ .long .LBB416
++ .long .LBE416
++ .byte 0x1
++ .value 0x43c
++ .uleb128 0x4a
++ .long .LBB418
++ .long .LBE418
++ .uleb128 0x61
++ .long 0x8c93
++ .long .LLST94
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x5b
++ .long 0x9cb6
++ .long 0x8ca0
++ .long .Ldebug_ranges0+0x1b8
++ .byte 0x1
++ .value 0x443
++ .uleb128 0x53
++ .long 0x8cc3
++ .uleb128 0x53
++ .long 0x8cb8
++ .uleb128 0x53
++ .long 0x8cad
++ .byte 0x0
++ .uleb128 0x58
++ .long 0x8c81
++ .long .LBB421
++ .long .LBE421
++ .byte 0x1
++ .value 0x443
++ .uleb128 0x4a
++ .long .LBB423
++ .long .LBE423
++ .uleb128 0x61
++ .long 0x8c93
++ .long .LLST95
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x6c
++ .long 0x9d2d
++ .byte 0x1
++ .long .LASF1613
++ .byte 0x1
++ .value 0x45b
++ .byte 0x1
++ .long 0x5a
++ .long .LFB918
++ .long .LFE918
++ .byte 0x2
++ .byte 0x74
++ .sleb128 4
++ .uleb128 0x4d
++ .long .LASF1611
++ .byte 0x1
++ .value 0x45a
++ .long 0x5a
++ .long .LLST97
++ .uleb128 0x58
++ .long 0x8c81
++ .long .LBB426
++ .long .LBE426
++ .byte 0x1
++ .value 0x45c
++ .uleb128 0x4a
++ .long .LBB428
++ .long .LBE428
++ .uleb128 0x61
++ .long 0x8c93
++ .long .LLST98
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x56
++ .long 0x9d67
++ .byte 0x1
++ .long .LASF1614
++ .byte 0x1
++ .value 0x61e
++ .byte 0x1
++ .long .LFB925
++ .long .LFE925
++ .byte 0x2
++ .byte 0x74
++ .sleb128 4
++ .uleb128 0x4d
++ .long .LASF1615
++ .byte 0x1
++ .value 0x61d
++ .long 0x77
++ .long .LLST100
++ .uleb128 0x4f
++ .long .LASF1611
++ .byte 0x1
++ .value 0x61f
++ .long 0x2f
++ .long .LLST101
++ .byte 0x0
++ .uleb128 0x6c
++ .long 0x9db9
++ .byte 0x1
++ .long .LASF1616
++ .byte 0x1
++ .value 0x454
++ .byte 0x1
++ .long 0x5a
++ .long .LFB917
++ .long .LFE917
++ .byte 0x2
++ .byte 0x74
++ .sleb128 4
++ .uleb128 0x4d
++ .long .LASF1611
++ .byte 0x1
++ .value 0x453
++ .long 0x5a
++ .long .LLST103
++ .uleb128 0x58
++ .long 0x8c81
++ .long .LBB429
++ .long .LBE429
++ .byte 0x1
++ .value 0x455
++ .uleb128 0x4a
++ .long .LBB431
++ .long .LBE431
++ .uleb128 0x61
++ .long 0x8c93
++ .long .LLST104
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x3b
++ .long 0x9de4
++ .long .LASF1617
++ .byte 0xb
++ .value 0x62f
++ .byte 0x1
++ .long 0x21
++ .byte 0x3
++ .uleb128 0x3c
++ .string "p"
++ .byte 0xb
++ .value 0x62e
++ .long 0x15f9
++ .uleb128 0x39
++ .uleb128 0x39
++ .uleb128 0x37
++ .uleb128 0x39
++ .uleb128 0x39
++ .uleb128 0x3a
++ .long 0x896c
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x6c
++ .long 0x9eb0
++ .byte 0x1
++ .long .LASF1618
++ .byte 0x1
++ .value 0x62c
++ .byte 0x1
++ .long 0x2f
++ .long .LFB926
++ .long .LFE926
++ .byte 0x2
++ .byte 0x74
++ .sleb128 4
++ .uleb128 0x4d
++ .long .LASF1615
++ .byte 0x1
++ .value 0x62b
++ .long 0x77
++ .long .LLST106
++ .uleb128 0x4f
++ .long .LASF1611
++ .byte 0x1
++ .value 0x62d
++ .long 0x2f
++ .long .LLST107
++ .uleb128 0x62
++ .long 0x9e45
++ .long 0x8c81
++ .long .LBB445
++ .long .LBE445
++ .byte 0x1
++ .value 0x62f
++ .uleb128 0x4a
++ .long .LBB447
++ .long .LBE447
++ .uleb128 0x3a
++ .long 0x8c93
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x58
++ .long 0x9db9
++ .long .LBB448
++ .long .LBE448
++ .byte 0x1
++ .value 0x62f
++ .uleb128 0x53
++ .long 0x9dcb
++ .uleb128 0x58
++ .long 0x89a9
++ .long .LBB450
++ .long .LBE450
++ .byte 0xb
++ .value 0x630
++ .uleb128 0x53
++ .long 0x89c7
++ .uleb128 0x5d
++ .long 0x89bb
++ .long .LLST108
++ .uleb128 0x58
++ .long 0x8978
++ .long .LBB452
++ .long .LBE452
++ .byte 0xb
++ .value 0x621
++ .uleb128 0x53
++ .long 0x8993
++ .uleb128 0x53
++ .long 0x8989
++ .uleb128 0x63
++ .long 0x890f
++ .long .LBB454
++ .long .LBE454
++ .byte 0xf
++ .byte 0x41
++ .uleb128 0x53
++ .long 0x892a
++ .uleb128 0x53
++ .long 0x8920
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x57
++ .long 0x9f33
++ .byte 0x1
++ .long .LASF1619
++ .byte 0x1
++ .value 0x32b
++ .byte 0x1
++ .long .LFB901
++ .long .LFE901
++ .long .LLST109
++ .uleb128 0x4d
++ .long .LASF1620
++ .byte 0x1
++ .value 0x32a
++ .long 0x21
++ .long .LLST110
++ .uleb128 0x50
++ .string "p"
++ .byte 0x1
++ .value 0x32c
++ .long 0x15f9
++ .long .LLST111
++ .uleb128 0x41
++ .string "cpu"
++ .byte 0x1
++ .value 0x32d
++ .long 0x21
++ .uleb128 0x51
++ .long 0x9f12
++ .long .LBB460
++ .long .LBE460
++ .uleb128 0x4f
++ .long .LASF1562
++ .byte 0x1
++ .value 0x32d
++ .long 0x21
++ .long .LLST112
++ .byte 0x0
++ .uleb128 0x58
++ .long 0x8c81
++ .long .LBB461
++ .long .LBE461
++ .byte 0x1
++ .value 0x32c
++ .uleb128 0x4a
++ .long .LBB463
++ .long .LBE463
++ .uleb128 0x3a
++ .long 0x8c93
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x68
++ .long 0x9fd6
++ .byte 0x1
++ .long .LASF1621
++ .byte 0x1
++ .value 0x3bd
++ .byte 0x1
++ .long 0x5a
++ .long .LFB909
++ .long .LFE909
++ .long .LLST113
++ .uleb128 0x51
++ .long 0x9fb1
++ .long .LBB474
++ .long .LBE474
++ .uleb128 0x45
++ .long .LASF367
++ .byte 0x1
++ .value 0x3c0
++ .long 0x86d0
++ .byte 0x2
++ .byte 0x91
++ .sleb128 -48
++ .uleb128 0x45
++ .long .LASF1622
++ .byte 0x1
++ .value 0x3c1
++ .long 0x87a2
++ .byte 0x2
++ .byte 0x91
++ .sleb128 -24
++ .uleb128 0x50
++ .string "eip"
++ .byte 0x1
++ .value 0x3c2
++ .long 0x2f
++ .long .LLST114
++ .uleb128 0x58
++ .long 0x8c81
++ .long .LBB475
++ .long .LBE475
++ .byte 0x1
++ .value 0x3c5
++ .uleb128 0x4a
++ .long .LBB477
++ .long .LBE477
++ .uleb128 0x61
++ .long 0x8c93
++ .long .LLST115
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x58
++ .long 0x8c81
++ .long .LBB478
++ .long .LBE478
++ .byte 0x1
++ .value 0x3d0
++ .uleb128 0x4a
++ .long .LBB480
++ .long .LBE480
++ .uleb128 0x61
++ .long 0x8c93
++ .long .LLST116
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x6c
++ .long 0xa03a
++ .byte 0x1
++ .long .LASF1623
++ .byte 0x1
++ .value 0x3da
++ .byte 0x1
++ .long 0x5a
++ .long .LFB910
++ .long .LFE910
++ .byte 0x2
++ .byte 0x74
++ .sleb128 4
++ .uleb128 0x41
++ .string "pid"
++ .byte 0x1
++ .value 0x3db
++ .long 0x21
++ .uleb128 0x4a
++ .long .LBB485
++ .long .LBE485
++ .uleb128 0x43
++ .long .LASF1624
++ .byte 0x1
++ .value 0x3de
++ .long 0x15f9
++ .uleb128 0x58
++ .long 0x8c81
++ .long .LBB486
++ .long .LBE486
++ .byte 0x1
++ .value 0x3de
++ .uleb128 0x4a
++ .long .LBB488
++ .long .LBE488
++ .uleb128 0x61
++ .long 0x8c93
++ .long .LLST118
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x6c
++ .long 0xa07c
++ .byte 0x1
++ .long .LASF1625
++ .byte 0x1
++ .value 0x3e7
++ .byte 0x1
++ .long 0x5a
++ .long .LFB911
++ .long .LFE911
++ .byte 0x2
++ .byte 0x74
++ .sleb128 4
++ .uleb128 0x58
++ .long 0x8c81
++ .long .LBB492
++ .long .LBE492
++ .byte 0x1
++ .value 0x3e9
++ .uleb128 0x4a
++ .long .LBB494
++ .long .LBE494
++ .uleb128 0x61
++ .long 0x8c93
++ .long .LLST120
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x6c
++ .long 0xa0be
++ .byte 0x1
++ .long .LASF1626
++ .byte 0x1
++ .value 0x3ed
++ .byte 0x1
++ .long 0x5a
++ .long .LFB912
++ .long .LFE912
++ .byte 0x2
++ .byte 0x74
++ .sleb128 4
++ .uleb128 0x58
++ .long 0x8c81
++ .long .LBB498
++ .long .LBE498
++ .byte 0x1
++ .value 0x3ef
++ .uleb128 0x4a
++ .long .LBB500
++ .long .LBE500
++ .uleb128 0x61
++ .long 0x8c93
++ .long .LLST122
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x6c
++ .long 0xa100
++ .byte 0x1
++ .long .LASF1627
++ .byte 0x1
++ .value 0x3f3
++ .byte 0x1
++ .long 0x5a
++ .long .LFB913
++ .long .LFE913
++ .byte 0x2
++ .byte 0x74
++ .sleb128 4
++ .uleb128 0x58
++ .long 0x8c81
++ .long .LBB504
++ .long .LBE504
++ .byte 0x1
++ .value 0x3f5
++ .uleb128 0x4a
++ .long .LBB506
++ .long .LBE506
++ .uleb128 0x61
++ .long 0x8c93
++ .long .LLST124
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x6c
++ .long 0xa142
++ .byte 0x1
++ .long .LASF1628
++ .byte 0x1
++ .value 0x3f9
++ .byte 0x1
++ .long 0x5a
++ .long .LFB914
++ .long .LFE914
++ .byte 0x2
++ .byte 0x74
++ .sleb128 4
++ .uleb128 0x58
++ .long 0x8c81
++ .long .LBB510
++ .long .LBE510
++ .byte 0x1
++ .value 0x3fb
++ .uleb128 0x4a
++ .long .LBB512
++ .long .LBE512
++ .uleb128 0x61
++ .long 0x8c93
++ .long .LLST126
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x6c
++ .long 0xa184
++ .byte 0x1
++ .long .LASF1629
++ .byte 0x1
++ .value 0x463
++ .byte 0x1
++ .long 0x5a
++ .long .LFB919
++ .long .LFE919
++ .byte 0x2
++ .byte 0x74
++ .sleb128 4
++ .uleb128 0x58
++ .long 0x8c81
++ .long .LBB516
++ .long .LBE516
++ .byte 0x1
++ .value 0x464
++ .uleb128 0x4a
++ .long .LBB518
++ .long .LBE518
++ .uleb128 0x61
++ .long 0x8c93
++ .long .LLST128
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x6c
++ .long 0xa1c2
++ .byte 0x1
++ .long .LASF1630
++ .byte 0x1
++ .value 0x1d3
++ .byte 0x1
++ .long 0x21
++ .long .LFB895
++ .long .LFE895
++ .byte 0x2
++ .byte 0x74
++ .sleb128 4
++ .uleb128 0x4d
++ .long .LASF760
++ .byte 0x1
++ .value 0x1d2
++ .long 0x8791
++ .long .LLST130
++ .uleb128 0x4d
++ .long .LASF732
++ .byte 0x1
++ .value 0x1d2
++ .long 0x2f
++ .long .LLST131
++ .byte 0x0
++ .uleb128 0x33
++ .long 0xa1db
++ .long .LASF1631
++ .byte 0xa
++ .byte 0x83
++ .byte 0x1
++ .byte 0x3
++ .uleb128 0x35
++ .long .LASF760
++ .byte 0xa
++ .byte 0x82
++ .long 0x8791
++ .byte 0x0
++ .uleb128 0x68
++ .long 0xa286
++ .byte 0x1
++ .long .LASF1632
++ .byte 0x1
++ .value 0x1f0
++ .byte 0x1
++ .long 0x21
++ .long .LFB896
++ .long .LFE896
++ .long .LLST132
++ .uleb128 0x4d
++ .long .LASF760
++ .byte 0x1
++ .value 0x1ef
++ .long 0x8791
++ .long .LLST133
++ .uleb128 0x4f
++ .long .LASF735
++ .byte 0x1
++ .value 0x1f1
++ .long 0x887d
++ .long .LLST134
++ .uleb128 0x45
++ .long .LASF53
++ .byte 0x1
++ .value 0x1f2
++ .long 0x2f
++ .byte 0x2
++ .byte 0x91
++ .sleb128 -16
++ .uleb128 0x50
++ .string "ret"
++ .byte 0x1
++ .value 0x1f3
++ .long 0x21
++ .long .LLST135
++ .uleb128 0x58
++ .long 0x8ae7
++ .long .LBB533
++ .long .LBE533
++ .byte 0x1
++ .value 0x1f9
++ .uleb128 0x53
++ .long 0x8b01
++ .uleb128 0x53
++ .long 0x8af5
++ .uleb128 0x4a
++ .long .LBB534
++ .long .LBE534
++ .uleb128 0x3a
++ .long 0x8b0d
++ .uleb128 0x58
++ .long 0x88a8
++ .long .LBB535
++ .long .LBE535
++ .byte 0x1
++ .value 0x156
++ .uleb128 0x5d
++ .long 0x88c0
++ .long .LLST136
++ .uleb128 0x5d
++ .long 0x88b5
++ .long .LLST137
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x57
++ .long 0xa316
++ .byte 0x1
++ .long .LASF1633
++ .byte 0x1
++ .value 0x1b1
++ .byte 0x1
++ .long .LFB894
++ .long .LFE894
++ .long .LLST138
++ .uleb128 0x4d
++ .long .LASF760
++ .byte 0x1
++ .value 0x1b0
++ .long 0x8791
++ .long .LLST139
++ .uleb128 0x5a
++ .string "cpu"
++ .byte 0x1
++ .value 0x1b0
++ .long 0x21
++ .long .LLST140
++ .uleb128 0x4f
++ .long .LASF735
++ .byte 0x1
++ .value 0x1b2
++ .long 0x887d
++ .long .LLST141
++ .uleb128 0x4f
++ .long .LASF53
++ .byte 0x1
++ .value 0x1b3
++ .long 0x2f
++ .long .LLST142
++ .uleb128 0x51
++ .long 0xa2fa
++ .long .LBB546
++ .long .LBE546
++ .uleb128 0x43
++ .long .LASF1571
++ .byte 0x1
++ .value 0x1b2
++ .long 0x2f
++ .byte 0x0
++ .uleb128 0x58
++ .long 0x8c5c
++ .long .LBB547
++ .long .LBE547
++ .byte 0x1
++ .value 0x1b8
++ .uleb128 0x53
++ .long 0x8c74
++ .uleb128 0x53
++ .long 0x8c69
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x14
++ .long 0x967
++ .uleb128 0x12
++ .long 0xa32b
++ .long 0xbb
++ .uleb128 0x13
++ .long 0x28
++ .byte 0xa
++ .byte 0x0
++ .uleb128 0x71
++ .long .LASF1634
++ .byte 0x1
++ .byte 0x31
++ .long 0xa33c
++ .byte 0x5
++ .byte 0x3
++ .long __kstrtab_jiffies_64
++ .uleb128 0x14
++ .long 0xa31b
++ .uleb128 0x71
++ .long .LASF1635
++ .byte 0x1
++ .byte 0x31
++ .long 0x4fe5
++ .byte 0x5
++ .byte 0x3
++ .long __ksymtab_jiffies_64
++ .uleb128 0x12
++ .long 0xa362
++ .long 0xbb
++ .uleb128 0x13
++ .long 0x28
++ .byte 0xf
++ .byte 0x0
++ .uleb128 0x71
++ .long .LASF1636
++ .byte 0x1
++ .byte 0x53
++ .long 0xa373
++ .byte 0x5
++ .byte 0x3
++ .long __kstrtab_boot_tvec_bases
++ .uleb128 0x14
++ .long 0xa352
++ .uleb128 0x71
++ .long .LASF1637
++ .byte 0x1
++ .byte 0x53
++ .long 0x4fe5
++ .byte 0x5
++ .byte 0x3
++ .long __ksymtab_boot_tvec_bases
++ .uleb128 0x71
++ .long .LASF1638
++ .byte 0x1
++ .byte 0x54
++ .long 0x887d
++ .byte 0x5
++ .byte 0x3
++ .long per_cpu__tvec_bases
++ .uleb128 0x12
++ .long 0xa3aa
++ .long 0xbb
++ .uleb128 0x13
++ .long 0x28
++ .byte 0xf
++ .byte 0x0
++ .uleb128 0x71
++ .long .LASF1639
++ .byte 0x1
++ .byte 0xac
++ .long 0xa3bb
++ .byte 0x5
++ .byte 0x3
++ .long __kstrtab___round_jiffies
++ .uleb128 0x14
++ .long 0xa39a
++ .uleb128 0x71
++ .long .LASF1640
++ .byte 0x1
++ .byte 0xac
++ .long 0x4fe5
++ .byte 0x5
++ .byte 0x3
++ .long __ksymtab___round_jiffies
++ .uleb128 0x12
++ .long 0xa3e1
++ .long 0xbb
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x18
++ .byte 0x0
++ .uleb128 0x71
++ .long .LASF1641
++ .byte 0x1
++ .byte 0xcc
++ .long 0xa3f2
++ .byte 0x5
++ .byte 0x3
++ .long __kstrtab___round_jiffies_relative
++ .uleb128 0x14
++ .long 0xa3d1
++ .uleb128 0x71
++ .long .LASF1642
++ .byte 0x1
++ .byte 0xcc
++ .long 0x4fe5
++ .byte 0x5
++ .byte 0x3
++ .long __ksymtab___round_jiffies_relative
++ .uleb128 0x12
++ .long 0xa418
++ .long 0xbb
++ .uleb128 0x13
++ .long 0x28
++ .byte 0xd
++ .byte 0x0
++ .uleb128 0x71
++ .long .LASF1643
++ .byte 0x1
++ .byte 0xe1
++ .long 0xa429
++ .byte 0x5
++ .byte 0x3
++ .long __kstrtab_round_jiffies
++ .uleb128 0x14
++ .long 0xa408
++ .uleb128 0x71
++ .long .LASF1644
++ .byte 0x1
++ .byte 0xe1
++ .long 0x4fe5
++ .byte 0x5
++ .byte 0x3
++ .long __ksymtab_round_jiffies
++ .uleb128 0x12
++ .long 0xa44f
++ .long 0xbb
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x16
++ .byte 0x0
++ .uleb128 0x71
++ .long .LASF1645
++ .byte 0x1
++ .byte 0xf6
++ .long 0xa460
++ .byte 0x5
++ .byte 0x3
++ .long __kstrtab_round_jiffies_relative
++ .uleb128 0x14
++ .long 0xa43f
++ .uleb128 0x71
++ .long .LASF1646
++ .byte 0x1
++ .byte 0xf6
++ .long 0x4fe5
++ .byte 0x5
++ .byte 0x3
++ .long __ksymtab_round_jiffies_relative
++ .uleb128 0x12
++ .long 0xa486
++ .long 0xbb
++ .uleb128 0x13
++ .long 0x28
++ .byte 0xa
++ .byte 0x0
++ .uleb128 0x45
++ .long .LASF1647
++ .byte 0x1
++ .value 0x148
++ .long 0xa498
++ .byte 0x5
++ .byte 0x3
++ .long __kstrtab_init_timer
++ .uleb128 0x14
++ .long 0xa476
++ .uleb128 0x45
++ .long .LASF1648
++ .byte 0x1
++ .value 0x148
++ .long 0x4fe5
++ .byte 0x5
++ .byte 0x3
++ .long __ksymtab_init_timer
++ .uleb128 0x12
++ .long 0xa4bf
++ .long 0xbb
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x15
++ .byte 0x0
++ .uleb128 0x45
++ .long .LASF1649
++ .byte 0x1
++ .value 0x14f
++ .long 0xa4d1
++ .byte 0x5
++ .byte 0x3
++ .long __kstrtab_init_timer_deferrable
++ .uleb128 0x14
++ .long 0xa4af
++ .uleb128 0x45
++ .long .LASF1650
++ .byte 0x1
++ .value 0x14f
++ .long 0x4fe5
++ .byte 0x5
++ .byte 0x3
++ .long __ksymtab_init_timer_deferrable
++ .uleb128 0x12
++ .long 0xa4f8
++ .long 0xbb
++ .uleb128 0x13
++ .long 0x28
++ .byte 0xb
++ .byte 0x0
++ .uleb128 0x45
++ .long .LASF1651
++ .byte 0x1
++ .value 0x1a7
++ .long 0xa50a
++ .byte 0x5
++ .byte 0x3
++ .long __kstrtab___mod_timer
++ .uleb128 0x14
++ .long 0xa4e8
++ .uleb128 0x45
++ .long .LASF1652
++ .byte 0x1
++ .value 0x1a7
++ .long 0x4fe5
++ .byte 0x5
++ .byte 0x3
++ .long __ksymtab___mod_timer
++ .uleb128 0x12
++ .long 0xa531
++ .long 0xbb
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x9
++ .byte 0x0
++ .uleb128 0x45
++ .long .LASF1653
++ .byte 0x1
++ .value 0x1e2
++ .long 0xa543
++ .byte 0x5
++ .byte 0x3
++ .long __kstrtab_mod_timer
++ .uleb128 0x14
++ .long 0xa521
++ .uleb128 0x45
++ .long .LASF1654
++ .byte 0x1
++ .value 0x1e2
++ .long 0x4fe5
++ .byte 0x5
++ .byte 0x3
++ .long __ksymtab_mod_timer
++ .uleb128 0x12
++ .long 0xa56a
++ .long 0xbb
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x9
++ .byte 0x0
++ .uleb128 0x45
++ .long .LASF1655
++ .byte 0x1
++ .value 0x202
++ .long 0xa57c
++ .byte 0x5
++ .byte 0x3
++ .long __kstrtab_del_timer
++ .uleb128 0x14
++ .long 0xa55a
++ .uleb128 0x45
++ .long .LASF1656
++ .byte 0x1
++ .value 0x202
++ .long 0x4fe5
++ .byte 0x5
++ .byte 0x3
++ .long __ksymtab_del_timer
++ .uleb128 0x12
++ .long 0xa5a3
++ .long 0xbb
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x15
++ .byte 0x0
++ .uleb128 0x45
++ .long .LASF1657
++ .byte 0x1
++ .value 0x224
++ .long 0xa5b5
++ .byte 0x5
++ .byte 0x3
++ .long __kstrtab_try_to_del_timer_sync
++ .uleb128 0x14
++ .long 0xa593
++ .uleb128 0x45
++ .long .LASF1658
++ .byte 0x1
++ .value 0x224
++ .long 0x4fe5
++ .byte 0x5
++ .byte 0x3
++ .long __ksymtab_try_to_del_timer_sync
++ .uleb128 0x12
++ .long 0xa5dc
++ .long 0xbb
++ .uleb128 0x13
++ .long 0x28
++ .byte 0xe
++ .byte 0x0
++ .uleb128 0x45
++ .long .LASF1659
++ .byte 0x1
++ .value 0x241
++ .long 0xa5ee
++ .byte 0x5
++ .byte 0x3
++ .long __kstrtab_del_timer_sync
++ .uleb128 0x14
++ .long 0xa5cc
++ .uleb128 0x45
++ .long .LASF1660
++ .byte 0x1
++ .value 0x241
++ .long 0x4fe5
++ .byte 0x5
++ .byte 0x3
++ .long __ksymtab_del_timer_sync
++ .uleb128 0x12
++ .long 0xa615
++ .long 0xbb
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x7
++ .byte 0x0
++ .uleb128 0x45
++ .long .LASF1661
++ .byte 0x1
++ .value 0x34d
++ .long 0xa627
++ .byte 0x5
++ .byte 0x3
++ .long __kstrtab_avenrun
++ .uleb128 0x14
++ .long 0xa605
++ .uleb128 0x45
++ .long .LASF1662
++ .byte 0x1
++ .value 0x34d
++ .long 0x4fe5
++ .byte 0x5
++ .byte 0x3
++ .long __ksymtab_avenrun
++ .uleb128 0x12
++ .long 0xa64e
++ .long 0xbb
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x10
++ .byte 0x0
++ .uleb128 0x45
++ .long .LASF1663
++ .byte 0x1
++ .value 0x44d
++ .long 0xa660
++ .byte 0x5
++ .byte 0x3
++ .long __kstrtab_schedule_timeout
++ .uleb128 0x14
++ .long 0xa63e
++ .uleb128 0x45
++ .long .LASF1664
++ .byte 0x1
++ .value 0x44d
++ .long 0x4fe5
++ .byte 0x5
++ .byte 0x3
++ .long __ksymtab_schedule_timeout
++ .uleb128 0x12
++ .long 0xa687
++ .long 0xbb
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x1e
++ .byte 0x0
++ .uleb128 0x45
++ .long .LASF1665
++ .byte 0x1
++ .value 0x458
++ .long 0xa699
++ .byte 0x5
++ .byte 0x3
++ .long __kstrtab_schedule_timeout_interruptible
++ .uleb128 0x14
++ .long 0xa677
++ .uleb128 0x45
++ .long .LASF1666
++ .byte 0x1
++ .value 0x458
++ .long 0x4fe5
++ .byte 0x5
++ .byte 0x3
++ .long __ksymtab_schedule_timeout_interruptible
++ .uleb128 0x12
++ .long 0xa6c0
++ .long 0xbb
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x20
++ .byte 0x0
++ .uleb128 0x45
++ .long .LASF1667
++ .byte 0x1
++ .value 0x45f
++ .long 0xa6d2
++ .byte 0x5
++ .byte 0x3
++ .long __kstrtab_schedule_timeout_uninterruptible
++ .uleb128 0x14
++ .long 0xa6b0
++ .uleb128 0x45
++ .long .LASF1668
++ .byte 0x1
++ .value 0x45f
++ .long 0x4fe5
++ .byte 0x5
++ .byte 0x3
++ .long __ksymtab_schedule_timeout_uninterruptible
++ .uleb128 0x12
++ .long 0xa6f9
++ .long 0x161c
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x1f
++ .byte 0x0
++ .uleb128 0x43
++ .long .LASF1669
++ .byte 0x1
++ .value 0x4ce
++ .long 0xa6e9
++ .uleb128 0x45
++ .long .LASF1670
++ .byte 0x1
++ .value 0x54c
++ .long 0x2ddf
++ .byte 0x5
++ .byte 0x3
++ .long timers_nb
++ .uleb128 0x12
++ .long 0xa727
++ .long 0xbb
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x6
++ .byte 0x0
++ .uleb128 0x45
++ .long .LASF1671
++ .byte 0x1
++ .value 0x625
++ .long 0xa739
++ .byte 0x5
++ .byte 0x3
++ .long __kstrtab_msleep
++ .uleb128 0x14
++ .long 0xa717
++ .uleb128 0x45
++ .long .LASF1672
++ .byte 0x1
++ .value 0x625
++ .long 0x4fe5
++ .byte 0x5
++ .byte 0x3
++ .long __ksymtab_msleep
++ .uleb128 0x12
++ .long 0xa760
++ .long 0xbb
++ .uleb128 0x13
++ .long 0x28
++ .byte 0x14
++ .byte 0x0
++ .uleb128 0x45
++ .long .LASF1673
++ .byte 0x1
++ .value 0x634
++ .long 0xa772
++ .byte 0x5
++ .byte 0x3
++ .long __kstrtab_msleep_interruptible
++ .uleb128 0x14
++ .long 0xa750
++ .uleb128 0x45
++ .long .LASF1674
++ .byte 0x1
++ .value 0x634
++ .long 0x4fe5
++ .byte 0x5
++ .byte 0x3
++ .long __ksymtab_msleep_interruptible
++ .uleb128 0x12
++ .long 0xa794
++ .long 0x21
++ .uleb128 0x72
++ .byte 0x0
++ .uleb128 0x73
++ .long .LASF1675
++ .byte 0x38
++ .byte 0x3f
++ .long 0xa789
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x12
++ .long 0xa7ac
++ .long 0x2f
++ .uleb128 0x72
++ .byte 0x0
++ .uleb128 0x73
++ .long .LASF1676
++ .byte 0x58
++ .byte 0x30
++ .long 0xa7a1
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x73
++ .long .LASF1677
++ .byte 0x58
++ .byte 0x3a
++ .long 0x2f
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x73
++ .long .LASF1678
++ .byte 0x59
++ .byte 0x77
++ .long 0x923
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x73
++ .long .LASF1679
++ .byte 0x8
++ .byte 0x97
++ .long 0x2f
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x74
++ .long .LASF1680
++ .byte 0x10
++ .byte 0x58
++ .long 0x2f
++ .byte 0x1
++ .byte 0x1
++ .byte 0x54
++ .uleb128 0x73
++ .long .LASF1681
++ .byte 0x9
++ .byte 0x9
++ .long 0x15f9
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x73
++ .long .LASF1682
++ .byte 0x1f
++ .byte 0x5b
++ .long 0x173b
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x73
++ .long .LASF1683
++ .byte 0x1f
++ .byte 0x5c
++ .long 0x173b
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x73
++ .long .LASF1684
++ .byte 0x1f
++ .byte 0x5d
++ .long 0x170a
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x73
++ .long .LASF1685
++ .byte 0x5a
++ .byte 0xc9
++ .long 0x21
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x74
++ .long .LASF1686
++ .byte 0x1
++ .byte 0x2f
++ .long 0x189
++ .byte 0x1
++ .byte 0x5
++ .byte 0x3
++ .long jiffies_64
++ .uleb128 0x73
++ .long .LASF1687
++ .byte 0x5b
++ .byte 0x52
++ .long 0x8941
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x11
++ .long 0xa863
++ .byte 0x1
++ .long 0x21
++ .uleb128 0x6
++ .long 0x1e7d
++ .uleb128 0x6
++ .long 0x21
++ .byte 0x0
++ .uleb128 0x75
++ .long .LASF1688
++ .byte 0x5c
++ .value 0x132
++ .long 0xa871
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0xa84e
++ .uleb128 0x73
++ .long .LASF1689
++ .byte 0x60
++ .byte 0x16
++ .long 0x2f
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x73
++ .long .LASF1690
++ .byte 0x61
++ .byte 0x5d
++ .long 0x21
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x73
++ .long .LASF1691
++ .byte 0x61
++ .byte 0x5f
++ .long 0x21
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x73
++ .long .LASF1692
++ .byte 0x61
++ .byte 0x60
++ .long 0x21
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x73
++ .long .LASF1693
++ .byte 0x61
++ .byte 0x61
++ .long 0x21
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x73
++ .long .LASF1694
++ .byte 0x62
++ .byte 0x7b
++ .long 0x21
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x73
++ .long .LASF405
++ .byte 0x59
++ .byte 0x41
++ .long 0x1e83
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x73
++ .long .LASF1695
++ .byte 0x59
++ .byte 0x72
++ .long 0x21
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x73
++ .long .LASF1696
++ .byte 0x59
++ .byte 0x75
++ .long 0x923
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x73
++ .long .LASF413
++ .byte 0x63
++ .byte 0x7d
++ .long 0xa8f9
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x4
++ .byte 0x4
++ .long 0x1f51
++ .uleb128 0x75
++ .long .LASF1697
++ .byte 0x18
++ .value 0x19e
++ .long 0x2d82
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x75
++ .long .LASF1698
++ .byte 0x18
++ .value 0x241
++ .long 0x2bf1
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x12
++ .long 0xa926
++ .long 0x2e6b
++ .uleb128 0x72
++ .byte 0x0
++ .uleb128 0x73
++ .long .LASF1699
++ .byte 0x3
++ .byte 0x1a
++ .long 0xa91b
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x73
++ .long .LASF1700
++ .byte 0x25
++ .byte 0x71
++ .long 0x2ee9
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x73
++ .long .LASF1701
++ .byte 0x25
++ .byte 0x72
++ .long 0x2ee9
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x74
++ .long .LASF1702
++ .byte 0x1
++ .byte 0x53
++ .long 0x378d
++ .byte 0x1
++ .byte 0x5
++ .byte 0x3
++ .long boot_tvec_bases
++ .uleb128 0x76
++ .long .LASF1703
++ .byte 0x1
++ .value 0x34d
++ .long 0x8dc
++ .byte 0x1
++ .byte 0x5
++ .byte 0x3
++ .long avenrun
++ .uleb128 0x73
++ .long .LASF1704
++ .byte 0xb
++ .byte 0x7c
++ .long 0x21
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x75
++ .long .LASF1705
++ .byte 0xb
++ .value 0x47d
++ .long 0x3070
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x73
++ .long .LASF1706
++ .byte 0x66
++ .byte 0x21
++ .long 0x45d4
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x73
++ .long .LASF1707
++ .byte 0x67
++ .byte 0x19
++ .long 0x46e1
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x12
++ .long 0xa9b2
++ .long 0x942
++ .uleb128 0x72
++ .byte 0x0
++ .uleb128 0x73
++ .long .LASF1708
++ .byte 0x67
++ .byte 0x21
++ .long 0xa9a7
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x73
++ .long .LASF1709
++ .byte 0x68
++ .byte 0xc
++ .long 0x674
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x12
++ .long 0xa9dc
++ .long 0x507e
++ .uleb128 0x13
++ .long 0x28
++ .byte 0xdf
++ .byte 0x0
++ .uleb128 0x73
++ .long .LASF1016
++ .byte 0x6a
++ .byte 0xb2
++ .long 0xa9cc
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x73
++ .long .LASF1710
++ .byte 0x6c
++ .byte 0xd
++ .long 0x21
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x73
++ .long .LASF1711
++ .byte 0x6d
++ .byte 0x62
++ .long 0x534f
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x73
++ .long .LASF1712
++ .byte 0x24
++ .byte 0xb4
++ .long 0x1680
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x12
++ .long 0xaa20
++ .long 0x36e
++ .uleb128 0x13
++ .long 0x28
++ .byte 0xf
++ .byte 0x0
++ .uleb128 0x73
++ .long .LASF1713
++ .byte 0x15
++ .byte 0xc1
++ .long 0xaa10
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x73
++ .long .LASF1714
++ .byte 0x6e
++ .byte 0x3a
++ .long 0x86a5
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x73
++ .long .LASF563
++ .byte 0x6e
++ .byte 0x7a
++ .long 0x2bdb
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x73
++ .long .LASF1715
++ .byte 0x51
++ .byte 0xe2
++ .long 0x6e9c
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x75
++ .long .LASF1716
++ .byte 0x51
++ .value 0x106
++ .long 0x36ad
++ .byte 0x1
++ .byte 0x1
++ .uleb128 0x5
++ .long 0xaa73
++ .byte 0x1
++ .uleb128 0x6
++ .long 0x160b
++ .uleb128 0x6
++ .long 0x77
++ .byte 0x0
++ .uleb128 0x76
++ .long .LASF1717
++ .byte 0x1
++ .value 0x3b9
++ .long 0xaa86
++ .byte 0x1
++ .byte 0x5
++ .byte 0x3
++ .long rec_event
++ .uleb128 0x4
++ .byte 0x4
++ .long 0xaa62
++ .byte 0x0
++ .section .debug_abbrev
++ .uleb128 0x1
++ .uleb128 0x11
++ .byte 0x1
++ .uleb128 0x10
++ .uleb128 0x6
++ .uleb128 0x52
++ .uleb128 0x1
++ .uleb128 0x25
++ .uleb128 0xe
++ .uleb128 0x13
++ .uleb128 0xb
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x1b
++ .uleb128 0xe
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x2
++ .uleb128 0x24
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0x8
++ .uleb128 0xb
++ .uleb128 0xb
++ .uleb128 0x3e
++ .uleb128 0xb
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0x24
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0xb
++ .uleb128 0xb
++ .uleb128 0x3e
++ .uleb128 0xb
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x4
++ .uleb128 0xf
++ .byte 0x0
++ .uleb128 0xb
++ .uleb128 0xb
++ .uleb128 0x49
++ .uleb128 0x13
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x5
++ .uleb128 0x15
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0x27
++ .uleb128 0xc
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x6
++ .uleb128 0x5
++ .byte 0x0
++ .uleb128 0x49
++ .uleb128 0x13
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x7
++ .uleb128 0x16
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0xb
++ .uleb128 0x49
++ .uleb128 0x13
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x8
++ .uleb128 0x16
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0x8
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0xb
++ .uleb128 0x49
++ .uleb128 0x13
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x9
++ .uleb128 0x13
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0xb
++ .uleb128 0xb
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0xb
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0xa
++ .uleb128 0xd
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0xb
++ .uleb128 0x49
++ .uleb128 0x13
++ .uleb128 0x38
++ .uleb128 0xa
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0xb
++ .uleb128 0xd
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0x8
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0xb
++ .uleb128 0x49
++ .uleb128 0x13
++ .uleb128 0x38
++ .uleb128 0xa
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0xc
++ .uleb128 0x17
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0xb
++ .uleb128 0xb
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0xb
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0xd
++ .uleb128 0xd
++ .byte 0x0
++ .uleb128 0x49
++ .uleb128 0x13
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0xe
++ .uleb128 0xd
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0xb
++ .uleb128 0x49
++ .uleb128 0x13
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0xf
++ .uleb128 0x13
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0xb
++ .uleb128 0xb
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0xb
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x10
++ .uleb128 0xd
++ .byte 0x0
++ .uleb128 0x49
++ .uleb128 0x13
++ .uleb128 0x38
++ .uleb128 0xa
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x11
++ .uleb128 0x15
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0x27
++ .uleb128 0xc
++ .uleb128 0x49
++ .uleb128 0x13
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x12
++ .uleb128 0x1
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0x49
++ .uleb128 0x13
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x13
++ .uleb128 0x21
++ .byte 0x0
++ .uleb128 0x49
++ .uleb128 0x13
++ .uleb128 0x2f
++ .uleb128 0xb
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x14
++ .uleb128 0x26
++ .byte 0x0
++ .uleb128 0x49
++ .uleb128 0x13
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x15
++ .uleb128 0x13
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0xb
++ .uleb128 0xb
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x16
++ .uleb128 0xd
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .uleb128 0x49
++ .uleb128 0x13
++ .uleb128 0x38
++ .uleb128 0xa
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x17
++ .uleb128 0xd
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0x8
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .uleb128 0x49
++ .uleb128 0x13
++ .uleb128 0x38
++ .uleb128 0xa
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x18
++ .uleb128 0x15
++ .byte 0x0
++ .uleb128 0x27
++ .uleb128 0xc
++ .uleb128 0x49
++ .uleb128 0x13
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x19
++ .uleb128 0x15
++ .byte 0x0
++ .uleb128 0x27
++ .uleb128 0xc
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x1a
++ .uleb128 0x13
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0xb
++ .uleb128 0x5
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0xb
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x1b
++ .uleb128 0x17
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0xb
++ .uleb128 0x5
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x1c
++ .uleb128 0xd
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .uleb128 0x49
++ .uleb128 0x13
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x1d
++ .uleb128 0x13
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0xb
++ .uleb128 0xb
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x1e
++ .uleb128 0x16
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .uleb128 0x49
++ .uleb128 0x13
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x1f
++ .uleb128 0x13
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0xb
++ .uleb128 0x5
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x20
++ .uleb128 0xd
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .uleb128 0x49
++ .uleb128 0x13
++ .uleb128 0xb
++ .uleb128 0xb
++ .uleb128 0xd
++ .uleb128 0xb
++ .uleb128 0xc
++ .uleb128 0xb
++ .uleb128 0x38
++ .uleb128 0xa
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x21
++ .uleb128 0x13
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3c
++ .uleb128 0xc
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x22
++ .uleb128 0xf
++ .byte 0x0
++ .uleb128 0xb
++ .uleb128 0xb
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x23
++ .uleb128 0x21
++ .byte 0x0
++ .uleb128 0x49
++ .uleb128 0x13
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x24
++ .uleb128 0x13
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0xb
++ .uleb128 0xb
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x25
++ .uleb128 0x17
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0xb
++ .uleb128 0xb
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0xb
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x26
++ .uleb128 0xd
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0x8
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0xb
++ .uleb128 0x49
++ .uleb128 0x13
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x27
++ .uleb128 0x4
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0xb
++ .uleb128 0xb
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0xb
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x28
++ .uleb128 0x28
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x1c
++ .uleb128 0xd
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x29
++ .uleb128 0x13
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0x3
++ .uleb128 0x8
++ .uleb128 0xb
++ .uleb128 0xb
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0xb
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x2a
++ .uleb128 0x13
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0xb
++ .uleb128 0xb
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0xb
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x2b
++ .uleb128 0x17
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0xb
++ .uleb128 0xb
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x2c
++ .uleb128 0x4
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0xb
++ .uleb128 0xb
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x2d
++ .uleb128 0x35
++ .byte 0x0
++ .uleb128 0x49
++ .uleb128 0x13
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x2e
++ .uleb128 0x13
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0x8
++ .uleb128 0x3c
++ .uleb128 0xc
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x2f
++ .uleb128 0xd
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0xb
++ .uleb128 0x49
++ .uleb128 0x13
++ .uleb128 0xb
++ .uleb128 0xb
++ .uleb128 0xd
++ .uleb128 0xb
++ .uleb128 0xc
++ .uleb128 0xb
++ .uleb128 0x38
++ .uleb128 0xa
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x30
++ .uleb128 0xd
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0x8
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .uleb128 0x49
++ .uleb128 0x13
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x31
++ .uleb128 0x26
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x32
++ .uleb128 0x2e
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0xb
++ .uleb128 0x27
++ .uleb128 0xc
++ .uleb128 0x20
++ .uleb128 0xb
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x33
++ .uleb128 0x2e
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0xb
++ .uleb128 0x27
++ .uleb128 0xc
++ .uleb128 0x20
++ .uleb128 0xb
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x34
++ .uleb128 0x5
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0x8
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0xb
++ .uleb128 0x49
++ .uleb128 0x13
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x35
++ .uleb128 0x5
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0xb
++ .uleb128 0x49
++ .uleb128 0x13
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x36
++ .uleb128 0x2e
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0xb
++ .uleb128 0x27
++ .uleb128 0xc
++ .uleb128 0x49
++ .uleb128 0x13
++ .uleb128 0x20
++ .uleb128 0xb
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x37
++ .uleb128 0xb
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x38
++ .uleb128 0x34
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0xb
++ .uleb128 0x49
++ .uleb128 0x13
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x39
++ .uleb128 0xb
++ .byte 0x1
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x3a
++ .uleb128 0x34
++ .byte 0x0
++ .uleb128 0x31
++ .uleb128 0x13
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x3b
++ .uleb128 0x2e
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .uleb128 0x27
++ .uleb128 0xc
++ .uleb128 0x49
++ .uleb128 0x13
++ .uleb128 0x20
++ .uleb128 0xb
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x3c
++ .uleb128 0x5
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0x8
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .uleb128 0x49
++ .uleb128 0x13
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x3d
++ .uleb128 0x5
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .uleb128 0x49
++ .uleb128 0x13
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x3e
++ .uleb128 0xa
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0xb
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x3f
++ .uleb128 0x34
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0x8
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0xb
++ .uleb128 0x49
++ .uleb128 0x13
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x40
++ .uleb128 0xa
++ .byte 0x0
++ .uleb128 0x31
++ .uleb128 0x13
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x41
++ .uleb128 0x34
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0x8
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .uleb128 0x49
++ .uleb128 0x13
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x42
++ .uleb128 0x2e
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .uleb128 0x27
++ .uleb128 0xc
++ .uleb128 0x20
++ .uleb128 0xb
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x43
++ .uleb128 0x34
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .uleb128 0x49
++ .uleb128 0x13
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x44
++ .uleb128 0x2e
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0xb
++ .uleb128 0x27
++ .uleb128 0xc
++ .uleb128 0x49
++ .uleb128 0x13
++ .uleb128 0x20
++ .uleb128 0xb
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x45
++ .uleb128 0x34
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .uleb128 0x49
++ .uleb128 0x13
++ .uleb128 0x2
++ .uleb128 0xa
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x46
++ .uleb128 0x2e
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .uleb128 0x27
++ .uleb128 0xc
++ .uleb128 0x20
++ .uleb128 0xb
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x47
++ .uleb128 0x2e
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0x3f
++ .uleb128 0xc
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0xb
++ .uleb128 0x27
++ .uleb128 0xc
++ .uleb128 0x49
++ .uleb128 0x13
++ .uleb128 0x11
++ .uleb128 0x1
++ .uleb128 0x12
++ .uleb128 0x1
++ .uleb128 0x40
++ .uleb128 0x6
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x48
++ .uleb128 0x5
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0x8
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0xb
++ .uleb128 0x49
++ .uleb128 0x13
++ .uleb128 0x2
++ .uleb128 0x6
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x49
++ .uleb128 0x2e
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0x3f
++ .uleb128 0xc
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0xb
++ .uleb128 0x27
++ .uleb128 0xc
++ .uleb128 0x49
++ .uleb128 0x13
++ .uleb128 0x11
++ .uleb128 0x1
++ .uleb128 0x12
++ .uleb128 0x1
++ .uleb128 0x40
++ .uleb128 0xa
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x4a
++ .uleb128 0xb
++ .byte 0x1
++ .uleb128 0x11
++ .uleb128 0x1
++ .uleb128 0x12
++ .uleb128 0x1
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x4b
++ .uleb128 0x34
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0xb
++ .uleb128 0x49
++ .uleb128 0x13
++ .uleb128 0x2
++ .uleb128 0x6
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x4c
++ .uleb128 0x2e
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .uleb128 0x27
++ .uleb128 0xc
++ .uleb128 0x11
++ .uleb128 0x1
++ .uleb128 0x12
++ .uleb128 0x1
++ .uleb128 0x40
++ .uleb128 0x6
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x4d
++ .uleb128 0x5
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .uleb128 0x49
++ .uleb128 0x13
++ .uleb128 0x2
++ .uleb128 0x6
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x4e
++ .uleb128 0x5
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .uleb128 0x49
++ .uleb128 0x13
++ .uleb128 0x2
++ .uleb128 0xa
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x4f
++ .uleb128 0x34
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .uleb128 0x49
++ .uleb128 0x13
++ .uleb128 0x2
++ .uleb128 0x6
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x50
++ .uleb128 0x34
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0x8
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .uleb128 0x49
++ .uleb128 0x13
++ .uleb128 0x2
++ .uleb128 0x6
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x51
++ .uleb128 0xb
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0x11
++ .uleb128 0x1
++ .uleb128 0x12
++ .uleb128 0x1
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x52
++ .uleb128 0x1d
++ .byte 0x1
++ .uleb128 0x31
++ .uleb128 0x13
++ .uleb128 0x55
++ .uleb128 0x6
++ .uleb128 0x58
++ .uleb128 0xb
++ .uleb128 0x59
++ .uleb128 0x5
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x53
++ .uleb128 0x5
++ .byte 0x0
++ .uleb128 0x31
++ .uleb128 0x13
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x54
++ .uleb128 0x1d
++ .byte 0x1
++ .uleb128 0x31
++ .uleb128 0x13
++ .uleb128 0x55
++ .uleb128 0x6
++ .uleb128 0x58
++ .uleb128 0xb
++ .uleb128 0x59
++ .uleb128 0xb
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x55
++ .uleb128 0x5
++ .byte 0x0
++ .uleb128 0x31
++ .uleb128 0x13
++ .uleb128 0x2
++ .uleb128 0xa
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x56
++ .uleb128 0x2e
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0x3f
++ .uleb128 0xc
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .uleb128 0x27
++ .uleb128 0xc
++ .uleb128 0x11
++ .uleb128 0x1
++ .uleb128 0x12
++ .uleb128 0x1
++ .uleb128 0x40
++ .uleb128 0xa
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x57
++ .uleb128 0x2e
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0x3f
++ .uleb128 0xc
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .uleb128 0x27
++ .uleb128 0xc
++ .uleb128 0x11
++ .uleb128 0x1
++ .uleb128 0x12
++ .uleb128 0x1
++ .uleb128 0x40
++ .uleb128 0x6
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x58
++ .uleb128 0x1d
++ .byte 0x1
++ .uleb128 0x31
++ .uleb128 0x13
++ .uleb128 0x11
++ .uleb128 0x1
++ .uleb128 0x12
++ .uleb128 0x1
++ .uleb128 0x58
++ .uleb128 0xb
++ .uleb128 0x59
++ .uleb128 0x5
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x59
++ .uleb128 0x2e
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .uleb128 0x27
++ .uleb128 0xc
++ .uleb128 0x49
++ .uleb128 0x13
++ .uleb128 0x11
++ .uleb128 0x1
++ .uleb128 0x12
++ .uleb128 0x1
++ .uleb128 0x40
++ .uleb128 0x6
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x5a
++ .uleb128 0x5
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0x8
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .uleb128 0x49
++ .uleb128 0x13
++ .uleb128 0x2
++ .uleb128 0x6
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x5b
++ .uleb128 0x1d
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0x31
++ .uleb128 0x13
++ .uleb128 0x55
++ .uleb128 0x6
++ .uleb128 0x58
++ .uleb128 0xb
++ .uleb128 0x59
++ .uleb128 0x5
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x5c
++ .uleb128 0x1d
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0x31
++ .uleb128 0x13
++ .uleb128 0x55
++ .uleb128 0x6
++ .uleb128 0x58
++ .uleb128 0xb
++ .uleb128 0x59
++ .uleb128 0xb
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x5d
++ .uleb128 0x5
++ .byte 0x0
++ .uleb128 0x31
++ .uleb128 0x13
++ .uleb128 0x2
++ .uleb128 0x6
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x5e
++ .uleb128 0xb
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x5f
++ .uleb128 0x34
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x49
++ .uleb128 0x13
++ .uleb128 0x34
++ .uleb128 0xc
++ .uleb128 0x2
++ .uleb128 0xa
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x60
++ .uleb128 0xb
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0x55
++ .uleb128 0x6
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x61
++ .uleb128 0x34
++ .byte 0x0
++ .uleb128 0x31
++ .uleb128 0x13
++ .uleb128 0x2
++ .uleb128 0x6
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x62
++ .uleb128 0x1d
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0x31
++ .uleb128 0x13
++ .uleb128 0x11
++ .uleb128 0x1
++ .uleb128 0x12
++ .uleb128 0x1
++ .uleb128 0x58
++ .uleb128 0xb
++ .uleb128 0x59
++ .uleb128 0x5
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x63
++ .uleb128 0x1d
++ .byte 0x1
++ .uleb128 0x31
++ .uleb128 0x13
++ .uleb128 0x11
++ .uleb128 0x1
++ .uleb128 0x12
++ .uleb128 0x1
++ .uleb128 0x58
++ .uleb128 0xb
++ .uleb128 0x59
++ .uleb128 0xb
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x64
++ .uleb128 0xb
++ .byte 0x1
++ .uleb128 0x55
++ .uleb128 0x6
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x65
++ .uleb128 0x34
++ .byte 0x0
++ .uleb128 0x31
++ .uleb128 0x13
++ .uleb128 0x2
++ .uleb128 0xa
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x66
++ .uleb128 0x1d
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0x31
++ .uleb128 0x13
++ .uleb128 0x11
++ .uleb128 0x1
++ .uleb128 0x12
++ .uleb128 0x1
++ .uleb128 0x58
++ .uleb128 0xb
++ .uleb128 0x59
++ .uleb128 0xb
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x67
++ .uleb128 0x1d
++ .byte 0x0
++ .uleb128 0x31
++ .uleb128 0x13
++ .uleb128 0x11
++ .uleb128 0x1
++ .uleb128 0x12
++ .uleb128 0x1
++ .uleb128 0x58
++ .uleb128 0xb
++ .uleb128 0x59
++ .uleb128 0xb
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x68
++ .uleb128 0x2e
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0x3f
++ .uleb128 0xc
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .uleb128 0x27
++ .uleb128 0xc
++ .uleb128 0x49
++ .uleb128 0x13
++ .uleb128 0x11
++ .uleb128 0x1
++ .uleb128 0x12
++ .uleb128 0x1
++ .uleb128 0x40
++ .uleb128 0x6
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x69
++ .uleb128 0xa
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0x8
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .uleb128 0x11
++ .uleb128 0x1
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x6a
++ .uleb128 0x34
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0x8
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .uleb128 0x49
++ .uleb128 0x13
++ .uleb128 0x2
++ .uleb128 0xa
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x6b
++ .uleb128 0x2e
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .uleb128 0x27
++ .uleb128 0xc
++ .uleb128 0x11
++ .uleb128 0x1
++ .uleb128 0x12
++ .uleb128 0x1
++ .uleb128 0x40
++ .uleb128 0xa
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x6c
++ .uleb128 0x2e
++ .byte 0x1
++ .uleb128 0x1
++ .uleb128 0x13
++ .uleb128 0x3f
++ .uleb128 0xc
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .uleb128 0x27
++ .uleb128 0xc
++ .uleb128 0x49
++ .uleb128 0x13
++ .uleb128 0x11
++ .uleb128 0x1
++ .uleb128 0x12
++ .uleb128 0x1
++ .uleb128 0x40
++ .uleb128 0xa
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x6d
++ .uleb128 0x2e
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .uleb128 0x27
++ .uleb128 0xc
++ .uleb128 0x49
++ .uleb128 0x13
++ .uleb128 0x20
++ .uleb128 0xb
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x6e
++ .uleb128 0x1d
++ .byte 0x0
++ .uleb128 0x31
++ .uleb128 0x13
++ .uleb128 0x55
++ .uleb128 0x6
++ .uleb128 0x58
++ .uleb128 0xb
++ .uleb128 0x59
++ .uleb128 0x5
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x6f
++ .uleb128 0x2e
++ .byte 0x0
++ .uleb128 0x3f
++ .uleb128 0xc
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .uleb128 0x27
++ .uleb128 0xc
++ .uleb128 0x11
++ .uleb128 0x1
++ .uleb128 0x12
++ .uleb128 0x1
++ .uleb128 0x40
++ .uleb128 0xa
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x70
++ .uleb128 0x1d
++ .byte 0x0
++ .uleb128 0x31
++ .uleb128 0x13
++ .uleb128 0x11
++ .uleb128 0x1
++ .uleb128 0x12
++ .uleb128 0x1
++ .uleb128 0x58
++ .uleb128 0xb
++ .uleb128 0x59
++ .uleb128 0x5
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x71
++ .uleb128 0x34
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0xb
++ .uleb128 0x49
++ .uleb128 0x13
++ .uleb128 0x2
++ .uleb128 0xa
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x72
++ .uleb128 0x21
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x73
++ .uleb128 0x34
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0xb
++ .uleb128 0x49
++ .uleb128 0x13
++ .uleb128 0x3f
++ .uleb128 0xc
++ .uleb128 0x3c
++ .uleb128 0xc
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x74
++ .uleb128 0x34
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0xb
++ .uleb128 0x49
++ .uleb128 0x13
++ .uleb128 0x3f
++ .uleb128 0xc
++ .uleb128 0x2
++ .uleb128 0xa
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x75
++ .uleb128 0x34
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .uleb128 0x49
++ .uleb128 0x13
++ .uleb128 0x3f
++ .uleb128 0xc
++ .uleb128 0x3c
++ .uleb128 0xc
++ .byte 0x0
++ .byte 0x0
++ .uleb128 0x76
++ .uleb128 0x34
++ .byte 0x0
++ .uleb128 0x3
++ .uleb128 0xe
++ .uleb128 0x3a
++ .uleb128 0xb
++ .uleb128 0x3b
++ .uleb128 0x5
++ .uleb128 0x49
++ .uleb128 0x13
++ .uleb128 0x3f
++ .uleb128 0xc
++ .uleb128 0x2
++ .uleb128 0xa
++ .byte 0x0
++ .byte 0x0
++ .byte 0x0
++ .section .debug_pubnames,"",@progbits
++ .long 0x2bb
++ .value 0x2
++ .long .Ldebug_info0
++ .long 0xaa8d
++ .long 0x8ccf
++ .string "__round_jiffies"
++ .long 0x8d1f
++ .string "__round_jiffies_relative"
++ .long 0x8d58
++ .string "round_jiffies"
++ .long 0x8d9b
++ .string "round_jiffies_relative"
++ .long 0x8eda
++ .string "init_timer"
++ .long 0x8f49
++ .string "init_timer_deferrable"
++ .long 0x9345
++ .string "init_timers"
++ .long 0x96cb
++ .string "do_sysinfo"
++ .long 0x97ea
++ .string "sys_sysinfo"
++ .long 0x9851
++ .string "sys_alarm"
++ .long 0x98b0
++ .string "do_timer"
++ .long 0x9926
++ .string "run_local_timers"
++ .long 0x99b3
++ .string "try_to_del_timer_sync"
++ .long 0x9a6a
++ .string "del_timer_sync"
++ .long 0x9ac3
++ .string "__mod_timer"
++ .long 0x9c15
++ .string "schedule_timeout"
++ .long 0x9cdb
++ .string "schedule_timeout_uninterruptible"
++ .long 0x9d2d
++ .string "msleep"
++ .long 0x9d67
++ .string "schedule_timeout_interruptible"
++ .long 0x9de4
++ .string "msleep_interruptible"
++ .long 0x9eb0
++ .string "update_process_times"
++ .long 0x9f33
++ .string "sys_getpid"
++ .long 0x9fd6
++ .string "sys_getppid"
++ .long 0xa03a
++ .string "sys_getuid"
++ .long 0xa07c
++ .string "sys_geteuid"
++ .long 0xa0be
++ .string "sys_getgid"
++ .long 0xa100
++ .string "sys_getegid"
++ .long 0xa142
++ .string "sys_gettid"
++ .long 0xa184
++ .string "mod_timer"
++ .long 0xa1db
++ .string "del_timer"
++ .long 0xa286
++ .string "add_timer_on"
++ .long 0xa7e0
++ .string "current_stack_pointer"
++ .long 0xa82f
++ .string "jiffies_64"
++ .long 0xa94d
++ .string "boot_tvec_bases"
++ .long 0xa95f
++ .string "avenrun"
++ .long 0xaa73
++ .string "rec_event"
++ .long 0x0
++ .section .debug_aranges,"",@progbits
++ .long 0x44
++ .value 0x2
++ .long .Ldebug_info0
++ .byte 0x4
++ .byte 0x0
++ .value 0x0
++ .value 0x0
++ .long .Ltext0
++ .long .Letext0-.Ltext0
++ .long .LFB923
++ .long .LFE923-.LFB923
++ .long .LFB924
++ .long .LFE924-.LFB924
++ .long .LFB916
++ .long .LFE916-.LFB916
++ .long .LFB918
++ .long .LFE918-.LFB918
++ .long .LFB917
++ .long .LFE917-.LFB917
++ .long 0x0
++ .long 0x0
++ .section .debug_ranges,"",@progbits
++.Ldebug_ranges0:
++ .long .LBB185
++ .long .LBE185
++ .long .LBB189
++ .long .LBE189
++ .long 0x0
++ .long 0x0
++ .long .LBB187
++ .long .LBE187
++ .long .LBB191
++ .long .LBE191
++ .long 0x0
++ .long 0x0
++ .long .LBB199
++ .long .LBE199
++ .long .LBB208
++ .long .LBE208
++ .long 0x0
++ .long 0x0
++ .long .LBB201
++ .long .LBE201
++ .long .LBB205
++ .long .LBE205
++ .long 0x0
++ .long 0x0
++ .long .LBB203
++ .long .LBE203
++ .long .LBB210
++ .long .LBE210
++ .long 0x0
++ .long 0x0
++ .long .LBB241
++ .long .LBE241
++ .long .LBB257
++ .long .LBE257
++ .long .LBB255
++ .long .LBE255
++ .long .LBB253
++ .long .LBE253
++ .long .LBB250
++ .long .LBE250
++ .long 0x0
++ .long 0x0
++ .long .LBB258
++ .long .LBE258
++ .long .LBB260
++ .long .LBE260
++ .long 0x0
++ .long 0x0
++ .long .LBB322
++ .long .LBE322
++ .long .LBB323
++ .long .LBE323
++ .long 0x0
++ .long 0x0
++ .long .LBB326
++ .long .LBE326
++ .long .LBB332
++ .long .LBE332
++ .long .LBB330
++ .long .LBE330
++ .long .LBB328
++ .long .LBE328
++ .long 0x0
++ .long 0x0
++ .long .LBB327
++ .long .LBE327
++ .long .LBB333
++ .long .LBE333
++ .long .LBB331
++ .long .LBE331
++ .long .LBB329
++ .long .LBE329
++ .long 0x0
++ .long 0x0
++ .long .LBB334
++ .long .LBE334
++ .long .LBB338
++ .long .LBE338
++ .long 0x0
++ .long 0x0
++ .long .LBB336
++ .long .LBE336
++ .long .LBB340
++ .long .LBE340
++ .long 0x0
++ .long 0x0
++ .long .LBB368
++ .long .LBE368
++ .long .LBB371
++ .long .LBE371
++ .long 0x0
++ .long 0x0
++ .long .LBB370
++ .long .LBE370
++ .long .LBB373
++ .long .LBE373
++ .long 0x0
++ .long 0x0
++ .long .LBB389
++ .long .LBE389
++ .long .LBB390
++ .long .LBE390
++ .long 0x0
++ .long 0x0
++ .long .LBB412
++ .long .LBE412
++ .long .LBB414
++ .long .LBE414
++ .long 0x0
++ .long 0x0
++ .long .LBB419
++ .long .LBE419
++ .long .LBB424
++ .long .LBE424
++ .long 0x0
++ .long 0x0
++ .section .debug_str,"MS",@progbits,1
++.LASF16:
++ .string "long long int"
++.LASF610:
++ .string "qs_pending"
++.LASF28:
++ .string "__u64"
++.LASF1708:
++ .string "idt_table"
++.LASF596:
++ .string "notifier_call"
++.LASF768:
++ .string "ki_flags"
++.LASF107:
++ .string "line"
++.LASF1360:
++ .string "link"
++.LASF1675:
++ .string "console_printk"
++.LASF828:
++ .string "vm_page_prot"
++.LASF694:
++ .string "shared_vm"
++.LASF547:
++ .string "vm_stat_diff"
++.LASF496:
++ .string "si_errno"
++.LASF1381:
++ .string "read"
++.LASF687:
++ .string "mmlist"
++.LASF1505:
++ .string "vm_set"
++.LASF1609:
++ .string "__mod_timer"
++.LASF1636:
++ .string "__kstrtab_boot_tvec_bases"
++.LASF1:
++ .string "long unsigned int"
++.LASF264:
++ .string "pi_lock"
++.LASF315:
++ .string "private"
++.LASF552:
++ .string "lowmem_reserve"
++.LASF1498:
++ .string "offset"
++.LASF1168:
++ .string "ia_valid"
++.LASF1101:
++ .string "last"
++.LASF711:
++ .string "cpu_vm_mask"
++.LASF468:
++ .string "sa_flags"
++.LASF1687:
++ .string "jiffies"
++.LASF684:
++ .string "map_count"
++.LASF406:
++ .string "smp_prepare_boot_cpu"
++.LASF681:
++ .string "free_area_cache"
++.LASF1331:
++ .string "assoc_mapping"
++.LASF139:
++ .string "fsave"
++.LASF404:
++ .string "release"
++.LASF678:
++ .string "mmap_base"
++.LASF207:
++ .string "sibling"
++.LASF1562:
++ .string "ret__"
++.LASF1431:
++ .string "file_lock_operations"
++.LASF1463:
++ .string "read_inode"
++.LASF1623:
++ .string "sys_getppid"
++.LASF394:
++ .string "coherent_dma_mask"
++.LASF356:
++ .string "mpc_config_translation"
++.LASF718:
++ .string "core_startup_done"
++.LASF455:
++ .string "semadj"
++.LASF1558:
++ .string "timer_stats_timer_set_start_info"
++.LASF95:
++ .string "___eip"
++.LASF1125:
++ .string "s_qcop"
++.LASF14:
++ .string "__kernel_gid32_t"
++.LASF905:
++ .string "kstat"
++.LASF222:
++ .string "it_prof_expires"
++.LASF1645:
++ .string "__kstrtab_round_jiffies_relative"
++.LASF1138:
++ .string "s_dirty"
++.LASF1464:
++ .string "dirty_inode"
++.LASF830:
++ .string "vm_rb"
++.LASF214:
++ .string "rt_priority"
++.LASF1295:
++ .string "set_xquota"
++.LASF881:
++ .string "SLEEP_INTERRUPTED"
++.LASF874:
++ .string "ngroups"
++.LASF1163:
++ .string "height"
++.LASF1016:
++ .string "irq_desc"
++.LASF1565:
++ .string "__round_jiffies"
++.LASF1699:
++ .string "malloc_sizes"
++.LASF17:
++ .string "umode_t"
++.LASF197:
++ .string "exit_state"
++.LASF1596:
++ .string "found"
++.LASF703:
++ .string "end_data"
++.LASF164:
++ .string "addr_limit"
++.LASF895:
++ .string "cpu_usage_stat"
++.LASF1126:
++ .string "s_export_op"
++.LASF748:
++ .string "resolution"
++.LASF662:
++ .string "i_cindex"
++.LASF1015:
++ .string "irq_flow_handler_t"
++.LASF1298:
++ .string "dqonoff_mutex"
++.LASF216:
++ .string "stime"
++.LASF509:
++ .string "list"
++.LASF1172:
++ .string "ia_size"
++.LASF359:
++ .string "trans_quad"
++.LASF1586:
++ .string "init_timers"
++.LASF284:
++ .string "raw_spinlock_t"
++.LASF407:
++ .string "smp_prepare_cpus"
++.LASF414:
++ .string "name"
++.LASF276:
++ .string "ioac"
++.LASF1203:
++ .string "d_icount"
++.LASF471:
++ .string "k_sigaction"
++.LASF692:
++ .string "total_vm"
++.LASF1455:
++ .string "fs_flags"
++.LASF1473:
++ .string "unlockfs"
++.LASF317:
++ .string "task_list"
++.LASF1131:
++ .string "s_lock"
++.LASF39:
++ .string "loff_t"
++.LASF1404:
++ .string "fl_owner"
++.LASF549:
++ .string "pages_min"
++.LASF1567:
++ .string "round_jiffies"
++.LASF1631:
++ .string "timer_stats_timer_clear_start_info"
++.LASF535:
++ .string "vfsmount"
++.LASF515:
++ .string "pwdmnt"
++.LASF1332:
++ .string "block_device"
++.LASF650:
++ .string "i_bytes"
++.LASF1337:
++ .string "bd_mount_sem"
++.LASF1077:
++ .string "device_attribute"
++.LASF765:
++ .string "iov_len"
++.LASF966:
++ .string "symtab"
++.LASF76:
++ .string "regs"
++.LASF162:
++ .string "exec_domain"
++.LASF1167:
++ .string "iattr"
++.LASF1075:
++ .string "resume"
++.LASF174:
++ .string "load_weight"
++.LASF1523:
++ .string "__list_add"
++.LASF545:
++ .string "per_cpu_pageset"
++.LASF986:
++ .string "kset_uevent_ops"
++.LASF1237:
++ .string "dqi_free_entry"
++.LASF143:
++ .string "thread_struct"
++.LASF1072:
++ .string "suspend"
++.LASF1398:
++ .string "splice_write"
++.LASF670:
++ .string "i_writecount"
++.LASF1500:
++ .string "mapping"
++.LASF305:
++ .string "rb_root"
++.LASF1178:
++ .string "qsize_t"
++.LASF1394:
++ .string "sendpage"
++.LASF232:
++ .string "group_info"
++.LASF677:
++ .string "unmap_area"
++.LASF518:
++ .string "d_count"
++.LASF982:
++ .string "list_lock"
++.LASF153:
++ .string "v86mask"
++.LASF1348:
++ .string "bd_list"
++.LASF543:
++ .string "high"
++.LASF469:
++ .string "sa_restorer"
++.LASF1423:
++ .string "ahead_start"
++.LASF689:
++ .string "_anon_rss"
++.LASF1228:
++ .string "qs_btimelimit"
++.LASF1258:
++ .string "dq_id"
++.LASF1438:
++ .string "fl_notify"
++.LASF582:
++ .string "node_id"
++.LASF821:
++ .string "internal_pages"
++.LASF1027:
++ .string "pending_mask"
++.LASF120:
++ .string "mem_unit"
++.LASF1223:
++ .string "qs_flags"
++.LASF1530:
++ .string "tbase_get_base"
++.LASF361:
++ .string "trans_local"
++.LASF1227:
++ .string "qs_incoredqs"
++.LASF1595:
++ .string "bitcount"
++.LASF466:
++ .string "sigaction"
++.LASF853:
++ .string "group_stop_count"
++.LASF1458:
++ .string "fs_supers"
++.LASF1679:
++ .string "mmu_cr4_features"
++.LASF1666:
++ .string "__ksymtab_schedule_timeout_interruptible"
++.LASF474:
++ .string "sival_int"
++.LASF201:
++ .string "personality"
++.LASF1703:
++ .string "avenrun"
++.LASF1418:
++ .string "fown_struct"
++.LASF1640:
++ .string "__ksymtab___round_jiffies"
++.LASF565:
++ .string "_pad2_"
++.LASF351:
++ .string "mpc_featureflag"
++.LASF1364:
++ .string "rmdir"
++.LASF278:
++ .string "pi_state_list"
++.LASF899:
++ .string "idle"
++.LASF438:
++ .string "phys_pkg_id"
++.LASF1406:
++ .string "fl_wait"
++.LASF1311:
++ .string "releasepage"
++.LASF1155:
++ .string "last_type"
++.LASF814:
++ .string "ring_info"
++.LASF31:
++ .string "dev_t"
++.LASF1577:
++ .string "init_timers_cpu"
++.LASF564:
++ .string "prev_priority"
++.LASF323:
++ .string "wait_lock"
++.LASF717:
++ .string "core_waiters"
++.LASF1424:
++ .string "ahead_size"
++.LASF604:
++ .string "cs_cachep"
++.LASF326:
++ .string "sleepers"
++.LASF516:
++ .string "altrootmnt"
++.LASF1477:
++ .string "umount_begin"
++.LASF1020:
++ .string "handler_data"
++.LASF301:
++ .string "rb_node"
++.LASF998:
++ .string "module_kobject"
++.LASF1444:
++ .string "nlm_lockowner"
++.LASF387:
++ .string "uevent_attr"
++.LASF271:
++ .string "backing_dev_info"
++.LASF386:
++ .string "uevent_suppress"
++.LASF864:
++ .string "cnvcsw"
++.LASF379:
++ .string "knode_parent"
++.LASF1055:
++ .string "dev_archdata"
++.LASF536:
++ .string "completion"
++.LASF739:
++ .string "pid_type"
++.LASF1646:
++ .string "__ksymtab_round_jiffies_relative"
++.LASF1004:
++ .string "MODULE_STATE_GOING"
++.LASF838:
++ .string "vm_truncate_count"
++.LASF87:
++ .string "___esi"
++.LASF487:
++ .string "_addr"
++.LASF98:
++ .string "___esp"
++.LASF941:
++ .string "unused_gpl_syms"
++.LASF67:
++ .string "eflags"
++.LASF731:
++ .string "timer_list"
++.LASF1250:
++ .string "dq_hash"
++.LASF1285:
++ .string "quota_on"
++.LASF940:
++ .string "unused_crcs"
++.LASF1341:
++ .string "bd_holder_list"
++.LASF1384:
++ .string "aio_write"
++.LASF766:
++ .string "kiocb"
++.LASF888:
++ .string "capabilities"
++.LASF1047:
++ .string "klist"
++.LASF1062:
++ .string "klist_devices"
++.LASF1185:
++ .string "dqb_curinodes"
++.LASF1248:
++ .string "qf_next"
++.LASF659:
++ .string "i_mapping"
++.LASF157:
++ .string "io_bitmap_ptr"
++.LASF1280:
++ .string "acquire_dquot"
++.LASF328:
++ .string "size"
++.LASF644:
++ .string "i_size_seqcount"
++.LASF252:
++ .string "pending"
++.LASF862:
++ .string "cutime"
++.LASF104:
++ .string "bug_entry"
++.LASF954:
++ .string "init_text_size"
++.LASF1395:
++ .string "check_flags"
++.LASF919:
++ .string "st_size"
++.LASF368:
++ .string "pm_message_t"
++.LASF15:
++ .string "__kernel_loff_t"
++.LASF402:
++ .string "devt"
++.LASF310:
++ .string "first"
++.LASF909:
++ .string "mtime"
++.LASF619:
++ .string "barrier"
++.LASF132:
++ .string "i387_soft_struct"
++.LASF1448:
++ .string "nfs4_fl"
++.LASF1056:
++ .string "acpi_handle"
++.LASF363:
++ .string "physid_mask"
++.LASF1091:
++ .string "class_data"
++.LASF190:
++ .string "time_slice"
++.LASF432:
++ .string "cpu_present_to_apicid"
++.LASF1175:
++ .string "ia_ctime"
++.LASF580:
++ .string "node_present_pages"
++.LASF419:
++ .string "int_dest_mode"
++.LASF738:
++ .string "timer_jiffies"
++.LASF1003:
++ .string "MODULE_STATE_COMING"
++.LASF1615:
++ .string "msecs"
++.LASF679:
++ .string "task_size"
++.LASF1120:
++ .string "s_dirt"
++.LASF151:
++ .string "vm86_info"
++.LASF617:
++ .string "donetail"
++.LASF1225:
++ .string "qs_uquota"
++.LASF40:
++ .string "size_t"
++.LASF598:
++ .string "blocking_notifier_head"
++.LASF449:
++ .string "kref"
++.LASF1319:
++ .string "page_tree"
++.LASF1409:
++ .string "fl_type"
++.LASF1480:
++ .string "export_operations"
++.LASF1474:
++ .string "statfs"
++.LASF1590:
++ .string "__dummy2"
++.LASF1608:
++ .string "del_timer_sync"
++.LASF1541:
++ .string "pattern"
++.LASF886:
++ .string "reclaimed_slab"
++.LASF791:
++ .string "f_path"
++.LASF1413:
++ .string "fl_break_time"
++.LASF1117:
++ .string "s_dev"
++.LASF962:
++ .string "num_bugs"
++.LASF1033:
++ .string "mask_ack"
++.LASF882:
++ .string "prio_array"
++.LASF1684:
++ .string "xtime_lock"
++.LASF444:
++ .string "apic_id_mask"
++.LASF691:
++ .string "hiwater_vm"
++.LASF762:
++ .string "res2"
++.LASF978:
++ .string "poll"
++.LASF1605:
++ .string "lock_timer_base"
++.LASF844:
++ .string "__session"
++.LASF158:
++ .string "iopl"
++.LASF367:
++ .string "event"
++.LASF42:
++ .string "time_t"
++.LASF296:
++ .string "seqcount"
++.LASF857:
++ .string "it_prof_incr"
++.LASF108:
++ .string "sysinfo"
++.LASF846:
++ .string "live"
++.LASF325:
++ .string "semaphore"
++.LASF1257:
++ .string "dq_sb"
++.LASF685:
++ .string "mmap_sem"
++.LASF1218:
++ .string "qfs_nblks"
++.LASF299:
++ .string "tv_sec"
++.LASF1333:
++ .string "bd_dev"
++.LASF295:
++ .string "seqlock_t"
++.LASF930:
++ .string "srcversion"
++.LASF1692:
++ .string "acpi_ht"
++.LASF431:
++ .string "cpu_to_logical_apicid"
++.LASF56:
++ .string "pgd_t"
++.LASF1034:
++ .string "unmask"
++.LASF345:
++ .string "mpc_config_processor"
++.LASF1097:
++ .string "raw_prio_tree_node"
++.LASF427:
++ .string "ioapic_phys_id_map"
++.LASF1425:
++ .string "mmap_hit"
++.LASF927:
++ .string "param_attrs"
++.LASF1032:
++ .string "disable"
++.LASF556:
++ .string "active_list"
++.LASF1548:
++ .string "native_irq_enable"
++.LASF1422:
++ .string "prev_index"
++.LASF1036:
++ .string "retrigger"
++.LASF1271:
++ .string "dquot_operations"
++.LASF855:
++ .string "real_timer"
++.LASF274:
++ .string "last_siginfo"
++.LASF802:
++ .string "private_data"
++.LASF554:
++ .string "_pad1_"
++.LASF546:
++ .string "stat_threshold"
++.LASF654:
++ .string "i_alloc_sem"
++.LASF1718:
++ .string "GNU C 4.1.1 (Gentoo 4.1.1-r3)"
++.LASF1385:
++ .string "readdir"
++.LASF889:
++ .string "congested_fn"
++.LASF576:
++ .string "nr_zones"
++.LASF1088:
++ .string "class_attribute"
++.LASF788:
++ .string "ki_cur_seg"
++.LASF720:
++ .string "ioctx_list_lock"
++.LASF1507:
++ .string "close"
++.LASF1439:
++ .string "fl_grant"
++.LASF396:
++ .string "dma_mem"
++.LASF1151:
++ .string "s_time_gran"
++.LASF1343:
++ .string "bd_block_size"
++.LASF258:
++ .string "security"
++.LASF1657:
++ .string "__kstrtab_try_to_del_timer_sync"
++.LASF1509:
++ .string "nopfn"
++.LASF1249:
++ .string "dquot"
++.LASF453:
++ .string "id_next"
++.LASF130:
++ .string "xmm_space"
++.LASF472:
++ .string "i387_union"
++.LASF1149:
++ .string "s_fs_info"
++.LASF1531:
++ .string "constant_test_bit"
++.LASF1508:
++ .string "nopage"
++.LASF945:
++ .string "num_gpl_future_syms"
++.LASF745:
++ .string "cpu_base"
++.LASF1198:
++ .string "d_blk_hardlimit"
++.LASF622:
++ .string "PIDTYPE_SID"
++.LASF558:
++ .string "nr_scan_active"
++.LASF749:
++ .string "get_time"
++.LASF794:
++ .string "f_flags"
++.LASF134:
++ .string "changed"
++.LASF70:
++ .string "__dsh"
++.LASF1083:
++ .string "class_attrs"
++.LASF1351:
++ .string "hd_struct"
++.LASF1628:
++ .string "sys_getegid"
++.LASF1306:
++ .string "readpages"
++.LASF831:
++ .string "shared"
++.LASF1485:
++ .string "get_dentry"
++.LASF525:
++ .string "d_lru"
++.LASF646:
++ .string "i_mtime"
++.LASF298:
++ .string "timespec"
++.LASF377:
++ .string "device"
++.LASF478:
++ .string "_uid"
++.LASF597:
++ .string "priority"
++.LASF1182:
++ .string "dqb_curspace"
++.LASF1264:
++ .string "check_quota_file"
++.LASF408:
++ .string "cpu_up"
++.LASF929:
++ .string "version"
++.LASF171:
++ .string "usage"
++.LASF1143:
++ .string "s_mtd"
++.LASF911:
++ .string "blksize"
++.LASF1499:
++ .string "_mapcount"
++.LASF815:
++ .string "aio_ring_info"
++.LASF285:
++ .string "lock"
++.LASF1516:
++ .string "tvec_t"
++.LASF355:
++ .string "mpc_bustype"
++.LASF1334:
++ .string "bd_inode"
++.LASF683:
++ .string "mm_count"
++.LASF790:
++ .string "ki_eventfd"
++.LASF231:
++ .string "fsgid"
++.LASF1320:
++ .string "tree_lock"
++.LASF1103:
++ .string "index_bits"
++.LASF1078:
++ .string "driver_attribute"
++.LASF263:
++ .string "alloc_lock"
++.LASF588:
++ .string "zones"
++.LASF268:
++ .string "bio_list"
++.LASF1434:
++ .string "fl_copy_lock"
++.LASF1190:
++ .string "dqi_bgrace"
++.LASF1146:
++ .string "s_frozen"
++.LASF1221:
++ .string "fs_quota_stat"
++.LASF1588:
++ .string "work_list"
++.LASF1430:
++ .string "fl_owner_t"
++.LASF1661:
++ .string "__kstrtab_avenrun"
++.LASF1702:
++ .string "boot_tvec_bases"
++.LASF817:
++ .string "ring_pages"
++.LASF1603:
++ .string "count_active_tasks"
++.LASF1213:
++ .string "d_rtbwarns"
++.LASF634:
++ .string "i_sb_list"
++.LASF330:
++ .string "mm_context_t"
++.LASF1576:
++ .string "__mptr"
++.LASF235:
++ .string "cap_permitted"
++.LASF1416:
++ .string "fl_u"
++.LASF18:
++ .string "__s8"
++.LASF75:
++ .string "vm86_struct"
++.LASF753:
++ .string "lock_key"
++.LASF1308:
++ .string "commit_write"
++.LASF1579:
++ .string "boot_done"
++.LASF964:
++ .string "waiter"
++.LASF996:
++ .string "test"
++.LASF1244:
++ .string "quota_format_type"
++.LASF524:
++ .string "d_name"
++.LASF555:
++ .string "lru_lock"
++.LASF1370:
++ .string "truncate"
++.LASF211:
++ .string "vfork_done"
++.LASF297:
++ .string "seqcount_t"
++.LASF792:
++ .string "f_op"
++.LASF1060:
++ .string "drivers"
++.LASF1265:
++ .string "read_file_info"
++.LASF512:
++ .string "root"
++.LASF1433:
++ .string "fl_remove"
++.LASF747:
++ .string "active"
++.LASF642:
++ .string "i_version"
++.LASF700:
++ .string "start_code"
++.LASF612:
++ .string "nxttail"
++.LASF664:
++ .string "i_dnotify_mask"
++.LASF989:
++ .string "local_t"
++.LASF452:
++ .string "proc_next"
++.LASF219:
++ .string "start_time"
++.LASF595:
++ .string "notifier_block"
++.LASF836:
++ .string "vm_file"
++.LASF1461:
++ .string "super_operations"
++.LASF243:
++ .string "sysvsem"
++.LASF212:
++ .string "set_child_tid"
++.LASF1170:
++ .string "ia_uid"
++.LASF20:
++ .string "__u8"
++.LASF641:
++ .string "i_rdev"
++.LASF1466:
++ .string "put_inode"
++.LASF1518:
++ .string "tvec_root_t"
++.LASF1229:
++ .string "qs_itimelimit"
++.LASF721:
++ .string "ioctx_list"
++.LASF858:
++ .string "it_virt_incr"
++.LASF557:
++ .string "inactive_list"
++.LASF137:
++ .string "alimit"
++.LASF1514:
++ .string "event_type"
++.LASF1038:
++ .string "set_wake"
++.LASF1207:
++ .string "d_bwarns"
++.LASF1612:
++ .string "expire"
++.LASF1268:
++ .string "read_dqblk"
++.LASF1247:
++ .string "qf_owner"
++.LASF1110:
++ .string "d_compare"
++.LASF73:
++ .string "revectored_struct"
++.LASF1193:
++ .string "dqi_valid"
++.LASF348:
++ .string "mpc_apicver"
++.LASF1625:
++ .string "sys_getuid"
++.LASF1578:
++ .string "__ret_warn_on"
++.LASF918:
++ .string "st_value"
++.LASF1104:
++ .string "qstr"
++.LASF350:
++ .string "mpc_cpufeature"
++.LASF203:
++ .string "tgid"
++.LASF1714:
++ .string "per_cpu__vm_event_states"
++.LASF800:
++ .string "f_ra"
++.LASF1338:
++ .string "bd_inodes"
++.LASF570:
++ .string "zone_start_pfn"
++.LASF467:
++ .string "sa_handler"
++.LASF257:
++ .string "notifier_mask"
++.LASF1115:
++ .string "super_block"
++.LASF411:
++ .string "smp_send_reschedule"
++.LASF1396:
++ .string "dir_notify"
++.LASF1347:
++ .string "bd_disk"
++.LASF113:
++ .string "sharedram"
++.LASF1013:
++ .string "fixup"
++.LASF1676:
++ .string "__per_cpu_offset"
++.LASF1269:
++ .string "commit_dqblk"
++.LASF78:
++ .string "cpu_type"
++.LASF1150:
++ .string "s_vfs_rename_mutex"
++.LASF1242:
++ .string "dqi_format"
++.LASF115:
++ .string "totalswap"
++.LASF562:
++ .string "reclaim_in_progress"
++.LASF437:
++ .string "enable_apic_mode"
++.LASF508:
++ .string "uidhash_list"
++.LASF1342:
++ .string "bd_contains"
++.LASF1336:
++ .string "bd_mutex"
++.LASF538:
++ .string "free_area"
++.LASF1637:
++ .string "__ksymtab_boot_tvec_bases"
++.LASF1643:
++ .string "__kstrtab_round_jiffies"
++.LASF1241:
++ .string "mem_dqinfo"
++.LASF430:
++ .string "apicid_to_node"
++.LASF502:
++ .string "processes"
++.LASF1411:
++ .string "fl_end"
++.LASF938:
++ .string "unused_syms"
++.LASF809:
++ .string "user_id"
++.LASF867:
++ .string "cmaj_flt"
++.LASF965:
++ .string "exit"
++.LASF1133:
++ .string "s_syncing"
++.LASF1435:
++ .string "fl_release_private"
++.LASF178:
++ .string "run_list"
++.LASF316:
++ .string "func"
++.LASF1713:
++ .string "protection_map"
++.LASF1325:
++ .string "truncate_count"
++.LASF126:
++ .string "status"
++.LASF446:
++ .string "send_IPI_mask"
++.LASF1671:
++ .string "__kstrtab_msleep"
++.LASF340:
++ .string "mpc_oemptr"
++.LASF1515:
++ .string "tvec_s"
++.LASF875:
++ .string "small_block"
++.LASF594:
++ .string "owner"
++.LASF354:
++ .string "mpc_busid"
++.LASF812:
++ .string "active_reqs"
++.LASF1503:
++ .string "first_page"
++.LASF1405:
++ .string "fl_pid"
++.LASF1068:
++ .string "drivers_autoprobe_attr"
++.LASF1690:
++ .string "acpi_noirq"
++.LASF1246:
++ .string "qf_ops"
++.LASF994:
++ .string "attr"
++.LASF1037:
++ .string "set_type"
++.LASF1490:
++ .string "written"
++.LASF506:
++ .string "mq_bytes"
++.LASF1220:
++ .string "fs_qfilestat_t"
++.LASF77:
++ .string "screen_bitmap"
++.LASF1532:
++ .string "addr"
++.LASF1559:
++ .string "timer_set_base"
++.LASF953:
++ .string "core_size"
++.LASF1482:
++ .string "encode_fh"
++.LASF1598:
++ .string "process_timeout"
++.LASF1382:
++ .string "write"
++.LASF1407:
++ .string "fl_file"
++.LASF908:
++ .string "atime"
++.LASF182:
++ .string "timestamp"
++.LASF1712:
++ .string "dcache_lock"
++.LASF370:
++ .string "power_state"
++.LASF1002:
++ .string "MODULE_STATE_LIVE"
++.LASF740:
++ .string "hrtimer_restart"
++.LASF1067:
++ .string "drv_attrs"
++.LASF991:
++ .string "kernel_symbol"
++.LASF1080:
++ .string "mod_name"
++.LASF1180:
++ .string "dqb_bhardlimit"
++.LASF1279:
++ .string "write_dquot"
++.LASF313:
++ .string "wait_queue_t"
++.LASF923:
++ .string "Elf32_Sym"
++.LASF1300:
++ .string "address_space_operations"
++.LASF987:
++ .string "filter"
++.LASF1371:
++ .string "permission"
++.LASF239:
++ .string "oomkilladj"
++.LASF111:
++ .string "totalram"
++.LASF194:
++ .string "ptrace_list"
++.LASF188:
++ .string "policy"
++.LASF1591:
++ .string "run_timer_softirq"
++.LASF1076:
++ .string "drivers_autoprobe"
++.LASF898:
++ .string "softirq"
++.LASF722:
++ .string "plist_head"
++.LASF1651:
++ .string "__kstrtab___mod_timer"
++.LASF461:
++ .string "sigset_t"
++.LASF1305:
++ .string "set_page_dirty"
++.LASF250:
++ .string "real_blocked"
++.LASF7:
++ .string "__kernel_ssize_t"
++.LASF497:
++ .string "si_code"
++.LASF200:
++ .string "pdeath_signal"
++.LASF1330:
++ .string "private_list"
++.LASF1367:
++ .string "readlink"
++.LASF1710:
++ .string "prof_on"
++.LASF79:
++ .string "int_revectored"
++.LASF534:
++ .string "d_iname"
++.LASF869:
++ .string "oublock"
++.LASF1688:
++ .string "platform_enable_wakeup"
++.LASF733:
++ .string "function"
++.LASF1587:
++ .string "__run_timers"
++.LASF1357:
++ .string "inode_operations"
++.LASF1236:
++ .string "dqi_free_blk"
++.LASF621:
++ .string "PIDTYPE_PGID"
++.LASF1393:
++ .string "sendfile"
++.LASF166:
++ .string "previous_esp"
++.LASF464:
++ .string "__restorefn_t"
++.LASF772:
++ .string "ki_ctx"
++.LASF352:
++ .string "mpc_reserved"
++.LASF494:
++ .string "siginfo"
++.LASF1462:
++ .string "destroy_inode"
++.LASF587:
++ .string "zlcache_ptr"
++.LASF1426:
++ .string "mmap_miss"
++.LASF523:
++ .string "d_parent"
++.LASF262:
++ .string "self_exec_id"
++.LASF302:
++ .string "rb_parent_color"
++.LASF10:
++ .string "__kernel_timer_t"
++.LASF1670:
++ .string "timers_nb"
++.LASF1084:
++ .string "class_dev_attrs"
++.LASF96:
++ .string "___cs"
++.LASF968:
++ .string "strtab"
++.LASF820:
++ .string "tail"
++.LASF709:
++ .string "env_end"
++.LASF388:
++ .string "devt_attr"
++.LASF593:
++ .string "mutex"
++.LASF459:
++ .string "sysv_sem"
++.LASF320:
++ .string "wait_queue_head_t"
++.LASF1234:
++ .string "v2_mem_dqinfo"
++.LASF1282:
++ .string "mark_dirty"
++.LASF500:
++ .string "user_struct"
++.LASF1650:
++ .string "__ksymtab_init_timer_deferrable"
++.LASF159:
++ .string "io_bitmap_max"
++.LASF91:
++ .string "___ds"
++.LASF925:
++ .string "module"
++.LASF993:
++ .string "module_attribute"
++.LASF777:
++ .string "ki_user_data"
++.LASF727:
++ .string "rlim_max"
++.LASF307:
++ .string "next"
++.LASF942:
++ .string "num_unused_gpl_syms"
++.LASF893:
++ .string "futex_pi_state"
++.LASF1488:
++ .string "mtd_info"
++.LASF1704:
++ .string "nr_threads"
++.LASF1624:
++ .string "_________p1"
++.LASF1021:
++ .string "chip_data"
++.LASF1326:
++ .string "nrpages"
++.LASF1274:
++ .string "alloc_space"
++.LASF1273:
++ .string "drop"
++.LASF155:
++ .string "saved_fs"
++.LASF1593:
++ .string "mem_total"
++.LASF410:
++ .string "smp_send_stop"
++.LASF900:
++ .string "iowait"
++.LASF540:
++ .string "nr_free"
++.LASF818:
++ .string "ring_lock"
++.LASF1501:
++ .string "lockless_freelist"
++.LASF186:
++ .string "sched_time"
++.LASF1318:
++ .string "host"
++.LASF131:
++ .string "padding"
++.LASF990:
++ .string "mod_arch_specific"
++.LASF1456:
++ .string "get_sb"
++.LASF36:
++ .string "_Bool"
++.LASF1619:
++ .string "update_process_times"
++.LASF1450:
++ .string "magic"
++.LASF93:
++ .string "___fs"
++.LASF1504:
++ .string "freelist"
++.LASF645:
++ .string "i_atime"
++.LASF548:
++ .string "zone"
++.LASF539:
++ .string "free_list"
++.LASF156:
++ .string "saved_gs"
++.LASF668:
++ .string "dirtied_when"
++.LASF1090:
++ .string "class_device"
++.LASF896:
++ .string "nice"
++.LASF415:
++ .string "probe"
++.LASF915:
++ .string "Elf32_Word"
++.LASF1469:
++ .string "put_super"
++.LASF973:
++ .string "attrs"
++.LASF215:
++ .string "utime"
++.LASF1188:
++ .string "dqb_valid"
++.LASF571:
++ .string "spanned_pages"
++.LASF751:
++ .string "softirq_time"
++.LASF1633:
++ .string "add_timer_on"
++.LASF1634:
++ .string "__kstrtab_jiffies_64"
++.LASF140:
++ .string "fxsave"
++.LASF482:
++ .string "_sigval"
++.LASF519:
++ .string "d_flags"
++.LASF736:
++ .string "tvec_t_base_s"
++.LASF208:
++ .string "group_leader"
++.LASF265:
++ .string "pi_waiters"
++.LASF995:
++ .string "setup"
++.LASF428:
++ .string "setup_apic_routing"
++.LASF1659:
++ .string "__kstrtab_del_timer_sync"
++.LASF117:
++ .string "procs"
++.LASF1486:
++ .string "find_exported_dentry"
++.LASF892:
++ .string "unplug_io_data"
++.LASF574:
++ .string "node_zones"
++.LASF1276:
++ .string "free_space"
++.LASF958:
++ .string "unsafe"
++.LASF1717:
++ .string "rec_event"
++.LASF1547:
++ .string "raw_local_irq_enable"
++.LASF1315:
++ .string "launder_page"
++.LASF66:
++ .string "__csh"
++.LASF1553:
++ .string "calc_load"
++.LASF1563:
++ .string "setup_timer"
++.LASF1647:
++ .string "__kstrtab_init_timer"
++.LASF1053:
++ .string "n_ref"
++.LASF1239:
++ .string "v1_i"
++.LASF1026:
++ .string "affinity"
++.LASF568:
++ .string "wait_table_bits"
++.LASF1696:
++ .string "cpu_callout_map"
++.LASF520:
++ .string "d_lock"
++.LASF559:
++ .string "nr_scan_inactive"
++.LASF981:
++ .string "store"
++.LASF375:
++ .string "pm_parent"
++.LASF1096:
++ .string "softirq_action"
++.LASF655:
++ .string "i_op"
++.LASF616:
++ .string "donelist"
++.LASF946:
++ .string "gpl_future_crcs"
++.LASF752:
++ .string "hrtimer_cpu_base"
++.LASF267:
++ .string "journal_info"
++.LASF220:
++ .string "min_flt"
++.LASF937:
++ .string "gpl_crcs"
++.LASF934:
++ .string "crcs"
++.LASF1291:
++ .string "set_dqblk"
++.LASF364:
++ .string "mask"
++.LASF353:
++ .string "mpc_config_bus"
++.LASF1686:
++ .string "jiffies_64"
++.LASF737:
++ .string "running_timer"
++.LASF365:
++ .string "physid_mask_t"
++.LASF32:
++ .string "mode_t"
++.LASF346:
++ .string "mpc_type"
++.LASF125:
++ .string "st_space"
++.LASF710:
++ .string "saved_auxv"
++.LASF1267:
++ .string "free_file_info"
++.LASF1415:
++ .string "fl_lmops"
++.LASF1281:
++ .string "release_dquot"
++.LASF185:
++ .string "last_ran_j"
++.LASF213:
++ .string "clear_child_tid"
++.LASF1145:
++ .string "s_dquot"
++.LASF1122:
++ .string "s_type"
++.LASF1709:
++ .string "per_cpu__irq_regs"
++.LASF481:
++ .string "_pad"
++.LASF1255:
++ .string "dq_count"
++.LASF877:
++ .string "blocks"
++.LASF59:
++ .string "restart_block"
++.LASF409:
++ .string "smp_cpus_done"
++.LASF779:
++ .string "ki_pos"
++.LASF1191:
++ .string "dqi_igrace"
++.LASF1049:
++ .string "k_list"
++.LASF988:
++ .string "uevent"
++.LASF551:
++ .string "pages_high"
++.LASF1118:
++ .string "s_blocksize"
++.LASF1557:
++ .string "timer_pending"
++.LASF975:
++ .string "k_name"
++.LASF746:
++ .string "index"
++.LASF1506:
++ .string "vm_operations_struct"
++.LASF754:
++ .string "clock_base"
++.LASF1481:
++ .string "decode_fh"
++.LASF1092:
++ .string "class_id"
++.LASF913:
++ .string "Elf32_Addr"
++.LASF702:
++ .string "start_data"
++.LASF939:
++ .string "num_unused_syms"
++.LASF1134:
++ .string "s_need_sync_fs"
++.LASF202:
++ .string "did_exec"
++.LASF852:
++ .string "notify_count"
++.LASF161:
++ .string "task"
++.LASF289:
++ .string "rwlock_t"
++.LASF249:
++ .string "blocked"
++.LASF1719:
++ .string "kernel/timer.c"
++.LASF729:
++ .string "tv64"
++.LASF1544:
++ .string "detach_timer"
++.LASF1642:
++ .string "__ksymtab___round_jiffies_relative"
++.LASF424:
++ .string "no_balance_irq"
++.LASF1604:
++ .string "do_timer"
++.LASF657:
++ .string "i_sb"
++.LASF1620:
++ .string "user_tick"
++.LASF1600:
++ .string "sys_alarm"
++.LASF826:
++ .string "vm_end"
++.LASF1491:
++ .string "error"
++.LASF246:
++ .string "nsproxy"
++.LASF68:
++ .string "__ssh"
++.LASF1349:
++ .string "bd_inode_backing_dev_info"
++.LASF34:
++ .string "timer_t"
++.LASF661:
++ .string "i_devices"
++.LASF261:
++ .string "parent_exec_id"
++.LASF880:
++ .string "SLEEP_INTERACTIVE"
++.LASF631:
++ .string "inode"
++.LASF894:
++ .string "pipe_inode_info"
++.LASF1297:
++ .string "dqio_mutex"
++.LASF1057:
++ .string "bus_attribute"
++.LASF1358:
++ .string "create"
++.LASF1556:
++ .string "rep_nop"
++.LASF1346:
++ .string "bd_invalidated"
++.LASF1070:
++ .string "match"
++.LASF362:
++ .string "trans_reserved"
++.LASF760:
++ .string "timer"
++.LASF773:
++ .string "ki_cancel"
++.LASF1535:
++ .string "flag"
++.LASF1206:
++ .string "d_iwarns"
++.LASF454:
++ .string "semid"
++.LASF1094:
++ .string "dma_coherent_mem"
++.LASF1573:
++ .string "init_timer_deferrable"
++.LASF420:
++ .string "ESR_DISABLE"
++.LASF1073:
++ .string "suspend_late"
++.LASF486:
++ .string "_stime"
++.LASF321:
++ .string "rw_semaphore"
++.LASF843:
++ .string "session"
++.LASF1379:
++ .string "file_operations"
++.LASF1459:
++ .string "s_lock_key"
++.LASF1492:
++ .string "read_descriptor_t"
++.LASF624:
++ .string "pid_chain"
++.LASF1701:
++ .string "per_cpu__rcu_bh_data"
++.LASF884:
++ .string "files_struct"
++.LASF247:
++ .string "signal"
++.LASF1400:
++ .string "file_lock"
++.LASF1648:
++ .string "__ksymtab_init_timer"
++.LASF282:
++ .string "lock_class_key"
++.LASF470:
++ .string "sa_mask"
++.LASF1194:
++ .string "fs_disk_quota"
++.LASF590:
++ .string "page"
++.LASF713:
++ .string "faultstamp"
++.LASF1365:
++ .string "mknod"
++.LASF785:
++ .string "ki_inline_vec"
++.LASF383:
++ .string "bus_id"
++.LASF1359:
++ .string "lookup"
++.LASF1310:
++ .string "invalidatepage"
++.LASF1478:
++ .string "show_options"
++.LASF344:
++ .string "reserved"
++.LASF176:
++ .string "static_prio"
++.LASF1106:
++ .string "d_child"
++.LASF119:
++ .string "freehigh"
++.LASF38:
++ .string "gid_t"
++.LASF2:
++ .string "short unsigned int"
++.LASF450:
++ .string "refcount"
++.LASF698:
++ .string "def_flags"
++.LASF542:
++ .string "per_cpu_pages"
++.LASF950:
++ .string "module_init"
++.LASF1355:
++ .string "i_cdev"
++.LASF1537:
++ .string "kmalloc"
++.LASF1130:
++ .string "s_umount"
++.LASF851:
++ .string "group_exit_task"
++.LASF1350:
++ .string "bd_private"
++.LASF477:
++ .string "_pid"
++.LASF1329:
++ .string "private_lock"
++.LASF1352:
++ .string "gendisk"
++.LASF648:
++ .string "i_blkbits"
++.LASF903:
++ .string "cpustat"
++.LASF1256:
++ .string "dq_wait_unused"
++.LASF1561:
++ .string "get_current"
++.LASF1428:
++ .string "fu_list"
++.LASF357:
++ .string "trans_len"
++.LASF251:
++ .string "saved_sigmask"
++.LASF1375:
++ .string "getxattr"
++.LASF504:
++ .string "inotify_watches"
++.LASF856:
++ .string "it_real_incr"
++.LASF803:
++ .string "f_ep_links"
++.LASF871:
++ .string "coublock"
++.LASF1017:
++ .string "handle_irq"
++.LASF599:
++ .string "rwsem"
++.LASF1129:
++ .string "s_root"
++.LASF339:
++ .string "mpc_productid"
++.LASF1475:
++ .string "remount_fs"
++.LASF332:
++ .string "cputime64_t"
++.LASF627:
++ .string "seccomp_t"
++.LASF1219:
++ .string "qfs_nextents"
++.LASF1656:
++ .string "__ksymtab_del_timer"
++.LASF742:
++ .string "HRTIMER_RESTART"
++.LASF735:
++ .string "base"
++.LASF1137:
++ .string "s_inodes"
++.LASF560:
++ .string "pages_scanned"
++.LASF1007:
++ .string "address"
++.LASF1496:
++ .string "seq_file"
++.LASF382:
++ .string "kobj"
++.LASF165:
++ .string "sysenter_return"
++.LASF327:
++ .string "wait"
++.LASF456:
++ .string "sem_undo_list"
++.LASF1208:
++ .string "d_padding2"
++.LASF1214:
++ .string "d_padding3"
++.LASF1215:
++ .string "d_padding4"
++.LASF1536:
++ .string "test_tsk_thread_flag"
++.LASF433:
++ .string "apicid_to_cpu_present"
++.LASF695:
++ .string "exec_vm"
++.LASF1585:
++ .string "init_timer_stats"
++.LASF533:
++ .string "d_mounted"
++.LASF715:
++ .string "last_interval"
++.LASF1312:
++ .string "direct_IO"
++.LASF955:
++ .string "core_text_size"
++.LASF217:
++ .string "nvcsw"
++.LASF1095:
++ .string "irq_handler_t"
++.LASF1183:
++ .string "dqb_ihardlimit"
++.LASF807:
++ .string "users"
++.LASF823:
++ .string "vm_area_struct"
++.LASF1522:
++ .string "number"
++.LASF573:
++ .string "pglist_data"
++.LASF286:
++ .string "raw_rwlock_t"
++.LASF839:
++ .string "sighand_struct"
++.LASF1164:
++ .string "gfp_mask"
++.LASF1008:
++ .string "module_sect_attrs"
++.LASF58:
++ .string "pgprot_t"
++.LASF269:
++ .string "bio_tail"
++.LASF980:
++ .string "show"
++.LASF29:
++ .string "long long unsigned int"
++.LASF1136:
++ .string "s_xattr"
++.LASF780:
++ .string "ki_bio_count"
++.LASF1290:
++ .string "get_dqblk"
++.LASF1674:
++ .string "__ksymtab_msleep_interruptible"
++.LASF1440:
++ .string "fl_break"
++.LASF1289:
++ .string "set_info"
++.LASF1520:
++ .string "event_spec"
++.LASF567:
++ .string "wait_table_hash_nr_entries"
++.LASF510:
++ .string "fs_struct"
++.LASF21:
++ .string "unsigned char"
++.LASF907:
++ .string "rdev"
++.LASF890:
++ .string "congested_data"
++.LASF373:
++ .string "prev_state"
++.LASF921:
++ .string "st_other"
++.LASF347:
++ .string "mpc_apicid"
++.LASF489:
++ .string "_kill"
++.LASF1685:
++ .string "time_status"
++.LASF1292:
++ .string "get_xstate"
++.LASF476:
++ .string "sigval_t"
++.LASF1124:
++ .string "dq_op"
++.LASF1526:
++ .string "tbase_get_deferrable"
++.LASF1429:
++ .string "fu_rcuhead"
++.LASF819:
++ .string "nr_pages"
++.LASF1621:
++ .string "sys_getpid"
++.LASF1493:
++ .string "read_actor_t"
++.LASF293:
++ .string "kernel_cap_t"
++.LASF1452:
++ .string "fa_next"
++.LASF761:
++ .string "io_event"
++.LASF798:
++ .string "f_uid"
++.LASF959:
++ .string "taints"
++.LASF283:
++ .string "slock"
++.LASF532:
++ .string "d_cookie"
++.LASF1107:
++ .string "d_rcu"
++.LASF1328:
++ .string "a_ops"
++.LASF1082:
++ .string "class_dirs"
++.LASF371:
++ .string "can_wakeup"
++.LASF878:
++ .string "SLEEP_NORMAL"
++.LASF1511:
++ .string "page_mkwrite"
++.LASF1148:
++ .string "s_id"
++.LASF11:
++ .string "__kernel_clockid_t"
++.LASF401:
++ .string "class"
++.LASF1139:
++ .string "s_io"
++.LASF1209:
++ .string "d_rtb_hardlimit"
++.LASF1366:
++ .string "rename"
++.LASF226:
++ .string "euid"
++.LASF743:
++ .string "hrtimer"
++.LASF1397:
++ .string "flock"
++.LASF529:
++ .string "d_op"
++.LASF1421:
++ .string "cache_hit"
++.LASF1533:
++ .string "variable_test_bit"
++.LASF563:
++ .string "vm_stat"
++.LASF632:
++ .string "i_hash"
++.LASF223:
++ .string "it_virt_expires"
++.LASF1487:
++ .string "xattr_handler"
++.LASF234:
++ .string "cap_inheritable"
++.LASF726:
++ .string "rlim_cur"
++.LASF358:
++ .string "trans_type"
++.LASF920:
++ .string "st_info"
++.LASF391:
++ .string "platform_data"
++.LASF1465:
++ .string "write_inode"
++.LASF141:
++ .string "soft"
++.LASF463:
++ .string "__sighandler_t"
++.LASF5:
++ .string "__kernel_pid_t"
++.LASF1158:
++ .string "open_intent"
++.LASF1599:
++ .string "__data"
++.LASF82:
++ .string "info"
++.LASF146:
++ .string "sysenter_cs"
++.LASF1014:
++ .string "irqreturn_t"
++.LASF1323:
++ .string "i_mmap_nonlinear"
++.LASF1663:
++ .string "__kstrtab_schedule_timeout"
++.LASF1552:
++ .string "read_seqretry"
++.LASF775:
++ .string "ki_dtor"
++.LASF253:
++ .string "sas_ss_sp"
++.LASF384:
++ .string "type"
++.LASF1460:
++ .string "s_umount_key"
++.LASF195:
++ .string "active_mm"
++.LASF335:
++ .string "mpc_length"
++.LASF1217:
++ .string "qfs_ino"
++.LASF1199:
++ .string "d_blk_softlimit"
++.LASF308:
++ .string "prev"
++.LASF47:
++ .string "resource_size_t"
++.LASF248:
++ .string "sighand"
++.LASF866:
++ .string "cmin_flt"
++.LASF1071:
++ .string "remove"
++.LASF1046:
++ .string "child"
++.LASF1114:
++ .string "d_dname"
++.LASF1000:
++ .string "module_ref"
++.LASF1254:
++ .string "dq_lock"
++.LASF413:
++ .string "genapic"
++.LASF630:
++ .string "list_op_pending"
++.LASF1322:
++ .string "i_mmap"
++.LASF1263:
++ .string "quota_format_ops"
++.LASF725:
++ .string "rlimit"
++.LASF97:
++ .string "___eflags"
++.LASF1238:
++ .string "mem_dqblk"
++.LASF629:
++ .string "futex_offset"
++.LASF1441:
++ .string "fl_mylease"
++.LASF279:
++ .string "pi_state_cache"
++.LASF329:
++ .string "vdso"
++.LASF537:
++ .string "done"
++.LASF1031:
++ .string "enable"
++.LASF906:
++ .string "nlink"
++.LASF618:
++ .string "blimit"
++.LASF1446:
++ .string "nfs4_lock_state"
++.LASF291:
++ .string "atomic_t"
++.LASF1161:
++ .string "path"
++.LASF825:
++ .string "vm_start"
++.LASF833:
++ .string "anon_vma"
++.LASF666:
++ .string "inotify_mutex"
++.LASF1602:
++ .string "update_times"
++.LASF1324:
++ .string "i_mmap_lock"
++.LASF1546:
++ .string "__raw_spin_unlock"
++.LASF949:
++ .string "init"
++.LASF572:
++ .string "present_pages"
++.LASF1680:
++ .string "current_stack_pointer"
++.LASF997:
++ .string "free"
++.LASF850:
++ .string "group_exit_code"
++.LASF1048:
++ .string "k_lock"
++.LASF628:
++ .string "robust_list_head"
++.LASF1065:
++ .string "bus_attrs"
++.LASF541:
++ .string "zone_padding"
++.LASF1369:
++ .string "put_link"
++.LASF688:
++ .string "_file_rss"
++.LASF1314:
++ .string "migratepage"
++.LASF1123:
++ .string "s_op"
++.LASF956:
++ .string "unwind_info"
++.LASF1019:
++ .string "msi_desc"
++.LASF1410:
++ .string "fl_start"
++.LASF1171:
++ .string "ia_gid"
++.LASF1303:
++ .string "sync_page"
++.LASF349:
++ .string "mpc_cpuflag"
++.LASF183:
++ .string "last_ran"
++.LASF1721:
++ .string "run_local_timers"
++.LASF460:
++ .string "undo_list"
++.LASF1006:
++ .string "mattr"
++.LASF399:
++ .string "devres_head"
++.LASF227:
++ .string "suid"
++.LASF1502:
++ .string "slab"
++.LASF1001:
++ .string "module_state"
++.LASF1128:
++ .string "s_magic"
++.LASF1534:
++ .string "test_ti_thread_flag"
++.LASF810:
++ .string "ctx_lock"
++.LASF244:
++ .string "thread"
++.LASF1627:
++ .string "sys_getgid"
++.LASF931:
++ .string "holders_dir"
++.LASF1086:
++ .string "class_release"
++.LASF883:
++ .string "linux_binfmt"
++.LASF1589:
++ .string "__dummy"
++.LASF441:
++ .string "mps_oem_check"
++.LASF1665:
++ .string "__kstrtab_schedule_timeout_interruptible"
++.LASF1574:
++ .string "cascade"
++.LASF658:
++ .string "i_flock"
++.LASF924:
++ .string "attribute"
++.LASF835:
++ .string "vm_pgoff"
++.LASF770:
++ .string "ki_key"
++.LASF676:
++ .string "get_unmapped_area"
++.LASF443:
++ .string "get_apic_id"
++.LASF1009:
++ .string "nsections"
++.LASF1494:
++ .string "poll_table_struct"
++.LASF1575:
++ .string "tv_list"
++.LASF625:
++ .string "pid_link"
++.LASF686:
++ .string "page_table_lock"
++.LASF170:
++ .string "stack"
++.LASF928:
++ .string "modinfo_attrs"
++.LASF1296:
++ .string "quota_info"
++.LASF290:
++ .string "counter"
++.LASF1294:
++ .string "get_xquota"
++.LASF837:
++ .string "vm_private_data"
++.LASF1119:
++ .string "s_blocksize_bits"
++.LASF255:
++ .string "notifier"
++.LASF306:
++ .string "list_head"
++.LASF1025:
++ .string "irqs_unhandled"
++.LASF312:
++ .string "pprev"
++.LASF663:
++ .string "i_generation"
++.LASF442:
++ .string "acpi_madt_oem_check"
++.LASF417:
++ .string "target_cpus"
++.LASF797:
++ .string "f_owner"
++.LASF1176:
++ .string "ia_file"
++.LASF238:
++ .string "fpu_counter"
++.LASF1412:
++ .string "fl_fasync"
++.LASF1054:
++ .string "n_removed"
++.LASF910:
++ .string "ctime"
++.LASF1200:
++ .string "d_ino_hardlimit"
++.LASF1093:
++ .string "device_type"
++.LASF1652:
++ .string "__ksymtab___mod_timer"
++.LASF135:
++ .string "lookahead"
++.LASF1581:
++ .string "tvec_base_done"
++.LASF37:
++ .string "uid_t"
++.LASF801:
++ .string "f_version"
++.LASF129:
++ .string "mxcsr_mask"
++.LASF1278:
++ .string "transfer"
++.LASF1419:
++ .string "signum"
++.LASF517:
++ .string "dentry"
++.LASF985:
++ .string "default_attrs"
++.LASF947:
++ .string "num_exentries"
++.LASF1157:
++ .string "intent"
++.LASF789:
++ .string "ki_list"
++.LASF160:
++ .string "thread_info"
++.LASF1432:
++ .string "fl_insert"
++.LASF1654:
++ .string "__ksymtab_mod_timer"
++.LASF811:
++ .string "reqs_active"
++.LASF583:
++ .string "kswapd_wait"
++.LASF707:
++ .string "arg_end"
++.LASF1387:
++ .string "unlocked_ioctl"
++.LASF1074:
++ .string "resume_early"
++.LASF860:
++ .string "tty_old_pgrp"
++.LASF1669:
++ .string "base_lock_keys"
++.LASF1420:
++ .string "file_ra_state"
++.LASF505:
++ .string "inotify_devs"
++.LASF638:
++ .string "i_nlink"
++.LASF273:
++ .string "ptrace_message"
++.LASF933:
++ .string "num_syms"
++.LASF578:
++ .string "bdata"
++.LASF1542:
++ .string "timer_stats_account_timer"
++.LASF177:
++ .string "normal_prio"
++.LASF1402:
++ .string "fl_link"
++.LASF787:
++ .string "ki_nr_segs"
++.LASF1617:
++ .string "signal_pending"
++.LASF429:
++ .string "multi_timer_check"
++.LASF840:
++ .string "action"
++.LASF977:
++ .string "ktype"
++.LASF1513:
++ .string "event_data"
++.LASF723:
++ .string "prio_list"
++.LASF1061:
++ .string "devices"
++.LASF1231:
++ .string "qs_bwarnlimit"
++.LASF609:
++ .string "passed_quiesc"
++.LASF1286:
++ .string "quota_off"
++.LASF1040:
++ .string "irqaction"
++.LASF1353:
++ .string "i_pipe"
++.LASF865:
++ .string "cnivcsw"
++.LASF730:
++ .string "ktime_t"
++.LASF49:
++ .string "arg1"
++.LASF225:
++ .string "cpu_timers"
++.LASF699:
++ .string "nr_ptes"
++.LASF1059:
++ .string "subsys"
++.LASF45:
++ .string "blkcnt_t"
++.LASF1079:
++ .string "device_driver"
++.LASF1697:
++ .string "mem_map"
++.LASF1012:
++ .string "insn"
++.LASF94:
++ .string "___orig_eax"
++.LASF8:
++ .string "__kernel_time_t"
++.LASF44:
++ .string "sector_t"
++.LASF435:
++ .string "setup_portio_remap"
++.LASF395:
++ .string "dma_pools"
++.LASF1417:
++ .string "dnotify_struct"
++.LASF366:
++ .string "pm_message"
++.LASF1251:
++ .string "dq_inuse"
++.LASF1695:
++ .string "per_cpu__cpu_number"
++.LASF704:
++ .string "start_brk"
++.LASF868:
++ .string "inblock"
++.LASF378:
++ .string "klist_children"
++.LASF418:
++ .string "int_delivery_mode"
++.LASF1253:
++ .string "dq_dirty"
++.LASF592:
++ .string "bootmem_data"
++.LASF1192:
++ .string "dqi_flags"
++.LASF1468:
++ .string "delete_inode"
++.LASF1232:
++ .string "qs_iwarnlimit"
++.LASF614:
++ .string "curlist"
++.LASF1235:
++ .string "dqi_blocks"
++.LASF1388:
++ .string "compat_ioctl"
++.LASF1716:
++ .string "swap_token_mm"
++.LASF734:
++ .string "data"
++.LASF336:
++ .string "mpc_spec"
++.LASF342:
++ .string "mpc_oemcount"
++.LASF1632:
++ .string "del_timer"
++.LASF805:
++ .string "f_mapping"
++.LASF673:
++ .string "mmap"
++.LASF1030:
++ .string "shutdown"
++.LASF660:
++ .string "i_data"
++.LASF876:
++ .string "nblocks"
++.LASF1521:
++ .string "dcookie"
++.LASF637:
++ .string "i_count"
++.LASF173:
++ .string "lock_depth"
++.LASF400:
++ .string "node"
++.LASF479:
++ .string "_tid"
++.LASF1592:
++ .string "do_sysinfo"
++.LASF1470:
++ .string "write_super"
++.LASF1705:
++ .string "cad_pid"
++.LASF1362:
++ .string "symlink"
++.LASF879:
++ .string "SLEEP_NONINTERACTIVE"
++.LASF527:
++ .string "d_alias"
++.LASF448:
++ .string "send_IPI_all"
++.LASF620:
++ .string "PIDTYPE_PID"
++.LASF1583:
++ .string "self"
++.LASF647:
++ .string "i_ctime"
++.LASF1408:
++ .string "fl_flags"
++.LASF1087:
++ .string "dev_release"
++.LASF690:
++ .string "hiwater_rss"
++.LASF1313:
++ .string "get_xip_page"
++.LASF1436:
++ .string "lock_manager_operations"
++.LASF651:
++ .string "i_mode"
++.LASF501:
++ .string "__count"
++.LASF376:
++ .string "entry"
++.LASF71:
++ .string "__fsh"
++.LASF1154:
++ .string "nameidata"
++.LASF674:
++ .string "mm_rb"
++.LASF6:
++ .string "__kernel_size_t"
++.LASF281:
++ .string "splice_pipe"
++.LASF193:
++ .string "ptrace_children"
++.LASF488:
++ .string "_band"
++.LASF1169:
++ .string "ia_mode"
++.LASF23:
++ .string "short int"
++.LASF30:
++ .string "__kernel_dev_t"
++.LASF1483:
++ .string "get_name"
++.LASF1549:
++ .string "current_thread_info"
++.LASF423:
++ .string "check_apicid_present"
++.LASF434:
++ .string "mpc_apic_id"
++.LASF601:
++ .string "kmem_cache"
++.LASF495:
++ .string "si_signo"
++.LASF1606:
++ .string "prelock_base"
++.LASF1152:
++ .string "s_subtype"
++.LASF149:
++ .string "error_code"
++.LASF106:
++ .string "file"
++.LASF1174:
++ .string "ia_mtime"
++.LASF1081:
++ .string "interfaces"
++.LASF640:
++ .string "i_gid"
++.LASF1403:
++ .string "fl_block"
++.LASF192:
++ .string "tasks"
++.LASF1085:
++ .string "dev_uevent"
++.LASF292:
++ .string "atomic_long_t"
++.LASF397:
++ .string "archdata"
++.LASF979:
++ .string "sysfs_ops"
++.LASF863:
++ .string "cstime"
++.LASF451:
++ .string "sem_undo"
++.LASF848:
++ .string "curr_target"
++.LASF179:
++ .string "array"
++.LASF1517:
++ .string "tvec_root_s"
++.LASF480:
++ .string "_overrun"
++.LASF272:
++ .string "io_context"
++.LASF816:
++ .string "mmap_size"
++.LASF60:
++ .string "vm86_regs"
++.LASF163:
++ .string "preempt_count"
++.LASF960:
++ .string "bug_list"
++.LASF254:
++ .string "sas_ss_size"
++.LASF1212:
++ .string "d_rtbtimer"
++.LASF210:
++ .string "thread_group"
++.LASF65:
++ .string "orig_eax"
++.LASF416:
++ .string "apic_id_registered"
++.LASF1662:
++ .string "__ksymtab_avenrun"
++.LASF1283:
++ .string "write_info"
++.LASF1141:
++ .string "s_files"
++.LASF719:
++ .string "core_done"
++.LASF1121:
++ .string "s_maxbytes"
++.LASF1386:
++ .string "ioctl"
++.LASF46:
++ .string "gfp_t"
++.LASF1224:
++ .string "qs_pad"
++.LASF577:
++ .string "node_mem_map"
++.LASF1245:
++ .string "qf_fmt_id"
++.LASF1390:
++ .string "fsync"
++.LASF1629:
++ .string "sys_gettid"
++.LASF672:
++ .string "mm_struct"
++.LASF242:
++ .string "total_link_count"
++.LASF976:
++ .string "kset"
++.LASF152:
++ .string "v86flags"
++.LASF187:
++ .string "sleep_type"
++.LASF83:
++ .string "___orig_eip"
++.LASF1668:
++ .string "__ksymtab_schedule_timeout_uninterruptible"
++.LASF1011:
++ .string "exception_table_entry"
++.LASF1140:
++ .string "s_anon"
++.LASF914:
++ .string "Elf32_Half"
++.LASF967:
++ .string "num_symtab"
++.LASF3:
++ .string "long int"
++.LASF943:
++ .string "unused_gpl_crcs"
++.LASF714:
++ .string "token_priority"
++.LASF503:
++ .string "sigpending"
++.LASF1525:
++ .string "INIT_LIST_HEAD"
++.LASF422:
++ .string "check_apicid_used"
++.LASF936:
++ .string "num_gpl_syms"
++.LASF1045:
++ .string "start"
++.LASF706:
++ .string "arg_start"
++.LASF1029:
++ .string "startup"
++.LASF1345:
++ .string "bd_part_count"
++.LASF873:
++ .string "tty_struct"
++.LASF1442:
++ .string "fl_change"
++.LASF392:
++ .string "power"
++.LASF983:
++ .string "uevent_ops"
++.LASF1066:
++ .string "dev_attrs"
++.LASF602:
++ .string "cache_sizes"
++.LASF1205:
++ .string "d_btimer"
++.LASF1317:
++ .string "address_space"
++.LASF969:
++ .string "sect_attrs"
++.LASF1304:
++ .string "writepages"
++.LASF1233:
++ .string "v1_mem_dqinfo"
++.LASF101:
++ .string "___vm86_ds"
++.LASF337:
++ .string "mpc_checksum"
++.LASF786:
++ .string "ki_iovec"
++.LASF1372:
++ .string "setattr"
++.LASF804:
++ .string "f_ep_lock"
++.LASF1528:
++ .string "__list_del"
++.LASF169:
++ .string "state"
++.LASF795:
++ .string "f_mode"
++.LASF1667:
++ .string "__kstrtab_schedule_timeout_uninterruptible"
++.LASF100:
++ .string "___vm86_es"
++.LASF118:
++ .string "totalhigh"
++.LASF233:
++ .string "cap_effective"
++.LASF1356:
++ .string "cdev"
++.LASF778:
++ .string "ki_wait"
++.LASF360:
++ .string "trans_global"
++.LASF1380:
++ .string "llseek"
++.LASF81:
++ .string "pt_regs"
++.LASF245:
++ .string "files"
++.LASF270:
++ .string "reclaim_state"
++.LASF1266:
++ .string "write_file_info"
++.LASF1063:
++ .string "klist_drivers"
++.LASF544:
++ .string "batch"
++.LASF1069:
++ .string "drivers_probe_attr"
++.LASF1144:
++ .string "s_instances"
++.LASF579:
++ .string "node_start_pfn"
++.LASF499:
++ .string "siginfo_t"
++.LASF1655:
++ .string "__kstrtab_del_timer"
++.LASF133:
++ .string "ftop"
++.LASF1335:
++ .string "bd_openers"
++.LASF102:
++ .string "___vm86_fs"
++.LASF693:
++ .string "locked_vm"
++.LASF1316:
++ .string "writeback_control"
++.LASF649:
++ .string "i_blocks"
++.LASF1550:
++ .string "list_empty"
++.LASF1354:
++ .string "i_bdev"
++.LASF1197:
++ .string "d_id"
++.LASF1309:
++ .string "bmap"
++.LASF1476:
++ .string "clear_inode"
++.LASF1539:
++ .string "kmalloc_node"
++.LASF1302:
++ .string "readpage"
++.LASF485:
++ .string "_utime"
++.LASF54:
++ .string "time"
++.LASF103:
++ .string "___vm86_gs"
++.LASF69:
++ .string "__esh"
++.LASF144:
++ .string "tls_array"
++.LASF1272:
++ .string "initialize"
++.LASF1162:
++ .string "radix_tree_root"
++.LASF260:
++ .string "seccomp"
++.LASF776:
++ .string "ki_obj"
++.LASF9:
++ .string "__kernel_clock_t"
++.LASF1630:
++ .string "mod_timer"
++.LASF1601:
++ .string "seconds"
++.LASF665:
++ .string "i_dnotify"
++.LASF492:
++ .string "_sigfault"
++.LASF1373:
++ .string "getattr"
++.LASF1519:
++ .string "tvec_base_t"
++.LASF184:
++ .string "last_interrupted"
++.LASF338:
++ .string "mpc_oem"
++.LASF1472:
++ .string "write_super_lockfs"
++.LASF412:
++ .string "smp_call_function_mask"
++.LASF1098:
++ .string "left"
++.LASF701:
++ .string "end_code"
++.LASF1109:
++ .string "d_revalidate"
++.LASF1681:
++ .string "per_cpu__current_task"
++.LASF1377:
++ .string "removexattr"
++.LASF1135:
++ .string "s_active"
++.LASF764:
++ .string "iov_base"
++.LASF712:
++ .string "context"
++.LASF1658:
++ .string "__ksymtab_try_to_del_timer_sync"
++.LASF575:
++ .string "node_zonelists"
++.LASF507:
++ .string "locked_shm"
++.LASF901:
++ .string "steal"
++.LASF1277:
++ .string "free_inode"
++.LASF1041:
++ .string "handler"
++.LASF1043:
++ .string "proc_dir_entry"
++.LASF89:
++ .string "___ebp"
++.LASF1443:
++ .string "nfs_lock_info"
++.LASF92:
++ .string "___es"
++.LASF300:
++ .string "tv_nsec"
++.LASF483:
++ .string "_sys_private"
++.LASF531:
++ .string "d_fsdata"
++.LASF380:
++ .string "knode_driver"
++.LASF1195:
++ .string "d_version"
++.LASF951:
++ .string "module_core"
++.LASF436:
++ .string "check_phys_apicid_present"
++.LASF680:
++ .string "cached_hole_size"
++.LASF917:
++ .string "st_name"
++.LASF732:
++ .string "expires"
++.LASF1374:
++ .string "setxattr"
++.LASF1649:
++ .string "__kstrtab_init_timer_deferrable"
++.LASF277:
++ .string "robust_list"
++.LASF1042:
++ .string "dev_id"
++.LASF206:
++ .string "children"
++.LASF1275:
++ .string "alloc_inode"
++.LASF266:
++ .string "pi_blocked_on"
++.LASF1327:
++ .string "writeback_index"
++.LASF128:
++ .string "mxcsr"
++.LASF832:
++ .string "anon_vma_node"
++.LASF1527:
++ .string "list_add_tail"
++.LASF498:
++ .string "_sifields"
++.LASF569:
++ .string "zone_pgdat"
++.LASF922:
++ .string "st_shndx"
++.LASF783:
++ .string "ki_buf"
++.LASF218:
++ .string "nivcsw"
++.LASF175:
++ .string "prio"
++.LASF1166:
++ .string "radix_tree_node"
++.LASF275:
++ .string "io_wait"
++.LASF372:
++ .string "should_wakeup"
++.LASF633:
++ .string "i_list"
++.LASF439:
++ .string "mpc_oem_bus_info"
++.LASF1226:
++ .string "qs_gquota"
++.LASF606:
++ .string "rcu_head"
++.LASF1571:
++ .string "__ptr"
++.LASF334:
++ .string "mpc_signature"
++.LASF744:
++ .string "hrtimer_clock_base"
++.LASF759:
++ .string "work"
++.LASF756:
++ .string "work_func_t"
++.LASF1376:
++ .string "listxattr"
++.LASF1050:
++ .string "klist_node"
++.LASF136:
++ .string "no_update"
++.LASF462:
++ .string "__signalfn_t"
++.LASF1112:
++ .string "d_release"
++.LASF1399:
++ .string "splice_read"
++.LASF1427:
++ .string "prev_offset"
++.LASF767:
++ .string "ki_run_list"
++.LASF608:
++ .string "quiescbatch"
++.LASF256:
++ .string "notifier_data"
++.LASF1638:
++ .string "per_cpu__tvec_bases"
++.LASF1361:
++ .string "unlink"
++.LASF403:
++ .string "groups"
++.LASF1105:
++ .string "hash"
++.LASF1560:
++ .string "new_base"
++.LASF114:
++ .string "bufferram"
++.LASF35:
++ .string "clockid_t"
++.LASF331:
++ .string "cputime_t"
++.LASF1715:
++ .string "swapper_space"
++.LASF1132:
++ .string "s_count"
++.LASF932:
++ .string "syms"
++.LASF667:
++ .string "i_state"
++.LASF341:
++ .string "mpc_oemsize"
++.LASF566:
++ .string "wait_table"
++.LASF1010:
++ .string "module_param_attrs"
++.LASF343:
++ .string "mpc_lapic"
++.LASF303:
++ .string "rb_right"
++.LASF774:
++ .string "ki_retry"
++.LASF1165:
++ .string "rnode"
++.LASF19:
++ .string "signed char"
++.LASF112:
++ .string "freeram"
++.LASF656:
++ .string "i_fop"
++.LASF887:
++ .string "ra_pages"
++.LASF944:
++ .string "gpl_future_syms"
++.LASF1693:
++ .string "acpi_pci_disabled"
++.LASF1672:
++ .string "__ksymtab_msleep"
++.LASF1614:
++ .string "msleep"
++.LASF935:
++ .string "gpl_syms"
++.LASF1540:
++ .string "__constant_c_and_count_memset"
++.LASF1039:
++ .string "typename"
++.LASF209:
++ .string "pids"
++.LASF1580:
++ .string "__func__"
++.LASF322:
++ .string "count"
++.LASF1529:
++ .string "list_replace_init"
++.LASF1186:
++ .string "dqb_btime"
++.LASF1543:
++ .string "set_running_timer"
++.LASF589:
++ .string "zonelist_cache"
++.LASF957:
++ .string "arch"
++.LASF1660:
++ .string "__ksymtab_del_timer_sync"
++.LASF1664:
++ .string "__ksymtab_schedule_timeout"
++.LASF1457:
++ .string "kill_sb"
++.LASF1414:
++ .string "fl_ops"
++.LASF1564:
++ .string "original"
++.LASF490:
++ .string "_timer"
++.LASF484:
++ .string "_status"
++.LASF150:
++ .string "i387"
++.LASF904:
++ .string "irqs"
++.LASF1284:
++ .string "quotactl_ops"
++.LASF294:
++ .string "sequence"
++.LASF1181:
++ .string "dqb_bsoftlimit"
++.LASF1613:
++ .string "schedule_timeout_uninterruptible"
++.LASF526:
++ .string "d_subdirs"
++.LASF671:
++ .string "i_private"
++.LASF806:
++ .string "kioctx"
++.LASF854:
++ .string "posix_timers"
++.LASF796:
++ .string "f_pos"
++.LASF311:
++ .string "hlist_node"
++.LASF1147:
++ .string "s_wait_unfrozen"
++.LASF491:
++ .string "_sigchld"
++.LASF530:
++ .string "d_sb"
++.LASF22:
++ .string "__s16"
++.LASF240:
++ .string "comm"
++.LASF859:
++ .string "pgrp"
++.LASF1035:
++ .string "set_affinity"
++.LASF1568:
++ .string "round_jiffies_relative"
++.LASF682:
++ .string "mm_users"
++.LASF473:
++ .string "sigval"
++.LASF1005:
++ .string "module_sect_attr"
++.LASF1340:
++ .string "bd_holders"
++.LASF1582:
++ .string "timer_cpu_notify"
++.LASF1545:
++ .string "clear_pending"
++.LASF728:
++ .string "ktime"
++.LASF1673:
++ .string "__kstrtab_msleep_interruptible"
++.LASF1023:
++ .string "wake_depth"
++.LASF1653:
++ .string "__kstrtab_mod_timer"
++.LASF1570:
++ .string "init_timer"
++.LASF842:
++ .string "signalfd_list"
++.LASF1259:
++ .string "dq_off"
++.LASF80:
++ .string "int21_revectored"
++.LASF1497:
++ .string "inuse"
++.LASF1495:
++ .string "kstatfs"
++.LASF1173:
++ .string "ia_atime"
++.LASF1694:
++ .string "skip_ioapic_setup"
++.LASF799:
++ .string "f_gid"
++.LASF600:
++ .string "head"
++.LASF813:
++ .string "max_reqs"
++.LASF1184:
++ .string "dqb_isoftlimit"
++.LASF1720:
++ .string "/usr/src/linux-2.6.22.19-chopstix"
++.LASF1018:
++ .string "chip"
++.LASF1260:
++ .string "dq_flags"
++.LASF697:
++ .string "reserved_vm"
++.LASF33:
++ .string "pid_t"
++.LASF109:
++ .string "uptime"
++.LASF72:
++ .string "__gsh"
++.LASF43:
++ .string "clock_t"
++.LASF369:
++ .string "dev_pm_info"
++.LASF1569:
++ .string "internal_add_timer"
++.LASF229:
++ .string "egid"
++.LASF822:
++ .string "mm_counter_t"
++.LASF13:
++ .string "__kernel_uid32_t"
++.LASF1230:
++ .string "qs_rtbtimelimit"
++.LASF259:
++ .string "audit_context"
++.LASF1489:
++ .string "filldir_t"
++.LASF204:
++ .string "real_parent"
++.LASF1639:
++ .string "__kstrtab___round_jiffies"
++.LASF897:
++ .string "system"
++.LASF1453:
++ .string "fa_file"
++.LASF1378:
++ .string "truncate_range"
++.LASF1159:
++ .string "create_mode"
++.LASF1243:
++ .string "dqi_dirty_list"
++.LASF25:
++ .string "__s32"
++.LASF385:
++ .string "is_registered"
++.LASF319:
++ .string "__wait_queue_head"
++.LASF1099:
++ .string "right"
++.LASF1700:
++ .string "per_cpu__rcu_data"
++.LASF1321:
++ .string "i_mmap_writable"
++.LASF1607:
++ .string "try_to_del_timer_sync"
++.LASF561:
++ .string "all_unreclaimable"
++.LASF1447:
++ .string "nfs_fl"
++.LASF1471:
++ .string "sync_fs"
++.LASF51:
++ .string "arg3"
++.LASF1210:
++ .string "d_rtb_softlimit"
++.LASF425:
++ .string "no_ioapic_check"
++.LASF781:
++ .string "ki_opcode"
++.LASF1142:
++ .string "s_bdev"
++.LASF1437:
++ .string "fl_compare_owner"
++.LASF1022:
++ .string "depth"
++.LASF1451:
++ .string "fa_fd"
++.LASF963:
++ .string "modules_which_use_me"
++.LASF1512:
++ .string "vm_event_state"
++.LASF1584:
++ .string "hcpu"
++.LASF1689:
++ .string "__FIXADDR_TOP"
++.LASF1711:
++ .string "ioport_resource"
++.LASF926:
++ .string "mkobj"
++.LASF916:
++ .string "elf32_sym"
++.LASF1287:
++ .string "quota_sync"
++.LASF643:
++ .string "i_size"
++.LASF613:
++ .string "qlen"
++.LASF1524:
++ .string "list_replace"
++.LASF1682:
++ .string "xtime"
++.LASF288:
++ .string "spinlock_t"
++.LASF724:
++ .string "node_list"
++.LASF607:
++ .string "rcu_data"
++.LASF199:
++ .string "exit_signal"
++.LASF1510:
++ .string "populate"
++.LASF1626:
++ .string "sys_geteuid"
++.LASF1202:
++ .string "d_bcount"
++.LASF457:
++ .string "refcnt"
++.LASF757:
++ .string "work_struct"
++.LASF974:
++ .string "kobject"
++.LASF1551:
++ .string "read_seqbegin"
++.LASF1189:
++ .string "if_dqinfo"
++.LASF121:
++ .string "bits"
++.LASF984:
++ .string "kobj_type"
++.LASF405:
++ .string "smp_ops"
++.LASF1024:
++ .string "irq_count"
++.LASF53:
++ .string "flags"
++.LASF224:
++ .string "it_sched_expires"
++.LASF196:
++ .string "binfmt"
++.LASF1261:
++ .string "dq_type"
++.LASF237:
++ .string "user"
++.LASF861:
++ .string "leader"
++.LASF1678:
++ .string "cpu_possible_map"
++.LASF381:
++ .string "knode_bus"
++.LASF228:
++ .string "fsuid"
++.LASF653:
++ .string "i_mutex"
++.LASF1187:
++ .string "dqb_itime"
++.LASF1211:
++ .string "d_rtbcount"
++.LASF513:
++ .string "altroot"
++.LASF1179:
++ .string "if_dqblk"
++.LASF314:
++ .string "__wait_queue"
++.LASF605:
++ .string "cs_dmacachep"
++.LASF145:
++ .string "esp0"
++.LASF12:
++ .string "char"
++.LASF1252:
++ .string "dq_free"
++.LASF970:
++ .string "percpu"
++.LASF304:
++ .string "rb_left"
++.LASF52:
++ .string "uaddr"
++.LASF1262:
++ .string "dq_dqb"
++.LASF827:
++ .string "vm_next"
++.LASF1028:
++ .string "irq_chip"
++.LASF1216:
++ .string "fs_qfilestat"
++.LASF741:
++ .string "HRTIMER_NORESTART"
++.LASF1618:
++ .string "msleep_interruptible"
++.LASF390:
++ .string "driver_data"
++.LASF1222:
++ .string "qs_version"
++.LASF1363:
++ .string "mkdir"
++.LASF230:
++ .string "sgid"
++.LASF675:
++ .string "mmap_cache"
++.LASF952:
++ .string "init_size"
++.LASF1116:
++ .string "s_list"
++.LASF280:
++ .string "fs_excl"
++.LASF1204:
++ .string "d_itimer"
++.LASF4:
++ .string "__kernel_mode_t"
++.LASF168:
++ .string "task_struct"
++.LASF116:
++ .string "freeswap"
++.LASF793:
++ .string "f_count"
++.LASF61:
++ .string "__null_ds"
++.LASF1153:
++ .string "dcookie_struct"
++.LASF769:
++ .string "ki_users"
++.LASF1392:
++ .string "fasync"
++.LASF521:
++ .string "d_inode"
++.LASF27:
++ .string "__s64"
++.LASF1368:
++ .string "follow_link"
++.LASF586:
++ .string "zonelist"
++.LASF603:
++ .string "cs_size"
++.LASF181:
++ .string "sleep_avg"
++.LASF1677:
++ .string "per_cpu__this_cpu_off"
++.LASF834:
++ .string "vm_ops"
++.LASF127:
++ .string "i387_fxsave_struct"
++.LASF1383:
++ .string "aio_read"
++.LASF62:
++ .string "__null_es"
++.LASF189:
++ .string "cpus_allowed"
++.LASF167:
++ .string "supervisor_stack"
++.LASF872:
++ .string "rlim"
++.LASF1113:
++ .string "d_iput"
++.LASF948:
++ .string "extable"
++.LASF771:
++ .string "ki_filp"
++.LASF849:
++ .string "shared_pending"
++.LASF1594:
++ .string "sav_total"
++.LASF528:
++ .string "d_time"
++.LASF1111:
++ .string "d_delete"
++.LASF105:
++ .string "bug_addr"
++.LASF63:
++ .string "__null_fs"
++.LASF1240:
++ .string "v2_i"
++.LASF1572:
++ .string "timer_set_deferrable"
++.LASF1445:
++ .string "nfs4_lock_info"
++.LASF999:
++ .string "drivers_dir"
++.LASF615:
++ .string "curtail"
++.LASF1044:
++ .string "resource"
++.LASF1160:
++ .string "open"
++.LASF1102:
++ .string "prio_tree_root"
++.LASF1301:
++ .string "writepage"
++.LASF716:
++ .string "dumpable"
++.LASF514:
++ .string "rootmnt"
++.LASF110:
++ .string "loads"
++.LASF550:
++ .string "pages_low"
++.LASF64:
++ .string "__null_gs"
++.LASF961:
++ .string "bug_table"
++.LASF902:
++ .string "kernel_stat"
++.LASF1127:
++ .string "s_flags"
++.LASF1339:
++ .string "bd_holder"
++.LASF1389:
++ .string "flush"
++.LASF1610:
++ .string "schedule_timeout"
++.LASF626:
++ .string "mode"
++.LASF1058:
++ .string "bus_type"
++.LASF652:
++ .string "i_lock"
++.LASF553:
++ .string "pageset"
++.LASF972:
++ .string "attribute_group"
++.LASF1706:
++ .string "per_cpu__kstat"
++.LASF669:
++ .string "i_flags"
++.LASF1064:
++ .string "bus_notifier"
++.LASF398:
++ .string "devres_lock"
++.LASF1691:
++ .string "acpi_disabled"
++.LASF123:
++ .string "desc_struct"
++.LASF1201:
++ .string "d_ino_softlimit"
++.LASF635:
++ .string "i_dentry"
++.LASF1401:
++ .string "fl_next"
++.LASF324:
++ .string "wait_list"
++.LASF458:
++ .string "proc_list"
++.LASF1052:
++ .string "n_node"
++.LASF1391:
++ .string "aio_fsync"
++.LASF1484:
++ .string "get_parent"
++.LASF611:
++ .string "nxtlist"
++.LASF154:
++ .string "saved_esp0"
++.LASF705:
++ .string "start_stack"
++.LASF1597:
++ .string "sys_sysinfo"
++.LASF1108:
++ .string "dentry_operations"
++.LASF623:
++ .string "PIDTYPE_MAX"
++.LASF221:
++ .string "maj_flt"
++.LASF891:
++ .string "unplug_io_fn"
++.LASF287:
++ .string "raw_lock"
++.LASF465:
++ .string "__sigrestore_t"
++.LASF1307:
++ .string "prepare_write"
++.LASF1611:
++ .string "timeout"
++.LASF708:
++ .string "env_start"
++.LASF1299:
++ .string "dqptr_sem"
++.LASF1270:
++ .string "release_dqblk"
++.LASF124:
++ .string "i387_fsave_struct"
++.LASF1622:
++ .string "espec"
++.LASF1479:
++ .string "show_stats"
++.LASF1698:
++ .string "contig_page_data"
++.LASF318:
++ .string "wait_queue_func_t"
++.LASF845:
++ .string "signal_struct"
++.LASF1707:
++ .string "per_cpu__gdt_page"
++.LASF636:
++ .string "i_ino"
++.LASF241:
++ .string "link_count"
++.LASF782:
++ .string "ki_nbytes"
++.LASF1449:
++ .string "fasync_struct"
++.LASF374:
++ .string "saved_state"
++.LASF808:
++ .string "dead"
++.LASF522:
++ .string "d_hash"
++.LASF1293:
++ .string "set_xstate"
++.LASF1100:
++ .string "prio_tree_node"
++.LASF696:
++ .string "stack_vm"
++.LASF591:
++ .string "_count"
++.LASF1089:
++ .string "class_device_attribute"
++.LASF24:
++ .string "__u16"
++.LASF55:
++ .string "futex"
++.LASF180:
++ .string "ioprio"
++.LASF755:
++ .string "task_io_accounting"
++.LASF236:
++ .string "keep_capabilities"
++.LASF426:
++ .string "init_apic_ldr"
++.LASF639:
++ .string "i_uid"
++.LASF147:
++ .string "debugreg"
++.LASF829:
++ .string "vm_flags"
++.LASF333:
++ .string "mp_config_table"
++.LASF912:
++ .string "gdt_page"
++.LASF585:
++ .string "kswapd_max_order"
++.LASF48:
++ .string "arg0"
++.LASF50:
++ .string "arg2"
++.LASF57:
++ .string "pgprot"
++.LASF1288:
++ .string "get_info"
++.LASF475:
++ .string "sival_ptr"
++.LASF191:
++ .string "first_time_slice"
++.LASF90:
++ .string "___eax"
++.LASF584:
++ .string "kswapd"
++.LASF847:
++ .string "wait_chldexit"
++.LASF142:
++ .string "mm_segment_t"
++.LASF1196:
++ .string "d_fieldmask"
++.LASF41:
++ .string "ssize_t"
++.LASF971:
++ .string "args"
++.LASF74:
++ .string "__map"
++.LASF26:
++ .string "__u32"
++.LASF84:
++ .string "___ebx"
++.LASF122:
++ .string "cpumask_t"
++.LASF1644:
++ .string "__ksymtab_round_jiffies"
++.LASF763:
++ .string "iovec"
++.LASF870:
++ .string "cinblock"
++.LASF1538:
++ .string "oldbit"
++.LASF581:
++ .string "node_spanned_pages"
++.LASF1566:
++ .string "__round_jiffies_relative"
++.LASF1344:
++ .string "bd_part"
++.LASF1635:
++ .string "__ksymtab_jiffies_64"
++.LASF992:
++ .string "value"
++.LASF445:
++ .string "cpu_mask_to_apicid"
++.LASF1555:
++ .string "active_tasks"
++.LASF440:
++ .string "mpc_oem_pci_bus"
++.LASF885:
++ .string "rt_mutex_waiter"
++.LASF447:
++ .string "send_IPI_allbutself"
++.LASF1156:
++ .string "saved_names"
++.LASF784:
++ .string "ki_left"
++.LASF85:
++ .string "___ecx"
++.LASF1683:
++ .string "wall_to_monotonic"
++.LASF1454:
++ .string "file_system_type"
++.LASF1177:
++ .string "qid_t"
++.LASF198:
++ .string "exit_code"
++.LASF99:
++ .string "___ss"
++.LASF1467:
++ .string "drop_inode"
++.LASF511:
++ .string "umask"
++.LASF421:
++ .string "apic_destination_logical"
++.LASF148:
++ .string "trap_no"
++.LASF172:
++ .string "ptrace"
++.LASF393:
++ .string "dma_mask"
++.LASF758:
++ .string "delayed_work"
++.LASF1554:
++ .string "ticks"
++.LASF1641:
++ .string "__kstrtab___round_jiffies_relative"
++.LASF205:
++ .string "parent"
++.LASF88:
++ .string "___edi"
++.LASF841:
++ .string "siglock"
++.LASF1616:
++ .string "schedule_timeout_interruptible"
++.LASF1051:
++ .string "n_klist"
++.LASF86:
++ .string "___edx"
++.LASF750:
++ .string "get_softirq_time"
++.LASF493:
++ .string "_sigpoll"
++.LASF389:
++ .string "driver"
++.LASF0:
++ .string "unsigned int"
++.LASF309:
++ .string "hlist_head"
++.LASF824:
++ .string "vm_mm"
++.LASF138:
++ .string "entry_eip"
++ .ident "GCC: (GNU) 4.1.1 (Gentoo 4.1.1-r3)"
++ .section .note.GNU-stack,"",@progbits
+diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/mm/memory.c linux-2.6.22-590/mm/memory.c
+--- linux-2.6.22-580/mm/memory.c 2009-02-18 09:56:03.000000000 -0500
++++ linux-2.6.22-590/mm/memory.c 2009-02-18 09:57:23.000000000 -0500
+@@ -59,6 +59,7 @@
+
+ #include <linux/swapops.h>
+ #include <linux/elf.h>
++#include <linux/arrays.h>
+
+ #ifndef CONFIG_NEED_MULTIPLE_NODES
+ /* use the per-pgdat data instead for discontigmem - mbligh */
+@@ -2601,6 +2602,15 @@
+ return ret;
+ }
+
++extern void (*rec_event)(void *, unsigned int); /* Chopstix hook; checked for NULL below */
++struct event_spec {
++ unsigned long pc; /* sampled program counter, page-aligned below */
++ unsigned long dcookie;
++ unsigned int count;
++ unsigned char reason; /* 0 == allocation fault, see __handle_mm_fault */
++};
++
++
+ /*
+ * By the time we get here, we already hold the mm semaphore
+ */
+@@ -2630,6 +2640,24 @@
+ if (!pte)
+ return VM_FAULT_OOM;
+
++#ifdef CONFIG_CHOPSTIX
++ if (rec_event) {
++  struct event event;
++  struct event_spec espec;
++  struct pt_regs *regs;
++  unsigned int pc;
++
++  regs = task_pt_regs(current);
++  pc = regs->eip & (unsigned int) ~4095; /* round down to the 4 KiB page */
++  espec.reason = 0; /* alloc */
++  espec.pc = pc;
++  event.event_data = &espec;
++  event.task = current;
++  event.event_type = 5; /* event class used for memory faults */
++  (*rec_event)(&event, 1);
++ }
++#endif
++
+ return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
+ }
+
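+
+/*
+ * A minimal sketch of a rec_event consumer, assuming the struct event
+ * layout implied by the assignments above (event_data, task and
+ * event_type come from linux/arrays.h, which this hunk does not show);
+ * the handler name and the log_sample() sink are hypothetical:
+ *
+ *	static void chopstix_rec_event(void *data, unsigned int count)
+ *	{
+ *		struct event *ev = data;
+ *		struct event_spec *spec = ev->event_data;
+ *
+ *		if (ev->event_type == 5)
+ *			log_sample(spec->pc, spec->reason, count);
+ *	}
+ *
+ * Whichever module implements the sink publishes it by assigning
+ * rec_event; the probe in __handle_mm_fault() only fires while the
+ * pointer is non-NULL.
+ */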
+diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/mm/memory.c.orig linux-2.6.22-590/mm/memory.c.orig
+--- linux-2.6.22-580/mm/memory.c.orig 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.22-590/mm/memory.c.orig 2009-02-18 09:56:03.000000000 -0500
+@@ -0,0 +1,2841 @@
++/*
++ * linux/mm/memory.c
++ *
++ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
++ */
++
++/*
++ * demand-loading started 01.12.91 - seems it is high on the list of
++ * things wanted, and it should be easy to implement. - Linus
++ */
++
++/*
++ * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
++ * pages started 02.12.91, seems to work. - Linus.
++ *
++ * Tested sharing by executing about 30 /bin/sh: under the old kernel it
++ * would have taken more than the 6M I have free, but it worked well as
++ * far as I could see.
++ *
++ * Also corrected some "invalidate()"s - I wasn't doing enough of them.
++ */
++
++/*
++ * Real VM (paging to/from disk) started 18.12.91. Much more work and
++ * thought has to go into this. Oh, well..
++ * 19.12.91 - works, somewhat. Sometimes I get faults, don't know why.
++ * Found it. Everything seems to work now.
++ * 20.12.91 - Ok, making the swap-device changeable like the root.
++ */
++
++/*
++ * 05.04.94 - Multi-page memory management added for v1.1.
++ * Idea by Alex Bligh (alex@cconcepts.co.uk)
++ *
++ * 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG
++ * (Gerhard.Wichert@pdb.siemens.de)
++ *
++ * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
++ */
++
++#include <linux/kernel_stat.h>
++#include <linux/mm.h>
++#include <linux/hugetlb.h>
++#include <linux/mman.h>
++#include <linux/swap.h>
++#include <linux/highmem.h>
++#include <linux/pagemap.h>
++#include <linux/rmap.h>
++#include <linux/module.h>
++#include <linux/delayacct.h>
++#include <linux/init.h>
++#include <linux/writeback.h>
++
++#include <asm/pgalloc.h>
++#include <asm/uaccess.h>
++#include <asm/tlb.h>
++#include <asm/tlbflush.h>
++#include <asm/pgtable.h>
++
++#include <linux/swapops.h>
++#include <linux/elf.h>
++
++#ifndef CONFIG_NEED_MULTIPLE_NODES
++/* use the per-pgdat data instead for discontigmem - mbligh */
++unsigned long max_mapnr;
++struct page *mem_map;
++
++EXPORT_SYMBOL(max_mapnr);
++EXPORT_SYMBOL(mem_map);
++#endif
++
++unsigned long num_physpages;
++/*
++ * A number of key systems in x86 including ioremap() rely on the assumption
++ * that high_memory defines the upper bound on direct map memory, the end
++ * of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and
++ * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
++ * and ZONE_HIGHMEM.
++ */
++void * high_memory;
++unsigned long vmalloc_earlyreserve;
++
++EXPORT_SYMBOL(num_physpages);
++EXPORT_SYMBOL(high_memory);
++EXPORT_SYMBOL(vmalloc_earlyreserve);
++
++int randomize_va_space __read_mostly = 1;
++
++static int __init disable_randmaps(char *s)
++{
++ randomize_va_space = 0;
++ return 1;
++}
++__setup("norandmaps", disable_randmaps);
++
++
++/*
++ * If a p?d_bad entry is found while walking page tables, report
++ * the error, before resetting entry to p?d_none. Usually (but
++ * very seldom) called out from the p?d_none_or_clear_bad macros.
++ */
++
++void pgd_clear_bad(pgd_t *pgd)
++{
++ pgd_ERROR(*pgd);
++ pgd_clear(pgd);
++}
++
++void pud_clear_bad(pud_t *pud)
++{
++ pud_ERROR(*pud);
++ pud_clear(pud);
++}
++
++void pmd_clear_bad(pmd_t *pmd)
++{
++ pmd_ERROR(*pmd);
++ pmd_clear(pmd);
++}
++
++/*
++ * Note: this doesn't free the actual pages themselves. That
++ * has been handled earlier when unmapping all the memory regions.
++ */
++static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
++{
++ struct page *page = pmd_page(*pmd);
++ pmd_clear(pmd);
++ pte_lock_deinit(page);
++ pte_free_tlb(tlb, page);
++ dec_zone_page_state(page, NR_PAGETABLE);
++ tlb->mm->nr_ptes--;
++}
++
++static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
++ unsigned long addr, unsigned long end,
++ unsigned long floor, unsigned long ceiling)
++{
++ pmd_t *pmd;
++ unsigned long next;
++ unsigned long start;
++
++ start = addr;
++ pmd = pmd_offset(pud, addr);
++ do {
++ next = pmd_addr_end(addr, end);
++ if (pmd_none_or_clear_bad(pmd))
++ continue;
++ free_pte_range(tlb, pmd);
++ } while (pmd++, addr = next, addr != end);
++
++ start &= PUD_MASK;
++ if (start < floor)
++ return;
++ if (ceiling) {
++ ceiling &= PUD_MASK;
++ if (!ceiling)
++ return;
++ }
++ if (end - 1 > ceiling - 1)
++ return;
++
++ pmd = pmd_offset(pud, start);
++ pud_clear(pud);
++ pmd_free_tlb(tlb, pmd);
++}
++
++static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
++ unsigned long addr, unsigned long end,
++ unsigned long floor, unsigned long ceiling)
++{
++ pud_t *pud;
++ unsigned long next;
++ unsigned long start;
++
++ start = addr;
++ pud = pud_offset(pgd, addr);
++ do {
++ next = pud_addr_end(addr, end);
++ if (pud_none_or_clear_bad(pud))
++ continue;
++ free_pmd_range(tlb, pud, addr, next, floor, ceiling);
++ } while (pud++, addr = next, addr != end);
++
++ start &= PGDIR_MASK;
++ if (start < floor)
++ return;
++ if (ceiling) {
++ ceiling &= PGDIR_MASK;
++ if (!ceiling)
++ return;
++ }
++ if (end - 1 > ceiling - 1)
++ return;
++
++ pud = pud_offset(pgd, start);
++ pgd_clear(pgd);
++ pud_free_tlb(tlb, pud);
++}
++
++/*
++ * This function frees user-level page tables of a process.
++ *
++ * Must be called with pagetable lock held.
++ */
++void free_pgd_range(struct mmu_gather **tlb,
++ unsigned long addr, unsigned long end,
++ unsigned long floor, unsigned long ceiling)
++{
++ pgd_t *pgd;
++ unsigned long next;
++ unsigned long start;
++
++ /*
++ * The next few lines have given us lots of grief...
++ *
++ * Why are we testing PMD* at this top level? Because often
++ * there will be no work to do at all, and we'd prefer not to
++ * go all the way down to the bottom just to discover that.
++ *
++ * Why all these "- 1"s? Because 0 represents both the bottom
++ * of the address space and the top of it (using -1 for the
++ * top wouldn't help much: the masks would do the wrong thing).
++ * The rule is that addr 0 and floor 0 refer to the bottom of
++ * the address space, but end 0 and ceiling 0 refer to the top.
++ * Comparisons need to use "end - 1" and "ceiling - 1" (though
++ * that end 0 case should be mythical).
++ *
++ * Wherever addr is brought up or ceiling brought down, we must
++ * be careful to reject "the opposite 0" before it confuses the
++ * subsequent tests. But what about where end is brought down
++ * by PMD_SIZE below? no, end can't go down to 0 there.
++ *
++ * Whereas we round start (addr) and ceiling down, by different
++ * masks at different levels, in order to test whether a table
++ * now has no other vmas using it, so can be freed, we don't
++ * bother to round floor or end up - the tests don't need that.
++ */
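++
++ /*
++ * A worked instance of the "- 1" trick above, assuming 32-bit
++ * unsigned long: with end == 0 (meaning "top of address space")
++ * and ceiling == 0xc0000000, the naive test end > ceiling is
++ * 0 > 0xc0000000 and wrongly fails, while end - 1 > ceiling - 1
++ * compares 0xffffffff > 0xbfffffff and correctly reports end
++ * above ceiling.
++ */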
++
++ addr &= PMD_MASK;
++ if (addr < floor) {
++ addr += PMD_SIZE;
++ if (!addr)
++ return;
++ }
++ if (ceiling) {
++ ceiling &= PMD_MASK;
++ if (!ceiling)
++ return;
++ }
++ if (end - 1 > ceiling - 1)
++ end -= PMD_SIZE;
++ if (addr > end - 1)
++ return;
++
++ start = addr;
++ pgd = pgd_offset((*tlb)->mm, addr);
++ do {
++ next = pgd_addr_end(addr, end);
++ if (pgd_none_or_clear_bad(pgd))
++ continue;
++ free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
++ } while (pgd++, addr = next, addr != end);
++
++ if (!(*tlb)->fullmm)
++ flush_tlb_pgtables((*tlb)->mm, start, end);
++}
++
++void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
++ unsigned long floor, unsigned long ceiling)
++{
++ while (vma) {
++ struct vm_area_struct *next = vma->vm_next;
++ unsigned long addr = vma->vm_start;
++
++ /*
++ * Hide vma from rmap and vmtruncate before freeing pgtables
++ */
++ anon_vma_unlink(vma);
++ unlink_file_vma(vma);
++
++ if (is_vm_hugetlb_page(vma)) {
++ hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
++ floor, next? next->vm_start: ceiling);
++ } else {
++ /*
++ * Optimization: gather nearby vmas into one call down
++ */
++ while (next && next->vm_start <= vma->vm_end + PMD_SIZE
++ && !is_vm_hugetlb_page(next)) {
++ vma = next;
++ next = vma->vm_next;
++ anon_vma_unlink(vma);
++ unlink_file_vma(vma);
++ }
++ free_pgd_range(tlb, addr, vma->vm_end,
++ floor, next? next->vm_start: ceiling);
++ }
++ vma = next;
++ }
++}
++
++int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
++{
++ struct page *new = pte_alloc_one(mm, address);
++ if (!new)
++ return -ENOMEM;
++
++ pte_lock_init(new);
++ spin_lock(&mm->page_table_lock);
++ if (pmd_present(*pmd)) { /* Another has populated it */
++ pte_lock_deinit(new);
++ pte_free(new);
++ } else {
++ mm->nr_ptes++;
++ inc_zone_page_state(new, NR_PAGETABLE);
++ pmd_populate(mm, pmd, new);
++ }
++ spin_unlock(&mm->page_table_lock);
++ return 0;
++}
++
++int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
++{
++ pte_t *new = pte_alloc_one_kernel(&init_mm, address);
++ if (!new)
++ return -ENOMEM;
++
++ spin_lock(&init_mm.page_table_lock);
++ if (pmd_present(*pmd)) /* Another has populated it */
++ pte_free_kernel(new);
++ else
++ pmd_populate_kernel(&init_mm, pmd, new);
++ spin_unlock(&init_mm.page_table_lock);
++ return 0;
++}
++
++static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
++{
++ if (file_rss)
++ add_mm_counter(mm, file_rss, file_rss);
++ if (anon_rss)
++ add_mm_counter(mm, anon_rss, anon_rss);
++}
++
++/*
++ * This function is called to print an error when a bad pte
++ * is found. For example, we might have a PFN-mapped pte in
++ * a region that doesn't allow it.
++ *
++ * The calling function must still handle the error.
++ */
++void print_bad_pte(struct vm_area_struct *vma, pte_t pte, unsigned long vaddr)
++{
++ printk(KERN_ERR "Bad pte = %08llx, process = %s, "
++ "vm_flags = %lx, vaddr = %lx\n",
++ (long long)pte_val(pte),
++ (vma->vm_mm == current->mm ? current->comm : "???"),
++ vma->vm_flags, vaddr);
++ dump_stack();
++}
++
++static inline int is_cow_mapping(unsigned int flags)
++{
++ return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
++}
++
++/*
++ * This function gets the "struct page" associated with a pte.
++ *
++ * NOTE! Some mappings do not have "struct pages". A raw PFN mapping
++ * will have each page table entry just pointing to a raw page frame
++ * number, and as far as the VM layer is concerned, those do not have
++ * pages associated with them - even if the PFN might point to memory
++ * that otherwise is perfectly fine and has a "struct page".
++ *
++ * The way we recognize those mappings is through the rules set up
++ * by "remap_pfn_range()": the vma will have the VM_PFNMAP bit set,
++ * and the vm_pgoff will point to the first PFN mapped: thus every
++ * page that is a raw mapping will always honor the rule
++ *
++ * pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
++ *
++ * and if that isn't true, the page has been COW'ed (in which case it
++ * _does_ have a "struct page" associated with it even if it is in a
++ * VM_PFNMAP range).
++ */
++struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
++{
++ unsigned long pfn = pte_pfn(pte);
++
++ if (unlikely(vma->vm_flags & VM_PFNMAP)) {
++ unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
++ if (pfn == vma->vm_pgoff + off)
++ return NULL;
++ if (!is_cow_mapping(vma->vm_flags))
++ return NULL;
++ }
++
++ /*
++ * Add some anal sanity checks for now. Eventually,
++ * we should just do "return pfn_to_page(pfn)", but
++ * in the meantime we check that we get a valid pfn,
++ * and that the resulting page looks ok.
++ */
++ if (unlikely(!pfn_valid(pfn))) {
++ print_bad_pte(vma, pte, addr);
++ return NULL;
++ }
++
++ /*
++ * NOTE! We still have PageReserved() pages in the page
++ * tables.
++ *
++ * The PAGE_ZERO() pages and various VDSO mappings can
++ * cause them to exist.
++ */
++ return pfn_to_page(pfn);
++}
++
++/*
++ * copy one vm_area from one task to the other. Assumes the page tables
++ * already present in the new task to be cleared in the whole range
++ * covered by this vma.
++ */
++
++static inline void
++copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
++ pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
++ unsigned long addr, int *rss)
++{
++ unsigned long vm_flags = vma->vm_flags;
++ pte_t pte = *src_pte;
++ struct page *page;
++
++ /* pte contains position in swap or file, so copy. */
++ if (unlikely(!pte_present(pte))) {
++ if (!pte_file(pte)) {
++ swp_entry_t entry = pte_to_swp_entry(pte);
++
++ swap_duplicate(entry);
++ /* make sure dst_mm is on swapoff's mmlist. */
++ if (unlikely(list_empty(&dst_mm->mmlist))) {
++ spin_lock(&mmlist_lock);
++ if (list_empty(&dst_mm->mmlist))
++ list_add(&dst_mm->mmlist,
++ &src_mm->mmlist);
++ spin_unlock(&mmlist_lock);
++ }
++ if (is_write_migration_entry(entry) &&
++ is_cow_mapping(vm_flags)) {
++ /*
++ * COW mappings require pages in both parent
++ * and child to be set to read.
++ */
++ make_migration_entry_read(&entry);
++ pte = swp_entry_to_pte(entry);
++ set_pte_at(src_mm, addr, src_pte, pte);
++ }
++ }
++ goto out_set_pte;
++ }
++
++ /*
++ * If it's a COW mapping, write protect it both
++ * in the parent and the child
++ */
++ if (is_cow_mapping(vm_flags)) {
++ ptep_set_wrprotect(src_mm, addr, src_pte);
++ pte = pte_wrprotect(pte);
++ }
++
++ /*
++ * If it's a shared mapping, mark it clean in
++ * the child
++ */
++ if (vm_flags & VM_SHARED)
++ pte = pte_mkclean(pte);
++ pte = pte_mkold(pte);
++
++ page = vm_normal_page(vma, addr, pte);
++ if (page) {
++ get_page(page);
++ page_dup_rmap(page, vma, addr);
++ rss[!!PageAnon(page)]++;
++ }
++
++out_set_pte:
++ set_pte_at(dst_mm, addr, dst_pte, pte);
++}
++
++static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
++ pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
++ unsigned long addr, unsigned long end)
++{
++ pte_t *src_pte, *dst_pte;
++ spinlock_t *src_ptl, *dst_ptl;
++ int progress = 0;
++ int rss[2];
++
++ if (!vx_rss_avail(dst_mm, ((end - addr)/PAGE_SIZE + 1)))
++ return -ENOMEM;
++
++again:
++ rss[1] = rss[0] = 0;
++ dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
++ if (!dst_pte)
++ return -ENOMEM;
++ src_pte = pte_offset_map_nested(src_pmd, addr);
++ src_ptl = pte_lockptr(src_mm, src_pmd);
++ spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
++ arch_enter_lazy_mmu_mode();
++
++ do {
++ /*
++ * We are holding two locks at this point - either of them
++ * could generate latencies in another task on another CPU.
++ */
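++ /*
++ * With the weights below (8 per copied pte, 1 per empty slot),
++ * the check fires after roughly four copied ptes or 32 empty
++ * ones, trading batching against lock hold time.
++ */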
++ if (progress >= 32) {
++ progress = 0;
++ if (need_resched() ||
++ need_lockbreak(src_ptl) ||
++ need_lockbreak(dst_ptl))
++ break;
++ }
++ if (pte_none(*src_pte)) {
++ progress++;
++ continue;
++ }
++ copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss);
++ progress += 8;
++ } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
++
++ arch_leave_lazy_mmu_mode();
++ spin_unlock(src_ptl);
++ pte_unmap_nested(src_pte - 1);
++ add_mm_rss(dst_mm, rss[0], rss[1]);
++ pte_unmap_unlock(dst_pte - 1, dst_ptl);
++ cond_resched();
++ if (addr != end)
++ goto again;
++ return 0;
++}
++
++static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
++ pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
++ unsigned long addr, unsigned long end)
++{
++ pmd_t *src_pmd, *dst_pmd;
++ unsigned long next;
++
++ dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
++ if (!dst_pmd)
++ return -ENOMEM;
++ src_pmd = pmd_offset(src_pud, addr);
++ do {
++ next = pmd_addr_end(addr, end);
++ if (pmd_none_or_clear_bad(src_pmd))
++ continue;
++ if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
++ vma, addr, next))
++ return -ENOMEM;
++ } while (dst_pmd++, src_pmd++, addr = next, addr != end);
++ return 0;
++}
++
++static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
++ pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
++ unsigned long addr, unsigned long end)
++{
++ pud_t *src_pud, *dst_pud;
++ unsigned long next;
++
++ dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
++ if (!dst_pud)
++ return -ENOMEM;
++ src_pud = pud_offset(src_pgd, addr);
++ do {
++ next = pud_addr_end(addr, end);
++ if (pud_none_or_clear_bad(src_pud))
++ continue;
++ if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
++ vma, addr, next))
++ return -ENOMEM;
++ } while (dst_pud++, src_pud++, addr = next, addr != end);
++ return 0;
++}
++
++int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
++ struct vm_area_struct *vma)
++{
++ pgd_t *src_pgd, *dst_pgd;
++ unsigned long next;
++ unsigned long addr = vma->vm_start;
++ unsigned long end = vma->vm_end;
++
++ /*
++ * Don't copy ptes where a page fault will fill them correctly.
++ * Fork becomes much lighter when there are big shared or private
++ * readonly mappings. The tradeoff is that copy_page_range is more
++ * efficient than faulting.
++ */
++ if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
++ if (!vma->anon_vma)
++ return 0;
++ }
++
++ if (is_vm_hugetlb_page(vma))
++ return copy_hugetlb_page_range(dst_mm, src_mm, vma);
++
++ dst_pgd = pgd_offset(dst_mm, addr);
++ src_pgd = pgd_offset(src_mm, addr);
++ do {
++ next = pgd_addr_end(addr, end);
++ if (pgd_none_or_clear_bad(src_pgd))
++ continue;
++ if (copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
++ vma, addr, next))
++ return -ENOMEM;
++ } while (dst_pgd++, src_pgd++, addr = next, addr != end);
++ return 0;
++}
++
++static unsigned long zap_pte_range(struct mmu_gather *tlb,
++ struct vm_area_struct *vma, pmd_t *pmd,
++ unsigned long addr, unsigned long end,
++ long *zap_work, struct zap_details *details)
++{
++ struct mm_struct *mm = tlb->mm;
++ pte_t *pte;
++ spinlock_t *ptl;
++ int file_rss = 0;
++ int anon_rss = 0;
++
++ pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
++ arch_enter_lazy_mmu_mode();
++ do {
++ pte_t ptent = *pte;
++ if (pte_none(ptent)) {
++ (*zap_work)--;
++ continue;
++ }
++
++ (*zap_work) -= PAGE_SIZE;
++
++ if (pte_present(ptent)) {
++ struct page *page;
++
++ page = vm_normal_page(vma, addr, ptent);
++ if (unlikely(details) && page) {
++ /*
++ * unmap_shared_mapping_pages() wants to
++ * invalidate cache without truncating:
++ * unmap shared but keep private pages.
++ */
++ if (details->check_mapping &&
++ details->check_mapping != page->mapping)
++ continue;
++ /*
++ * Each page->index must be checked when
++ * invalidating or truncating nonlinear.
++ */
++ if (details->nonlinear_vma &&
++ (page->index < details->first_index ||
++ page->index > details->last_index))
++ continue;
++ }
++ ptent = ptep_get_and_clear_full(mm, addr, pte,
++ tlb->fullmm);
++ tlb_remove_tlb_entry(tlb, pte, addr);
++ if (unlikely(!page))
++ continue;
++ if (unlikely(details) && details->nonlinear_vma
++ && linear_page_index(details->nonlinear_vma,
++ addr) != page->index)
++ set_pte_at(mm, addr, pte,
++ pgoff_to_pte(page->index));
++ if (PageAnon(page))
++ anon_rss--;
++ else {
++ if (pte_dirty(ptent))
++ set_page_dirty(page);
++ if (pte_young(ptent))
++ SetPageReferenced(page);
++ file_rss--;
++ }
++ page_remove_rmap(page, vma);
++ tlb_remove_page(tlb, page);
++ continue;
++ }
++ /*
++ * If details->check_mapping, we leave swap entries;
++ * if details->nonlinear_vma, we leave file entries.
++ */
++ if (unlikely(details))
++ continue;
++ if (!pte_file(ptent))
++ free_swap_and_cache(pte_to_swp_entry(ptent));
++ pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
++ } while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));
++
++ add_mm_rss(mm, file_rss, anon_rss);
++ arch_leave_lazy_mmu_mode();
++ pte_unmap_unlock(pte - 1, ptl);
++
++ return addr;
++}
++
++static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
++ struct vm_area_struct *vma, pud_t *pud,
++ unsigned long addr, unsigned long end,
++ long *zap_work, struct zap_details *details)
++{
++ pmd_t *pmd;
++ unsigned long next;
++
++ pmd = pmd_offset(pud, addr);
++ do {
++ next = pmd_addr_end(addr, end);
++ if (pmd_none_or_clear_bad(pmd)) {
++ (*zap_work)--;
++ continue;
++ }
++ next = zap_pte_range(tlb, vma, pmd, addr, next,
++ zap_work, details);
++ } while (pmd++, addr = next, (addr != end && *zap_work > 0));
++
++ return addr;
++}
++
++static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
++ struct vm_area_struct *vma, pgd_t *pgd,
++ unsigned long addr, unsigned long end,
++ long *zap_work, struct zap_details *details)
++{
++ pud_t *pud;
++ unsigned long next;
++
++ pud = pud_offset(pgd, addr);
++ do {
++ next = pud_addr_end(addr, end);
++ if (pud_none_or_clear_bad(pud)) {
++ (*zap_work)--;
++ continue;
++ }
++ next = zap_pmd_range(tlb, vma, pud, addr, next,
++ zap_work, details);
++ } while (pud++, addr = next, (addr != end && *zap_work > 0));
++
++ return addr;
++}
++
++static unsigned long unmap_page_range(struct mmu_gather *tlb,
++ struct vm_area_struct *vma,
++ unsigned long addr, unsigned long end,
++ long *zap_work, struct zap_details *details)
++{
++ pgd_t *pgd;
++ unsigned long next;
++
++ if (details && !details->check_mapping && !details->nonlinear_vma)
++ details = NULL;
++
++ BUG_ON(addr >= end);
++ tlb_start_vma(tlb, vma);
++ pgd = pgd_offset(vma->vm_mm, addr);
++ do {
++ next = pgd_addr_end(addr, end);
++ if (pgd_none_or_clear_bad(pgd)) {
++ (*zap_work)--;
++ continue;
++ }
++ next = zap_pud_range(tlb, vma, pgd, addr, next,
++ zap_work, details);
++ } while (pgd++, addr = next, (addr != end && *zap_work > 0));
++ tlb_end_vma(tlb, vma);
++
++ return addr;
++}
++
++#ifdef CONFIG_PREEMPT
++# define ZAP_BLOCK_SIZE (8 * PAGE_SIZE)
++#else
++/* No preempt: go for improved straight-line efficiency */
++# define ZAP_BLOCK_SIZE (1024 * PAGE_SIZE)
++#endif
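++
++/*
++ * With 4 KiB pages this is 32 KiB per batch under CONFIG_PREEMPT and
++ * 4 MiB otherwise; unmap_vmas() finishes and restarts the mmu_gather
++ * between batches so the caller can be rescheduled.
++ */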
++
++/**
++ * unmap_vmas - unmap a range of memory covered by a list of vma's
++ * @tlbp: address of the caller's struct mmu_gather
++ * @vma: the starting vma
++ * @start_addr: virtual address at which to start unmapping
++ * @end_addr: virtual address at which to end unmapping
++ * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
++ * @details: details of nonlinear truncation or shared cache invalidation
++ *
++ * Returns the end address of the unmapping (restart addr if interrupted).
++ *
++ * Unmap all pages in the vma list.
++ *
++ * We aim to not hold locks for too long (for scheduling latency reasons).
++ * So zap pages in ZAP_BLOCK_SIZE bytecounts. This means we need to
++ * return the ending mmu_gather to the caller.
++ *
++ * Only addresses between `start' and `end' will be unmapped.
++ *
++ * The VMA list must be sorted in ascending virtual address order.
++ *
++ * unmap_vmas() assumes that the caller will flush the whole unmapped address
++ * range after unmap_vmas() returns. So the only responsibility here is to
++ * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
++ * drops the lock and schedules.
++ */
++unsigned long unmap_vmas(struct mmu_gather **tlbp,
++ struct vm_area_struct *vma, unsigned long start_addr,
++ unsigned long end_addr, unsigned long *nr_accounted,
++ struct zap_details *details)
++{
++ long zap_work = ZAP_BLOCK_SIZE;
++ unsigned long tlb_start = 0; /* For tlb_finish_mmu */
++ int tlb_start_valid = 0;
++ unsigned long start = start_addr;
++ spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
++ int fullmm = (*tlbp)->fullmm;
++
++ for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
++ unsigned long end;
++
++ start = max(vma->vm_start, start_addr);
++ if (start >= vma->vm_end)
++ continue;
++ end = min(vma->vm_end, end_addr);
++ if (end <= vma->vm_start)
++ continue;
++
++ if (vma->vm_flags & VM_ACCOUNT)
++ *nr_accounted += (end - start) >> PAGE_SHIFT;
++
++ while (start != end) {
++ if (!tlb_start_valid) {
++ tlb_start = start;
++ tlb_start_valid = 1;
++ }
++
++ if (unlikely(is_vm_hugetlb_page(vma))) {
++ unmap_hugepage_range(vma, start, end);
++ zap_work -= (end - start) /
++ (HPAGE_SIZE / PAGE_SIZE);
++ start = end;
++ } else
++ start = unmap_page_range(*tlbp, vma,
++ start, end, &zap_work, details);
++
++ if (zap_work > 0) {
++ BUG_ON(start != end);
++ break;
++ }
++
++ tlb_finish_mmu(*tlbp, tlb_start, start);
++
++ if (need_resched() ||
++ (i_mmap_lock && need_lockbreak(i_mmap_lock))) {
++ if (i_mmap_lock) {
++ *tlbp = NULL;
++ goto out;
++ }
++ cond_resched();
++ }
++
++ *tlbp = tlb_gather_mmu(vma->vm_mm, fullmm);
++ tlb_start_valid = 0;
++ zap_work = ZAP_BLOCK_SIZE;
++ }
++ }
++out:
++ return start; /* which is now the end (or restart) address */
++}
++
++/**
++ * zap_page_range - remove user pages in a given range
++ * @vma: vm_area_struct holding the applicable pages
++ * @address: starting address of pages to zap
++ * @size: number of bytes to zap
++ * @details: details of nonlinear truncation or shared cache invalidation
++ */
++unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
++ unsigned long size, struct zap_details *details)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ struct mmu_gather *tlb;
++ unsigned long end = address + size;
++ unsigned long nr_accounted = 0;
++
++ lru_add_drain();
++ tlb = tlb_gather_mmu(mm, 0);
++ update_hiwater_rss(mm);
++ end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
++ if (tlb)
++ tlb_finish_mmu(tlb, address, end);
++ return end;
++}
++
++/*
++ * Do a quick page-table lookup for a single page.
++ */
++struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
++ unsigned int flags)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *ptep, pte;
++ spinlock_t *ptl;
++ struct page *page;
++ struct mm_struct *mm = vma->vm_mm;
++
++ page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
++ if (!IS_ERR(page)) {
++ BUG_ON(flags & FOLL_GET);
++ goto out;
++ }
++
++ page = NULL;
++ pgd = pgd_offset(mm, address);
++ if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
++ goto no_page_table;
++
++ pud = pud_offset(pgd, address);
++ if (pud_none(*pud) || unlikely(pud_bad(*pud)))
++ goto no_page_table;
++
++ pmd = pmd_offset(pud, address);
++ if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
++ goto no_page_table;
++
++ if (pmd_huge(*pmd)) {
++ BUG_ON(flags & FOLL_GET);
++ page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
++ goto out;
++ }
++
++ ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
++ if (!ptep)
++ goto out;
++
++ pte = *ptep;
++ if (!pte_present(pte))
++ goto unlock;
++ if ((flags & FOLL_WRITE) && !pte_write(pte))
++ goto unlock;
++ page = vm_normal_page(vma, address, pte);
++ if (unlikely(!page))
++ goto unlock;
++
++ if (flags & FOLL_GET)
++ get_page(page);
++ if (flags & FOLL_TOUCH) {
++ if ((flags & FOLL_WRITE) &&
++ !pte_dirty(pte) && !PageDirty(page))
++ set_page_dirty(page);
++ mark_page_accessed(page);
++ }
++unlock:
++ pte_unmap_unlock(ptep, ptl);
++out:
++ return page;
++
++no_page_table:
++ /*
++ * When core dumping an enormous anonymous area that nobody
++ * has touched so far, we don't want to allocate page tables.
++ */
++ if (flags & FOLL_ANON) {
++ page = ZERO_PAGE(address);
++ if (flags & FOLL_GET)
++ get_page(page);
++ BUG_ON(flags & FOLL_WRITE);
++ }
++ return page;
++}
++
++int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
++ unsigned long start, int len, int write, int force,
++ struct page **pages, struct vm_area_struct **vmas)
++{
++ int i;
++ unsigned int vm_flags;
++
++ if (len <= 0)
++ return 0;
++ /*
++ * Require read or write permissions.
++ * If 'force' is set, we only require the "MAY" flags.
++ */
++ vm_flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
++ vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
++ i = 0;
++
++ do {
++ struct vm_area_struct *vma;
++ unsigned int foll_flags;
++
++ vma = find_extend_vma(mm, start);
++ if (!vma && in_gate_area(tsk, start)) {
++ unsigned long pg = start & PAGE_MASK;
++ struct vm_area_struct *gate_vma = get_gate_vma(tsk);
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++ if (write) /* user gate pages are read-only */
++ return i ? : -EFAULT;
++ if (pg > TASK_SIZE)
++ pgd = pgd_offset_k(pg);
++ else
++ pgd = pgd_offset_gate(mm, pg);
++ BUG_ON(pgd_none(*pgd));
++ pud = pud_offset(pgd, pg);
++ BUG_ON(pud_none(*pud));
++ pmd = pmd_offset(pud, pg);
++ if (pmd_none(*pmd))
++ return i ? : -EFAULT;
++ pte = pte_offset_map(pmd, pg);
++ if (pte_none(*pte)) {
++ pte_unmap(pte);
++ return i ? : -EFAULT;
++ }
++ if (pages) {
++ struct page *page = vm_normal_page(gate_vma, start, *pte);
++ pages[i] = page;
++ if (page)
++ get_page(page);
++ }
++ pte_unmap(pte);
++ if (vmas)
++ vmas[i] = gate_vma;
++ i++;
++ start += PAGE_SIZE;
++ len--;
++ continue;
++ }
++
++ if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
++ || !(vm_flags & vma->vm_flags))
++ return i ? : -EFAULT;
++
++ if (is_vm_hugetlb_page(vma)) {
++ i = follow_hugetlb_page(mm, vma, pages, vmas,
++ &start, &len, i);
++ continue;
++ }
++
++ foll_flags = FOLL_TOUCH;
++ if (pages)
++ foll_flags |= FOLL_GET;
++ if (!write && !(vma->vm_flags & VM_LOCKED) &&
++ (!vma->vm_ops || !vma->vm_ops->nopage))
++ foll_flags |= FOLL_ANON;
++
++ do {
++ struct page *page;
++
++ if (write)
++ foll_flags |= FOLL_WRITE;
++
++ cond_resched();
++ while (!(page = follow_page(vma, start, foll_flags))) {
++ int ret;
++ ret = __handle_mm_fault(mm, vma, start,
++ foll_flags & FOLL_WRITE);
++ /*
++ * The VM_FAULT_WRITE bit tells us that do_wp_page has
++ * broken COW when necessary, even if maybe_mkwrite
++ * decided not to set pte_write. We can thus safely do
++ * subsequent page lookups as if they were reads.
++ */
++ if (ret & VM_FAULT_WRITE)
++ foll_flags &= ~FOLL_WRITE;
++
++ switch (ret & ~VM_FAULT_WRITE) {
++ case VM_FAULT_MINOR:
++ tsk->min_flt++;
++ break;
++ case VM_FAULT_MAJOR:
++ tsk->maj_flt++;
++ break;
++ case VM_FAULT_SIGBUS:
++ return i ? i : -EFAULT;
++ case VM_FAULT_OOM:
++ return i ? i : -ENOMEM;
++ default:
++ BUG();
++ }
++ cond_resched();
++ }
++ if (pages) {
++ pages[i] = page;
++
++ flush_anon_page(vma, page, start);
++ flush_dcache_page(page);
++ }
++ if (vmas)
++ vmas[i] = vma;
++ i++;
++ start += PAGE_SIZE;
++ len--;
++ } while (len && start < vma->vm_end);
++ } while (len);
++ return i;
++}
++EXPORT_SYMBOL(get_user_pages);
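++
++/*
++ * A minimal pinning sketch using only the signature above; the caller
++ * and the uaddr variable are hypothetical:
++ *
++ *	struct page *page;
++ *	int ret;
++ *
++ *	down_read(&current->mm->mmap_sem);
++ *	ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK,
++ *			     1, 1, 0, &page, NULL);
++ *	up_read(&current->mm->mmap_sem);
++ *
++ * On success ret is 1 and the page must eventually be released with
++ * page_cache_release(page).
++ */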
++
++static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd,
++ unsigned long addr, unsigned long end, pgprot_t prot)
++{
++ pte_t *pte;
++ spinlock_t *ptl;
++ int err = 0;
++
++ pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
++ if (!pte)
++ return -EAGAIN;
++ arch_enter_lazy_mmu_mode();
++ do {
++ struct page *page = ZERO_PAGE(addr);
++ pte_t zero_pte = pte_wrprotect(mk_pte(page, prot));
++
++ if (unlikely(!pte_none(*pte))) {
++ err = -EEXIST;
++ pte++;
++ break;
++ }
++ page_cache_get(page);
++ page_add_file_rmap(page);
++ inc_mm_counter(mm, file_rss);
++ set_pte_at(mm, addr, pte, zero_pte);
++ } while (pte++, addr += PAGE_SIZE, addr != end);
++ arch_leave_lazy_mmu_mode();
++ pte_unmap_unlock(pte - 1, ptl);
++ return err;
++}
++
++static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud,
++ unsigned long addr, unsigned long end, pgprot_t prot)
++{
++ pmd_t *pmd;
++ unsigned long next;
++ int err;
++
++ pmd = pmd_alloc(mm, pud, addr);
++ if (!pmd)
++ return -EAGAIN;
++ do {
++ next = pmd_addr_end(addr, end);
++ err = zeromap_pte_range(mm, pmd, addr, next, prot);
++ if (err)
++ break;
++ } while (pmd++, addr = next, addr != end);
++ return err;
++}
++
++static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd,
++ unsigned long addr, unsigned long end, pgprot_t prot)
++{
++ pud_t *pud;
++ unsigned long next;
++ int err;
++
++ pud = pud_alloc(mm, pgd, addr);
++ if (!pud)
++ return -EAGAIN;
++ do {
++ next = pud_addr_end(addr, end);
++ err = zeromap_pmd_range(mm, pud, addr, next, prot);
++ if (err)
++ break;
++ } while (pud++, addr = next, addr != end);
++ return err;
++}
++
++int zeromap_page_range(struct vm_area_struct *vma,
++ unsigned long addr, unsigned long size, pgprot_t prot)
++{
++ pgd_t *pgd;
++ unsigned long next;
++ unsigned long end = addr + size;
++ struct mm_struct *mm = vma->vm_mm;
++ int err;
++
++ BUG_ON(addr >= end);
++ pgd = pgd_offset(mm, addr);
++ flush_cache_range(vma, addr, end);
++ do {
++ next = pgd_addr_end(addr, end);
++ err = zeromap_pud_range(mm, pgd, addr, next, prot);
++ if (err)
++ break;
++ } while (pgd++, addr = next, addr != end);
++ return err;
++}
++
++pte_t * fastcall get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl)
++{
++ pgd_t * pgd = pgd_offset(mm, addr);
++ pud_t * pud = pud_alloc(mm, pgd, addr);
++ if (pud) {
++ pmd_t * pmd = pmd_alloc(mm, pud, addr);
++ if (pmd)
++ return pte_alloc_map_lock(mm, pmd, addr, ptl);
++ }
++ return NULL;
++}
++
++/*
++ * This is the old fallback for page remapping.
++ *
++ * For historical reasons, it only allows reserved pages. Only
++ * old drivers should use this, and they needed to mark their
++ * pages reserved for the old functions anyway.
++ */
++static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *page, pgprot_t prot)
++{
++ int retval;
++ pte_t *pte;
++ spinlock_t *ptl;
++
++ retval = -EINVAL;
++ if (PageAnon(page))
++ goto out;
++ retval = -ENOMEM;
++ flush_dcache_page(page);
++ pte = get_locked_pte(mm, addr, &ptl);
++ if (!pte)
++ goto out;
++ retval = -EBUSY;
++ if (!pte_none(*pte))
++ goto out_unlock;
++
++ /* Ok, finally just insert the thing.. */
++ get_page(page);
++ inc_mm_counter(mm, file_rss);
++ page_add_file_rmap(page);
++ set_pte_at(mm, addr, pte, mk_pte(page, prot));
++
++ retval = 0;
++out_unlock:
++ pte_unmap_unlock(pte, ptl);
++out:
++ return retval;
++}
++
++/**
++ * vm_insert_page - insert single page into user vma
++ * @vma: user vma to map to
++ * @addr: target user address of this page
++ * @page: source kernel page
++ *
++ * This allows drivers to insert individual pages they've allocated
++ * into a user vma.
++ *
++ * The page has to be a nice clean _individual_ kernel allocation.
++ * If you allocate a compound page, you need to have marked it as
++ * such (__GFP_COMP), or manually just split the page up yourself
++ * (see split_page()).
++ *
++ * NOTE! Traditionally this was done with "remap_pfn_range()" which
++ * took an arbitrary page protection parameter. This doesn't allow
++ * that. Your vma protection will have to be set up correctly, which
++ * means that if you want a shared writable mapping, you'd better
++ * ask for a shared writable mapping!
++ *
++ * The page does not need to be reserved.
++ */
++int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page)
++{
++ if (addr < vma->vm_start || addr >= vma->vm_end)
++ return -EFAULT;
++ if (!page_count(page))
++ return -EINVAL;
++ vma->vm_flags |= VM_INSERTPAGE;
++ return insert_page(vma->vm_mm, addr, page, vma->vm_page_prot);
++}
++EXPORT_SYMBOL(vm_insert_page);
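++
++/*
++ * A sketch of the intended use from a driver's ->mmap handler; the
++ * function and its file are hypothetical, only alloc_page() and
++ * vm_insert_page() are real:
++ *
++ *	static int demo_mmap(struct file *file, struct vm_area_struct *vma)
++ *	{
++ *		struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
++ *
++ *		if (!page)
++ *			return -ENOMEM;
++ *		return vm_insert_page(vma, vma->vm_start, page);
++ *	}
++ *
++ * vm_insert_page() takes its own reference on success (see get_page()
++ * in insert_page() above), so the driver keeps and later drops its
++ * original one.
++ */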
++
++/**
++ * vm_insert_pfn - insert single pfn into user vma
++ * @vma: user vma to map to
++ * @addr: target user address of this page
++ * @pfn: source kernel pfn
++ *
++ * Similar to vm_insert_page, this allows drivers to insert individual pages
++ * they've allocated into a user vma. Same comments apply.
++ *
++ * This function should only be called from a vm_ops->fault handler, and
++ * in that case the handler should return NULL.
++ */
++int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
++ unsigned long pfn)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ int retval;
++ pte_t *pte, entry;
++ spinlock_t *ptl;
++
++ BUG_ON(!(vma->vm_flags & VM_PFNMAP));
++ BUG_ON(is_cow_mapping(vma->vm_flags));
++
++ retval = -ENOMEM;
++ pte = get_locked_pte(mm, addr, &ptl);
++ if (!pte)
++ goto out;
++ retval = -EBUSY;
++ if (!pte_none(*pte))
++ goto out_unlock;
++
++ /* Ok, finally just insert the thing.. */
++ entry = pfn_pte(pfn, vma->vm_page_prot);
++ set_pte_at(mm, addr, pte, entry);
++ update_mmu_cache(vma, addr, entry);
++
++ retval = 0;
++out_unlock:
++ pte_unmap_unlock(pte, ptl);
++
++out:
++ return retval;
++}
++EXPORT_SYMBOL(vm_insert_pfn);
++
++/*
++ * Maps a range of physical memory into the requested pages. The old
++ * mappings are removed. Any reference to a nonexistent page results
++ * in a null mapping (currently treated as "copy-on-access").
++ */
++static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
++ unsigned long addr, unsigned long end,
++ unsigned long pfn, pgprot_t prot)
++{
++ pte_t *pte;
++ spinlock_t *ptl;
++
++ pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
++ if (!pte)
++ return -ENOMEM;
++ arch_enter_lazy_mmu_mode();
++ do {
++ BUG_ON(!pte_none(*pte));
++ set_pte_at(mm, addr, pte, pfn_pte(pfn, prot));
++ pfn++;
++ } while (pte++, addr += PAGE_SIZE, addr != end);
++ arch_leave_lazy_mmu_mode();
++ pte_unmap_unlock(pte - 1, ptl);
++ return 0;
++}
++
++static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
++ unsigned long addr, unsigned long end,
++ unsigned long pfn, pgprot_t prot)
++{
++ pmd_t *pmd;
++ unsigned long next;
++
++ pfn -= addr >> PAGE_SHIFT;
++ pmd = pmd_alloc(mm, pud, addr);
++ if (!pmd)
++ return -ENOMEM;
++ do {
++ next = pmd_addr_end(addr, end);
++ if (remap_pte_range(mm, pmd, addr, next,
++ pfn + (addr >> PAGE_SHIFT), prot))
++ return -ENOMEM;
++ } while (pmd++, addr = next, addr != end);
++ return 0;
++}
++
++static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
++ unsigned long addr, unsigned long end,
++ unsigned long pfn, pgprot_t prot)
++{
++ pud_t *pud;
++ unsigned long next;
++
++ pfn -= addr >> PAGE_SHIFT;
++ pud = pud_alloc(mm, pgd, addr);
++ if (!pud)
++ return -ENOMEM;
++ do {
++ next = pud_addr_end(addr, end);
++ if (remap_pmd_range(mm, pud, addr, next,
++ pfn + (addr >> PAGE_SHIFT), prot))
++ return -ENOMEM;
++ } while (pud++, addr = next, addr != end);
++ return 0;
++}
++
++/**
++ * remap_pfn_range - remap kernel memory to userspace
++ * @vma: user vma to map to
++ * @addr: target user address to start at
++ * @pfn: physical address of kernel memory
++ * @size: size of map area
++ * @prot: page protection flags for this mapping
++ *
++ * Note: this is only safe if the mm semaphore is held when called.
++ */
++int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
++ unsigned long pfn, unsigned long size, pgprot_t prot)
++{
++ pgd_t *pgd;
++ unsigned long next;
++ unsigned long end = addr + PAGE_ALIGN(size);
++ struct mm_struct *mm = vma->vm_mm;
++ int err;
++
++ /*
++ * Physically remapped pages are special. Tell the
++ * rest of the world about it:
++ * VM_IO tells people not to look at these pages
++ * (accesses can have side effects).
++ * VM_RESERVED is specified all over the place, because
++ * in 2.4 it kept swapout's vma scan off this vma; but
++ * in 2.6 the LRU scan won't even find its pages, so this
++ * flag means no more than count its pages in reserved_vm,
++ * and omit it from core dump, even when VM_IO turned off.
++ * VM_PFNMAP tells the core MM that the base pages are just
++ * raw PFN mappings, and do not have a "struct page" associated
++ * with them.
++ *
++ * There's a horrible special case to handle copy-on-write
++ * behaviour that some programs depend on. We mark the "original"
++ * un-COW'ed pages by matching them up with "vma->vm_pgoff".
++ */
++ if (is_cow_mapping(vma->vm_flags)) {
++ if (addr != vma->vm_start || end != vma->vm_end)
++ return -EINVAL;
++ vma->vm_pgoff = pfn;
++ }
++
++ vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
++
++ BUG_ON(addr >= end);
++ pfn -= addr >> PAGE_SHIFT;
++ pgd = pgd_offset(mm, addr);
++ flush_cache_range(vma, addr, end);
++ do {
++ next = pgd_addr_end(addr, end);
++ err = remap_pud_range(mm, pgd, addr, next,
++ pfn + (addr >> PAGE_SHIFT), prot);
++ if (err)
++ break;
++ } while (pgd++, addr = next, addr != end);
++ return err;
++}
++EXPORT_SYMBOL(remap_pfn_range);
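++
++/*
++ * A typical call, again from a hypothetical ->mmap handler mapping a
++ * physical address phys over the whole vma; only remap_pfn_range()
++ * itself is real:
++ *
++ *	return remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
++ *			       vma->vm_end - vma->vm_start,
++ *			       vma->vm_page_prot);
++ */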
++
++static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
++ unsigned long addr, unsigned long end,
++ pte_fn_t fn, void *data)
++{
++ pte_t *pte;
++ int err;
++ struct page *pmd_page;
++ spinlock_t *uninitialized_var(ptl);
++
++ pte = (mm == &init_mm) ?
++ pte_alloc_kernel(pmd, addr) :
++ pte_alloc_map_lock(mm, pmd, addr, &ptl);
++ if (!pte)
++ return -ENOMEM;
++
++ BUG_ON(pmd_huge(*pmd));
++
++ pmd_page = pmd_page(*pmd);
++
++ do {
++ err = fn(pte, pmd_page, addr, data);
++ if (err)
++ break;
++ } while (pte++, addr += PAGE_SIZE, addr != end);
++
++ if (mm != &init_mm)
++ pte_unmap_unlock(pte-1, ptl);
++ return err;
++}
++
++static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
++ unsigned long addr, unsigned long end,
++ pte_fn_t fn, void *data)
++{
++ pmd_t *pmd;
++ unsigned long next;
++ int err;
++
++ pmd = pmd_alloc(mm, pud, addr);
++ if (!pmd)
++ return -ENOMEM;
++ do {
++ next = pmd_addr_end(addr, end);
++ err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
++ if (err)
++ break;
++ } while (pmd++, addr = next, addr != end);
++ return err;
++}
++
++static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
++ unsigned long addr, unsigned long end,
++ pte_fn_t fn, void *data)
++{
++ pud_t *pud;
++ unsigned long next;
++ int err;
++
++ pud = pud_alloc(mm, pgd, addr);
++ if (!pud)
++ return -ENOMEM;
++ do {
++ next = pud_addr_end(addr, end);
++ err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
++ if (err)
++ break;
++ } while (pud++, addr = next, addr != end);
++ return err;
++}
++
++/*
++ * Scan a region of virtual memory, filling in page tables as necessary
++ * and calling a provided function on each leaf page table.
++ */
++int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
++ unsigned long size, pte_fn_t fn, void *data)
++{
++ pgd_t *pgd;
++ unsigned long next;
++ unsigned long end = addr + size;
++ int err;
++
++ BUG_ON(addr >= end);
++ pgd = pgd_offset(mm, addr);
++ do {
++ next = pgd_addr_end(addr, end);
++ err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
++ if (err)
++ break;
++ } while (pgd++, addr = next, addr != end);
++ return err;
++}
++EXPORT_SYMBOL_GPL(apply_to_page_range);
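++
++/*
++ * A sketch of a pte_fn_t callback matching the fn(pte, pmd_page, addr,
++ * data) invocation above; the counting body is hypothetical:
++ *
++ *	static int count_present(pte_t *pte, struct page *pmd_page,
++ *				 unsigned long addr, void *data)
++ *	{
++ *		unsigned long *count = data;
++ *
++ *		if (!pte_none(*pte))
++ *			(*count)++;
++ *		return 0;
++ *	}
++ *
++ * wired up as apply_to_page_range(mm, start, size, count_present, &count).
++ */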
++
++/*
++ * handle_pte_fault chooses page fault handler according to an entry
++ * which was read non-atomically. Before making any commitment, on
++ * those architectures or configurations (e.g. i386 with PAE) which
++ * might give a mix of unmatched parts, do_swap_page and do_file_page
++ * must check under lock before unmapping the pte and proceeding
++ * (but do_wp_page is only called after already making such a check;
++ * and do_anonymous_page and do_no_page can safely check later on).
++ */
++static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
++ pte_t *page_table, pte_t orig_pte)
++{
++ int same = 1;
++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
++ if (sizeof(pte_t) > sizeof(unsigned long)) {
++ spinlock_t *ptl = pte_lockptr(mm, pmd);
++ spin_lock(ptl);
++ same = pte_same(*page_table, orig_pte);
++ spin_unlock(ptl);
++ }
++#endif
++ pte_unmap(page_table);
++ return same;
++}
++
++/*
++ * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when
++ * servicing faults for write access. In the normal case, we always want
++ * pte_mkwrite. But get_user_pages can cause write faults for mappings
++ * that do not have writing enabled, when used by access_process_vm.
++ */
++static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
++{
++ if (likely(vma->vm_flags & VM_WRITE))
++ pte = pte_mkwrite(pte);
++ return pte;
++}
++
++static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
++{
++ /*
++ * If the source page was a PFN mapping, we don't have
++ * a "struct page" for it. We do a best-effort copy by
++ * just copying from the original user address. If that
++ * fails, we just zero-fill it. Live with it.
++ */
++ if (unlikely(!src)) {
++ void *kaddr = kmap_atomic(dst, KM_USER0);
++ void __user *uaddr = (void __user *)(va & PAGE_MASK);
++
++ /*
++ * This really shouldn't fail, because the page is there
++ * in the page tables. But it might just be unreadable,
++ * in which case we just give up and fill the result with
++ * zeroes.
++ */
++ if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
++ memset(kaddr, 0, PAGE_SIZE);
++ kunmap_atomic(kaddr, KM_USER0);
++ flush_dcache_page(dst);
++ return;
++ }
++ copy_user_highpage(dst, src, va, vma);
++}
++
++/*
++ * This routine handles present pages, when users try to write
++ * to a shared page. It is done by copying the page to a new address
++ * and decrementing the shared-page counter for the old page.
++ *
++ * Note that this routine assumes that the protection checks have been
++ * done by the caller (the low-level page fault routine in most cases).
++ * Thus we can safely just mark it writable once we've done any necessary
++ * COW.
++ *
++ * We also mark the page dirty at this point even though the page will
++ * change only once the write actually happens. This avoids a few races,
++ * and potentially makes it more efficient.
++ *
++ * We enter with non-exclusive mmap_sem (to exclude vma changes,
++ * but allow concurrent faults), with pte both mapped and locked.
++ * We return with mmap_sem still held, but pte unmapped and unlocked.
++ */
++static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
++ unsigned long address, pte_t *page_table, pmd_t *pmd,
++ spinlock_t *ptl, pte_t orig_pte)
++{
++ struct page *old_page, *new_page;
++ pte_t entry;
++ int reuse = 0, ret = VM_FAULT_MINOR;
++ struct page *dirty_page = NULL;
++
++ old_page = vm_normal_page(vma, address, orig_pte);
++ if (!old_page)
++ goto gotten;
++
++ /*
++ * Take out anonymous pages first; anonymous shared vmas are
++ * not dirty accountable.
++ */
++ if (PageAnon(old_page)) {
++ if (!TestSetPageLocked(old_page)) {
++ reuse = can_share_swap_page(old_page);
++ unlock_page(old_page);
++ }
++ } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
++ (VM_WRITE|VM_SHARED))) {
++ /*
++ * Only catch write-faults on shared writable pages;
++ * read-only shared pages can get COWed by
++ * get_user_pages(.write=1, .force=1).
++ */
++ if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
++ /*
++ * Notify the address space that the page is about to
++ * become writable so that it can prohibit this or wait
++ * for the page to get into an appropriate state.
++ *
++ * We do this without the lock held, so that it can
++ * sleep if it needs to.
++ */
++ page_cache_get(old_page);
++ pte_unmap_unlock(page_table, ptl);
++
++ if (vma->vm_ops->page_mkwrite(vma, old_page) < 0)
++ goto unwritable_page;
++
++ /*
++ * Since we dropped the lock we need to revalidate
++ * the PTE as someone else may have changed it. If
++ * they did, we just return, as we can count on the
++ * MMU to tell us if they didn't also make it writable.
++ */
++ page_table = pte_offset_map_lock(mm, pmd, address,
++ &ptl);
++ page_cache_release(old_page);
++ if (!pte_same(*page_table, orig_pte))
++ goto unlock;
++ }
++ dirty_page = old_page;
++ get_page(dirty_page);
++ reuse = 1;
++ }
++
++ if (reuse) {
++ flush_cache_page(vma, address, pte_pfn(orig_pte));
++ entry = pte_mkyoung(orig_pte);
++ entry = maybe_mkwrite(pte_mkdirty(entry), vma);
++ if (ptep_set_access_flags(vma, address, page_table, entry, 1)) {
++ update_mmu_cache(vma, address, entry);
++ lazy_mmu_prot_update(entry);
++ }
++ ret |= VM_FAULT_WRITE;
++ goto unlock;
++ }
++
++ /*
++ * Ok, we need to copy. Oh, well..
++ */
++ page_cache_get(old_page);
++gotten:
++ pte_unmap_unlock(page_table, ptl);
++
++ if (unlikely(anon_vma_prepare(vma)))
++ goto oom;
++ if (old_page == ZERO_PAGE(address)) {
++ new_page = alloc_zeroed_user_highpage(vma, address);
++ if (!new_page)
++ goto oom;
++ } else {
++ new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
++ if (!new_page)
++ goto oom;
++ cow_user_page(new_page, old_page, address, vma);
++ }
++
++ /*
++ * Re-check the pte - we dropped the lock
++ */
++ page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
++ if (likely(pte_same(*page_table, orig_pte))) {
++ if (old_page) {
++ page_remove_rmap(old_page, vma);
++ if (!PageAnon(old_page)) {
++ dec_mm_counter(mm, file_rss);
++ inc_mm_counter(mm, anon_rss);
++ }
++ } else
++ inc_mm_counter(mm, anon_rss);
++ flush_cache_page(vma, address, pte_pfn(orig_pte));
++ entry = mk_pte(new_page, vma->vm_page_prot);
++ entry = maybe_mkwrite(pte_mkdirty(entry), vma);
++ lazy_mmu_prot_update(entry);
++ /*
++ * Clear the pte entry and flush it first, before updating the
++ * pte with the new entry. This will avoid a race condition
++ * seen in the presence of one thread doing SMC and another
++ * thread doing COW.
++ */
++ ptep_clear_flush(vma, address, page_table);
++ set_pte_at(mm, address, page_table, entry);
++ update_mmu_cache(vma, address, entry);
++ lru_cache_add_active(new_page);
++ page_add_new_anon_rmap(new_page, vma, address);
++
++ /* Free the old page.. */
++ new_page = old_page;
++ ret |= VM_FAULT_WRITE;
++ }
++ if (new_page)
++ page_cache_release(new_page);
++ if (old_page)
++ page_cache_release(old_page);
++unlock:
++ pte_unmap_unlock(page_table, ptl);
++ if (dirty_page) {
++ set_page_dirty_balance(dirty_page);
++ put_page(dirty_page);
++ }
++ return ret;
++oom:
++ if (old_page)
++ page_cache_release(old_page);
++ return VM_FAULT_OOM;
++
++unwritable_page:
++ page_cache_release(old_page);
++ return VM_FAULT_SIGBUS;
++}
++
++/*
++ * Helper functions for unmap_mapping_range().
++ *
++ * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __
++ *
++ * We have to restart searching the prio_tree whenever we drop the lock,
++ * since the iterator is only valid while the lock is held, and anyway
++ * a later vma might be split and reinserted earlier while lock dropped.
++ *
++ * The list of nonlinear vmas could be handled more efficiently, using
++ * a placeholder, but handle it in the same way until a need is shown.
++ * It is important to search the prio_tree before nonlinear list: a vma
++ * may become nonlinear and be shifted from prio_tree to nonlinear list
++ * while the lock is dropped; but never shifted from list to prio_tree.
++ *
++ * In order to make forward progress despite restarting the search,
++ * vm_truncate_count is used to mark a vma as now dealt with, so we can
++ * quickly skip it next time around. Since the prio_tree search only
++ * shows us those vmas affected by unmapping the range in question, we
++ * can't efficiently keep all vmas in step with mapping->truncate_count:
++ * so instead reset them all whenever it wraps back to 0 (then go to 1).
++ * mapping->truncate_count and vma->vm_truncate_count are protected by
++ * i_mmap_lock.
++ *
++ * In order to make forward progress despite repeatedly restarting some
++ * large vma, note the restart_addr from unmap_vmas when it breaks out:
++ * and restart from that address when we reach that vma again. It might
++ * have been split or merged, shrunk or extended, but never shifted: so
++ * restart_addr remains valid so long as it remains in the vma's range.
++ * unmap_mapping_range forces truncate_count to leap over page-aligned
++ * values so we can save vma's restart_addr in its truncate_count field.
++ */
++#define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK))
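++
++/*
++ * Worked example (editor's note), assuming PAGE_SIZE 4096: restart
++ * addresses saved from zap_page_range are page aligned, so their low 12
++ * bits are clear and is_restart_addr() is true. unmap_mapping_range
++ * below bumps truncate_count past any page-aligned value (4096 becomes
++ * 4097), so a live truncate_count never looks like a restart address.
++ */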
++
++static void reset_vma_truncate_counts(struct address_space *mapping)
++{
++ struct vm_area_struct *vma;
++ struct prio_tree_iter iter;
++
++ vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX)
++ vma->vm_truncate_count = 0;
++ list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
++ vma->vm_truncate_count = 0;
++}
++
++static int unmap_mapping_range_vma(struct vm_area_struct *vma,
++ unsigned long start_addr, unsigned long end_addr,
++ struct zap_details *details)
++{
++ unsigned long restart_addr;
++ int need_break;
++
++again:
++ restart_addr = vma->vm_truncate_count;
++ if (is_restart_addr(restart_addr) && start_addr < restart_addr) {
++ start_addr = restart_addr;
++ if (start_addr >= end_addr) {
++ /* Top of vma has been split off since last time */
++ vma->vm_truncate_count = details->truncate_count;
++ return 0;
++ }
++ }
++
++ restart_addr = zap_page_range(vma, start_addr,
++ end_addr - start_addr, details);
++ need_break = need_resched() ||
++ need_lockbreak(details->i_mmap_lock);
++
++ if (restart_addr >= end_addr) {
++ /* We have now completed this vma: mark it so */
++ vma->vm_truncate_count = details->truncate_count;
++ if (!need_break)
++ return 0;
++ } else {
++ /* Note restart_addr in vma's truncate_count field */
++ vma->vm_truncate_count = restart_addr;
++ if (!need_break)
++ goto again;
++ }
++
++ spin_unlock(details->i_mmap_lock);
++ cond_resched();
++ spin_lock(details->i_mmap_lock);
++ return -EINTR;
++}
++
++static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
++ struct zap_details *details)
++{
++ struct vm_area_struct *vma;
++ struct prio_tree_iter iter;
++ pgoff_t vba, vea, zba, zea;
++
++restart:
++ vma_prio_tree_foreach(vma, &iter, root,
++ details->first_index, details->last_index) {
++ /* Skip quickly over those we have already dealt with */
++ if (vma->vm_truncate_count == details->truncate_count)
++ continue;
++
++ vba = vma->vm_pgoff;
++ vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
++ /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
++ zba = details->first_index;
++ if (zba < vba)
++ zba = vba;
++ zea = details->last_index;
++ if (zea > vea)
++ zea = vea;
++
++ if (unmap_mapping_range_vma(vma,
++ ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
++ ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
++ details) < 0)
++ goto restart;
++ }
++}
++
++static inline void unmap_mapping_range_list(struct list_head *head,
++ struct zap_details *details)
++{
++ struct vm_area_struct *vma;
++
++ /*
++ * In nonlinear VMAs there is no correspondence between virtual address
++ * offset and file offset. So we must perform an exhaustive search
++ * across *all* the pages in each nonlinear VMA, not just the pages
++ * whose virtual address lies outside the file truncation point.
++ */
++restart:
++ list_for_each_entry(vma, head, shared.vm_set.list) {
++ /* Skip quickly over those we have already dealt with */
++ if (vma->vm_truncate_count == details->truncate_count)
++ continue;
++ details->nonlinear_vma = vma;
++ if (unmap_mapping_range_vma(vma, vma->vm_start,
++ vma->vm_end, details) < 0)
++ goto restart;
++ }
++}
++
++/**
++ * unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file.
++ * @mapping: the address space containing mmaps to be unmapped.
++ * @holebegin: byte in first page to unmap, relative to the start of
++ * the underlying file. This will be rounded down to a PAGE_SIZE
++ * boundary. Note that this is different from vmtruncate(), which
++ * must keep the partial page. In contrast, we must get rid of
++ * partial pages.
++ * @holelen: size of prospective hole in bytes. This will be rounded
++ * up to a PAGE_SIZE boundary. A holelen of zero truncates to the
++ * end of the file.
++ * @even_cows: 1 when truncating a file, unmap even private COWed pages;
++ * but 0 when invalidating pagecache, don't throw away private data.
++ */
++void unmap_mapping_range(struct address_space *mapping,
++ loff_t const holebegin, loff_t const holelen, int even_cows)
++{
++ struct zap_details details;
++ pgoff_t hba = holebegin >> PAGE_SHIFT;
++ pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
++
++ /* Check for overflow. */
++ if (sizeof(holelen) > sizeof(hlen)) {
++ long long holeend =
++ (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ if (holeend & ~(long long)ULONG_MAX)
++ hlen = ULONG_MAX - hba + 1;
++ }
++
++ details.check_mapping = even_cows ? NULL : mapping;
++ details.nonlinear_vma = NULL;
++ details.first_index = hba;
++ details.last_index = hba + hlen - 1;
++ if (details.last_index < details.first_index)
++ details.last_index = ULONG_MAX;
++ details.i_mmap_lock = &mapping->i_mmap_lock;
++
++ spin_lock(&mapping->i_mmap_lock);
++
++ /* serialize i_size write against truncate_count write */
++ smp_wmb();
++ /* Protect against page faults, and endless unmapping loops */
++ mapping->truncate_count++;
++ /*
++ * For archs where spin_lock has inclusive semantics, like ia64,
++ * this smp_mb() prevents page table contents from being read
++ * before the truncate_count increment is visible to other cpus.
++ */
++ smp_mb();
++ if (unlikely(is_restart_addr(mapping->truncate_count))) {
++ if (mapping->truncate_count == 0)
++ reset_vma_truncate_counts(mapping);
++ mapping->truncate_count++;
++ }
++ details.truncate_count = mapping->truncate_count;
++
++ if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
++ unmap_mapping_range_tree(&mapping->i_mmap, &details);
++ if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
++ unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
++ spin_unlock(&mapping->i_mmap_lock);
++}
++EXPORT_SYMBOL(unmap_mapping_range);
++
++/**
++ * vmtruncate - unmap mappings "freed" by truncate() syscall
++ * @inode: inode of the file used
++ * @offset: file offset to start truncating
++ *
++ * NOTE! We have to be ready to update the memory sharing
++ * between the file and the memory map for a potential last
++ * incomplete page. Ugly, but necessary.
++ */
++int vmtruncate(struct inode * inode, loff_t offset)
++{
++ struct address_space *mapping = inode->i_mapping;
++ unsigned long limit;
++
++ if (inode->i_size < offset)
++ goto do_expand;
++ /*
++ * truncation of in-use swapfiles is disallowed - it would cause
++ * subsequent swapout to scribble on the now-freed blocks.
++ */
++ if (IS_SWAPFILE(inode))
++ goto out_busy;
++ i_size_write(inode, offset);
++ unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
++ truncate_inode_pages(mapping, offset);
++ goto out_truncate;
++
++do_expand:
++ limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
++ if (limit != RLIM_INFINITY && offset > limit)
++ goto out_sig;
++ if (offset > inode->i_sb->s_maxbytes)
++ goto out_big;
++ i_size_write(inode, offset);
++
++out_truncate:
++ if (inode->i_op && inode->i_op->truncate)
++ inode->i_op->truncate(inode);
++ return 0;
++out_sig:
++ send_sig(SIGXFSZ, current, 0);
++out_big:
++ return -EFBIG;
++out_busy:
++ return -ETXTBSY;
++}
++EXPORT_SYMBOL(vmtruncate);
++
++int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
++{
++ struct address_space *mapping = inode->i_mapping;
++
++ /*
++ * If the underlying filesystem is not going to provide
++ * a way to truncate a range of blocks (punch a hole) -
++ * we should return failure right now.
++ */
++ if (!inode->i_op || !inode->i_op->truncate_range)
++ return -ENOSYS;
++
++ mutex_lock(&inode->i_mutex);
++ down_write(&inode->i_alloc_sem);
++ unmap_mapping_range(mapping, offset, (end - offset), 1);
++ truncate_inode_pages_range(mapping, offset, end);
++ inode->i_op->truncate_range(inode, offset, end);
++ up_write(&inode->i_alloc_sem);
++ mutex_unlock(&inode->i_mutex);
++
++ return 0;
++}
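++
++/*
++ * Current caller (editor's note): madvise(MADV_REMOVE) reaches this via
++ * madvise_remove(); in 2.6.22 tmpfs is the main filesystem providing
++ * ->truncate_range, so hole punching elsewhere fails with -ENOSYS above.
++ */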
++
++/**
++ * swapin_readahead - swap in pages in hope we need them soon
++ * @entry: swap entry of this memory
++ * @addr: address to start
++ * @vma: user vma this address belongs to
++ *
++ * Primitive swap readahead code. We simply read an aligned block of
++ * (1 << page_cluster) entries in the swap area. This method is chosen
++ * because it doesn't cost us any seek time. We also make sure to queue
++ * the 'original' request together with the readahead ones...
++ *
++ * This has been extended to use the NUMA policies from the mm triggering
++ * the readahead.
++ *
++ * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
++ */
++void swapin_readahead(swp_entry_t entry, unsigned long addr,
++ struct vm_area_struct *vma)
++{
++#ifdef CONFIG_NUMA
++ struct vm_area_struct *next_vma = vma ? vma->vm_next : NULL;
++#endif
++ int i, num;
++ struct page *new_page;
++ unsigned long offset;
++
++ /*
++ * Get the number of swap handles we should read ahead.
++ */
++ num = valid_swaphandles(entry, &offset);
++ for (i = 0; i < num; offset++, i++) {
++ /* Ok, do the async read-ahead now */
++ new_page = read_swap_cache_async(swp_entry(swp_type(entry),
++ offset), vma, addr);
++ if (!new_page)
++ break;
++ page_cache_release(new_page);
++#ifdef CONFIG_NUMA
++ /*
++ * Find the next applicable VMA for the NUMA policy.
++ */
++ addr += PAGE_SIZE;
++ if (addr == 0)
++ vma = NULL;
++ if (vma) {
++ if (addr >= vma->vm_end) {
++ vma = next_vma;
++ next_vma = vma ? vma->vm_next : NULL;
++ }
++ if (vma && addr < vma->vm_start)
++ vma = NULL;
++ } else {
++ if (next_vma && addr >= next_vma->vm_start) {
++ vma = next_vma;
++ next_vma = vma->vm_next;
++ }
++ }
++#endif
++ }
++ lru_add_drain(); /* Push any new pages onto the LRU now */
++}
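++
++/*
++ * Example window (editor's note): with page_cluster = 3 the block is
++ * 1 << 3 = 8 entries aligned in the swap area, so a fault at offset 21
++ * typically reads offsets 16..23, the target queued with its neighbours
++ * (valid_swaphandles may shrink the window at swap-area boundaries).
++ */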
++
++/*
++ * We enter with non-exclusive mmap_sem (to exclude vma changes,
++ * but allow concurrent faults), and pte mapped but not yet locked.
++ * We return with mmap_sem still held, but pte unmapped and unlocked.
++ */
++static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
++ unsigned long address, pte_t *page_table, pmd_t *pmd,
++ int write_access, pte_t orig_pte)
++{
++ spinlock_t *ptl;
++ struct page *page;
++ swp_entry_t entry;
++ pte_t pte;
++ int ret = VM_FAULT_MINOR;
++
++ if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
++ goto out;
++
++ entry = pte_to_swp_entry(orig_pte);
++ if (is_migration_entry(entry)) {
++ migration_entry_wait(mm, pmd, address);
++ goto out;
++ }
++ delayacct_set_flag(DELAYACCT_PF_SWAPIN);
++ page = lookup_swap_cache(entry);
++ if (!page) {
++ grab_swap_token(); /* Contend for token _before_ read-in */
++ swapin_readahead(entry, address, vma);
++ page = read_swap_cache_async(entry, vma, address);
++ if (!page) {
++ /*
++ * Back out if somebody else faulted in this pte
++ * while we released the pte lock.
++ */
++ page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
++ if (likely(pte_same(*page_table, orig_pte)))
++ ret = VM_FAULT_OOM;
++ delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
++ goto unlock;
++ }
++
++ /* Had to read the page from swap area: Major fault */
++ ret = VM_FAULT_MAJOR;
++ count_vm_event(PGMAJFAULT);
++ }
++
++ if (!vx_rss_avail(mm, 1)) {
++ ret = VM_FAULT_OOM;
++ /* drop the swapin flag and the page reference taken above */
++ delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
++ page_cache_release(page);
++ goto out;
++ }
++
++ delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
++ mark_page_accessed(page);
++ lock_page(page);
++
++ /*
++ * Back out if somebody else already faulted in this pte.
++ */
++ page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
++ if (unlikely(!pte_same(*page_table, orig_pte)))
++ goto out_nomap;
++
++ if (unlikely(!PageUptodate(page))) {
++ ret = VM_FAULT_SIGBUS;
++ goto out_nomap;
++ }
++
++ /* The page isn't present yet, go ahead with the fault. */
++
++ inc_mm_counter(mm, anon_rss);
++ pte = mk_pte(page, vma->vm_page_prot);
++ if (write_access && can_share_swap_page(page)) {
++ pte = maybe_mkwrite(pte_mkdirty(pte), vma);
++ write_access = 0;
++ }
++
++ flush_icache_page(vma, page);
++ set_pte_at(mm, address, page_table, pte);
++ page_add_anon_rmap(page, vma, address);
++
++ swap_free(entry);
++ if (vm_swap_full())
++ remove_exclusive_swap_page(page);
++ unlock_page(page);
++
++ if (write_access) {
++ if (do_wp_page(mm, vma, address,
++ page_table, pmd, ptl, pte) == VM_FAULT_OOM)
++ ret = VM_FAULT_OOM;
++ goto out;
++ }
++
++ /* No need to invalidate - it was non-present before */
++ update_mmu_cache(vma, address, pte);
++ lazy_mmu_prot_update(pte);
++unlock:
++ pte_unmap_unlock(page_table, ptl);
++out:
++ return ret;
++out_nomap:
++ pte_unmap_unlock(page_table, ptl);
++ unlock_page(page);
++ page_cache_release(page);
++ return ret;
++}
++
++/*
++ * We enter with non-exclusive mmap_sem (to exclude vma changes,
++ * but allow concurrent faults), and pte mapped but not yet locked.
++ * We return with mmap_sem still held, but pte unmapped and unlocked.
++ */
++static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
++ unsigned long address, pte_t *page_table, pmd_t *pmd,
++ int write_access)
++{
++ struct page *page;
++ spinlock_t *ptl;
++ pte_t entry;
++
++ if (write_access) {
++ /* Allocate our own private page. */
++ pte_unmap(page_table);
++
++ if (!vx_rss_avail(mm, 1))
++ goto oom;
++ if (unlikely(anon_vma_prepare(vma)))
++ goto oom;
++ page = alloc_zeroed_user_highpage(vma, address);
++ if (!page)
++ goto oom;
++
++ entry = mk_pte(page, vma->vm_page_prot);
++ entry = maybe_mkwrite(pte_mkdirty(entry), vma);
++
++ page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
++ if (!pte_none(*page_table))
++ goto release;
++ inc_mm_counter(mm, anon_rss);
++ lru_cache_add_active(page);
++ page_add_new_anon_rmap(page, vma, address);
++ } else {
++ /* Map the ZERO_PAGE - vm_page_prot is readonly */
++ page = ZERO_PAGE(address);
++ page_cache_get(page);
++ entry = mk_pte(page, vma->vm_page_prot);
++
++ ptl = pte_lockptr(mm, pmd);
++ spin_lock(ptl);
++ if (!pte_none(*page_table))
++ goto release;
++ inc_mm_counter(mm, file_rss);
++ page_add_file_rmap(page);
++ }
++
++ set_pte_at(mm, address, page_table, entry);
++
++ /* No need to invalidate - it was non-present before */
++ update_mmu_cache(vma, address, entry);
++ lazy_mmu_prot_update(entry);
++unlock:
++ pte_unmap_unlock(page_table, ptl);
++ return VM_FAULT_MINOR;
++release:
++ page_cache_release(page);
++ goto unlock;
++oom:
++ return VM_FAULT_OOM;
++}
++
++/*
++ * do_no_page() tries to create a new page mapping. It aggressively
++ * tries to share with existing pages, but makes a separate copy if
++ * the "write_access" parameter is true in order to avoid the next
++ * page fault.
++ *
++ * As this is called only for pages that do not currently exist, we
++ * do not need to flush old virtual caches or the TLB.
++ *
++ * We enter with non-exclusive mmap_sem (to exclude vma changes,
++ * but allow concurrent faults), and pte mapped but not yet locked.
++ * We return with mmap_sem still held, but pte unmapped and unlocked.
++ */
++static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
++ unsigned long address, pte_t *page_table, pmd_t *pmd,
++ int write_access)
++{
++ spinlock_t *ptl;
++ struct page *new_page;
++ struct address_space *mapping = NULL;
++ pte_t entry;
++ unsigned int sequence = 0;
++ int ret = VM_FAULT_MINOR;
++ int anon = 0;
++ struct page *dirty_page = NULL;
++
++ pte_unmap(page_table);
++ BUG_ON(vma->vm_flags & VM_PFNMAP);
++
++ if (!vx_rss_avail(mm, 1))
++ return VM_FAULT_OOM;
++
++ if (vma->vm_file) {
++ mapping = vma->vm_file->f_mapping;
++ sequence = mapping->truncate_count;
++ smp_rmb(); /* serializes i_size against truncate_count */
++ }
++retry:
++ new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
++ /*
++ * No smp_rmb is needed here as long as there's a full
++ * spin_lock/unlock sequence inside the ->nopage callback
++ * (for the pagecache lookup) that acts as an implicit
++ * smp_mb() and prevents the i_size read from happening
++ * after the next truncate_count read.
++ */
++
++ /* no page was available -- either SIGBUS, OOM or REFAULT */
++ if (unlikely(new_page == NOPAGE_SIGBUS))
++ return VM_FAULT_SIGBUS;
++ else if (unlikely(new_page == NOPAGE_OOM))
++ return VM_FAULT_OOM;
++ else if (unlikely(new_page == NOPAGE_REFAULT))
++ return VM_FAULT_MINOR;
++
++ /*
++ * Should we do an early C-O-W break?
++ */
++ if (write_access) {
++ if (!(vma->vm_flags & VM_SHARED)) {
++ struct page *page;
++
++ if (unlikely(anon_vma_prepare(vma)))
++ goto oom;
++ page = alloc_page_vma(GFP_HIGHUSER, vma, address);
++ if (!page)
++ goto oom;
++ copy_user_highpage(page, new_page, address, vma);
++ page_cache_release(new_page);
++ new_page = page;
++ anon = 1;
++
++ } else {
++ /* if the page will be shareable, see if the backing
++ * address space wants to know that the page is about
++ * to become writable */
++ if (vma->vm_ops->page_mkwrite &&
++ vma->vm_ops->page_mkwrite(vma, new_page) < 0) {
++ page_cache_release(new_page);
++ return VM_FAULT_SIGBUS;
++ }
++ }
++ }
++
++ page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
++ /*
++ * For a file-backed vma, someone could have truncated or otherwise
++ * invalidated this page. If unmap_mapping_range got called,
++ * retry getting the page.
++ */
++ if (mapping && unlikely(sequence != mapping->truncate_count)) {
++ pte_unmap_unlock(page_table, ptl);
++ page_cache_release(new_page);
++ cond_resched();
++ sequence = mapping->truncate_count;
++ smp_rmb();
++ goto retry;
++ }
++
++ /*
++ * This silly early PAGE_DIRTY setting removes a race
++ * due to the bad i386 page protection. But it's valid
++ * for other architectures too.
++ *
++ * Note that if write_access is true, we either now have
++ * an exclusive copy of the page, or this is a shared mapping,
++ * so we can make it writable and dirty to avoid having to
++ * handle that later.
++ */
++ /* Only go through if we didn't race with anybody else... */
++ if (pte_none(*page_table)) {
++ flush_icache_page(vma, new_page);
++ entry = mk_pte(new_page, vma->vm_page_prot);
++ if (write_access)
++ entry = maybe_mkwrite(pte_mkdirty(entry), vma);
++ set_pte_at(mm, address, page_table, entry);
++ if (anon) {
++ inc_mm_counter(mm, anon_rss);
++ lru_cache_add_active(new_page);
++ page_add_new_anon_rmap(new_page, vma, address);
++ } else {
++ inc_mm_counter(mm, file_rss);
++ page_add_file_rmap(new_page);
++ if (write_access) {
++ dirty_page = new_page;
++ get_page(dirty_page);
++ }
++ }
++ } else {
++ /* One of our sibling threads was faster, back out. */
++ page_cache_release(new_page);
++ goto unlock;
++ }
++
++ /* no need to invalidate: a not-present page shouldn't be cached */
++ update_mmu_cache(vma, address, entry);
++ lazy_mmu_prot_update(entry);
++unlock:
++ pte_unmap_unlock(page_table, ptl);
++ if (dirty_page) {
++ set_page_dirty_balance(dirty_page);
++ put_page(dirty_page);
++ }
++ return ret;
++oom:
++ page_cache_release(new_page);
++ return VM_FAULT_OOM;
++}
++
++/*
++ * do_no_pfn() tries to create a new page mapping for a page without
++ * a struct page backing it.
++ *
++ * As this is called only for pages that do not currently exist, we
++ * do not need to flush old virtual caches or the TLB.
++ *
++ * We enter with non-exclusive mmap_sem (to exclude vma changes,
++ * but allow concurrent faults), and pte mapped but not yet locked.
++ * We return with mmap_sem still held, but pte unmapped and unlocked.
++ *
++ * It is expected that the ->nopfn handler always returns the same pfn
++ * for a given virtual mapping.
++ *
++ * Mark this `noinline' to prevent it from bloating the main pagefault code.
++ */
++static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma,
++ unsigned long address, pte_t *page_table, pmd_t *pmd,
++ int write_access)
++{
++ spinlock_t *ptl;
++ pte_t entry;
++ unsigned long pfn;
++ int ret = VM_FAULT_MINOR;
++
++ pte_unmap(page_table);
++ BUG_ON(!(vma->vm_flags & VM_PFNMAP));
++ BUG_ON(is_cow_mapping(vma->vm_flags));
++
++ pfn = vma->vm_ops->nopfn(vma, address & PAGE_MASK);
++ if (unlikely(pfn == NOPFN_OOM))
++ return VM_FAULT_OOM;
++ else if (unlikely(pfn == NOPFN_SIGBUS))
++ return VM_FAULT_SIGBUS;
++ else if (unlikely(pfn == NOPFN_REFAULT))
++ return VM_FAULT_MINOR;
++
++ page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
++
++ /* Only go through if we didn't race with anybody else... */
++ if (pte_none(*page_table)) {
++ entry = pfn_pte(pfn, vma->vm_page_prot);
++ if (write_access)
++ entry = maybe_mkwrite(pte_mkdirty(entry), vma);
++ set_pte_at(mm, address, page_table, entry);
++ }
++ pte_unmap_unlock(page_table, ptl);
++ return ret;
++}
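++
++/*
++ * Illustrative ->nopfn handler (editor's sketch, not from this patch):
++ * a driver exposing a physically contiguous buffer might resolve faults
++ * like this; mydrv_buf and its fields are hypothetical.
++ */
++#if 0
++static unsigned long mydrv_nopfn(struct vm_area_struct *vma,
++ unsigned long address)
++{
++ struct mydrv_buf *buf = vma->vm_private_data;
++ unsigned long pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
++
++ if (pgoff >= buf->nr_pages)
++ return NOPFN_SIGBUS;
++ /* do_no_pfn expects a stable pfn for a given address */
++ return (buf->phys_base >> PAGE_SHIFT) + pgoff;
++}
++#endif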
++
++/*
++ * Fault of a previously existing named mapping. Repopulate the pte
++ * from the encoded file_pte if possible. This enables swappable
++ * nonlinear vmas.
++ *
++ * We enter with non-exclusive mmap_sem (to exclude vma changes,
++ * but allow concurrent faults), and pte mapped but not yet locked.
++ * We return with mmap_sem still held, but pte unmapped and unlocked.
++ */
++static int do_file_page(struct mm_struct *mm, struct vm_area_struct *vma,
++ unsigned long address, pte_t *page_table, pmd_t *pmd,
++ int write_access, pte_t orig_pte)
++{
++ pgoff_t pgoff;
++ int err;
++
++ if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
++ return VM_FAULT_MINOR;
++
++ if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
++ /*
++ * Page table corrupted: show pte and kill process.
++ */
++ print_bad_pte(vma, orig_pte, address);
++ return VM_FAULT_OOM;
++ }
++ /* We can then assume vma->vm_ops && vma->vm_ops->populate */
++
++ pgoff = pte_to_pgoff(orig_pte);
++ err = vma->vm_ops->populate(vma, address & PAGE_MASK, PAGE_SIZE,
++ vma->vm_page_prot, pgoff, 0);
++ if (err == -ENOMEM)
++ return VM_FAULT_OOM;
++ if (err)
++ return VM_FAULT_SIGBUS;
++ return VM_FAULT_MAJOR;
++}
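++
++/*
++ * How such ptes arise (editor's note): userspace builds a nonlinear vma
++ * with remap_file_pages(2), e.g.
++ * addr = mmap(NULL, 4 * pgsz, PROT_READ, MAP_SHARED, fd, 0);
++ * remap_file_pages(addr, pgsz, 0, 3, 0); rebinds page 0 to offset 3.
++ * When such a page is later unmapped, its file offset is encoded in the
++ * pte (pte_file) so this handler can repopulate it on the next fault.
++ */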
++
++/*
++ * These routines also need to handle stuff like marking pages dirty
++ * and/or accessed for architectures that don't do it in hardware (most
++ * RISC architectures). The early dirtying is also good on the i386.
++ *
++ * There is also a hook called "update_mmu_cache()" that architectures
++ * with external mmu caches can use to update those (i.e. the Sparc or
++ * PowerPC hashed page tables that act as extended TLBs).
++ *
++ * We enter with non-exclusive mmap_sem (to exclude vma changes,
++ * but allow concurrent faults), and pte mapped but not yet locked.
++ * We return with mmap_sem still held, but pte unmapped and unlocked.
++ */
++static inline int handle_pte_fault(struct mm_struct *mm,
++ struct vm_area_struct *vma, unsigned long address,
++ pte_t *pte, pmd_t *pmd, int write_access)
++{
++ pte_t entry;
++ spinlock_t *ptl;
++ int ret, type = VXPT_UNKNOWN;
++
++ entry = *pte;
++ if (!pte_present(entry)) {
++ if (pte_none(entry)) {
++ if (vma->vm_ops) {
++ if (vma->vm_ops->nopage)
++ return do_no_page(mm, vma, address,
++ pte, pmd,
++ write_access);
++ if (unlikely(vma->vm_ops->nopfn))
++ return do_no_pfn(mm, vma, address, pte,
++ pmd, write_access);
++ }
++ return do_anonymous_page(mm, vma, address,
++ pte, pmd, write_access);
++ }
++ if (pte_file(entry))
++ return do_file_page(mm, vma, address,
++ pte, pmd, write_access, entry);
++ return do_swap_page(mm, vma, address,
++ pte, pmd, write_access, entry);
++ }
++
++ ptl = pte_lockptr(mm, pmd);
++ spin_lock(ptl);
++ if (unlikely(!pte_same(*pte, entry)))
++ goto unlock;
++ if (write_access) {
++ if (!pte_write(entry)) {
++ ret = do_wp_page(mm, vma, address,
++ pte, pmd, ptl, entry);
++ type = VXPT_WRITE;
++ goto out;
++ }
++ entry = pte_mkdirty(entry);
++ }
++ entry = pte_mkyoung(entry);
++ if (ptep_set_access_flags(vma, address, pte, entry, write_access)) {
++ update_mmu_cache(vma, address, entry);
++ lazy_mmu_prot_update(entry);
++ } else {
++ /*
++ * This is needed only for protection faults but the arch code
++ * is not yet telling us if this is a protection fault or not.
++ * This still avoids useless tlb flushes for .text page faults
++ * with threads.
++ */
++ if (write_access)
++ flush_tlb_page(vma, address);
++ }
++unlock:
++ pte_unmap_unlock(pte, ptl);
++ ret = VM_FAULT_MINOR;
++out:
++ vx_page_fault(mm, vma, type, ret);
++ return ret;
++}
++
++/*
++ * By the time we get here, we already hold the mm semaphore
++ */
++int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
++ unsigned long address, int write_access)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++
++ __set_current_state(TASK_RUNNING);
++
++ count_vm_event(PGFAULT);
++
++ if (unlikely(is_vm_hugetlb_page(vma)))
++ return hugetlb_fault(mm, vma, address, write_access);
++
++ pgd = pgd_offset(mm, address);
++ pud = pud_alloc(mm, pgd, address);
++ if (!pud)
++ return VM_FAULT_OOM;
++ pmd = pmd_alloc(mm, pud, address);
++ if (!pmd)
++ return VM_FAULT_OOM;
++ pte = pte_alloc_map(mm, pmd, address);
++ if (!pte)
++ return VM_FAULT_OOM;
++
++ return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
++}
++
++EXPORT_SYMBOL_GPL(__handle_mm_fault);
++
++#ifndef __PAGETABLE_PUD_FOLDED
++/*
++ * Allocate page upper directory.
++ * We've already handled the fast-path in-line.
++ */
++int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
++{
++ pud_t *new = pud_alloc_one(mm, address);
++ if (!new)
++ return -ENOMEM;
++
++ spin_lock(&mm->page_table_lock);
++ if (pgd_present(*pgd)) /* Another has populated it */
++ pud_free(new);
++ else
++ pgd_populate(mm, pgd, new);
++ spin_unlock(&mm->page_table_lock);
++ return 0;
++}
++#endif /* __PAGETABLE_PUD_FOLDED */
++
++#ifndef __PAGETABLE_PMD_FOLDED
++/*
++ * Allocate page middle directory.
++ * We've already handled the fast-path in-line.
++ */
++int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
++{
++ pmd_t *new = pmd_alloc_one(mm, address);
++ if (!new)
++ return -ENOMEM;
++
++ spin_lock(&mm->page_table_lock);
++#ifndef __ARCH_HAS_4LEVEL_HACK
++ if (pud_present(*pud)) /* Another has populated it */
++ pmd_free(new);
++ else
++ pud_populate(mm, pud, new);
++#else
++ if (pgd_present(*pud)) /* Another has populated it */
++ pmd_free(new);
++ else
++ pgd_populate(mm, pud, new);
++#endif /* __ARCH_HAS_4LEVEL_HACK */
++ spin_unlock(&mm->page_table_lock);
++ return 0;
++}
++#endif /* __PAGETABLE_PMD_FOLDED */
++
++int make_pages_present(unsigned long addr, unsigned long end)
++{
++ int ret, len, write;
++ struct vm_area_struct *vma;
++
++ vma = find_vma(current->mm, addr);
++ if (!vma)
++ return -1;
++ write = (vma->vm_flags & VM_WRITE) != 0;
++ BUG_ON(addr >= end);
++ BUG_ON(end > vma->vm_end);
++ len = (end + PAGE_SIZE - 1) / PAGE_SIZE - addr / PAGE_SIZE;
++ ret = get_user_pages(current, current->mm, addr,
++ len, write, 0, NULL, NULL);
++ if (ret < 0)
++ return ret;
++ return ret == len ? 0 : -1;
++}
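++
++/*
++ * Typical caller (editor's note): mlock() ends up here; once VM_LOCKED
++ * is set on a vma, make_pages_present(start, end) faults the whole range
++ * in through get_user_pages so the pages are resident before mlock
++ * returns.
++ */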
++
++/*
++ * Map a vmalloc()-space virtual address to the physical page.
++ */
++struct page *vmalloc_to_page(void *vmalloc_addr)
++{
++ unsigned long addr = (unsigned long) vmalloc_addr;
++ struct page *page = NULL;
++ pgd_t *pgd = pgd_offset_k(addr);
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *ptep, pte;
++
++ if (!pgd_none(*pgd)) {
++ pud = pud_offset(pgd, addr);
++ if (!pud_none(*pud)) {
++ pmd = pmd_offset(pud, addr);
++ if (!pmd_none(*pmd)) {
++ ptep = pte_offset_map(pmd, addr);
++ pte = *ptep;
++ if (pte_present(pte))
++ page = pte_page(pte);
++ pte_unmap(ptep);
++ }
++ }
++ }
++ return page;
++}
++
++EXPORT_SYMBOL(vmalloc_to_page);
++
++/*
++ * Map a vmalloc()-space virtual address to the physical page frame number.
++ */
++unsigned long vmalloc_to_pfn(void *vmalloc_addr)
++{
++ return page_to_pfn(vmalloc_to_page(vmalloc_addr));
++}
++
++EXPORT_SYMBOL(vmalloc_to_pfn);
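++
++/*
++ * Usage sketch (editor's addition, not part of this patch): a driver
++ * mmap()ing a vmalloc'd buffer can translate each kernel page and insert
++ * it into a user vma; mydrv_mmap_page is a hypothetical helper.
++ */
++#if 0
++static int mydrv_mmap_page(struct vm_area_struct *vma, void *kbuf,
++ unsigned long uaddr)
++{
++ struct page *page = vmalloc_to_page(kbuf);
++ return vm_insert_page(vma, uaddr, page); /* 0 on success */
++}
++#endif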
++
++#if !defined(__HAVE_ARCH_GATE_AREA)
++
++#if defined(AT_SYSINFO_EHDR)
++static struct vm_area_struct gate_vma;
++
++static int __init gate_vma_init(void)
++{
++ gate_vma.vm_mm = NULL;
++ gate_vma.vm_start = FIXADDR_USER_START;
++ gate_vma.vm_end = FIXADDR_USER_END;
++ gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
++ gate_vma.vm_page_prot = __P101;
++ /*
++ * Make sure the vDSO gets into every core dump.
++ * Dumping its contents makes post-mortem fully interpretable later
++ * without matching up the same kernel and hardware config to see
++ * what PC values meant.
++ */
++ gate_vma.vm_flags |= VM_ALWAYSDUMP;
++ return 0;
++}
++__initcall(gate_vma_init);
++#endif
++
++struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
++{
++#ifdef AT_SYSINFO_EHDR
++ return &gate_vma;
++#else
++ return NULL;
++#endif
++}
++
++int in_gate_area_no_task(unsigned long addr)
++{
++#ifdef AT_SYSINFO_EHDR
++ if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
++ return 1;
++#endif
++ return 0;
++}
++
++#endif /* __HAVE_ARCH_GATE_AREA */
++
++/*
++ * Access another process' address space.
++ * Source/target buffer must be in kernel space.
++ * Do not walk the page table directly; use get_user_pages.
++ */
++int access_process_vm(struct task_struct *tsk, unsigned long addr,
++ void *buf, int len, int write)
++{
++ struct mm_struct *mm;
++ struct vm_area_struct *vma;
++ struct page *page;
++ void *old_buf = buf;
++
++ mm = get_task_mm(tsk);
++ if (!mm)
++ return 0;
++
++ down_read(&mm->mmap_sem);
++ /* ignore errors, just check how much was successfully transferred */
++ while (len) {
++ int bytes, ret, offset;
++ void *maddr;
++
++ ret = get_user_pages(tsk, mm, addr, 1,
++ write, 1, &page, &vma);
++ if (ret <= 0)
++ break;
++
++ bytes = len;
++ offset = addr & (PAGE_SIZE-1);
++ if (bytes > PAGE_SIZE-offset)
++ bytes = PAGE_SIZE-offset;
++
++ maddr = kmap(page);
++ if (write) {
++ copy_to_user_page(vma, page, addr,
++ maddr + offset, buf, bytes);
++ set_page_dirty_lock(page);
++ } else {
++ copy_from_user_page(vma, page, addr,
++ buf, maddr + offset, bytes);
++ }
++ kunmap(page);
++ page_cache_release(page);
++ len -= bytes;
++ buf += bytes;
++ addr += bytes;
++ }
++ up_read(&mm->mmap_sem);
++ mmput(mm);
++
++ return buf - old_buf;
++}
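++
++/*
++ * Typical caller (editor's note): ptrace's PTRACE_PEEKDATA path reads
++ * one word of the tracee this way, e.g.
++ * copied = access_process_vm(child, addr, &word, sizeof(word), 0);
++ * and treats copied != sizeof(word) as -EIO; no page tables are walked
++ * by hand.
++ */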
+diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/mm/slab.c linux-2.6.22-590/mm/slab.c
+--- linux-2.6.22-580/mm/slab.c 2009-02-18 09:56:03.000000000 -0500
++++ linux-2.6.22-590/mm/slab.c 2009-02-18 10:00:42.000000000 -0500
+@@ -110,11 +110,13 @@
+ #include <linux/fault-inject.h>
+ #include <linux/rtmutex.h>
+ #include <linux/reciprocal_div.h>
++#include <linux/arrays.h>
+
+ #include <asm/cacheflush.h>
+ #include <asm/tlbflush.h>
+ #include <asm/page.h>
+
++
+ /*
+ * DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
+ * 0 for faster, smaller code (especially in the critical paths).
+@@ -249,6 +251,14 @@
+ void *addr;
+ };
+
++extern void (*rec_event)(void *,unsigned int);
++struct event_spec {
++ unsigned long pc;
++ unsigned long dcookie;
++ unsigned count;
++ unsigned char reason;
++};
++
+ /*
+ * struct array_cache
+ *
+@@ -3443,6 +3453,19 @@
+ local_irq_restore(save_flags);
+ objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
+ prefetchw(objp);
++#ifdef CONFIG_CHOPSTIX
+ if (rec_event && objp) {
+ struct event event;
+ struct event_spec espec;
+
-+ espec.reason = 0; /* alloc */
-+ event.event_data=&espec;
-+ event.task = current;
-+ espec.pc=caller;
-+ event.event_type=4;
-+ (*rec_event)(&event, cachep->buffer_size);
++ espec.reason = 0; /* alloc */
++ event.event_data=&espec;
++ event.task = current;
++ espec.pc=caller;
++ event.event_type=5;
++ (*rec_event)(&event, cachep->buffer_size);
++ }
++#endif
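++ /*
++ * Editor's sketch (not part of the original patch): a file-scope
++ * consumer for this hook could look like the following; my_rec_event
++ * is a hypothetical name, the struct event fields are used as in this
++ * patch, and event_type 5/4 mark slab alloc/free respectively here.
++ *
++ * static void my_rec_event(void *data, unsigned int size)
++ * {
++ * struct event *ev = data;
++ * struct event_spec *spec = ev->event_data;
++ * printk(KERN_DEBUG "slab event %u pc=%lx size=%u\n",
++ * ev->event_type, spec->pc, size);
++ * }
++ */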
+
+ return objp;
+ }
+@@ -3549,12 +3572,26 @@
+ * Release an obj back to its cache. If the obj has a constructed state, it must
+ * be in this state _before_ it is released. Called with disabled ints.
+ */
+-static inline void __cache_free(struct kmem_cache *cachep, void *objp)
++static inline void __cache_free(struct kmem_cache *cachep, void *objp, void *caller)
+ {
+ struct array_cache *ac = cpu_cache_get(cachep);
+
+ check_irq_off();
+- objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
++ objp = cache_free_debugcheck(cachep, objp, caller);
++ #ifdef CONFIG_CHOPSTIX
++ if (rec_event && objp) {
++ struct event event;
++ struct event_spec espec;
++
++ espec.reason = 1; /* free */
++ event.event_data=&espec;
++ event.task = current;
++ espec.pc=caller;
++ event.event_type=4;
++ (*rec_event)(&event, cachep->buffer_size);
++ }
++ #endif
++
+ vx_slab_free(cachep);
+
+ if (cache_free_alien(cachep, objp))
+@@ -3651,16 +3688,19 @@
+ __builtin_return_address(0));
+ }
+ EXPORT_SYMBOL(kmem_cache_alloc_node);
+-
+ static __always_inline void *
+ __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
+ {
+ struct kmem_cache *cachep;
++ void *ret;
++
+
+ cachep = kmem_find_general_cachep(size, flags);
+ if (unlikely(cachep == NULL))
+ return NULL;
+- return kmem_cache_alloc_node(cachep, flags, node);
++ ret = kmem_cache_alloc_node(cachep, flags, node);
++
++ return ret;
+ }
+
+ #ifdef CONFIG_DEBUG_SLAB
+@@ -3696,6 +3736,7 @@
+ void *caller)
+ {
+ struct kmem_cache *cachep;
++ void *ret;
+
+ /* If you want to save a few bytes .text space: replace
+ * __ with kmem_.
+@@ -3705,9 +3746,10 @@
+ cachep = __find_general_cachep(size, flags);
+ if (unlikely(cachep == NULL))
+ return NULL;
+- return __cache_alloc(cachep, flags, caller);
+-}
++ ret = __cache_alloc(cachep, flags, caller);
+
++ return ret;
++}
+
+ #ifdef CONFIG_DEBUG_SLAB
+ void *__kmalloc(size_t size, gfp_t flags)
+@@ -3723,10 +3765,17 @@
+ EXPORT_SYMBOL(__kmalloc_track_caller);
+
+ #else
++#ifdef CONFIG_CHOPSTIX
++void *__kmalloc(size_t size, gfp_t flags)
++{
++ return __do_kmalloc(size, flags, __builtin_return_address(0));
++}
++#else
+ void *__kmalloc(size_t size, gfp_t flags)
+ {
+ return __do_kmalloc(size, flags, NULL);
+ }
++#endif
+ EXPORT_SYMBOL(__kmalloc);
+ #endif
+
+@@ -3792,7 +3841,7 @@
+
+ local_irq_save(flags);
+ debug_check_no_locks_freed(objp, obj_size(cachep));
+- __cache_free(cachep, objp);
++ __cache_free(cachep, objp, __builtin_return_address(0));
+ local_irq_restore(flags);
+ }
+ EXPORT_SYMBOL(kmem_cache_free);
+@@ -3817,7 +3866,7 @@
+ kfree_debugcheck(objp);
+ c = virt_to_cache(objp);
+ debug_check_no_locks_freed(objp, obj_size(c));
+- __cache_free(c, (void *)objp);
++ __cache_free(c, (void *)objp, __builtin_return_address(0));
+ local_irq_restore(flags);
+ }
+ EXPORT_SYMBOL(kfree);
+diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/mm/slab.c.orig linux-2.6.22-590/mm/slab.c.orig
+--- linux-2.6.22-580/mm/slab.c.orig 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.22-590/mm/slab.c.orig 2009-02-18 09:56:03.000000000 -0500
+@@ -0,0 +1,4523 @@
++/*
++ * linux/mm/slab.c
++ * Written by Mark Hemment, 1996/97.
++ * (markhe@nextd.demon.co.uk)
++ *
++ * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
++ *
++ * Major cleanup, different bufctl logic, per-cpu arrays
++ * (c) 2000 Manfred Spraul
++ *
++ * Cleanup, make the head arrays unconditional, preparation for NUMA
++ * (c) 2002 Manfred Spraul
++ *
++ * An implementation of the Slab Allocator as described in outline in;
++ * UNIX Internals: The New Frontiers by Uresh Vahalia
++ * Pub: Prentice Hall ISBN 0-13-101908-2
++ * or with a little more detail in;
++ * The Slab Allocator: An Object-Caching Kernel Memory Allocator
++ * Jeff Bonwick (Sun Microsystems).
++ * Presented at: USENIX Summer 1994 Technical Conference
++ *
++ * The memory is organized in caches, one cache for each object type.
++ * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
++ * Each cache consists of many slabs (they are small (usually one
++ * page long) and always contiguous), and each slab contains multiple
++ * initialized objects.
++ *
++ * This means, that your constructor is used only for newly allocated
++ * slabs and you must pass objects with the same initializations to
++ * kmem_cache_free.
++ *
++ * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
++ * normal). If you need a special memory type, then you must create a new
++ * cache for that memory type.
++ *
++ * In order to reduce fragmentation, the slabs are sorted in 3 groups:
++ * full slabs with 0 free objects
++ * partial slabs
++ * empty slabs with no allocated objects
++ *
++ * If partial slabs exist, then new allocations come from these slabs,
++ * otherwise from empty slabs or new slabs are allocated.
++ *
++ * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
++ * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
++ *
++ * Each cache has a short per-cpu head array, most allocs
++ * and frees go into that array, and if that array overflows, then 1/2
++ * of the entries in the array are given back into the global cache.
++ * The head array is strictly LIFO and should improve the cache hit rates.
++ * On SMP, it additionally reduces the spinlock operations.
++ *
++ * The c_cpuarray may not be read with enabled local interrupts -
++ * it's changed with a smp_call_function().
++ *
++ * SMP synchronization:
++ * constructors and destructors are called without any locking.
++ * Several members in struct kmem_cache and struct slab never change, they
++ * are accessed without any locking.
++ * The per-cpu arrays are never accessed from the wrong cpu, no locking,
++ * and local interrupts are disabled so slab code is preempt-safe.
++ * The non-constant members are protected with a per-cache irq spinlock.
++ *
++ * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
++ * in 2000 - many ideas in the current implementation are derived from
++ * his patch.
++ *
++ * Further notes from the original documentation:
++ *
++ * 11 April '97. Started multi-threading - markhe
++ * The global cache-chain is protected by the mutex 'cache_chain_mutex'.
++ * The sem is only needed when accessing/extending the cache-chain, which
++ * can never happen inside an interrupt (kmem_cache_create(),
++ * kmem_cache_shrink() and kmem_cache_reap()).
++ *
++ * At present, each engine can be growing a cache. This should be blocked.
++ *
++ * 15 March 2005. NUMA slab allocator.
++ * Shai Fultheim <shai@scalex86.org>.
++ * Shobhit Dayal <shobhit@calsoftinc.com>
++ * Alok N Kataria <alokk@calsoftinc.com>
++ * Christoph Lameter <christoph@lameter.com>
++ *
++ * Modified the slab allocator to be node aware on NUMA systems.
++ * Each node has its own list of partial, free and full slabs.
++ * All object allocations for a node occur from node specific slab lists.
++ */
++
++#include <linux/slab.h>
++#include <linux/mm.h>
++#include <linux/poison.h>
++#include <linux/swap.h>
++#include <linux/cache.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/compiler.h>
++#include <linux/cpuset.h>
++#include <linux/seq_file.h>
++#include <linux/notifier.h>
++#include <linux/kallsyms.h>
++#include <linux/cpu.h>
++#include <linux/sysctl.h>
++#include <linux/module.h>
++#include <linux/rcupdate.h>
++#include <linux/string.h>
++#include <linux/uaccess.h>
++#include <linux/nodemask.h>
++#include <linux/mempolicy.h>
++#include <linux/mutex.h>
++#include <linux/fault-inject.h>
++#include <linux/rtmutex.h>
++#include <linux/reciprocal_div.h>
++
++#include <asm/cacheflush.h>
++#include <asm/tlbflush.h>
++#include <asm/page.h>
++
++/*
++ * DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
++ * 0 for faster, smaller code (especially in the critical paths).
++ *
++ * STATS - 1 to collect stats for /proc/slabinfo.
++ * 0 for faster, smaller code (especially in the critical paths).
++ *
++ * FORCED_DEBUG - 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
++ */
++
++#ifdef CONFIG_DEBUG_SLAB
++#define DEBUG 1
++#define STATS 1
++#define FORCED_DEBUG 1
++#else
++#define DEBUG 0
++#define STATS 0
++#define FORCED_DEBUG 0
++#endif
++
++/* Shouldn't this be in a header file somewhere? */
++#define BYTES_PER_WORD sizeof(void *)
++#define REDZONE_ALIGN max(BYTES_PER_WORD, __alignof__(unsigned long long))
++
++#ifndef cache_line_size
++#define cache_line_size() L1_CACHE_BYTES
++#endif
++
++#ifndef ARCH_KMALLOC_MINALIGN
++/*
++ * Enforce a minimum alignment for the kmalloc caches.
++ * Usually, the kmalloc caches are cache_line_size() aligned, except when
++ * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
++ * Some archs want to perform DMA into kmalloc caches and need a guaranteed
++ * alignment larger than the alignment of a 64-bit integer.
++ * ARCH_KMALLOC_MINALIGN allows that.
++ * Note that increasing this value may disable some debug features.
++ */
++#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
++#endif
++
++#ifndef ARCH_SLAB_MINALIGN
++/*
++ * Enforce a minimum alignment for all caches.
++ * Intended for archs that get misalignment faults even for BYTES_PER_WORD
++ * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
++ * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
++ * some debug features.
++ */
++#define ARCH_SLAB_MINALIGN 0
++#endif
++
++#ifndef ARCH_KMALLOC_FLAGS
++#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
++#endif
++
++/* Legal flag mask for kmem_cache_create(). */
++#if DEBUG
++# define CREATE_MASK (SLAB_RED_ZONE | \
++ SLAB_POISON | SLAB_HWCACHE_ALIGN | \
++ SLAB_CACHE_DMA | \
++ SLAB_STORE_USER | \
++ SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
++ SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
++#else
++# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
++ SLAB_CACHE_DMA | \
++ SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
++ SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
++#endif
++
++/*
++ * kmem_bufctl_t:
++ *
++ * Bufctl's are used for linking objs within a slab
++ * via linked offsets.
++ *
++ * This implementation relies on "struct page" for locating the cache &
++ * slab an object belongs to.
++ * This allows the bufctl structure to be small (one int), but limits
++ * the number of objects a slab (not a cache) can contain when off-slab
++ * bufctls are used. The limit is the size of the largest general cache
++ * that does not use off-slab slabs.
++ * For 32bit archs with 4 kB pages, this is 56.
++ * This is not serious, as it is only for large objects, when it is unwise
++ * to have too many per slab.
++ * Note: This limit can be raised by introducing a general cache whose size
++ * is less than 512 (PAGE_SIZE<<3), but greater than 256.
++ */
++
++typedef unsigned int kmem_bufctl_t;
++#define BUFCTL_END (((kmem_bufctl_t)(~0U))-0)
++#define BUFCTL_FREE (((kmem_bufctl_t)(~0U))-1)
++#define BUFCTL_ACTIVE (((kmem_bufctl_t)(~0U))-2)
++#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-3)
++
++/*
++ * struct slab
++ *
++ * Manages the objs in a slab. Placed either at the beginning of mem allocated
++ * for a slab, or allocated from an general cache.
++ * Slabs are chained into three list: fully used, partial, fully free slabs.
++ */
++struct slab {
++ struct list_head list;
++ unsigned long colouroff;
++ void *s_mem; /* including colour offset */
++ unsigned int inuse; /* num of objs active in slab */
++ kmem_bufctl_t free;
++ unsigned short nodeid;
++};
++
++/*
++ * struct slab_rcu
++ *
++ * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
++ * arrange for kmem_freepages to be called via RCU. This is useful if
++ * we need to approach a kernel structure obliquely, from its address
++ * obtained without the usual locking. We can lock the structure to
++ * stabilize it and check it's still at the given address, only if we
++ * can be sure that the memory has not been meanwhile reused for some
++ * other kind of object (which our subsystem's lock might corrupt).
++ *
++ * rcu_read_lock before reading the address, then rcu_read_unlock after
++ * taking the spinlock within the structure expected at that address.
++ *
++ * We assume struct slab_rcu can overlay struct slab when destroying.
++ */
++struct slab_rcu {
++ struct rcu_head head;
++ struct kmem_cache *cachep;
++ void *addr;
++};
++
++/*
++ * struct array_cache
++ *
++ * Purpose:
++ * - LIFO ordering, to hand out cache-warm objects from _alloc
++ * - reduce the number of linked list operations
++ * - reduce spinlock operations
++ *
++ * The limit is stored in the per-cpu structure to reduce the data cache
++ * footprint.
++ *
++ */
++struct array_cache {
++ unsigned int avail;
++ unsigned int limit;
++ unsigned int batchcount;
++ unsigned int touched;
++ spinlock_t lock;
++ void *entry[0]; /*
++ * Must have this definition in here for the proper
++ * alignment of array_cache. Also simplifies accessing
++ * the entries.
++ * [0] is for gcc 2.95. It should really be [].
++ */
++};
++
++/*
++ * bootstrap: The caches do not work without cpuarrays anymore, but the
++ * cpuarrays are allocated from the generic caches...
++ */
++#define BOOT_CPUCACHE_ENTRIES 1
++struct arraycache_init {
++ struct array_cache cache;
++ void *entries[BOOT_CPUCACHE_ENTRIES];
++};
++
++/*
++ * The slab lists for all objects.
++ */
++struct kmem_list3 {
++ struct list_head slabs_partial; /* partial list first, better asm code */
++ struct list_head slabs_full;
++ struct list_head slabs_free;
++ unsigned long free_objects;
++ unsigned int free_limit;
++ unsigned int colour_next; /* Per-node cache coloring */
++ spinlock_t list_lock;
++ struct array_cache *shared; /* shared per node */
++ struct array_cache **alien; /* on other nodes */
++ unsigned long next_reap; /* updated without locking */
++ int free_touched; /* updated without locking */
++};
++
++/*
++ * Need this for bootstrapping a per node allocator.
++ */
++#define NUM_INIT_LISTS (2 * MAX_NUMNODES + 1)
++struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
++#define CACHE_CACHE 0
++#define SIZE_AC 1
++#define SIZE_L3 (1 + MAX_NUMNODES)
++
++static int drain_freelist(struct kmem_cache *cache,
++ struct kmem_list3 *l3, int tofree);
++static void free_block(struct kmem_cache *cachep, void **objpp, int len,
++ int node);
++static int enable_cpucache(struct kmem_cache *cachep);
++static void cache_reap(struct work_struct *unused);
++
++/*
++ * This function must be completely optimized away if a constant is passed to
++ * it. Mostly the same as what is in linux/slab.h except it returns an index.
++ */
++static __always_inline int index_of(const size_t size)
++{
++ extern void __bad_size(void);
++
++ if (__builtin_constant_p(size)) {
++ int i = 0;
++
++#define CACHE(x) \
++ if (size <= x) \
++ return i; \
++ else \
++ i++;
++#include "linux/kmalloc_sizes.h"
++#undef CACHE
++ __bad_size();
++ } else
++ __bad_size();
++ return 0;
++}
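++
++/*
++ * Worked example (editor's note): with a 4 kB page and small-L1 table in
++ * kmalloc_sizes.h (32, 64, 96, 128, ...), index_of(100) folds at compile
++ * time to 3, the slot of the 128-byte cache - the first CACHE(x) entry
++ * with size <= x. A non-constant size calls the undefined __bad_size()
++ * and breaks the link instead.
++ */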
++
++static int slab_early_init = 1;
++
++#define INDEX_AC index_of(sizeof(struct arraycache_init))
++#define INDEX_L3 index_of(sizeof(struct kmem_list3))
++
++static void kmem_list3_init(struct kmem_list3 *parent)
++{
++ INIT_LIST_HEAD(&parent->slabs_full);
++ INIT_LIST_HEAD(&parent->slabs_partial);
++ INIT_LIST_HEAD(&parent->slabs_free);
++ parent->shared = NULL;
++ parent->alien = NULL;
++ parent->colour_next = 0;
++ spin_lock_init(&parent->list_lock);
++ parent->free_objects = 0;
++ parent->free_touched = 0;
++}
++
++#define MAKE_LIST(cachep, listp, slab, nodeid) \
++ do { \
++ INIT_LIST_HEAD(listp); \
++ list_splice(&(cachep->nodelists[nodeid]->slab), listp); \
++ } while (0)
++
++#define MAKE_ALL_LISTS(cachep, ptr, nodeid) \
++ do { \
++ MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \
++ MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
++ MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \
++ } while (0)
++
++/*
++ * struct kmem_cache
++ *
++ * manages a cache.
++ */
++
++struct kmem_cache {
++/* 1) per-cpu data, touched during every alloc/free */
++ struct array_cache *array[NR_CPUS];
++/* 2) Cache tunables. Protected by cache_chain_mutex */
++ unsigned int batchcount;
++ unsigned int limit;
++ unsigned int shared;
++
++ unsigned int buffer_size;
++ u32 reciprocal_buffer_size;
++/* 3) touched by every alloc & free from the backend */
++
++ unsigned int flags; /* constant flags */
++ unsigned int num; /* # of objs per slab */
++
++/* 4) cache_grow/shrink */
++ /* order of pgs per slab (2^n) */
++ unsigned int gfporder;
++
++ /* force GFP flags, e.g. GFP_DMA */
++ gfp_t gfpflags;
++
++ size_t colour; /* cache colouring range */
++ unsigned int colour_off; /* colour offset */
++ struct kmem_cache *slabp_cache;
++ unsigned int slab_size;
++ unsigned int dflags; /* dynamic flags */
++
++ /* constructor func */
++ void (*ctor) (void *, struct kmem_cache *, unsigned long);
++
++/* 5) cache creation/removal */
++ const char *name;
++ struct list_head next;
++
++/* 6) statistics */
++#if STATS
++ unsigned long num_active;
++ unsigned long num_allocations;
++ unsigned long high_mark;
++ unsigned long grown;
++ unsigned long reaped;
++ unsigned long errors;
++ unsigned long max_freeable;
++ unsigned long node_allocs;
++ unsigned long node_frees;
++ unsigned long node_overflow;
++ atomic_t allochit;
++ atomic_t allocmiss;
++ atomic_t freehit;
++ atomic_t freemiss;
++#endif
++#if DEBUG
++ /*
++ * If debugging is enabled, then the allocator can add additional
++ * fields and/or padding to every object. buffer_size contains the total
++ * object size including these internal fields, the following two
++ * variables contain the offset to the user object and its size.
++ */
++ int obj_offset;
++ int obj_size;
++#endif
++ /*
++ * We put nodelists[] at the end of kmem_cache, because we want to size
++ * this array to nr_node_ids slots instead of MAX_NUMNODES
++ * (see kmem_cache_init())
++ * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
++ * is statically defined, so we reserve the max number of nodes.
++ */
++ struct kmem_list3 *nodelists[MAX_NUMNODES];
++ /*
++ * Do not add fields after nodelists[]
++ */
++};
++
++#define CFLGS_OFF_SLAB (0x80000000UL)
++#define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB)
++
++#define BATCHREFILL_LIMIT 16
++/*
++ * Optimization question: fewer reaps means less probability for unnecessary
++ * cpucache drain/refill cycles.
++ *
++ * OTOH the cpuarrays can contain lots of objects,
++ * which could lock up otherwise freeable slabs.
++ */
++#define REAPTIMEOUT_CPUC (2*HZ)
++#define REAPTIMEOUT_LIST3 (4*HZ)
++
++#if STATS
++#define STATS_INC_ACTIVE(x) ((x)->num_active++)
++#define STATS_DEC_ACTIVE(x) ((x)->num_active--)
++#define STATS_INC_ALLOCED(x) ((x)->num_allocations++)
++#define STATS_INC_GROWN(x) ((x)->grown++)
++#define STATS_ADD_REAPED(x,y) ((x)->reaped += (y))
++#define STATS_SET_HIGH(x) \
++ do { \
++ if ((x)->num_active > (x)->high_mark) \
++ (x)->high_mark = (x)->num_active; \
++ } while (0)
++#define STATS_INC_ERR(x) ((x)->errors++)
++#define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++)
++#define STATS_INC_NODEFREES(x) ((x)->node_frees++)
++#define STATS_INC_ACOVERFLOW(x) ((x)->node_overflow++)
++#define STATS_SET_FREEABLE(x, i) \
++ do { \
++ if ((x)->max_freeable < i) \
++ (x)->max_freeable = i; \
++ } while (0)
++#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
++#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
++#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
++#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
++#else
++#define STATS_INC_ACTIVE(x) do { } while (0)
++#define STATS_DEC_ACTIVE(x) do { } while (0)
++#define STATS_INC_ALLOCED(x) do { } while (0)
++#define STATS_INC_GROWN(x) do { } while (0)
++#define STATS_ADD_REAPED(x,y) do { } while (0)
++#define STATS_SET_HIGH(x) do { } while (0)
++#define STATS_INC_ERR(x) do { } while (0)
++#define STATS_INC_NODEALLOCS(x) do { } while (0)
++#define STATS_INC_NODEFREES(x) do { } while (0)
++#define STATS_INC_ACOVERFLOW(x) do { } while (0)
++#define STATS_SET_FREEABLE(x, i) do { } while (0)
++#define STATS_INC_ALLOCHIT(x) do { } while (0)
++#define STATS_INC_ALLOCMISS(x) do { } while (0)
++#define STATS_INC_FREEHIT(x) do { } while (0)
++#define STATS_INC_FREEMISS(x) do { } while (0)
++#endif
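++
++/*
++ * Usage sketch: hot paths use these unconditionally, e.g.
++ *
++ *	STATS_INC_ALLOCED(cachep);
++ *	STATS_INC_ACTIVE(cachep);
++ *	STATS_SET_HIGH(cachep);
++ *
++ * With STATS == 0 each call compiles down to an empty statement, so
++ * production builds pay nothing for the bookkeeping.
++ */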
++
++#include "slab_vs.h"
++
++#if DEBUG
++
++/*
++ * memory layout of objects:
++ * 0 : objp
++ * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
++ * the end of an object is aligned with the end of the real
++ * allocation. Catches writes behind the end of the allocation.
++ * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
++ * redzone word.
++ * cachep->obj_offset: The real object.
++ * cachep->buffer_size - 2 * BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
++ * cachep->buffer_size - 1 * BYTES_PER_WORD: last caller address
++ * [BYTES_PER_WORD long]
++ */
++static int obj_offset(struct kmem_cache *cachep)
++{
++ return cachep->obj_offset;
++}
++
++static int obj_size(struct kmem_cache *cachep)
++{
++ return cachep->obj_size;
++}
++
++static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
++{
++ BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
++ return (unsigned long long*) (objp + obj_offset(cachep) -
++ sizeof(unsigned long long));
++}
++
++static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
++{
++ BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
++ if (cachep->flags & SLAB_STORE_USER)
++ return (unsigned long long *)(objp + cachep->buffer_size -
++ sizeof(unsigned long long) -
++ REDZONE_ALIGN);
++ return (unsigned long long *) (objp + cachep->buffer_size -
++ sizeof(unsigned long long));
++}
++
++static void **dbg_userword(struct kmem_cache *cachep, void *objp)
++{
++ BUG_ON(!(cachep->flags & SLAB_STORE_USER));
++ return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
++}
++
++#else
++
++#define obj_offset(x) 0
++#define obj_size(cachep) (cachep->buffer_size)
++#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
++#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
++#define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;})
++
++#endif
++
++/*
++ * Do not go above this order unless 0 objects fit into the slab.
++ */
++#define BREAK_GFP_ORDER_HI 1
++#define BREAK_GFP_ORDER_LO 0
++static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;
++
++/*
++ * Functions for storing/retrieving the cachep and/or slab from the page
++ * allocator. These are used to find the slab an obj belongs to. With kfree(),
++ * these are used to find the cache which an obj belongs to.
++ */
++static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
++{
++ page->lru.next = (struct list_head *)cache;
++}
++
++static inline struct kmem_cache *page_get_cache(struct page *page)
++{
++ page = compound_head(page);
++ BUG_ON(!PageSlab(page));
++ return (struct kmem_cache *)page->lru.next;
++}
++
++static inline void page_set_slab(struct page *page, struct slab *slab)
++{
++ page->lru.prev = (struct list_head *)slab;
++}
++
++static inline struct slab *page_get_slab(struct page *page)
++{
++ BUG_ON(!PageSlab(page));
++ return (struct slab *)page->lru.prev;
++}
++
++static inline struct kmem_cache *virt_to_cache(const void *obj)
++{
++ struct page *page = virt_to_head_page(obj);
++ return page_get_cache(page);
++}
++
++static inline struct slab *virt_to_slab(const void *obj)
++{
++ struct page *page = virt_to_head_page(obj);
++ return page_get_slab(page);
++}
++
++static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
++ unsigned int idx)
++{
++ return slab->s_mem + cache->buffer_size * idx;
++}
++
++/*
++ * We want to avoid an expensive divide: (offset / cache->buffer_size).
++ * Using the fact that buffer_size is a constant for a particular cache,
++ * we can replace (offset / cache->buffer_size) by
++ * reciprocal_divide(offset, cache->reciprocal_buffer_size)
++ */
++static inline unsigned int obj_to_index(const struct kmem_cache *cache,
++ const struct slab *slab, void *obj)
++{
++ u32 offset = (obj - slab->s_mem);
++ return reciprocal_divide(offset, cache->reciprocal_buffer_size);
++}
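++
++/*
++ * A minimal sketch of the reciprocal trick, assuming the 2.6-era
++ * linux/reciprocal_div.h implementation:
++ *
++ *	R = reciprocal_value(B);	(roughly (2^32 + B - 1) / B, precomputed)
++ *	q = (u32)(((u64)A * R) >> 32);	(equals A / B for 32-bit A)
++ *
++ * One 64-bit multiply plus a shift replaces a hardware divide on every
++ * obj_to_index() call, at the cost of one precomputed word per cache
++ * (reciprocal_buffer_size).
++ */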
++
++/*
++ * These are the default caches for kmalloc. Custom caches can have other sizes.
++ */
++struct cache_sizes malloc_sizes[] = {
++#define CACHE(x) { .cs_size = (x) },
++#include <linux/kmalloc_sizes.h>
++ CACHE(ULONG_MAX)
++#undef CACHE
++};
++EXPORT_SYMBOL(malloc_sizes);
++
++/* Must match cache_sizes above. Out of line to keep cache footprint low. */
++struct cache_names {
++ char *name;
++ char *name_dma;
++};
++
++static struct cache_names __initdata cache_names[] = {
++#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
++#include <linux/kmalloc_sizes.h>
++ {NULL,}
++#undef CACHE
++};
++
++static struct arraycache_init initarray_cache __initdata =
++ { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
++static struct arraycache_init initarray_generic =
++ { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
++
++/* internal cache of cache description objs */
++static struct kmem_cache cache_cache = {
++ .batchcount = 1,
++ .limit = BOOT_CPUCACHE_ENTRIES,
++ .shared = 1,
++ .buffer_size = sizeof(struct kmem_cache),
++ .name = "kmem_cache",
++};
++
++#define BAD_ALIEN_MAGIC 0x01020304ul
++
++#ifdef CONFIG_LOCKDEP
++
++/*
++ * Slab sometimes uses the kmalloc slabs to store the slab headers
++ * for other slabs "off slab".
++ * The locking for this is tricky in that it nests within the locks
++ * of all other slabs in a few places; to deal with this special
++ * locking we put on-slab caches into a separate lock-class.
++ *
++ * We set lock class for alien array caches which are up during init.
++ * The lock annotation will be lost if all cpus of a node go down and
++ * then come back up during hotplug.
++ */
++static struct lock_class_key on_slab_l3_key;
++static struct lock_class_key on_slab_alc_key;
++
++static inline void init_lock_keys(void)
++{
++ int q;
++ struct cache_sizes *s = malloc_sizes;
++
++ while (s->cs_size != ULONG_MAX) {
++ for_each_node(q) {
++ struct array_cache **alc;
++ int r;
++ struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
++ if (!l3 || OFF_SLAB(s->cs_cachep))
++ continue;
++ lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
++ alc = l3->alien;
++ /*
++ * FIXME: This check for BAD_ALIEN_MAGIC
++ * should go away when common slab code is taught to
++ * work even without alien caches.
++			 * Currently, non-NUMA code returns BAD_ALIEN_MAGIC
++			 * from alloc_alien_cache().
++ */
++ if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
++ continue;
++ for_each_node(r) {
++ if (alc[r])
++ lockdep_set_class(&alc[r]->lock,
++ &on_slab_alc_key);
++ }
++ }
++ s++;
++ }
++}
++#else
++static inline void init_lock_keys(void)
++{
++}
++#endif
++
++/*
++ * 1. Guard access to the cache-chain.
++ * 2. Protect sanity of cpu_online_map against cpu hotplug events
++ */
++static DEFINE_MUTEX(cache_chain_mutex);
++static struct list_head cache_chain;
++
++/*
++ * chicken and egg problem: delay the per-cpu array allocation
++ * until the general caches are up.
++ */
++static enum {
++ NONE,
++ PARTIAL_AC,
++ PARTIAL_L3,
++ FULL
++} g_cpucache_up;
++
++/*
++ * Used by boot code to determine if it can use the slab-based allocator.
++ */
++int slab_is_available(void)
++{
++ return g_cpucache_up == FULL;
++}
++
++static DEFINE_PER_CPU(struct delayed_work, reap_work);
++
++static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
++{
++ return cachep->array[smp_processor_id()];
++}
++
++static inline struct kmem_cache *__find_general_cachep(size_t size,
++ gfp_t gfpflags)
++{
++ struct cache_sizes *csizep = malloc_sizes;
++
++#if DEBUG
++ /* This happens if someone tries to call
++ * kmem_cache_create(), or __kmalloc(), before
++ * the generic caches are initialized.
++ */
++ BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
++#endif
++ while (size > csizep->cs_size)
++ csizep++;
++
++ /*
++ * Really subtle: The last entry with cs->cs_size==ULONG_MAX
++	 * has cs_{dma,}cachep==NULL. Thus no special case for large
++	 * kmalloc calls is required.
++ */
++#ifdef CONFIG_ZONE_DMA
++ if (unlikely(gfpflags & GFP_DMA))
++ return csizep->cs_dmacachep;
++#endif
++ return csizep->cs_cachep;
++}
++
++static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
++{
++ return __find_general_cachep(size, gfpflags);
++}
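++
++/*
++ * Worked example: kmalloc(100, GFP_KERNEL) walks malloc_sizes[] past the
++ * smaller entries (assuming the stock kmalloc_sizes.h table) and stops at
++ * cs_size == 128, so the object comes from the "size-128" cache and 28
++ * bytes are internal fragmentation. The ULONG_MAX sentinel guarantees
++ * termination: oversized requests fall through to the last entry, whose
++ * cs_cachep is NULL.
++ */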
++
++static size_t slab_mgmt_size(size_t nr_objs, size_t align)
++{
++ return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
++}
++
++/*
++ * Calculate the number of objects and left-over bytes for a given buffer size.
++ */
++static void cache_estimate(unsigned long gfporder, size_t buffer_size,
++ size_t align, int flags, size_t *left_over,
++ unsigned int *num)
++{
++ int nr_objs;
++ size_t mgmt_size;
++ size_t slab_size = PAGE_SIZE << gfporder;
++
++ /*
++ * The slab management structure can be either off the slab or
++ * on it. For the latter case, the memory allocated for a
++ * slab is used for:
++ *
++ * - The struct slab
++ * - One kmem_bufctl_t for each object
++ * - Padding to respect alignment of @align
++ * - @buffer_size bytes for each object
++ *
++ * If the slab management structure is off the slab, then the
++ * alignment will already be calculated into the size. Because
++ * the slabs are all pages aligned, the objects will be at the
++ * correct alignment when allocated.
++ */
++ if (flags & CFLGS_OFF_SLAB) {
++ mgmt_size = 0;
++ nr_objs = slab_size / buffer_size;
++
++ if (nr_objs > SLAB_LIMIT)
++ nr_objs = SLAB_LIMIT;
++ } else {
++ /*
++ * Ignore padding for the initial guess. The padding
++ * is at most @align-1 bytes, and @buffer_size is at
++ * least @align. In the worst case, this result will
++ * be one greater than the number of objects that fit
++ * into the memory allocation when taking the padding
++ * into account.
++ */
++ nr_objs = (slab_size - sizeof(struct slab)) /
++ (buffer_size + sizeof(kmem_bufctl_t));
++
++ /*
++ * This calculated number will be either the right
++ * amount, or one greater than what we want.
++ */
++ if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
++ > slab_size)
++ nr_objs--;
++
++ if (nr_objs > SLAB_LIMIT)
++ nr_objs = SLAB_LIMIT;
++
++ mgmt_size = slab_mgmt_size(nr_objs, align);
++ }
++ *num = nr_objs;
++ *left_over = slab_size - nr_objs*buffer_size - mgmt_size;
++}
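++
++/*
++ * Worked example (on-slab, order 0, 4096-byte page, 128-byte objects,
++ * 4-byte kmem_bufctl_t, struct slab assumed 32 bytes, align 32):
++ *
++ *	nr_objs = (4096 - 32) / (128 + 4) = 30
++ *	slab_mgmt_size(30, 32) = ALIGN(32 + 30*4, 32) = 160
++ *	160 + 30*128 = 4000 <= 4096, so num = 30
++ *	left_over = 4096 - 3840 - 160 = 96 bytes for colouring
++ */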
++
++#define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)
++
++static void __slab_error(const char *function, struct kmem_cache *cachep,
++ char *msg)
++{
++ printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
++ function, cachep->name, msg);
++ dump_stack();
++}
++
++/*
++ * By default on NUMA we use alien caches to stage the freeing of
++ * objects allocated from other nodes. This causes massive memory
++ * inefficiencies when using a fake NUMA setup to split memory into a
++ * large number of small nodes, so it can be disabled on the command
++ * line.
++ */
++
++static int use_alien_caches __read_mostly = 1;
++static int __init noaliencache_setup(char *s)
++{
++ use_alien_caches = 0;
++ return 1;
++}
++__setup("noaliencache", noaliencache_setup);
++
++#ifdef CONFIG_NUMA
++/*
++ * Special reaping functions for NUMA systems called from cache_reap().
++ * These take care of doing round-robin flushing of alien caches (containing
++ * objects freed on nodes other than the one they were allocated on) and the
++ * flushing of remote pcps by calling drain_node_pages.
++ */
++static DEFINE_PER_CPU(unsigned long, reap_node);
++
++static void init_reap_node(int cpu)
++{
++ int node;
++
++ node = next_node(cpu_to_node(cpu), node_online_map);
++ if (node == MAX_NUMNODES)
++ node = first_node(node_online_map);
++
++ per_cpu(reap_node, cpu) = node;
++}
++
++static void next_reap_node(void)
++{
++ int node = __get_cpu_var(reap_node);
++
++ node = next_node(node, node_online_map);
++ if (unlikely(node >= MAX_NUMNODES))
++ node = first_node(node_online_map);
++ __get_cpu_var(reap_node) = node;
++}
++
++#else
++#define init_reap_node(cpu) do { } while (0)
++#define next_reap_node(void) do { } while (0)
++#endif
++
++/*
++ * Initiate the reap timer running on the target CPU. We run at around 1 to 2Hz
++ * via the workqueue/eventd.
++ * Add the CPU number into the expiration time to minimize the possibility of
++ * the CPUs getting into lockstep and contending for the global cache chain
++ * lock.
++ */
++static void __devinit start_cpu_timer(int cpu)
++{
++ struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
++
++ /*
++ * When this gets called from do_initcalls via cpucache_init(),
++	 * init_workqueues() has already run, so keventd will be set up
++ * at that time.
++ */
++ if (keventd_up() && reap_work->work.func == NULL) {
++ init_reap_node(cpu);
++ INIT_DELAYED_WORK(reap_work, cache_reap);
++ schedule_delayed_work_on(cpu, reap_work,
++ __round_jiffies_relative(HZ, cpu));
++ }
++}
++
++static struct array_cache *alloc_arraycache(int node, int entries,
++ int batchcount)
++{
++ int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
++ struct array_cache *nc = NULL;
++
++ nc = kmalloc_node(memsize, GFP_KERNEL, node);
++ if (nc) {
++ nc->avail = 0;
++ nc->limit = entries;
++ nc->batchcount = batchcount;
++ nc->touched = 0;
++ spin_lock_init(&nc->lock);
++ }
++ return nc;
++}
++
++/*
++ * Transfer objects in one arraycache to another.
++ * Locking must be handled by the caller.
++ *
++ * Return the number of entries transferred.
++ */
++static int transfer_objects(struct array_cache *to,
++ struct array_cache *from, unsigned int max)
++{
++ /* Figure out how many entries to transfer */
++ int nr = min(min(from->avail, max), to->limit - to->avail);
++
++ if (!nr)
++ return 0;
++
++	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
++			sizeof(void *) * nr);
++
++ from->avail -= nr;
++ to->avail += nr;
++ to->touched = 1;
++ return nr;
++}
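++
++/*
++ * Illustration: with from->avail == 10, max == 6, to->limit == 8 and
++ * to->avail == 5, nr = min(min(10, 6), 8 - 5) = 3. The three most
++ * recently freed pointers move in a single memcpy, keeping the warmest
++ * (LIFO) entries on top of both arrays.
++ */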
++
++#ifndef CONFIG_NUMA
++
++#define drain_alien_cache(cachep, alien) do { } while (0)
++#define reap_alien(cachep, l3) do { } while (0)
++
++static inline struct array_cache **alloc_alien_cache(int node, int limit)
++{
++ return (struct array_cache **)BAD_ALIEN_MAGIC;
++}
++
++static inline void free_alien_cache(struct array_cache **ac_ptr)
++{
++}
++
++static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
++{
++ return 0;
++}
++
++static inline void *alternate_node_alloc(struct kmem_cache *cachep,
++ gfp_t flags)
++{
++ return NULL;
++}
++
++static inline void *____cache_alloc_node(struct kmem_cache *cachep,
++ gfp_t flags, int nodeid)
++{
++ return NULL;
++}
++
++#else /* CONFIG_NUMA */
++
++static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
++static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
++
++static struct array_cache **alloc_alien_cache(int node, int limit)
++{
++ struct array_cache **ac_ptr;
++ int memsize = sizeof(void *) * nr_node_ids;
++ int i;
++
++ if (limit > 1)
++ limit = 12;
++ ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
++ if (ac_ptr) {
++ for_each_node(i) {
++ if (i == node || !node_online(i)) {
++ ac_ptr[i] = NULL;
++ continue;
++ }
++ ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
++ if (!ac_ptr[i]) {
++				for (i--; i >= 0; i--)
++ kfree(ac_ptr[i]);
++ kfree(ac_ptr);
++ return NULL;
++ }
++ }
++ }
++ return ac_ptr;
++}
++
++static void free_alien_cache(struct array_cache **ac_ptr)
++{
++ int i;
++
++ if (!ac_ptr)
++ return;
++ for_each_node(i)
++ kfree(ac_ptr[i]);
++ kfree(ac_ptr);
++}
++
++static void __drain_alien_cache(struct kmem_cache *cachep,
++ struct array_cache *ac, int node)
++{
++ struct kmem_list3 *rl3 = cachep->nodelists[node];
++
++ if (ac->avail) {
++ spin_lock(&rl3->list_lock);
++ /*
++		 * Stuff objects into the remote node's shared array first.
++ * That way we could avoid the overhead of putting the objects
++ * into the free lists and getting them back later.
++ */
++ if (rl3->shared)
++ transfer_objects(rl3->shared, ac, ac->limit);
++
++ free_block(cachep, ac->entry, ac->avail, node);
++ ac->avail = 0;
++ spin_unlock(&rl3->list_lock);
++ }
++}
++
++/*
++ * Called from cache_reap() to regularly drain alien caches round robin.
++ */
++static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
++{
++ int node = __get_cpu_var(reap_node);
++
++ if (l3->alien) {
++ struct array_cache *ac = l3->alien[node];
++
++ if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
++ __drain_alien_cache(cachep, ac, node);
++ spin_unlock_irq(&ac->lock);
++ }
++ }
++}
++
++static void drain_alien_cache(struct kmem_cache *cachep,
++ struct array_cache **alien)
++{
++ int i = 0;
++ struct array_cache *ac;
++ unsigned long flags;
++
++ for_each_online_node(i) {
++ ac = alien[i];
++ if (ac) {
++ spin_lock_irqsave(&ac->lock, flags);
++ __drain_alien_cache(cachep, ac, i);
++ spin_unlock_irqrestore(&ac->lock, flags);
++ }
++ }
++}
++
++static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
++{
++ struct slab *slabp = virt_to_slab(objp);
++ int nodeid = slabp->nodeid;
++ struct kmem_list3 *l3;
++ struct array_cache *alien = NULL;
++ int node;
++
++ node = numa_node_id();
++
++ /*
++	 * Make sure we are not freeing an object from another node to the
++	 * array cache on this cpu.
++ */
++ if (likely(slabp->nodeid == node))
++ return 0;
++
++ l3 = cachep->nodelists[node];
++ STATS_INC_NODEFREES(cachep);
++ if (l3->alien && l3->alien[nodeid]) {
++ alien = l3->alien[nodeid];
++ spin_lock(&alien->lock);
++ if (unlikely(alien->avail == alien->limit)) {
++ STATS_INC_ACOVERFLOW(cachep);
++ __drain_alien_cache(cachep, alien, nodeid);
++ }
++ alien->entry[alien->avail++] = objp;
++ spin_unlock(&alien->lock);
++ } else {
++ spin_lock(&(cachep->nodelists[nodeid])->list_lock);
++ free_block(cachep, &objp, 1, nodeid);
++ spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
++ }
++ return 1;
++}
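++
++/*
++ * Flow sketch: a free on node 0 of an object that lives on node 1 takes
++ * the alien path above. Rather than polluting node 0's cpu array, the
++ * pointer is parked in l3->alien[1]; only when that array fills up does
++ * __drain_alien_cache() take node 1's list_lock and hand the whole batch
++ * back, roughly amortising the remote-lock cost over alien->limit frees.
++ */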
++#endif
++
++static int __cpuinit cpuup_callback(struct notifier_block *nfb,
++ unsigned long action, void *hcpu)
++{
++ long cpu = (long)hcpu;
++ struct kmem_cache *cachep;
++ struct kmem_list3 *l3 = NULL;
++ int node = cpu_to_node(cpu);
++ int memsize = sizeof(struct kmem_list3);
++
++ switch (action) {
++ case CPU_LOCK_ACQUIRE:
++ mutex_lock(&cache_chain_mutex);
++ break;
++ case CPU_UP_PREPARE:
++ case CPU_UP_PREPARE_FROZEN:
++ /*
++		 * We need to do this right at the beginning since the
++		 * alloc_arraycache() calls are going to use this list.
++		 * kmalloc_node allows us to add the slab to the right
++		 * kmem_list3 and not this cpu's kmem_list3.
++ */
++
++ list_for_each_entry(cachep, &cache_chain, next) {
++ /*
++			 * Set up the kmem_list3 for this node before we can
++			 * begin anything. Make sure some other cpu on this
++			 * node has not already allocated it.
++ */
++ if (!cachep->nodelists[node]) {
++ l3 = kmalloc_node(memsize, GFP_KERNEL, node);
++ if (!l3)
++ goto bad;
++ kmem_list3_init(l3);
++ l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
++ ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
++
++ /*
++ * The l3s don't come and go as CPUs come and
++ * go. cache_chain_mutex is sufficient
++ * protection here.
++ */
++ cachep->nodelists[node] = l3;
++ }
++
++ spin_lock_irq(&cachep->nodelists[node]->list_lock);
++ cachep->nodelists[node]->free_limit =
++ (1 + nr_cpus_node(node)) *
++ cachep->batchcount + cachep->num;
++ spin_unlock_irq(&cachep->nodelists[node]->list_lock);
++ }
++
++ /*
++ * Now we can go ahead with allocating the shared arrays and
++ * array caches
++ */
++ list_for_each_entry(cachep, &cache_chain, next) {
++ struct array_cache *nc;
++ struct array_cache *shared = NULL;
++ struct array_cache **alien = NULL;
++
++ nc = alloc_arraycache(node, cachep->limit,
++ cachep->batchcount);
++ if (!nc)
++ goto bad;
++ if (cachep->shared) {
++ shared = alloc_arraycache(node,
++ cachep->shared * cachep->batchcount,
++ 0xbaadf00d);
++ if (!shared)
++ goto bad;
++ }
++ if (use_alien_caches) {
++ alien = alloc_alien_cache(node, cachep->limit);
++ if (!alien)
++ goto bad;
++ }
++ cachep->array[cpu] = nc;
++ l3 = cachep->nodelists[node];
++ BUG_ON(!l3);
++
++ spin_lock_irq(&l3->list_lock);
++ if (!l3->shared) {
++ /*
++ * We are serialised from CPU_DEAD or
++ * CPU_UP_CANCELLED by the cpucontrol lock
++ */
++ l3->shared = shared;
++ shared = NULL;
++ }
++#ifdef CONFIG_NUMA
++ if (!l3->alien) {
++ l3->alien = alien;
++ alien = NULL;
++ }
++#endif
++ spin_unlock_irq(&l3->list_lock);
++ kfree(shared);
++ free_alien_cache(alien);
++ }
++ break;
++ case CPU_ONLINE:
++ case CPU_ONLINE_FROZEN:
++ start_cpu_timer(cpu);
++ break;
++#ifdef CONFIG_HOTPLUG_CPU
++ case CPU_DOWN_PREPARE:
++ case CPU_DOWN_PREPARE_FROZEN:
++ /*
++		 * Shut down the cache reaper. Note that the cache_chain_mutex is
++ * held so that if cache_reap() is invoked it cannot do
++ * anything expensive but will only modify reap_work
++ * and reschedule the timer.
++ */
++ cancel_rearming_delayed_work(&per_cpu(reap_work, cpu));
++		/* Now the cache reaper is guaranteed not to be running. */
++ per_cpu(reap_work, cpu).work.func = NULL;
++ break;
++ case CPU_DOWN_FAILED:
++ case CPU_DOWN_FAILED_FROZEN:
++ start_cpu_timer(cpu);
++ break;
++ case CPU_DEAD:
++ case CPU_DEAD_FROZEN:
++ /*
++ * Even if all the cpus of a node are down, we don't free the
++		 * kmem_list3 of any cache. This is to avoid a race between
++ * cpu_down, and a kmalloc allocation from another cpu for
++ * memory from the node of the cpu going down. The list3
++ * structure is usually allocated from kmem_cache_create() and
++ * gets destroyed at kmem_cache_destroy().
++ */
++ /* fall thru */
++#endif
++ case CPU_UP_CANCELED:
++ case CPU_UP_CANCELED_FROZEN:
++ list_for_each_entry(cachep, &cache_chain, next) {
++ struct array_cache *nc;
++ struct array_cache *shared;
++ struct array_cache **alien;
++ cpumask_t mask;
++
++ mask = node_to_cpumask(node);
++ /* cpu is dead; no one can alloc from it. */
++ nc = cachep->array[cpu];
++ cachep->array[cpu] = NULL;
++ l3 = cachep->nodelists[node];
++
++ if (!l3)
++ goto free_array_cache;
++
++ spin_lock_irq(&l3->list_lock);
++
++ /* Free limit for this kmem_list3 */
++ l3->free_limit -= cachep->batchcount;
++ if (nc)
++ free_block(cachep, nc->entry, nc->avail, node);
++
++ if (!cpus_empty(mask)) {
++ spin_unlock_irq(&l3->list_lock);
++ goto free_array_cache;
++ }
++
++ shared = l3->shared;
++ if (shared) {
++ free_block(cachep, shared->entry,
++ shared->avail, node);
++ l3->shared = NULL;
++ }
++
++ alien = l3->alien;
++ l3->alien = NULL;
++
++ spin_unlock_irq(&l3->list_lock);
++
++ kfree(shared);
++ if (alien) {
++ drain_alien_cache(cachep, alien);
++ free_alien_cache(alien);
++ }
++free_array_cache:
++ kfree(nc);
++ }
++ /*
++ * In the previous loop, all the objects were freed to
++ * the respective cache's slabs, now we can go ahead and
++ * shrink each nodelist to its limit.
++ */
++ list_for_each_entry(cachep, &cache_chain, next) {
++ l3 = cachep->nodelists[node];
++ if (!l3)
++ continue;
++ drain_freelist(cachep, l3, l3->free_objects);
++ }
++ break;
++ case CPU_LOCK_RELEASE:
++ mutex_unlock(&cache_chain_mutex);
++ break;
++ }
++ return NOTIFY_OK;
++bad:
++ return NOTIFY_BAD;
++}
++
++static struct notifier_block __cpuinitdata cpucache_notifier = {
++ &cpuup_callback, NULL, 0
++};
++
++/*
++ * swap the static kmem_list3 with kmalloced memory
++ */
++static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
++ int nodeid)
++{
++ struct kmem_list3 *ptr;
++
++ ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid);
++ BUG_ON(!ptr);
++
++ local_irq_disable();
++ memcpy(ptr, list, sizeof(struct kmem_list3));
++ /*
++ * Do not assume that spinlocks can be initialized via memcpy:
++ */
++ spin_lock_init(&ptr->list_lock);
++
++ MAKE_ALL_LISTS(cachep, ptr, nodeid);
++ cachep->nodelists[nodeid] = ptr;
++ local_irq_enable();
++}
++
++/*
++ * Initialisation. Called after the page allocator has been initialised and
++ * before smp_init().
++ */
++void __init kmem_cache_init(void)
++{
++ size_t left_over;
++ struct cache_sizes *sizes;
++ struct cache_names *names;
++ int i;
++ int order;
++ int node;
++
++ if (num_possible_nodes() == 1)
++ use_alien_caches = 0;
++
++ for (i = 0; i < NUM_INIT_LISTS; i++) {
++ kmem_list3_init(&initkmem_list3[i]);
++ if (i < MAX_NUMNODES)
++ cache_cache.nodelists[i] = NULL;
++ }
++
++ /*
++ * Fragmentation resistance on low memory - only use bigger
++ * page orders on machines with more than 32MB of memory.
++ */
++ if (num_physpages > (32 << 20) >> PAGE_SHIFT)
++ slab_break_gfp_order = BREAK_GFP_ORDER_HI;
++
++ /* Bootstrap is tricky, because several objects are allocated
++ * from caches that do not exist yet:
++ * 1) initialize the cache_cache cache: it contains the struct
++ * kmem_cache structures of all caches, except cache_cache itself:
++ * cache_cache is statically allocated.
++ * Initially an __init data area is used for the head array and the
++ * kmem_list3 structures, it's replaced with a kmalloc allocated
++ * array at the end of the bootstrap.
++ * 2) Create the first kmalloc cache.
++ * The struct kmem_cache for the new cache is allocated normally.
++ * An __init data area is used for the head array.
++ * 3) Create the remaining kmalloc caches, with minimally sized
++ * head arrays.
++ * 4) Replace the __init data head arrays for cache_cache and the first
++ * kmalloc cache with kmalloc allocated arrays.
++ * 5) Replace the __init data for kmem_list3 for cache_cache and
++	 *    the other caches with kmalloc allocated memory.
++ * 6) Resize the head arrays of the kmalloc caches to their final sizes.
++ */
++
++ node = numa_node_id();
++
++ /* 1) create the cache_cache */
++ INIT_LIST_HEAD(&cache_chain);
++ list_add(&cache_cache.next, &cache_chain);
++ cache_cache.colour_off = cache_line_size();
++ cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
++ cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE];
++
++ /*
++ * struct kmem_cache size depends on nr_node_ids, which
++ * can be less than MAX_NUMNODES.
++ */
++ cache_cache.buffer_size = offsetof(struct kmem_cache, nodelists) +
++ nr_node_ids * sizeof(struct kmem_list3 *);
++#if DEBUG
++ cache_cache.obj_size = cache_cache.buffer_size;
++#endif
++ cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
++ cache_line_size());
++ cache_cache.reciprocal_buffer_size =
++ reciprocal_value(cache_cache.buffer_size);
++
++ for (order = 0; order < MAX_ORDER; order++) {
++ cache_estimate(order, cache_cache.buffer_size,
++ cache_line_size(), 0, &left_over, &cache_cache.num);
++ if (cache_cache.num)
++ break;
++ }
++ BUG_ON(!cache_cache.num);
++ cache_cache.gfporder = order;
++ cache_cache.colour = left_over / cache_cache.colour_off;
++ cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
++ sizeof(struct slab), cache_line_size());
++
++ /* 2+3) create the kmalloc caches */
++ sizes = malloc_sizes;
++ names = cache_names;
++
++ /*
++ * Initialize the caches that provide memory for the array cache and the
++ * kmem_list3 structures first. Without this, further allocations will
++	 * BUG().
++ */
++
++ sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
++ sizes[INDEX_AC].cs_size,
++ ARCH_KMALLOC_MINALIGN,
++ ARCH_KMALLOC_FLAGS|SLAB_PANIC,
++ NULL, NULL);
++
++ if (INDEX_AC != INDEX_L3) {
++ sizes[INDEX_L3].cs_cachep =
++ kmem_cache_create(names[INDEX_L3].name,
++ sizes[INDEX_L3].cs_size,
++ ARCH_KMALLOC_MINALIGN,
++ ARCH_KMALLOC_FLAGS|SLAB_PANIC,
++ NULL, NULL);
++ }
++
++ slab_early_init = 0;
++
++ while (sizes->cs_size != ULONG_MAX) {
++ /*
++ * For performance, all the general caches are L1 aligned.
++ * This should be particularly beneficial on SMP boxes, as it
++ * eliminates "false sharing".
++		 * Note: for systems short on memory, removing the alignment
++		 * will allow tighter packing of the smaller caches.
++ */
++ if (!sizes->cs_cachep) {
++ sizes->cs_cachep = kmem_cache_create(names->name,
++ sizes->cs_size,
++ ARCH_KMALLOC_MINALIGN,
++ ARCH_KMALLOC_FLAGS|SLAB_PANIC,
++ NULL, NULL);
++ }
++#ifdef CONFIG_ZONE_DMA
++ sizes->cs_dmacachep = kmem_cache_create(
++ names->name_dma,
++ sizes->cs_size,
++ ARCH_KMALLOC_MINALIGN,
++ ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
++ SLAB_PANIC,
++ NULL, NULL);
++#endif
++ sizes++;
++ names++;
++ }
++ /* 4) Replace the bootstrap head arrays */
++ {
++ struct array_cache *ptr;
++
++ ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
++
++ local_irq_disable();
++ BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
++ memcpy(ptr, cpu_cache_get(&cache_cache),
++ sizeof(struct arraycache_init));
++ /*
++ * Do not assume that spinlocks can be initialized via memcpy:
++ */
++ spin_lock_init(&ptr->lock);
++
++ cache_cache.array[smp_processor_id()] = ptr;
++ local_irq_enable();
++
++ ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
++
++ local_irq_disable();
++ BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
++ != &initarray_generic.cache);
++ memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
++ sizeof(struct arraycache_init));
++ /*
++ * Do not assume that spinlocks can be initialized via memcpy:
++ */
++ spin_lock_init(&ptr->lock);
++
++ malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
++ ptr;
++ local_irq_enable();
++ }
++ /* 5) Replace the bootstrap kmem_list3's */
++ {
++ int nid;
++
++ /* Replace the static kmem_list3 structures for the boot cpu */
++ init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], node);
++
++ for_each_online_node(nid) {
++ init_list(malloc_sizes[INDEX_AC].cs_cachep,
++ &initkmem_list3[SIZE_AC + nid], nid);
++
++ if (INDEX_AC != INDEX_L3) {
++ init_list(malloc_sizes[INDEX_L3].cs_cachep,
++ &initkmem_list3[SIZE_L3 + nid], nid);
++ }
++ }
++ }
++
++ /* 6) resize the head arrays to their final sizes */
++ {
++ struct kmem_cache *cachep;
++ mutex_lock(&cache_chain_mutex);
++ list_for_each_entry(cachep, &cache_chain, next)
++ if (enable_cpucache(cachep))
++ BUG();
++ mutex_unlock(&cache_chain_mutex);
++ }
++
++ /* Annotate slab for lockdep -- annotate the malloc caches */
++ init_lock_keys();
++
++ /* Done! */
++ g_cpucache_up = FULL;
++
++ /*
++ * Register a cpu startup notifier callback that initializes
++ * cpu_cache_get for all new cpus
++ */
++ register_cpu_notifier(&cpucache_notifier);
++
++ /*
++	 * The reap timers are started later, with a module init call: that part
++	 * of the kernel is not yet operational.
++ */
++}
++
++static int __init cpucache_init(void)
++{
++ int cpu;
++
++ /*
++ * Register the timers that return unneeded pages to the page allocator
++ */
++ for_each_online_cpu(cpu)
++ start_cpu_timer(cpu);
++ return 0;
++}
++__initcall(cpucache_init);
++
++/*
++ * Interface to the system's page allocator. No need to hold the cache-lock.
++ *
++ * If we requested dmaable memory, we will get it. Even if we
++ * did not request dmaable memory, we might get it, but that
++ * would be relatively rare and ignorable.
++ */
++static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
++{
++ struct page *page;
++ int nr_pages;
++ int i;
++
++#ifndef CONFIG_MMU
++ /*
++	 * Nommu uses slabs for process anonymous memory allocations, and thus
++ * requires __GFP_COMP to properly refcount higher order allocations
++ */
++ flags |= __GFP_COMP;
++#endif
++
++ flags |= cachep->gfpflags;
++
++ page = alloc_pages_node(nodeid, flags, cachep->gfporder);
++ if (!page)
++ return NULL;
++
++ nr_pages = (1 << cachep->gfporder);
++ if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
++ add_zone_page_state(page_zone(page),
++ NR_SLAB_RECLAIMABLE, nr_pages);
++ else
++ add_zone_page_state(page_zone(page),
++ NR_SLAB_UNRECLAIMABLE, nr_pages);
++ for (i = 0; i < nr_pages; i++)
++ __SetPageSlab(page + i);
++ return page_address(page);
++}
++
++/*
++ * Interface to the system's page release.
++ */
++static void kmem_freepages(struct kmem_cache *cachep, void *addr)
++{
++ unsigned long i = (1 << cachep->gfporder);
++ struct page *page = virt_to_page(addr);
++ const unsigned long nr_freed = i;
++
++ if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
++ sub_zone_page_state(page_zone(page),
++ NR_SLAB_RECLAIMABLE, nr_freed);
++ else
++ sub_zone_page_state(page_zone(page),
++ NR_SLAB_UNRECLAIMABLE, nr_freed);
++ while (i--) {
++ BUG_ON(!PageSlab(page));
++ __ClearPageSlab(page);
++ page++;
++ }
++ if (current->reclaim_state)
++ current->reclaim_state->reclaimed_slab += nr_freed;
++ free_pages((unsigned long)addr, cachep->gfporder);
++}
++
++static void kmem_rcu_free(struct rcu_head *head)
++{
++ struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
++ struct kmem_cache *cachep = slab_rcu->cachep;
++
++ kmem_freepages(cachep, slab_rcu->addr);
++ if (OFF_SLAB(cachep))
++ kmem_cache_free(cachep->slabp_cache, slab_rcu);
++}
++
++#if DEBUG
++
++#ifdef CONFIG_DEBUG_PAGEALLOC
++static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
++ unsigned long caller)
++{
++ int size = obj_size(cachep);
++
++ addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
++
++ if (size < 5 * sizeof(unsigned long))
++ return;
++
++ *addr++ = 0x12345678;
++ *addr++ = caller;
++ *addr++ = smp_processor_id();
++ size -= 3 * sizeof(unsigned long);
++ {
++ unsigned long *sptr = &caller;
++ unsigned long svalue;
++
++ while (!kstack_end(sptr)) {
++ svalue = *sptr++;
++ if (kernel_text_address(svalue)) {
++ *addr++ = svalue;
++ size -= sizeof(unsigned long);
++ if (size <= sizeof(unsigned long))
++ break;
++ }
++ }
++
++ }
++ *addr++ = 0x87654321;
++}
++#endif
++
++static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
++{
++ int size = obj_size(cachep);
++ addr = &((char *)addr)[obj_offset(cachep)];
++
++ memset(addr, val, size);
++ *(unsigned char *)(addr + size - 1) = POISON_END;
++}
++
++static void dump_line(char *data, int offset, int limit)
++{
++ int i;
++ unsigned char error = 0;
++ int bad_count = 0;
++
++ printk(KERN_ERR "%03x:", offset);
++ for (i = 0; i < limit; i++) {
++ if (data[offset + i] != POISON_FREE) {
++ error = data[offset + i];
++ bad_count++;
++ }
++ printk(" %02x", (unsigned char)data[offset + i]);
++ }
++ printk("\n");
++
++ if (bad_count == 1) {
++ error ^= POISON_FREE;
++ if (!(error & (error - 1))) {
++ printk(KERN_ERR "Single bit error detected. Probably "
++ "bad RAM.\n");
++#ifdef CONFIG_X86
++ printk(KERN_ERR "Run memtest86+ or a similar memory "
++ "test tool.\n");
++#else
++ printk(KERN_ERR "Run a memory test tool.\n");
++#endif
++ }
++ }
++}
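++
++/*
++ * The single-bit test above works because error ^ POISON_FREE isolates
++ * the flipped bits; a power-of-two result means exactly one bit differs.
++ * E.g. reading 0x6f where POISON_FREE (0x6b) was written:
++ *
++ *	0x6f ^ 0x6b = 0x04, and 0x04 & 0x03 == 0
++ *
++ * which points at bad RAM rather than a software overwrite.
++ */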
++#endif
++
++#if DEBUG
++
++static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
++{
++ int i, size;
++ char *realobj;
++
++ if (cachep->flags & SLAB_RED_ZONE) {
++ printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
++ *dbg_redzone1(cachep, objp),
++ *dbg_redzone2(cachep, objp));
++ }
++
++ if (cachep->flags & SLAB_STORE_USER) {
++ printk(KERN_ERR "Last user: [<%p>]",
++ *dbg_userword(cachep, objp));
++ print_symbol("(%s)",
++ (unsigned long)*dbg_userword(cachep, objp));
++ printk("\n");
++ }
++ realobj = (char *)objp + obj_offset(cachep);
++ size = obj_size(cachep);
++ for (i = 0; i < size && lines; i += 16, lines--) {
++ int limit;
++ limit = 16;
++ if (i + limit > size)
++ limit = size - i;
++ dump_line(realobj, i, limit);
++ }
++}
++
++static void check_poison_obj(struct kmem_cache *cachep, void *objp)
++{
++ char *realobj;
++ int size, i;
++ int lines = 0;
++
++ realobj = (char *)objp + obj_offset(cachep);
++ size = obj_size(cachep);
++
++ for (i = 0; i < size; i++) {
++ char exp = POISON_FREE;
++ if (i == size - 1)
++ exp = POISON_END;
++ if (realobj[i] != exp) {
++ int limit;
++ /* Mismatch ! */
++ /* Print header */
++ if (lines == 0) {
++ printk(KERN_ERR
++ "Slab corruption: %s start=%p, len=%d\n",
++ cachep->name, realobj, size);
++ print_objinfo(cachep, objp, 0);
++ }
++ /* Hexdump the affected line */
++ i = (i / 16) * 16;
++ limit = 16;
++ if (i + limit > size)
++ limit = size - i;
++ dump_line(realobj, i, limit);
++ i += 16;
++ lines++;
++ /* Limit to 5 lines */
++ if (lines > 5)
++ break;
++ }
++ }
++ if (lines != 0) {
++ /* Print some data about the neighboring objects, if they
++ * exist:
++ */
++ struct slab *slabp = virt_to_slab(objp);
++ unsigned int objnr;
++
++ objnr = obj_to_index(cachep, slabp, objp);
++ if (objnr) {
++ objp = index_to_obj(cachep, slabp, objnr - 1);
++ realobj = (char *)objp + obj_offset(cachep);
++ printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
++ realobj, size);
++ print_objinfo(cachep, objp, 2);
++ }
++ if (objnr + 1 < cachep->num) {
++ objp = index_to_obj(cachep, slabp, objnr + 1);
++ realobj = (char *)objp + obj_offset(cachep);
++ printk(KERN_ERR "Next obj: start=%p, len=%d\n",
++ realobj, size);
++ print_objinfo(cachep, objp, 2);
++ }
++ }
++}
++#endif
++
++#if DEBUG
++/**
++ * slab_destroy_objs - destroy a slab and its objects
++ * @cachep: cache pointer being destroyed
++ * @slabp: slab pointer being destroyed
++ *
++ * Call the registered destructor for each object in a slab that is being
++ * destroyed.
++ */
++static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
++{
++ int i;
++ for (i = 0; i < cachep->num; i++) {
++ void *objp = index_to_obj(cachep, slabp, i);
++
++ if (cachep->flags & SLAB_POISON) {
++#ifdef CONFIG_DEBUG_PAGEALLOC
++ if (cachep->buffer_size % PAGE_SIZE == 0 &&
++ OFF_SLAB(cachep))
++ kernel_map_pages(virt_to_page(objp),
++ cachep->buffer_size / PAGE_SIZE, 1);
++ else
++ check_poison_obj(cachep, objp);
++#else
++ check_poison_obj(cachep, objp);
++#endif
++ }
++ if (cachep->flags & SLAB_RED_ZONE) {
++ if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
++ slab_error(cachep, "start of a freed object "
++ "was overwritten");
++ if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
++ slab_error(cachep, "end of a freed object "
++ "was overwritten");
++ }
++ }
++}
++#else
++static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
++{
++}
++#endif
++
++/**
++ * slab_destroy - destroy and release all objects in a slab
++ * @cachep: cache pointer being destroyed
++ * @slabp: slab pointer being destroyed
++ *
++ * Destroy all the objs in a slab, and release the mem back to the system.
++ * Before calling, the slab must have been unlinked from the cache. The
++ * cache-lock is not held/needed.
++ */
++static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
++{
++ void *addr = slabp->s_mem - slabp->colouroff;
++
++ slab_destroy_objs(cachep, slabp);
++ if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
++ struct slab_rcu *slab_rcu;
++
++ slab_rcu = (struct slab_rcu *)slabp;
++ slab_rcu->cachep = cachep;
++ slab_rcu->addr = addr;
++ call_rcu(&slab_rcu->head, kmem_rcu_free);
++ } else {
++ kmem_freepages(cachep, addr);
++ if (OFF_SLAB(cachep))
++ kmem_cache_free(cachep->slabp_cache, slabp);
++ }
++}
++
++/*
++ * For setting up all the kmem_list3s for a cache whose buffer_size is the
++ * same as the size of kmem_list3.
++ */
++static void __init set_up_list3s(struct kmem_cache *cachep, int index)
++{
++ int node;
++
++ for_each_online_node(node) {
++ cachep->nodelists[node] = &initkmem_list3[index + node];
++ cachep->nodelists[node]->next_reap = jiffies +
++ REAPTIMEOUT_LIST3 +
++ ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
++ }
++}
++
++static void __kmem_cache_destroy(struct kmem_cache *cachep)
++{
++ int i;
++ struct kmem_list3 *l3;
++
++ for_each_online_cpu(i)
++ kfree(cachep->array[i]);
++
++ /* NUMA: free the list3 structures */
++ for_each_online_node(i) {
++ l3 = cachep->nodelists[i];
++ if (l3) {
++ kfree(l3->shared);
++ free_alien_cache(l3->alien);
++ kfree(l3);
++ }
++ }
++ kmem_cache_free(&cache_cache, cachep);
++}
++
++/**
++ * calculate_slab_order - calculate size (page order) of slabs
++ * @cachep: pointer to the cache that is being created
++ * @size: size of objects to be created in this cache.
++ * @align: required alignment for the objects.
++ * @flags: slab allocation flags
++ *
++ * Also calculates the number of objects per slab.
++ *
++ * This could be made much more intelligent. For now, try to avoid using
++ * high order pages for slabs. When the gfp() functions are more friendly
++ * towards high-order requests, this should be changed.
++ */
++static size_t calculate_slab_order(struct kmem_cache *cachep,
++ size_t size, size_t align, unsigned long flags)
++{
++ unsigned long offslab_limit;
++ size_t left_over = 0;
++ int gfporder;
++
++ for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
++ unsigned int num;
++ size_t remainder;
++
++ cache_estimate(gfporder, size, align, flags, &remainder, &num);
++ if (!num)
++ continue;
++
++ if (flags & CFLGS_OFF_SLAB) {
++ /*
++ * Max number of objs-per-slab for caches which
++ * use off-slab slabs. Needed to avoid a possible
++ * looping condition in cache_grow().
++ */
++ offslab_limit = size - sizeof(struct slab);
++ offslab_limit /= sizeof(kmem_bufctl_t);
++
++ if (num > offslab_limit)
++ break;
++ }
++
++ /* Found something acceptable - save it away */
++ cachep->num = num;
++ cachep->gfporder = gfporder;
++ left_over = remainder;
++
++ /*
++ * A VFS-reclaimable slab tends to have most allocations
++ * as GFP_NOFS and we really don't want to have to be allocating
++ * higher-order pages when we are unable to shrink dcache.
++ */
++ if (flags & SLAB_RECLAIM_ACCOUNT)
++ break;
++
++ /*
++ * Large number of objects is good, but very large slabs are
++ * currently bad for the gfp()s.
++ */
++ if (gfporder >= slab_break_gfp_order)
++ break;
++
++ /*
++ * Acceptable internal fragmentation?
++ */
++ if (left_over * 8 <= (PAGE_SIZE << gfporder))
++ break;
++ }
++ return left_over;
++}
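++
++/*
++ * Example of the fragmentation cut-off: a 700-byte object at order 0
++ * (4096-byte pages, management overhead ignored for brevity) fits 5
++ * objects and wastes 4096 - 5*700 = 596 bytes; 596 * 8 = 4768 > 4096,
++ * so the loop tries order 1, where 11 objects waste only 492 bytes
++ * (492 * 8 = 3936 <= 8192) and the search stops with gfporder = 1.
++ */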
++
++static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
++{
++ if (g_cpucache_up == FULL)
++ return enable_cpucache(cachep);
++
++ if (g_cpucache_up == NONE) {
++ /*
++ * Note: the first kmem_cache_create must create the cache
++ * that's used by kmalloc(24), otherwise the creation of
++ * further caches will BUG().
++ */
++ cachep->array[smp_processor_id()] = &initarray_generic.cache;
++
++ /*
++ * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
++ * the first cache, then we need to set up all its list3s,
++ * otherwise the creation of further caches will BUG().
++ */
++ set_up_list3s(cachep, SIZE_AC);
++ if (INDEX_AC == INDEX_L3)
++ g_cpucache_up = PARTIAL_L3;
++ else
++ g_cpucache_up = PARTIAL_AC;
++ } else {
++ cachep->array[smp_processor_id()] =
++ kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
++
++ if (g_cpucache_up == PARTIAL_AC) {
++ set_up_list3s(cachep, SIZE_L3);
++ g_cpucache_up = PARTIAL_L3;
++ } else {
++ int node;
++ for_each_online_node(node) {
++ cachep->nodelists[node] =
++ kmalloc_node(sizeof(struct kmem_list3),
++ GFP_KERNEL, node);
++ BUG_ON(!cachep->nodelists[node]);
++ kmem_list3_init(cachep->nodelists[node]);
++ }
++ }
++ }
++ cachep->nodelists[numa_node_id()]->next_reap =
++ jiffies + REAPTIMEOUT_LIST3 +
++ ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
++
++ cpu_cache_get(cachep)->avail = 0;
++ cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
++ cpu_cache_get(cachep)->batchcount = 1;
++ cpu_cache_get(cachep)->touched = 0;
++ cachep->batchcount = 1;
++ cachep->limit = BOOT_CPUCACHE_ENTRIES;
++ return 0;
++}
++
++/**
++ * kmem_cache_create - Create a cache.
++ * @name: A string which is used in /proc/slabinfo to identify this cache.
++ * @size: The size of objects to be created in this cache.
++ * @align: The required alignment for the objects.
++ * @flags: SLAB flags
++ * @ctor: A constructor for the objects.
++ * @dtor: A destructor for the objects (not implemented anymore).
++ *
++ * Returns a ptr to the cache on success, NULL on failure.
++ * Cannot be called within an interrupt, but can be interrupted.
++ * The @ctor is run when new pages are allocated by the cache
++ * and the @dtor is run before the pages are handed back.
++ *
++ * @name must be valid until the cache is destroyed. This implies that
++ * the module calling this has to destroy the cache before getting unloaded.
++ *
++ * The flags are
++ *
++ * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
++ * to catch references to uninitialised memory.
++ *
++ * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
++ * for buffer overruns.
++ *
++ * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
++ * cacheline. This can be beneficial if you're counting cycles as closely
++ * as davem.
++ */
++struct kmem_cache *
++kmem_cache_create (const char *name, size_t size, size_t align,
++ unsigned long flags,
++ void (*ctor)(void*, struct kmem_cache *, unsigned long),
++ void (*dtor)(void*, struct kmem_cache *, unsigned long))
++{
++ size_t left_over, slab_size, ralign;
++ struct kmem_cache *cachep = NULL, *pc;
++
++ /*
++ * Sanity checks... these are all serious usage bugs.
++ */
++ if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
++ size > KMALLOC_MAX_SIZE || dtor) {
++ printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
++ name);
++ BUG();
++ }
++
++ /*
++ * We use cache_chain_mutex to ensure a consistent view of
++ * cpu_online_map as well. Please see cpuup_callback
++ */
++ mutex_lock(&cache_chain_mutex);
++
++ list_for_each_entry(pc, &cache_chain, next) {
++ char tmp;
++ int res;
++
++ /*
++ * This happens when the module gets unloaded and doesn't
++ * destroy its slab cache and no-one else reuses the vmalloc
++ * area of the module. Print a warning.
++ */
++ res = probe_kernel_address(pc->name, tmp);
++ if (res) {
++ printk(KERN_ERR
++ "SLAB: cache with size %d has lost its name\n",
++ pc->buffer_size);
++ continue;
++ }
++
++ if (!strcmp(pc->name, name)) {
++ printk(KERN_ERR
++ "kmem_cache_create: duplicate cache %s\n", name);
++ dump_stack();
++ goto oops;
++ }
++ }
++
++#if DEBUG
++ WARN_ON(strchr(name, ' ')); /* It confuses parsers */
++#if FORCED_DEBUG
++ /*
++ * Enable redzoning and last user accounting, except for caches with
++ * large objects, if the increased size would increase the object size
++ * above the next power of two: caches with object sizes just above a
++ * power of two have a significant amount of internal fragmentation.
++ */
++	if (size < 4096 || fls(size - 1) == fls(size - 1 + REDZONE_ALIGN +
++ 2 * sizeof(unsigned long long)))
++ flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
++ if (!(flags & SLAB_DESTROY_BY_RCU))
++ flags |= SLAB_POISON;
++#endif
++ if (flags & SLAB_DESTROY_BY_RCU)
++ BUG_ON(flags & SLAB_POISON);
++#endif
++ /*
++	 * Always check flags; a caller might be expecting debug support which
++ * isn't available.
++ */
++ BUG_ON(flags & ~CREATE_MASK);
++
++ /*
++ * Check that size is in terms of words. This is needed to avoid
++ * unaligned accesses for some archs when redzoning is used, and makes
++ * sure any on-slab bufctl's are also correctly aligned.
++ */
++ if (size & (BYTES_PER_WORD - 1)) {
++ size += (BYTES_PER_WORD - 1);
++ size &= ~(BYTES_PER_WORD - 1);
++ }
++
++ /* calculate the final buffer alignment: */
++
++ /* 1) arch recommendation: can be overridden for debug */
++ if (flags & SLAB_HWCACHE_ALIGN) {
++ /*
++ * Default alignment: as specified by the arch code. Except if
++ * an object is really small, then squeeze multiple objects into
++ * one cacheline.
++ */
++ ralign = cache_line_size();
++ while (size <= ralign / 2)
++ ralign /= 2;
++ } else {
++ ralign = BYTES_PER_WORD;
++ }
++
++ /*
++ * Redzoning and user store require word alignment or possibly larger.
++ * Note this will be overridden by architecture or caller mandated
++ * alignment if either is greater than BYTES_PER_WORD.
++ */
++ if (flags & SLAB_STORE_USER)
++ ralign = BYTES_PER_WORD;
++
++ if (flags & SLAB_RED_ZONE) {
++ ralign = REDZONE_ALIGN;
++		/*
++		 * If redzoning, ensure that the second redzone is suitably
++		 * aligned by adjusting the object size accordingly.
++		 */
++ size += REDZONE_ALIGN - 1;
++ size &= ~(REDZONE_ALIGN - 1);
++ }
++
++ /* 2) arch mandated alignment */
++ if (ralign < ARCH_SLAB_MINALIGN) {
++ ralign = ARCH_SLAB_MINALIGN;
++ }
++ /* 3) caller mandated alignment */
++ if (ralign < align) {
++ ralign = align;
++ }
++ /* disable debug if necessary */
++ if (ralign > __alignof__(unsigned long long))
++ flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
++ /*
++ * 4) Store it.
++ */
++ align = ralign;
++
++ /* Get cache's description obj. */
++ cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL);
++ if (!cachep)
++ goto oops;
++
++#if DEBUG
++ cachep->obj_size = size;
++
++ /*
++ * Both debugging options require word-alignment which is calculated
++ * into align above.
++ */
++ if (flags & SLAB_RED_ZONE) {
++ /* add space for red zone words */
++ cachep->obj_offset += sizeof(unsigned long long);
++ size += 2 * sizeof(unsigned long long);
++ }
++ if (flags & SLAB_STORE_USER) {
++		/* user store requires one word of storage behind the end of
++ * the real object. But if the second red zone needs to be
++ * aligned to 64 bits, we must allow that much space.
++ */
++ if (flags & SLAB_RED_ZONE)
++ size += REDZONE_ALIGN;
++ else
++ size += BYTES_PER_WORD;
++ }
++#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
++ if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
++ && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
++ cachep->obj_offset += PAGE_SIZE - size;
++ size = PAGE_SIZE;
++ }
++#endif
++#endif
++
++ /*
++ * Determine if the slab management is 'on' or 'off' slab.
++ * (bootstrapping cannot cope with offslab caches so don't do
++ * it too early on.)
++ */
++ if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init)
++ /*
++		 * Size is large; assume it is best to place the slab management
++		 * obj off-slab (should allow better packing of objs).
++ */
++ flags |= CFLGS_OFF_SLAB;
++
++ size = ALIGN(size, align);
++
++ left_over = calculate_slab_order(cachep, size, align, flags);
++
++ if (!cachep->num) {
++ printk(KERN_ERR
++ "kmem_cache_create: couldn't create cache %s.\n", name);
++ kmem_cache_free(&cache_cache, cachep);
++ cachep = NULL;
++ goto oops;
++ }
++ slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
++ + sizeof(struct slab), align);
++
++ /*
++ * If the slab has been placed off-slab, and we have enough space then
++ * move it on-slab. This is at the expense of any extra colouring.
++ */
++ if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
++ flags &= ~CFLGS_OFF_SLAB;
++ left_over -= slab_size;
++ }
++
++ if (flags & CFLGS_OFF_SLAB) {
++ /* really off slab. No need for manual alignment */
++ slab_size =
++ cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
++ }
++
++ cachep->colour_off = cache_line_size();
++ /* Offset must be a multiple of the alignment. */
++ if (cachep->colour_off < align)
++ cachep->colour_off = align;
++ cachep->colour = left_over / cachep->colour_off;
++ cachep->slab_size = slab_size;
++ cachep->flags = flags;
++ cachep->gfpflags = 0;
++ if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
++ cachep->gfpflags |= GFP_DMA;
++ cachep->buffer_size = size;
++ cachep->reciprocal_buffer_size = reciprocal_value(size);
++
++ if (flags & CFLGS_OFF_SLAB) {
++ cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
++ /*
++ * This is a possibility for one of the malloc_sizes caches.
++ * But since we go off slab only for object size greater than
++ * PAGE_SIZE/8, and malloc_sizes gets created in ascending order,
++ * this should not happen at all.
++ * But leave a BUG_ON for some lucky dude.
++ */
++ BUG_ON(!cachep->slabp_cache);
++ }
++ cachep->ctor = ctor;
++ cachep->name = name;
++
++ if (setup_cpu_cache(cachep)) {
++ __kmem_cache_destroy(cachep);
++ cachep = NULL;
++ goto oops;
++ }
++
++ /* cache setup completed, link it into the list */
++ list_add(&cachep->next, &cache_chain);
++oops:
++ if (!cachep && (flags & SLAB_PANIC))
++ panic("kmem_cache_create(): failed to create slab `%s'\n",
++ name);
++ mutex_unlock(&cache_chain_mutex);
++ return cachep;
++}
++EXPORT_SYMBOL(kmem_cache_create);
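++
++/*
++ * Usage sketch for a hypothetical "foo" driver, against the signature
++ * above (note the 2.6.22 API still carries a dtor slot, which must be
++ * NULL or kmem_cache_create() will BUG()):
++ *
++ *	static struct kmem_cache *foo_cachep;
++ *
++ *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
++ *				       SLAB_HWCACHE_ALIGN, NULL, NULL);
++ *	struct foo *obj = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
++ *	...
++ *	kmem_cache_free(foo_cachep, obj);
++ *	kmem_cache_destroy(foo_cachep);
++ */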
++
++#if DEBUG
++static void check_irq_off(void)
++{
++ BUG_ON(!irqs_disabled());
++}
++
++static void check_irq_on(void)
++{
++ BUG_ON(irqs_disabled());
++}
++
++static void check_spinlock_acquired(struct kmem_cache *cachep)
++{
++#ifdef CONFIG_SMP
++ check_irq_off();
++ assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock);
++#endif
++}
++
++static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
++{
++#ifdef CONFIG_SMP
++ check_irq_off();
++ assert_spin_locked(&cachep->nodelists[node]->list_lock);
++#endif
++}
++
++#else
++#define check_irq_off() do { } while(0)
++#define check_irq_on() do { } while(0)
++#define check_spinlock_acquired(x) do { } while(0)
++#define check_spinlock_acquired_node(x, y) do { } while(0)
++#endif
++
++static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
++ struct array_cache *ac,
++ int force, int node);
++
++static void do_drain(void *arg)
++{
++ struct kmem_cache *cachep = arg;
++ struct array_cache *ac;
++ int node = numa_node_id();
++
++ check_irq_off();
++ ac = cpu_cache_get(cachep);
++ spin_lock(&cachep->nodelists[node]->list_lock);
++ free_block(cachep, ac->entry, ac->avail, node);
++ spin_unlock(&cachep->nodelists[node]->list_lock);
++ ac->avail = 0;
++}
++
++static void drain_cpu_caches(struct kmem_cache *cachep)
++{
++ struct kmem_list3 *l3;
++ int node;
++
++ on_each_cpu(do_drain, cachep, 1, 1);
++ check_irq_on();
++ for_each_online_node(node) {
++ l3 = cachep->nodelists[node];
++ if (l3 && l3->alien)
++ drain_alien_cache(cachep, l3->alien);
++ }
++
++ for_each_online_node(node) {
++ l3 = cachep->nodelists[node];
++ if (l3)
++ drain_array(cachep, l3, l3->shared, 1, node);
++ }
++}
++
++/*
++ * Remove slabs from the list of free slabs.
++ * Specify the number of slabs to drain in tofree.
++ *
++ * Returns the actual number of slabs released.
++ */
++static int drain_freelist(struct kmem_cache *cache,
++ struct kmem_list3 *l3, int tofree)
++{
++ struct list_head *p;
++ int nr_freed;
++ struct slab *slabp;
++
++ nr_freed = 0;
++ while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
++
++ spin_lock_irq(&l3->list_lock);
++ p = l3->slabs_free.prev;
++ if (p == &l3->slabs_free) {
++ spin_unlock_irq(&l3->list_lock);
++ goto out;
++ }
++
++ slabp = list_entry(p, struct slab, list);
++#if DEBUG
++ BUG_ON(slabp->inuse);
++#endif
++ list_del(&slabp->list);
++ /*
++ * Safe to drop the lock. The slab is no longer linked
++ * to the cache.
++ */
++ l3->free_objects -= cache->num;
++ spin_unlock_irq(&l3->list_lock);
++ slab_destroy(cache, slabp);
++ nr_freed++;
++ }
++out:
++ return nr_freed;
++}
++
++/* Called with cache_chain_mutex held to protect against cpu hotplug */
++static int __cache_shrink(struct kmem_cache *cachep)
++{
++ int ret = 0, i = 0;
++ struct kmem_list3 *l3;
++
++ drain_cpu_caches(cachep);
++
++ check_irq_on();
++ for_each_online_node(i) {
++ l3 = cachep->nodelists[i];
++ if (!l3)
++ continue;
++
++ drain_freelist(cachep, l3, l3->free_objects);
++
++ ret += !list_empty(&l3->slabs_full) ||
++ !list_empty(&l3->slabs_partial);
++ }
++ return (ret ? 1 : 0);
++}
++
++/**
++ * kmem_cache_shrink - Shrink a cache.
++ * @cachep: The cache to shrink.
++ *
++ * Releases as many slabs as possible for a cache.
++ * To help debugging, a zero exit status indicates all slabs were released.
++ */
++int kmem_cache_shrink(struct kmem_cache *cachep)
++{
++ int ret;
++ BUG_ON(!cachep || in_interrupt());
++
++ mutex_lock(&cache_chain_mutex);
++ ret = __cache_shrink(cachep);
++ mutex_unlock(&cache_chain_mutex);
++ return ret;
++}
++EXPORT_SYMBOL(kmem_cache_shrink);
++
++/**
++ * kmem_cache_destroy - delete a cache
++ * @cachep: the cache to destroy
++ *
++ * Remove a &struct kmem_cache object from the slab cache.
++ *
++ * It is expected this function will be called by a module when it is
++ * unloaded. This will remove the cache completely, and avoid a duplicate
++ * cache being allocated each time a module is loaded and unloaded, if the
++ * module doesn't have persistent in-kernel storage across loads and unloads.
++ *
++ * The cache must be empty before calling this function.
++ *
++ * The caller must guarantee that no one will allocate memory from the cache
++ * during the kmem_cache_destroy().
++ */
++void kmem_cache_destroy(struct kmem_cache *cachep)
++{
++ BUG_ON(!cachep || in_interrupt());
++
++ /* Find the cache in the chain of caches. */
++ mutex_lock(&cache_chain_mutex);
++ /*
++ * the chain is never empty, cache_cache is never destroyed
++ */
++ list_del(&cachep->next);
++ if (__cache_shrink(cachep)) {
++ slab_error(cachep, "Can't free all objects");
++ list_add(&cachep->next, &cache_chain);
++ mutex_unlock(&cache_chain_mutex);
++ return;
++ }
++
++ if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
++ synchronize_rcu();
++
++ __kmem_cache_destroy(cachep);
++ mutex_unlock(&cache_chain_mutex);
++}
++EXPORT_SYMBOL(kmem_cache_destroy);
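++
++/*
++ * Illustrative teardown sketch (an assumption, not from this patch):
++ * "foo_cachep" is a hypothetical cache created in the module's init
++ * path; it must be empty before this call, as documented above.
++ *
++ *	static void __exit foo_exit(void)
++ *	{
++ *		kmem_cache_destroy(foo_cachep);
++ *	}
++ */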
++
++/*
++ * Get the memory for a slab management obj.
++ * For a slab cache whose slab descriptor is off-slab, the descriptors
++ * always come from the malloc_sizes caches. The slab descriptor cannot
++ * come from the cache that is currently being created because, when we
++ * search for an appropriate cache for these descriptors in
++ * kmem_cache_create, we search through the malloc_sizes array.
++ * If we are creating a malloc_sizes cache here, it would not be visible to
++ * kmem_find_general_cachep until the initialization is complete.
++ * Hence slabp_cache can never be the same as the cache being created.
++ */
++static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
++ int colour_off, gfp_t local_flags,
++ int nodeid)
++{
++ struct slab *slabp;
++
++ if (OFF_SLAB(cachep)) {
++ /* Slab management obj is off-slab. */
++ slabp = kmem_cache_alloc_node(cachep->slabp_cache,
++ local_flags & ~GFP_THISNODE, nodeid);
++ if (!slabp)
++ return NULL;
++ } else {
++ slabp = objp + colour_off;
++ colour_off += cachep->slab_size;
++ }
++ slabp->inuse = 0;
++ slabp->colouroff = colour_off;
++ slabp->s_mem = objp + colour_off;
++ slabp->nodeid = nodeid;
++ return slabp;
++}
++
++static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
++{
++ return (kmem_bufctl_t *) (slabp + 1);
++}
++
++static void cache_init_objs(struct kmem_cache *cachep,
++ struct slab *slabp)
++{
++ int i;
++
++ for (i = 0; i < cachep->num; i++) {
++ void *objp = index_to_obj(cachep, slabp, i);
++#if DEBUG
++ /* need to poison the objs? */
++ if (cachep->flags & SLAB_POISON)
++ poison_obj(cachep, objp, POISON_FREE);
++ if (cachep->flags & SLAB_STORE_USER)
++ *dbg_userword(cachep, objp) = NULL;
++
++ if (cachep->flags & SLAB_RED_ZONE) {
++ *dbg_redzone1(cachep, objp) = RED_INACTIVE;
++ *dbg_redzone2(cachep, objp) = RED_INACTIVE;
++ }
++ /*
++ * Constructors are not allowed to allocate memory from the same
++ * cache which they are a constructor for. Otherwise, deadlock.
++ * They must also be threaded.
++ */
++ if (cachep->ctor && !(cachep->flags & SLAB_POISON))
++ cachep->ctor(objp + obj_offset(cachep), cachep,
++ 0);
++
++ if (cachep->flags & SLAB_RED_ZONE) {
++ if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
++ slab_error(cachep, "constructor overwrote the"
++ " end of an object");
++ if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
++ slab_error(cachep, "constructor overwrote the"
++ " start of an object");
++ }
++ if ((cachep->buffer_size % PAGE_SIZE) == 0 &&
++ OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
++ kernel_map_pages(virt_to_page(objp),
++ cachep->buffer_size / PAGE_SIZE, 0);
++#else
++ if (cachep->ctor)
++ cachep->ctor(objp, cachep, 0);
++#endif
++ slab_bufctl(slabp)[i] = i + 1;
++ }
++ slab_bufctl(slabp)[i - 1] = BUFCTL_END;
++ slabp->free = 0;
++}
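++
++/*
++ * Worked example (illustrative): for a cache with num == 4, the loop
++ * above leaves bufctl == {1, 2, 3, BUFCTL_END} and slabp->free == 0,
++ * i.e. a singly linked free list of object indices 0 -> 1 -> 2 -> 3.
++ */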
++
++static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
++{
++ if (CONFIG_ZONE_DMA_FLAG) {
++ if (flags & GFP_DMA)
++ BUG_ON(!(cachep->gfpflags & GFP_DMA));
++ else
++ BUG_ON(cachep->gfpflags & GFP_DMA);
++ }
++}
++
++static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
++ int nodeid)
++{
++ void *objp = index_to_obj(cachep, slabp, slabp->free);
++ kmem_bufctl_t next;
++
++ slabp->inuse++;
++ next = slab_bufctl(slabp)[slabp->free];
++#if DEBUG
++ slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
++ WARN_ON(slabp->nodeid != nodeid);
++#endif
++ slabp->free = next;
++
++ return objp;
++}
++
++static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
++ void *objp, int nodeid)
++{
++ unsigned int objnr = obj_to_index(cachep, slabp, objp);
++
++#if DEBUG
++ /* Verify that the slab belongs to the intended node */
++ WARN_ON(slabp->nodeid != nodeid);
++
++ if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
++ printk(KERN_ERR "slab: double free detected in cache "
++ "'%s', objp %p\n", cachep->name, objp);
++ BUG();
++ }
++#endif
++ slab_bufctl(slabp)[objnr] = slabp->free;
++ slabp->free = objnr;
++ slabp->inuse--;
++}
++
++/*
++ * Map pages beginning at addr to the given cache and slab. This is required
++ * for the slab allocator to be able to look up the cache and slab of a
++ * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging.
++ */
++static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
++ void *addr)
++{
++ int nr_pages;
++ struct page *page;
++
++ page = virt_to_page(addr);
++
++ nr_pages = 1;
++ if (likely(!PageCompound(page)))
++ nr_pages <<= cache->gfporder;
++
++ do {
++ page_set_cache(page, cache);
++ page_set_slab(page, slab);
++ page++;
++ } while (--nr_pages);
++}
++
++/*
++ * Grow (by 1) the number of slabs within a cache. This is called by
++ * kmem_cache_alloc() when there are no active objs left in a cache.
++ */
++static int cache_grow(struct kmem_cache *cachep,
++ gfp_t flags, int nodeid, void *objp)
++{
++ struct slab *slabp;
++ size_t offset;
++ gfp_t local_flags;
++ struct kmem_list3 *l3;
++
++ /*
++ * Be lazy and only check for valid flags here, keeping it out of the
++ * critical path in kmem_cache_alloc().
++ */
++ BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
++
++ local_flags = (flags & GFP_LEVEL_MASK);
++ /* Take the l3 list lock to change the colour_next on this node */
++ check_irq_off();
++ l3 = cachep->nodelists[nodeid];
++ spin_lock(&l3->list_lock);
++
++	/* Get colour for the slab, and calculate the next value. */
++ offset = l3->colour_next;
++ l3->colour_next++;
++ if (l3->colour_next >= cachep->colour)
++ l3->colour_next = 0;
++ spin_unlock(&l3->list_lock);
++
++ offset *= cachep->colour_off;
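++	/*
++	 * Worked example (illustrative): with cachep->colour == 4 and
++	 * colour_off == 32, successive slabs get offsets 0, 32, 64, 96,
++	 * 0, ... so objects of different slabs start on different
++	 * cache lines.
++	 */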
++
++ if (local_flags & __GFP_WAIT)
++ local_irq_enable();
++
++ /*
++ * The test for missing atomic flag is performed here, rather than
++ * the more obvious place, simply to reduce the critical path length
++	 * in kmem_cache_alloc(). If a caller is seriously misbehaving they
++ * will eventually be caught here (where it matters).
++ */
++ kmem_flagcheck(cachep, flags);
++
++ /*
++ * Get mem for the objs. Attempt to allocate a physical page from
++ * 'nodeid'.
++ */
++ if (!objp)
++ objp = kmem_getpages(cachep, flags, nodeid);
++ if (!objp)
++ goto failed;
++
++ /* Get slab management. */
++ slabp = alloc_slabmgmt(cachep, objp, offset,
++ local_flags & ~GFP_THISNODE, nodeid);
++ if (!slabp)
++ goto opps1;
++
++ slabp->nodeid = nodeid;
++ slab_map_pages(cachep, slabp, objp);
++
++ cache_init_objs(cachep, slabp);
++
++ if (local_flags & __GFP_WAIT)
++ local_irq_disable();
++ check_irq_off();
++ spin_lock(&l3->list_lock);
++
++ /* Make slab active. */
++ list_add_tail(&slabp->list, &(l3->slabs_free));
++ STATS_INC_GROWN(cachep);
++ l3->free_objects += cachep->num;
++ spin_unlock(&l3->list_lock);
++ return 1;
++opps1:
++ kmem_freepages(cachep, objp);
++failed:
++ if (local_flags & __GFP_WAIT)
++ local_irq_disable();
++ return 0;
++}
++
++#if DEBUG
++
++/*
++ * Perform extra freeing checks:
++ * - detect bad pointers.
++ * - POISON/RED_ZONE checking
++ */
++static void kfree_debugcheck(const void *objp)
++{
++ if (!virt_addr_valid(objp)) {
++ printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
++ (unsigned long)objp);
++ BUG();
++ }
++}
++
++static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
++{
++ unsigned long long redzone1, redzone2;
++
++ redzone1 = *dbg_redzone1(cache, obj);
++ redzone2 = *dbg_redzone2(cache, obj);
++
++ /*
++ * Redzone is ok.
++ */
++ if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
++ return;
++
++ if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
++ slab_error(cache, "double free detected");
++ else
++ slab_error(cache, "memory outside object was overwritten");
++
++ printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
++ obj, redzone1, redzone2);
++}
++
++static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
++ void *caller)
++{
++ struct page *page;
++ unsigned int objnr;
++ struct slab *slabp;
++
++ objp -= obj_offset(cachep);
++ kfree_debugcheck(objp);
++ page = virt_to_head_page(objp);
++
++ slabp = page_get_slab(page);
++
++ if (cachep->flags & SLAB_RED_ZONE) {
++ verify_redzone_free(cachep, objp);
++ *dbg_redzone1(cachep, objp) = RED_INACTIVE;
++ *dbg_redzone2(cachep, objp) = RED_INACTIVE;
++ }
++ if (cachep->flags & SLAB_STORE_USER)
++ *dbg_userword(cachep, objp) = caller;
++
++ objnr = obj_to_index(cachep, slabp, objp);
++
++ BUG_ON(objnr >= cachep->num);
++ BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
++
++#ifdef CONFIG_DEBUG_SLAB_LEAK
++ slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
++#endif
++ if (cachep->flags & SLAB_POISON) {
++#ifdef CONFIG_DEBUG_PAGEALLOC
++ if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
++ store_stackinfo(cachep, objp, (unsigned long)caller);
++ kernel_map_pages(virt_to_page(objp),
++ cachep->buffer_size / PAGE_SIZE, 0);
++ } else {
++ poison_obj(cachep, objp, POISON_FREE);
++ }
++#else
++ poison_obj(cachep, objp, POISON_FREE);
++#endif
++ }
++ return objp;
++}
++
++static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
++{
++ kmem_bufctl_t i;
++ int entries = 0;
++
++ /* Check slab's freelist to see if this obj is there. */
++ for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
++ entries++;
++ if (entries > cachep->num || i >= cachep->num)
++ goto bad;
++ }
++ if (entries != cachep->num - slabp->inuse) {
++bad:
++ printk(KERN_ERR "slab: Internal list corruption detected in "
++ "cache '%s'(%d), slabp %p(%d). Hexdump:\n",
++ cachep->name, cachep->num, slabp, slabp->inuse);
++ for (i = 0;
++ i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t);
++ i++) {
++ if (i % 16 == 0)
++ printk("\n%03x:", i);
++ printk(" %02x", ((unsigned char *)slabp)[i]);
++ }
++ printk("\n");
++ BUG();
++ }
++}
++#else
++#define kfree_debugcheck(x) do { } while(0)
++#define cache_free_debugcheck(x,objp,z) (objp)
++#define check_slabp(x,y) do { } while(0)
++#endif
++
++static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
++{
++ int batchcount;
++ struct kmem_list3 *l3;
++ struct array_cache *ac;
++ int node;
++
++ node = numa_node_id();
++
++ check_irq_off();
++ ac = cpu_cache_get(cachep);
++retry:
++ batchcount = ac->batchcount;
++ if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
++ /*
++ * If there was little recent activity on this cache, then
++ * perform only a partial refill. Otherwise we could generate
++ * refill bouncing.
++ */
++ batchcount = BATCHREFILL_LIMIT;
++ }
++ l3 = cachep->nodelists[node];
++
++ BUG_ON(ac->avail > 0 || !l3);
++ spin_lock(&l3->list_lock);
++
++ /* See if we can refill from the shared array */
++ if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
++ goto alloc_done;
++
++ while (batchcount > 0) {
++ struct list_head *entry;
++ struct slab *slabp;
++		/* Get the slab the allocation is to come from. */
++ entry = l3->slabs_partial.next;
++ if (entry == &l3->slabs_partial) {
++ l3->free_touched = 1;
++ entry = l3->slabs_free.next;
++ if (entry == &l3->slabs_free)
++ goto must_grow;
++ }
++
++ slabp = list_entry(entry, struct slab, list);
++ check_slabp(cachep, slabp);
++ check_spinlock_acquired(cachep);
++
++ /*
++ * The slab was either on partial or free list so
++ * there must be at least one object available for
++ * allocation.
++ */
++ BUG_ON(slabp->inuse < 0 || slabp->inuse >= cachep->num);
++
++ while (slabp->inuse < cachep->num && batchcount--) {
++ STATS_INC_ALLOCED(cachep);
++ STATS_INC_ACTIVE(cachep);
++ STATS_SET_HIGH(cachep);
++
++ ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
++ node);
++ }
++ check_slabp(cachep, slabp);
++
++ /* move slabp to correct slabp list: */
++ list_del(&slabp->list);
++ if (slabp->free == BUFCTL_END)
++ list_add(&slabp->list, &l3->slabs_full);
++ else
++ list_add(&slabp->list, &l3->slabs_partial);
++ }
++
++must_grow:
++ l3->free_objects -= ac->avail;
++alloc_done:
++ spin_unlock(&l3->list_lock);
++
++ if (unlikely(!ac->avail)) {
++ int x;
++ x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
++
++ /* cache_grow can reenable interrupts, then ac could change. */
++ ac = cpu_cache_get(cachep);
++ if (!x && ac->avail == 0) /* no objects in sight? abort */
++ return NULL;
++
++ if (!ac->avail) /* objects refilled by interrupt? */
++ goto retry;
++ }
++ ac->touched = 1;
++ return ac->entry[--ac->avail];
++}
++
++static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
++ gfp_t flags)
++{
++ might_sleep_if(flags & __GFP_WAIT);
++#if DEBUG
++ kmem_flagcheck(cachep, flags);
++#endif
++}
++
++#if DEBUG
++static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
++ gfp_t flags, void *objp, void *caller)
++{
++ if (!objp)
++ return objp;
++ if (cachep->flags & SLAB_POISON) {
++#ifdef CONFIG_DEBUG_PAGEALLOC
++ if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
++ kernel_map_pages(virt_to_page(objp),
++ cachep->buffer_size / PAGE_SIZE, 1);
++ else
++ check_poison_obj(cachep, objp);
++#else
++ check_poison_obj(cachep, objp);
++#endif
++ poison_obj(cachep, objp, POISON_INUSE);
++ }
++ if (cachep->flags & SLAB_STORE_USER)
++ *dbg_userword(cachep, objp) = caller;
++
++ if (cachep->flags & SLAB_RED_ZONE) {
++ if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
++ *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
++ slab_error(cachep, "double free, or memory outside"
++ " object was overwritten");
++ printk(KERN_ERR
++ "%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
++ objp, *dbg_redzone1(cachep, objp),
++ *dbg_redzone2(cachep, objp));
++ }
++ *dbg_redzone1(cachep, objp) = RED_ACTIVE;
++ *dbg_redzone2(cachep, objp) = RED_ACTIVE;
++ }
++#ifdef CONFIG_DEBUG_SLAB_LEAK
++ {
++ struct slab *slabp;
++ unsigned objnr;
++
++ slabp = page_get_slab(virt_to_head_page(objp));
++ objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
++ slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
++ }
++#endif
++ objp += obj_offset(cachep);
++ if (cachep->ctor && cachep->flags & SLAB_POISON)
++ cachep->ctor(objp, cachep, 0);
++#if ARCH_SLAB_MINALIGN
++ if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
++ printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
++ objp, ARCH_SLAB_MINALIGN);
++ }
++#endif
++ return objp;
++}
++#else
++#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
++#endif
++
++#ifdef CONFIG_FAILSLAB
++
++static struct failslab_attr {
++
++ struct fault_attr attr;
++
++ u32 ignore_gfp_wait;
++#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
++ struct dentry *ignore_gfp_wait_file;
++#endif
++
++} failslab = {
++ .attr = FAULT_ATTR_INITIALIZER,
++ .ignore_gfp_wait = 1,
++};
++
++static int __init setup_failslab(char *str)
++{
++ return setup_fault_attr(&failslab.attr, str);
++}
++__setup("failslab=", setup_failslab);
++
++static int should_failslab(struct kmem_cache *cachep, gfp_t flags)
++{
++ if (cachep == &cache_cache)
++ return 0;
++ if (flags & __GFP_NOFAIL)
++ return 0;
++ if (failslab.ignore_gfp_wait && (flags & __GFP_WAIT))
++ return 0;
++
++ return should_fail(&failslab.attr, obj_size(cachep));
++}
++
++#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
++
++static int __init failslab_debugfs(void)
++{
++ mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
++ struct dentry *dir;
++ int err;
++
++ err = init_fault_attr_dentries(&failslab.attr, "failslab");
++ if (err)
++ return err;
++ dir = failslab.attr.dentries.dir;
++
++ failslab.ignore_gfp_wait_file =
++ debugfs_create_bool("ignore-gfp-wait", mode, dir,
++ &failslab.ignore_gfp_wait);
++
++ if (!failslab.ignore_gfp_wait_file) {
++ err = -ENOMEM;
++ debugfs_remove(failslab.ignore_gfp_wait_file);
++ cleanup_fault_attr_dentries(&failslab.attr);
++ }
++
++ return err;
++}
++
++late_initcall(failslab_debugfs);
++
++#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
++
++#else /* CONFIG_FAILSLAB */
++
++static inline int should_failslab(struct kmem_cache *cachep, gfp_t flags)
++{
++ return 0;
++}
++
++#endif /* CONFIG_FAILSLAB */
++
++static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
++{
++ void *objp;
++ struct array_cache *ac;
++
++ check_irq_off();
++
++ ac = cpu_cache_get(cachep);
++ if (likely(ac->avail)) {
++ STATS_INC_ALLOCHIT(cachep);
++ ac->touched = 1;
++ objp = ac->entry[--ac->avail];
++ } else {
++ STATS_INC_ALLOCMISS(cachep);
++ objp = cache_alloc_refill(cachep, flags);
++ }
++ return objp;
++}
++
++#ifdef CONFIG_NUMA
++/*
++ * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
++ *
++ * If we are in_interrupt, then process context, including cpusets and
++ * mempolicy, may not apply and should not be used for allocation policy.
++ */
++static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
++{
++ int nid_alloc, nid_here;
++
++ if (in_interrupt() || (flags & __GFP_THISNODE))
++ return NULL;
++ nid_alloc = nid_here = numa_node_id();
++ if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
++ nid_alloc = cpuset_mem_spread_node();
++ else if (current->mempolicy)
++ nid_alloc = slab_node(current->mempolicy);
++ if (nid_alloc != nid_here)
++ return ____cache_alloc_node(cachep, flags, nid_alloc);
++ return NULL;
++}
++
++/*
++ * Fallback function if there was no memory available and no objects on a
++ * certain node and falling back is permitted. First we scan all the
++ * available nodelists for available objects. If that fails then we
++ * perform an allocation without specifying a node. This allows the page
++ * allocator to do its reclaim / fallback magic. We then insert the
++ * slab into the proper nodelist and then allocate from it.
++ */
++static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
++{
++ struct zonelist *zonelist;
++ gfp_t local_flags;
++ struct zone **z;
++ void *obj = NULL;
++ int nid;
++
++ if (flags & __GFP_THISNODE)
++ return NULL;
++
++ zonelist = &NODE_DATA(slab_node(current->mempolicy))
++ ->node_zonelists[gfp_zone(flags)];
++ local_flags = (flags & GFP_LEVEL_MASK);
++
++retry:
++ /*
++ * Look through allowed nodes for objects available
++ * from existing per node queues.
++ */
++ for (z = zonelist->zones; *z && !obj; z++) {
++ nid = zone_to_nid(*z);
++
++ if (cpuset_zone_allowed_hardwall(*z, flags) &&
++ cache->nodelists[nid] &&
++ cache->nodelists[nid]->free_objects)
++ obj = ____cache_alloc_node(cache,
++ flags | GFP_THISNODE, nid);
++ }
++
++ if (!obj) {
++ /*
++ * This allocation will be performed within the constraints
++ * of the current cpuset / memory policy requirements.
++ * We may trigger various forms of reclaim on the allowed
++ * set and go into memory reserves if necessary.
++ */
++ if (local_flags & __GFP_WAIT)
++ local_irq_enable();
++ kmem_flagcheck(cache, flags);
++ obj = kmem_getpages(cache, flags, -1);
++ if (local_flags & __GFP_WAIT)
++ local_irq_disable();
++ if (obj) {
++ /*
++ * Insert into the appropriate per node queues
++ */
++ nid = page_to_nid(virt_to_page(obj));
++ if (cache_grow(cache, flags, nid, obj)) {
++ obj = ____cache_alloc_node(cache,
++ flags | GFP_THISNODE, nid);
++ if (!obj)
++ /*
++ * Another processor may allocate the
++ * objects in the slab since we are
++ * not holding any locks.
++ */
++ goto retry;
++ } else {
++ /* cache_grow already freed obj */
++ obj = NULL;
++ }
++ }
++ }
++ return obj;
++}
++
++/*
++ * An interface to enable slab creation on nodeid
++ */
++static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
++ int nodeid)
++{
++ struct list_head *entry;
++ struct slab *slabp;
++ struct kmem_list3 *l3;
++ void *obj;
++ int x;
++
++ l3 = cachep->nodelists[nodeid];
++ BUG_ON(!l3);
++
++retry:
++ check_irq_off();
++ spin_lock(&l3->list_lock);
++ entry = l3->slabs_partial.next;
++ if (entry == &l3->slabs_partial) {
++ l3->free_touched = 1;
++ entry = l3->slabs_free.next;
++ if (entry == &l3->slabs_free)
++ goto must_grow;
++ }
++
++ slabp = list_entry(entry, struct slab, list);
++ check_spinlock_acquired_node(cachep, nodeid);
++ check_slabp(cachep, slabp);
++
++ STATS_INC_NODEALLOCS(cachep);
++ STATS_INC_ACTIVE(cachep);
++ STATS_SET_HIGH(cachep);
++
++ BUG_ON(slabp->inuse == cachep->num);
++
++ obj = slab_get_obj(cachep, slabp, nodeid);
++ check_slabp(cachep, slabp);
++ vx_slab_alloc(cachep, flags);
++ l3->free_objects--;
++ /* move slabp to correct slabp list: */
++ list_del(&slabp->list);
++
++ if (slabp->free == BUFCTL_END)
++ list_add(&slabp->list, &l3->slabs_full);
++ else
++ list_add(&slabp->list, &l3->slabs_partial);
++
++ spin_unlock(&l3->list_lock);
++ goto done;
++
++must_grow:
++ spin_unlock(&l3->list_lock);
++ x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
++ if (x)
++ goto retry;
++
++ return fallback_alloc(cachep, flags);
++
++done:
++ return obj;
++}
++
++/**
++ * kmem_cache_alloc_node - Allocate an object on the specified node
++ * @cachep: The cache to allocate from.
++ * @flags: See kmalloc().
++ * @nodeid: node number of the target node.
++ * @caller: return address of caller, used for debug information
++ *
++ * Identical to kmem_cache_alloc but it will allocate memory on the given
++ * node, which can improve the performance for cpu bound structures.
++ *
++ * Fallback to other node is possible if __GFP_THISNODE is not set.
++ */
++static __always_inline void *
++__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
++ void *caller)
++{
++ unsigned long save_flags;
++ void *ptr;
++
++ if (should_failslab(cachep, flags))
++ return NULL;
++
++ cache_alloc_debugcheck_before(cachep, flags);
++ local_irq_save(save_flags);
++
++ if (unlikely(nodeid == -1))
++ nodeid = numa_node_id();
++
++ if (unlikely(!cachep->nodelists[nodeid])) {
++ /* Node not bootstrapped yet */
++ ptr = fallback_alloc(cachep, flags);
++ goto out;
++ }
++
++ if (nodeid == numa_node_id()) {
++ /*
++ * Use the locally cached objects if possible.
++ * However ____cache_alloc does not allow fallback
++ * to other nodes. It may fail while we still have
++ * objects on other nodes available.
++ */
++ ptr = ____cache_alloc(cachep, flags);
++ if (ptr)
++ goto out;
++ }
++ /* ___cache_alloc_node can fall back to other nodes */
++ ptr = ____cache_alloc_node(cachep, flags, nodeid);
++ out:
++ vx_slab_alloc(cachep, flags);
++ local_irq_restore(save_flags);
++ ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
++
++ return ptr;
++}
++
++static __always_inline void *
++__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
++{
++ void *objp;
++
++ if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
++ objp = alternate_node_alloc(cache, flags);
++ if (objp)
++ goto out;
++ }
++ objp = ____cache_alloc(cache, flags);
++
++ /*
++ * We may just have run out of memory on the local node.
++ * ____cache_alloc_node() knows how to locate memory on other nodes
++ */
++ if (!objp)
++ objp = ____cache_alloc_node(cache, flags, numa_node_id());
++
++ out:
++ return objp;
++}
++#else
++
++static __always_inline void *
++__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
++{
++ return ____cache_alloc(cachep, flags);
++}
++
++#endif /* CONFIG_NUMA */
++
++static __always_inline void *
++__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
++{
++ unsigned long save_flags;
++ void *objp;
++
++ if (should_failslab(cachep, flags))
++ return NULL;
++
++ cache_alloc_debugcheck_before(cachep, flags);
++ local_irq_save(save_flags);
++ objp = __do_cache_alloc(cachep, flags);
++ local_irq_restore(save_flags);
++ objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
++ prefetchw(objp);
++
++ return objp;
++}
++
++/*
++ * Caller needs to acquire the correct kmem_list3's list_lock
++ */
++static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
++ int node)
++{
++ int i;
++ struct kmem_list3 *l3;
++
++ for (i = 0; i < nr_objects; i++) {
++ void *objp = objpp[i];
++ struct slab *slabp;
++
++ slabp = virt_to_slab(objp);
++ l3 = cachep->nodelists[node];
++ list_del(&slabp->list);
++ check_spinlock_acquired_node(cachep, node);
++ check_slabp(cachep, slabp);
++ slab_put_obj(cachep, slabp, objp, node);
++ STATS_DEC_ACTIVE(cachep);
++ l3->free_objects++;
++ check_slabp(cachep, slabp);
++
++ /* fixup slab chains */
++ if (slabp->inuse == 0) {
++ if (l3->free_objects > l3->free_limit) {
++ l3->free_objects -= cachep->num;
++ /* No need to drop any previously held
++				 * lock here, even if we have an off-slab slab
++ * descriptor it is guaranteed to come from
++ * a different cache, refer to comments before
++ * alloc_slabmgmt.
++ */
++ slab_destroy(cachep, slabp);
++ } else {
++ list_add(&slabp->list, &l3->slabs_free);
++ }
++ } else {
++ /* Unconditionally move a slab to the end of the
++ * partial list on free - maximum time for the
++ * other objects to be freed, too.
++ */
++ list_add_tail(&slabp->list, &l3->slabs_partial);
++ }
++ }
++}
++
++static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
++{
++ int batchcount;
++ struct kmem_list3 *l3;
++ int node = numa_node_id();
++
++ batchcount = ac->batchcount;
++#if DEBUG
++ BUG_ON(!batchcount || batchcount > ac->avail);
++#endif
++ check_irq_off();
++ l3 = cachep->nodelists[node];
++ spin_lock(&l3->list_lock);
++ if (l3->shared) {
++ struct array_cache *shared_array = l3->shared;
++ int max = shared_array->limit - shared_array->avail;
++ if (max) {
++ if (batchcount > max)
++ batchcount = max;
++ memcpy(&(shared_array->entry[shared_array->avail]),
++ ac->entry, sizeof(void *) * batchcount);
++ shared_array->avail += batchcount;
++ goto free_done;
++ }
++ }
++
++ free_block(cachep, ac->entry, batchcount, node);
++free_done:
++#if STATS
++ {
++ int i = 0;
++ struct list_head *p;
++
++ p = l3->slabs_free.next;
++ while (p != &(l3->slabs_free)) {
++ struct slab *slabp;
++
++ slabp = list_entry(p, struct slab, list);
++ BUG_ON(slabp->inuse);
++
++ i++;
++ p = p->next;
++ }
++ STATS_SET_FREEABLE(cachep, i);
++ }
++#endif
++ spin_unlock(&l3->list_lock);
++ ac->avail -= batchcount;
++ memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
++}
++
++/*
++ * Release an obj back to its cache. If the obj has a constructed state, it must
++ * be in this state _before_ it is released. Called with disabled ints.
++ */
++static inline void __cache_free(struct kmem_cache *cachep, void *objp)
++{
++ struct array_cache *ac = cpu_cache_get(cachep);
++
++ check_irq_off();
++ objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
++ vx_slab_free(cachep);
++
++ if (cache_free_alien(cachep, objp))
++ return;
++
++ if (likely(ac->avail < ac->limit)) {
++ STATS_INC_FREEHIT(cachep);
++ ac->entry[ac->avail++] = objp;
++ return;
++ } else {
++ STATS_INC_FREEMISS(cachep);
++ cache_flusharray(cachep, ac);
++ ac->entry[ac->avail++] = objp;
++ }
++}
++
++/**
++ * kmem_cache_alloc - Allocate an object
++ * @cachep: The cache to allocate from.
++ * @flags: See kmalloc().
++ *
++ * Allocate an object from this cache. The flags are only relevant
++ * if the cache has no available objects.
++ */
++void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
++{
++ return __cache_alloc(cachep, flags, __builtin_return_address(0));
++}
++EXPORT_SYMBOL(kmem_cache_alloc);
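++
++/*
++ * Illustrative alloc/free pairing (an assumption; "foo_cachep" and
++ * struct foo are hypothetical):
++ *
++ *	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
++ *	if (!f)
++ *		return -ENOMEM;
++ *	...
++ *	kmem_cache_free(foo_cachep, f);
++ */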
++
++/**
++ * kmem_cache_zalloc - Allocate an object. The memory is set to zero.
++ * @cache: The cache to allocate from.
++ * @flags: See kmalloc().
++ *
++ * Allocate an object from this cache and set the allocated memory to zero.
++ * The flags are only relevant if the cache has no available objects.
++ */
++void *kmem_cache_zalloc(struct kmem_cache *cache, gfp_t flags)
++{
++ void *ret = __cache_alloc(cache, flags, __builtin_return_address(0));
++ if (ret)
++ memset(ret, 0, obj_size(cache));
++ return ret;
++}
++EXPORT_SYMBOL(kmem_cache_zalloc);
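++
++/*
++ * Note (illustrative): as implemented above, kmem_cache_zalloc(c, f)
++ * is equivalent to kmem_cache_alloc(c, f) followed, on success, by
++ * memset(obj, 0, obj_size(c)).
++ */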
++
++/**
++ * kmem_ptr_validate - check if an untrusted pointer might
++ * be a slab entry.
++ * @cachep: the cache we're checking against
++ * @ptr: pointer to validate
++ *
++ * This verifies that the untrusted pointer looks sane:
++ * it is _not_ a guarantee that the pointer is actually
++ * part of the slab cache in question, but it at least
++ * validates that the pointer can be dereferenced and
++ * looks half-way sane.
++ *
++ * Currently only used for dentry validation.
++ */
++int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr)
++{
++ unsigned long addr = (unsigned long)ptr;
++ unsigned long min_addr = PAGE_OFFSET;
++ unsigned long align_mask = BYTES_PER_WORD - 1;
++ unsigned long size = cachep->buffer_size;
++ struct page *page;
++
++ if (unlikely(addr < min_addr))
++ goto out;
++ if (unlikely(addr > (unsigned long)high_memory - size))
++ goto out;
++ if (unlikely(addr & align_mask))
++ goto out;
++ if (unlikely(!kern_addr_valid(addr)))
++ goto out;
++ if (unlikely(!kern_addr_valid(addr + size - 1)))
++ goto out;
++ page = virt_to_page(ptr);
++ if (unlikely(!PageSlab(page)))
++ goto out;
++ if (unlikely(page_get_cache(page) != cachep))
++ goto out;
++ return 1;
++out:
++ return 0;
++}
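++
++/*
++ * Illustrative use (an assumption): before touching a possibly stale
++ * pointer "p" believed to come from a hypothetical "my_cachep":
++ *
++ *	if (!kmem_ptr_validate(my_cachep, p))
++ *		return NULL;
++ *
++ * and only dereference p when the check returns 1.
++ */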
++
++#ifdef CONFIG_NUMA
++void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
++{
++ return __cache_alloc_node(cachep, flags, nodeid,
++ __builtin_return_address(0));
++}
++EXPORT_SYMBOL(kmem_cache_alloc_node);
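++
++/*
++ * Illustrative NUMA-aware allocation (an assumption): placing a
++ * hypothetical per-node control structure on node "nid"; without
++ * __GFP_THISNODE the allocation may fall back to other nodes.
++ *
++ *	ctl = kmem_cache_alloc_node(ctl_cachep, GFP_KERNEL, nid);
++ */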
++
++static __always_inline void *
++__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
++{
++ struct kmem_cache *cachep;
++
++ cachep = kmem_find_general_cachep(size, flags);
++ if (unlikely(cachep == NULL))
++ return NULL;
++ return kmem_cache_alloc_node(cachep, flags, node);
++}
++
++#ifdef CONFIG_DEBUG_SLAB
++void *__kmalloc_node(size_t size, gfp_t flags, int node)
++{
++ return __do_kmalloc_node(size, flags, node,
++ __builtin_return_address(0));
++}
++EXPORT_SYMBOL(__kmalloc_node);
++
++void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
++ int node, void *caller)
++{
++ return __do_kmalloc_node(size, flags, node, caller);
++}
++EXPORT_SYMBOL(__kmalloc_node_track_caller);
++#else
++void *__kmalloc_node(size_t size, gfp_t flags, int node)
++{
++ return __do_kmalloc_node(size, flags, node, NULL);
++}
++EXPORT_SYMBOL(__kmalloc_node);
++#endif /* CONFIG_DEBUG_SLAB */
++#endif /* CONFIG_NUMA */
++
++/**
++ * __do_kmalloc - allocate memory
++ * @size: how many bytes of memory are required.
++ * @flags: the type of memory to allocate (see kmalloc).
++ * @caller: function caller for debug tracking of the caller
++ */
++static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
++ void *caller)
++{
++ struct kmem_cache *cachep;
++
++ /* If you want to save a few bytes .text space: replace
++ * __ with kmem_.
++ * Then kmalloc uses the uninlined functions instead of the inline
++ * functions.
++ */
++ cachep = __find_general_cachep(size, flags);
++ if (unlikely(cachep == NULL))
++ return NULL;
++ return __cache_alloc(cachep, flags, caller);
++}
++
++
++#ifdef CONFIG_DEBUG_SLAB
++void *__kmalloc(size_t size, gfp_t flags)
++{
++ return __do_kmalloc(size, flags, __builtin_return_address(0));
++}
++EXPORT_SYMBOL(__kmalloc);
++
++void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
++{
++ return __do_kmalloc(size, flags, caller);
++}
++EXPORT_SYMBOL(__kmalloc_track_caller);
++
++#else
++void *__kmalloc(size_t size, gfp_t flags)
++{
++ return __do_kmalloc(size, flags, NULL);
++}
++EXPORT_SYMBOL(__kmalloc);
++#endif
++
++/**
++ * krealloc - reallocate memory. The contents will remain unchanged.
++ * @p: object to reallocate memory for.
++ * @new_size: how many bytes of memory are required.
++ * @flags: the type of memory to allocate.
++ *
++ * The contents of the object pointed to are preserved up to the
++ * lesser of the new and old sizes. If @p is %NULL, krealloc()
++ * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
++ * %NULL pointer, the object pointed to is freed.
++ */
++void *krealloc(const void *p, size_t new_size, gfp_t flags)
++{
++ struct kmem_cache *cache, *new_cache;
++ void *ret;
++
++ if (unlikely(!p))
++ return kmalloc_track_caller(new_size, flags);
++
++ if (unlikely(!new_size)) {
++ kfree(p);
++ return NULL;
++ }
++
++ cache = virt_to_cache(p);
++ new_cache = __find_general_cachep(new_size, flags);
++
++ /*
++ * If new size fits in the current cache, bail out.
++ */
++ if (likely(cache == new_cache))
++ return (void *)p;
++
++ /*
++ * We are on the slow-path here so do not use __cache_alloc
++ * because it bloats kernel text.
++ */
++ ret = kmalloc_track_caller(new_size, flags);
++ if (ret) {
++ memcpy(ret, p, min(new_size, ksize(p)));
++ kfree(p);
++ }
++ return ret;
++}
++EXPORT_SYMBOL(krealloc);
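++
++/*
++ * Illustrative grow-a-buffer sketch (an assumption): on failure
++ * krealloc() returns NULL and leaves the old buffer intact, so the
++ * result must not overwrite the only pointer to it.
++ *
++ *	new = krealloc(buf, new_len, GFP_KERNEL);
++ *	if (!new)
++ *		goto err_keep_old_buf;
++ *	buf = new;
++ */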
++
++/**
++ * kmem_cache_free - Deallocate an object
++ * @cachep: The cache the allocation was from.
++ * @objp: The previously allocated object.
++ *
++ * Free an object which was previously allocated from this
++ * cache.
++ */
++void kmem_cache_free(struct kmem_cache *cachep, void *objp)
++{
++ unsigned long flags;
++
++ BUG_ON(virt_to_cache(objp) != cachep);
++
++ local_irq_save(flags);
++ debug_check_no_locks_freed(objp, obj_size(cachep));
++ __cache_free(cachep, objp);
++ local_irq_restore(flags);
++}
++EXPORT_SYMBOL(kmem_cache_free);
++
++/**
++ * kfree - free previously allocated memory
++ * @objp: pointer returned by kmalloc.
++ *
++ * If @objp is NULL, no operation is performed.
++ *
++ * Don't free memory not originally allocated by kmalloc()
++ * or you will run into trouble.
++ */
++void kfree(const void *objp)
++{
++ struct kmem_cache *c;
++ unsigned long flags;
++
++ if (unlikely(!objp))
++ return;
++ local_irq_save(flags);
++ kfree_debugcheck(objp);
++ c = virt_to_cache(objp);
++ debug_check_no_locks_freed(objp, obj_size(c));
++ __cache_free(c, (void *)objp);
++ local_irq_restore(flags);
++}
++EXPORT_SYMBOL(kfree);
++
++unsigned int kmem_cache_size(struct kmem_cache *cachep)
++{
++ return obj_size(cachep);
++}
++EXPORT_SYMBOL(kmem_cache_size);
++
++const char *kmem_cache_name(struct kmem_cache *cachep)
++{
++ return cachep->name;
++}
++EXPORT_SYMBOL_GPL(kmem_cache_name);
++
++/*
++ * This initializes kmem_list3 or resizes various caches for all nodes.
++ */
++static int alloc_kmemlist(struct kmem_cache *cachep)
++{
++ int node;
++ struct kmem_list3 *l3;
++ struct array_cache *new_shared;
++ struct array_cache **new_alien = NULL;
++
++ for_each_online_node(node) {
++
++ if (use_alien_caches) {
++ new_alien = alloc_alien_cache(node, cachep->limit);
++ if (!new_alien)
++ goto fail;
++ }
++
++ new_shared = NULL;
++ if (cachep->shared) {
++ new_shared = alloc_arraycache(node,
++ cachep->shared*cachep->batchcount,
++ 0xbaadf00d);
++ if (!new_shared) {
++ free_alien_cache(new_alien);
++ goto fail;
++ }
++ }
++
++ l3 = cachep->nodelists[node];
++ if (l3) {
++ struct array_cache *shared = l3->shared;
++
++ spin_lock_irq(&l3->list_lock);
++
++ if (shared)
++ free_block(cachep, shared->entry,
++ shared->avail, node);
++
++ l3->shared = new_shared;
++ if (!l3->alien) {
++ l3->alien = new_alien;
++ new_alien = NULL;
++ }
++ l3->free_limit = (1 + nr_cpus_node(node)) *
++ cachep->batchcount + cachep->num;
++ spin_unlock_irq(&l3->list_lock);
++ kfree(shared);
++ free_alien_cache(new_alien);
++ continue;
++ }
++ l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
++ if (!l3) {
++ free_alien_cache(new_alien);
++ kfree(new_shared);
++ goto fail;
++ }
++
++ kmem_list3_init(l3);
++ l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
++ ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
++ l3->shared = new_shared;
++ l3->alien = new_alien;
++ l3->free_limit = (1 + nr_cpus_node(node)) *
++ cachep->batchcount + cachep->num;
++ cachep->nodelists[node] = l3;
+ }
-+#endif
-
- return objp;
- }
-@@ -3549,13 +3572,26 @@
- * Release an obj back to its cache. If the obj has a constructed state, it must
- * be in this state _before_ it is released. Called with disabled ints.
- */
--static inline void __cache_free(struct kmem_cache *cachep, void *objp)
-+static inline void __cache_free(struct kmem_cache *cachep, void *objp, void *caller)
- {
- struct array_cache *ac = cpu_cache_get(cachep);
-
- check_irq_off();
-- objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
-+ objp = cache_free_debugcheck(cachep, objp, caller);
- vx_slab_free(cachep);
-+#ifdef CONFIG_CHOPSTIX
-+ if (rec_event && objp) {
-+ struct event event;
-+ struct event_spec espec;
++ return 0;
+
-+ espec.reason = 1; /* free */
-+ event.event_data=&espec;
-+ event.task = current;
-+ espec.pc=caller;
-+ event.event_type=4;
-+ (*rec_event)(&event, cachep->buffer_size);
++fail:
++ if (!cachep->next.next) {
++ /* Cache is not active yet. Roll back what we did */
++ node--;
++ while (node >= 0) {
++ if (cachep->nodelists[node]) {
++ l3 = cachep->nodelists[node];
++
++ kfree(l3->shared);
++ free_alien_cache(l3->alien);
++ kfree(l3);
++ cachep->nodelists[node] = NULL;
++ }
++ node--;
++ }
++ }
++ return -ENOMEM;
++}
++
++struct ccupdate_struct {
++ struct kmem_cache *cachep;
++ struct array_cache *new[NR_CPUS];
++};
++
++static void do_ccupdate_local(void *info)
++{
++ struct ccupdate_struct *new = info;
++ struct array_cache *old;
++
++ check_irq_off();
++ old = cpu_cache_get(new->cachep);
++
++ new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
++ new->new[smp_processor_id()] = old;
++}
++
++/* Always called with the cache_chain_mutex held */
++static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
++ int batchcount, int shared)
++{
++ struct ccupdate_struct *new;
++ int i;
++
++ new = kzalloc(sizeof(*new), GFP_KERNEL);
++ if (!new)
++ return -ENOMEM;
++
++ for_each_online_cpu(i) {
++ new->new[i] = alloc_arraycache(cpu_to_node(i), limit,
++ batchcount);
++ if (!new->new[i]) {
++ for (i--; i >= 0; i--)
++ kfree(new->new[i]);
++ kfree(new);
++ return -ENOMEM;
++ }
++ }
++ new->cachep = cachep;
++
++ on_each_cpu(do_ccupdate_local, (void *)new, 1, 1);
++
++ check_irq_on();
++ cachep->batchcount = batchcount;
++ cachep->limit = limit;
++ cachep->shared = shared;
++
++ for_each_online_cpu(i) {
++ struct array_cache *ccold = new->new[i];
++ if (!ccold)
++ continue;
++ spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
++ free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
++ spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
++ kfree(ccold);
+ }
++ kfree(new);
++ return alloc_kmemlist(cachep);
++}
++
++/* Called with cache_chain_mutex held always */
++static int enable_cpucache(struct kmem_cache *cachep)
++{
++ int err;
++ int limit, shared;
++
++ /*
++ * The head array serves three purposes:
++ * - create a LIFO ordering, i.e. return objects that are cache-warm
++ * - reduce the number of spinlock operations.
++ * - reduce the number of linked list operations on the slab and
++ * bufctl chains: array operations are cheaper.
++ * The numbers are guessed, we should auto-tune as described by
++ * Bonwick.
++ */
++ if (cachep->buffer_size > 131072)
++ limit = 1;
++ else if (cachep->buffer_size > PAGE_SIZE)
++ limit = 8;
++ else if (cachep->buffer_size > 1024)
++ limit = 24;
++ else if (cachep->buffer_size > 256)
++ limit = 54;
++ else
++ limit = 120;
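++	/*
++	 * Worked example (illustrative): a 512-byte cache falls into the
++	 * "> 256" case above and gets limit = 54, i.e. each CPU keeps at
++	 * most 54 cache-warm objects before spilling to the lists.
++	 */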
++
++ /*
++ * CPU bound tasks (e.g. network routing) can exhibit cpu bound
++ * allocation behaviour: Most allocs on one cpu, most free operations
++ * on another cpu. For these cases, an efficient object passing between
++ * cpus is necessary. This is provided by a shared array. The array
++ * replaces Bonwick's magazine layer.
++ * On uniprocessor, it's functionally equivalent (but less efficient)
++ * to a larger limit. Thus disabled by default.
++ */
++ shared = 0;
++ if (cachep->buffer_size <= PAGE_SIZE && num_possible_cpus() > 1)
++ shared = 8;
++
++#if DEBUG
++ /*
++	 * With debugging enabled, a large batchcount leads to excessively
++	 * long periods with local interrupts disabled. Limit the batchcount.
++ */
++ if (limit > 32)
++ limit = 32;
+#endif
-
- if (cache_free_alien(cachep, objp))
- return;
-@@ -3651,16 +3687,19 @@
- __builtin_return_address(0));
- }
- EXPORT_SYMBOL(kmem_cache_alloc_node);
--
- static __always_inline void *
- __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
- {
- struct kmem_cache *cachep;
-+ void *ret;
++ err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);
++ if (err)
++ printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
++ cachep->name, -err);
++ return err;
++}
+
-
- cachep = kmem_find_general_cachep(size, flags);
- if (unlikely(cachep == NULL))
- return NULL;
-- return kmem_cache_alloc_node(cachep, flags, node);
-+ ret = kmem_cache_alloc_node(cachep, flags, node);
-+
-+ return ret;
- }
-
- #ifdef CONFIG_DEBUG_SLAB
-@@ -3696,6 +3735,7 @@
- void *caller)
- {
- struct kmem_cache *cachep;
-+ void *ret;
-
- /* If you want to save a few bytes .text space: replace
- * __ with kmem_.
-@@ -3705,9 +3745,10 @@
- cachep = __find_general_cachep(size, flags);
- if (unlikely(cachep == NULL))
- return NULL;
-- return __cache_alloc(cachep, flags, caller);
--}
-+ ret = __cache_alloc(cachep, flags, caller);
-
-+ return ret;
++/*
++ * Drain an array if it contains any elements taking the l3 lock only if
++ * necessary. Note that the l3 list_lock also protects the array_cache
++ * if drain_array() is used on the shared array.
++ */
++void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
++ struct array_cache *ac, int force, int node)
++{
++ int tofree;
++
++ if (!ac || !ac->avail)
++ return;
++ if (ac->touched && !force) {
++ ac->touched = 0;
++ } else {
++ spin_lock_irq(&l3->list_lock);
++ if (ac->avail) {
++ tofree = force ? ac->avail : (ac->limit + 4) / 5;
++ if (tofree > ac->avail)
++ tofree = (ac->avail + 1) / 2;
++ free_block(cachep, ac->entry, tofree, node);
++ ac->avail -= tofree;
++ memmove(ac->entry, &(ac->entry[tofree]),
++ sizeof(void *) * ac->avail);
++ }
++ spin_unlock_irq(&l3->list_lock);
++ }
+}
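++
++/*
++ * Worked example (illustrative): with ac->limit == 120 and
++ * ac->avail == 10, a non-forced drain above computes
++ * tofree = (120 + 4) / 5 = 24, caps it at (10 + 1) / 2 = 5, and
++ * frees 5 objects.
++ */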
-
- #ifdef CONFIG_DEBUG_SLAB
- void *__kmalloc(size_t size, gfp_t flags)
-@@ -3723,10 +3764,17 @@
- EXPORT_SYMBOL(__kmalloc_track_caller);
-
- #else
-+#ifdef CONFIG_CHOPSTIX
-+void *__kmalloc(size_t size, gfp_t flags)
++
++/**
++ * cache_reap - Reclaim memory from caches.
++ * @w: work descriptor
++ *
++ * Called from workqueue/eventd every few seconds.
++ * Purpose:
++ * - clear the per-cpu caches for this CPU.
++ * - return freeable pages to the main free memory pool.
++ *
++ * If we cannot acquire the cache chain mutex then just give up - we'll try
++ * again on the next iteration.
++ */
++static void cache_reap(struct work_struct *w)
+{
-+ return __do_kmalloc(size, flags, __builtin_return_address(0));
++ struct kmem_cache *searchp;
++ struct kmem_list3 *l3;
++ int node = numa_node_id();
++ struct delayed_work *work =
++ container_of(w, struct delayed_work, work);
++
++ if (!mutex_trylock(&cache_chain_mutex))
++ /* Give up. Setup the next iteration. */
++ goto out;
++
++ list_for_each_entry(searchp, &cache_chain, next) {
++ check_irq_on();
++
++ /*
++ * We only take the l3 lock if absolutely necessary and we
++ * have established with reasonable certainty that
++ * we can do some work if the lock was obtained.
++ */
++ l3 = searchp->nodelists[node];
++
++ reap_alien(searchp, l3);
++
++ drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);
++
++ /*
++ * These are racy checks but it does not matter
++ * if we skip one check or scan twice.
++ */
++ if (time_after(l3->next_reap, jiffies))
++ goto next;
++
++ l3->next_reap = jiffies + REAPTIMEOUT_LIST3;
++
++ drain_array(searchp, l3, l3->shared, 0, node);
++
++ if (l3->free_touched)
++ l3->free_touched = 0;
++ else {
++ int freed;
++
++ freed = drain_freelist(searchp, l3, (l3->free_limit +
++ 5 * searchp->num - 1) / (5 * searchp->num));
++ STATS_ADD_REAPED(searchp, freed);
++ }
++next:
++ cond_resched();
++ }
++ check_irq_on();
++ mutex_unlock(&cache_chain_mutex);
++ next_reap_node();
++out:
++ /* Set up the next iteration */
++ schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
+}
++
++#ifdef CONFIG_PROC_FS
++
++static void print_slabinfo_header(struct seq_file *m)
++{
++ /*
++ * Output format version, so at least we can change it
++ * without _too_ many complaints.
++ */
++#if STATS
++ seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
+#else
- void *__kmalloc(size_t size, gfp_t flags)
- {
- return __do_kmalloc(size, flags, NULL);
- }
++ seq_puts(m, "slabinfo - version: 2.1\n");
+#endif
- EXPORT_SYMBOL(__kmalloc);
- #endif
-
-@@ -3792,7 +3840,7 @@
-
- local_irq_save(flags);
- debug_check_no_locks_freed(objp, obj_size(cachep));
-- __cache_free(cachep, objp);
-+ __cache_free(cachep, objp,__builtin_return_address(0));
- local_irq_restore(flags);
- }
- EXPORT_SYMBOL(kmem_cache_free);
-@@ -3817,7 +3865,7 @@
- kfree_debugcheck(objp);
- c = virt_to_cache(objp);
- debug_check_no_locks_freed(objp, obj_size(c));
-- __cache_free(c, (void *)objp);
-+ __cache_free(c, (void *)objp,__builtin_return_address(0));
- local_irq_restore(flags);
- }
- EXPORT_SYMBOL(kfree);
++ seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
++ "<objperslab> <pagesperslab>");
++ seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
++ seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
++#if STATS
++ seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
++ "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
++ seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
++#endif
++ seq_putc(m, '\n');
++}
++
++static void *s_start(struct seq_file *m, loff_t *pos)
++{
++ loff_t n = *pos;
++ struct list_head *p;
++
++ mutex_lock(&cache_chain_mutex);
++ if (!n)
++ print_slabinfo_header(m);
++ p = cache_chain.next;
++ while (n--) {
++ p = p->next;
++ if (p == &cache_chain)
++ return NULL;
++ }
++ return list_entry(p, struct kmem_cache, next);
++}
++
++static void *s_next(struct seq_file *m, void *p, loff_t *pos)
++{
++ struct kmem_cache *cachep = p;
++ ++*pos;
++ return cachep->next.next == &cache_chain ?
++ NULL : list_entry(cachep->next.next, struct kmem_cache, next);
++}
++
++static void s_stop(struct seq_file *m, void *p)
++{
++ mutex_unlock(&cache_chain_mutex);
++}
++
++static int s_show(struct seq_file *m, void *p)
++{
++ struct kmem_cache *cachep = p;
++ struct slab *slabp;
++ unsigned long active_objs;
++ unsigned long num_objs;
++ unsigned long active_slabs = 0;
++ unsigned long num_slabs, free_objects = 0, shared_avail = 0;
++ const char *name;
++ char *error = NULL;
++ int node;
++ struct kmem_list3 *l3;
++
++ active_objs = 0;
++ num_slabs = 0;
++ for_each_online_node(node) {
++ l3 = cachep->nodelists[node];
++ if (!l3)
++ continue;
++
++ check_irq_on();
++ spin_lock_irq(&l3->list_lock);
++
++ list_for_each_entry(slabp, &l3->slabs_full, list) {
++ if (slabp->inuse != cachep->num && !error)
++ error = "slabs_full accounting error";
++ active_objs += cachep->num;
++ active_slabs++;
++ }
++ list_for_each_entry(slabp, &l3->slabs_partial, list) {
++ if (slabp->inuse == cachep->num && !error)
++ error = "slabs_partial inuse accounting error";
++ if (!slabp->inuse && !error)
++ error = "slabs_partial/inuse accounting error";
++ active_objs += slabp->inuse;
++ active_slabs++;
++ }
++ list_for_each_entry(slabp, &l3->slabs_free, list) {
++ if (slabp->inuse && !error)
++ error = "slabs_free/inuse accounting error";
++ num_slabs++;
++ }
++ free_objects += l3->free_objects;
++ if (l3->shared)
++ shared_avail += l3->shared->avail;
++
++ spin_unlock_irq(&l3->list_lock);
++ }
++ num_slabs += active_slabs;
++ num_objs = num_slabs * cachep->num;
++ if (num_objs - active_objs != free_objects && !error)
++ error = "free_objects accounting error";
++
++ name = cachep->name;
++ if (error)
++ printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
++
++ seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
++ name, active_objs, num_objs, cachep->buffer_size,
++ cachep->num, (1 << cachep->gfporder));
++ seq_printf(m, " : tunables %4u %4u %4u",
++ cachep->limit, cachep->batchcount, cachep->shared);
++ seq_printf(m, " : slabdata %6lu %6lu %6lu",
++ active_slabs, num_slabs, shared_avail);
++#if STATS
++ { /* list3 stats */
++ unsigned long high = cachep->high_mark;
++ unsigned long allocs = cachep->num_allocations;
++ unsigned long grown = cachep->grown;
++ unsigned long reaped = cachep->reaped;
++ unsigned long errors = cachep->errors;
++ unsigned long max_freeable = cachep->max_freeable;
++ unsigned long node_allocs = cachep->node_allocs;
++ unsigned long node_frees = cachep->node_frees;
++ unsigned long overflows = cachep->node_overflow;
++
++ seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \
++ %4lu %4lu %4lu %4lu %4lu", allocs, high, grown,
++ reaped, errors, max_freeable, node_allocs,
++ node_frees, overflows);
++ }
++ /* cpu stats */
++ {
++ unsigned long allochit = atomic_read(&cachep->allochit);
++ unsigned long allocmiss = atomic_read(&cachep->allocmiss);
++ unsigned long freehit = atomic_read(&cachep->freehit);
++ unsigned long freemiss = atomic_read(&cachep->freemiss);
++
++ seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
++ allochit, allocmiss, freehit, freemiss);
++ }
++#endif
++ seq_putc(m, '\n');
++ return 0;
++}
++
++/*
++ * slabinfo_op - iterator that generates /proc/slabinfo
++ *
++ * Output layout:
++ * cache-name
++ * num-active-objs
++ * total-objs
++ * object size
++ * num-active-slabs
++ * total-slabs
++ * num-pages-per-slab
++ * + further values on SMP and with statistics enabled
++ */
++
++const struct seq_operations slabinfo_op = {
++ .start = s_start,
++ .next = s_next,
++ .stop = s_stop,
++ .show = s_show,
++};
++
++#define MAX_SLABINFO_WRITE 128
++/**
++ * slabinfo_write - Tuning for the slab allocator
++ * @file: unused
++ * @buffer: user buffer
++ * @count: data length
++ * @ppos: unused
++ */
++ssize_t slabinfo_write(struct file *file, const char __user * buffer,
++ size_t count, loff_t *ppos)
++{
++ char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
++ int limit, batchcount, shared, res;
++ struct kmem_cache *cachep;
++
++ if (count > MAX_SLABINFO_WRITE)
++ return -EINVAL;
++ if (copy_from_user(&kbuf, buffer, count))
++ return -EFAULT;
++ kbuf[MAX_SLABINFO_WRITE] = '\0';
++
++ tmp = strchr(kbuf, ' ');
++ if (!tmp)
++ return -EINVAL;
++ *tmp = '\0';
++ tmp++;
++ if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
++ return -EINVAL;
++
++ /* Find the cache in the chain of caches. */
++ mutex_lock(&cache_chain_mutex);
++ res = -EINVAL;
++ list_for_each_entry(cachep, &cache_chain, next) {
++ if (!strcmp(cachep->name, kbuf)) {
++ if (limit < 1 || batchcount < 1 ||
++ batchcount > limit || shared < 0) {
++ res = 0;
++ } else {
++ res = do_tune_cpucache(cachep, limit,
++ batchcount, shared);
++ }
++ break;
++ }
++ }
++ mutex_unlock(&cache_chain_mutex);
++ if (res >= 0)
++ res = count;
++ return res;
++}
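++
++/*
++ * Illustrative tuning input (an assumption): writing the single line
++ *
++ *	dentry 120 60 8
++ *
++ * to /proc/slabinfo sets limit = 120, batchcount = 60 and shared = 8
++ * for the "dentry" cache via do_tune_cpucache() above.
++ */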
++
++#ifdef CONFIG_DEBUG_SLAB_LEAK
++
++static void *leaks_start(struct seq_file *m, loff_t *pos)
++{
++ loff_t n = *pos;
++ struct list_head *p;
++
++ mutex_lock(&cache_chain_mutex);
++ p = cache_chain.next;
++ while (n--) {
++ p = p->next;
++ if (p == &cache_chain)
++ return NULL;
++ }
++ return list_entry(p, struct kmem_cache, next);
++}
++
++static inline int add_caller(unsigned long *n, unsigned long v)
++{
++ unsigned long *p;
++ int l;
++ if (!v)
++ return 1;
++ l = n[1];
++ p = n + 2;
++ while (l) {
++ int i = l/2;
++ unsigned long *q = p + 2 * i;
++ if (*q == v) {
++ q[1]++;
++ return 1;
++ }
++ if (*q > v) {
++ l = i;
++ } else {
++ p = q + 2;
++ l -= i + 1;
++ }
++ }
++ if (++n[1] == n[0])
++ return 0;
++ memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
++ p[0] = v;
++ p[1] = 1;
++ return 1;
++}
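++
++/*
++ * Layout note (illustrative): n[0] is the capacity in entries, n[1]
++ * the number of distinct callers recorded so far, and n[2..] holds
++ * (address, count) pairs sorted by address, so add_caller() above is
++ * a binary search followed by an insertion-style memmove.
++ */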
++
++static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
++{
++ void *p;
++ int i;
++ if (n[0] == n[1])
++ return;
++ for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
++ if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
++ continue;
++ if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
++ return;
++ }
++}
++
++static void show_symbol(struct seq_file *m, unsigned long address)
++{
++#ifdef CONFIG_KALLSYMS
++ unsigned long offset, size;
++ char modname[MODULE_NAME_LEN + 1], name[KSYM_NAME_LEN + 1];
++
++ if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
++ seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
++ if (modname[0])
++ seq_printf(m, " [%s]", modname);
++ return;
++ }
++#endif
++ seq_printf(m, "%p", (void *)address);
++}
++
++static int leaks_show(struct seq_file *m, void *p)
++{
++ struct kmem_cache *cachep = p;
++ struct slab *slabp;
++ struct kmem_list3 *l3;
++ const char *name;
++ unsigned long *n = m->private;
++ int node;
++ int i;
++
++ if (!(cachep->flags & SLAB_STORE_USER))
++ return 0;
++ if (!(cachep->flags & SLAB_RED_ZONE))
++ return 0;
++
++ /* OK, we can do it */
++
++ n[1] = 0;
++
++ for_each_online_node(node) {
++ l3 = cachep->nodelists[node];
++ if (!l3)
++ continue;
++
++ check_irq_on();
++ spin_lock_irq(&l3->list_lock);
++
++ list_for_each_entry(slabp, &l3->slabs_full, list)
++ handle_slab(n, cachep, slabp);
++ list_for_each_entry(slabp, &l3->slabs_partial, list)
++ handle_slab(n, cachep, slabp);
++ spin_unlock_irq(&l3->list_lock);
++ }
++ name = cachep->name;
++ if (n[0] == n[1]) {
++ /* Increase the buffer size */
++ mutex_unlock(&cache_chain_mutex);
++ m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
++ if (!m->private) {
++ /* Too bad, we are really out */
++ m->private = n;
++ mutex_lock(&cache_chain_mutex);
++ return -ENOMEM;
++ }
++ *(unsigned long *)m->private = n[0] * 2;
++ kfree(n);
++ mutex_lock(&cache_chain_mutex);
++ /* Now make sure this entry will be retried */
++ m->count = m->size;
++ return 0;
++ }
++ for (i = 0; i < n[1]; i++) {
++ seq_printf(m, "%s: %lu ", name, n[2*i+3]);
++ show_symbol(m, n[2*i+2]);
++ seq_putc(m, '\n');
++ }
++
++ return 0;
++}
++
++const struct seq_operations slabstats_op = {
++ .start = leaks_start,
++ .next = s_next,
++ .stop = s_stop,
++ .show = leaks_show,
++};
++#endif
++#endif
++
++/**
++ * ksize - get the actual amount of memory allocated for a given object
++ * @objp: Pointer to the object
++ *
++ * kmalloc may internally round up allocations and return more memory
++ * than requested. ksize() can be used to determine the actual amount of
++ * memory allocated. The caller may use this additional memory, even though
++ * a smaller amount of memory was initially specified with the kmalloc call.
++ * The caller must guarantee that objp points to a valid object previously
++ * allocated with either kmalloc() or kmem_cache_alloc(). The object
++ * must not be freed during the duration of the call.
++ */
++size_t ksize(const void *objp)
++{
++ if (unlikely(objp == NULL))
++ return 0;
++
++ return obj_size(virt_to_cache(objp));
++}
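++
++/*
++ * Illustrative example (an assumption about the configuration): on a
++ * typical setup kmalloc(30, GFP_KERNEL) is served from the 32-byte
++ * general cache, so ksize() on the result returns 32 and the caller
++ * may safely use the extra 2 bytes.
++ */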